262 files changed, 1094 insertions, 1403 deletions
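The first hunks below (Documentation/kbuild/makefiles.txt, the top-level Makefile and arch/Kconfig) introduce an optional arch post-link pass: if arch/xxx/Makefile.postlink exists, the top-level Makefile runs it on the freshly linked vmlinux (and on modules), and the same file must also handle the clean target. As a rough, hypothetical illustration only — this file is not part of the commit, and everything beyond the vmlinux and clean targets named in the documentation hunk is an assumption — such a makefile might look like:

# Hypothetical sketch of an arch/xxx/Makefile.postlink (not from this commit).
# The top-level Makefile invokes it as "$(MAKE) -f arch/xxx/Makefile.postlink vmlinux"
# after the final link, and as "... clean" from vmlinuxclean, so both targets
# must exist.

PHONY := __archpost
__archpost:

# Arch-specific sanity check on the linked vmlinux.  A real architecture
# would call its own tool here (the documentation cites powerpc checking
# relocations); "true" is just a stand-in placeholder.
vmlinux: FORCE
	@echo '  POSTLINK $@'
	@true

# Per the documentation hunk, modules (*.ko) are post-linked too; the hook
# that passes them in is outside this excerpt, so this rule is an assumption.
%.ko: FORCE
	@true

clean:
	@true

PHONY += FORCE clean
FORCE:

.PHONY: $(PHONY)

Keeping this as a separate per-arch makefile matches the documentation's rationale: the pass runs after kallsyms generation, so an architecture that only needs to inspect or fix up the final image can do it here instead of patching the kallsyms/link-vmlinux.sh machinery.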
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index 385a5ef41c17..9b9c4797fc55 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -41,6 +41,7 @@ This document describes the Linux kernel Makefiles. --- 6.8 Custom kbuild commands --- 6.9 Preprocessing linker scripts --- 6.10 Generic header files + --- 6.11 Post-link pass === 7 Kbuild syntax for exported headers --- 7.1 header-y @@ -1237,6 +1238,21 @@ When kbuild executes, the following steps are followed (roughly): to list the file in the Kbuild file. See "7.4 generic-y" for further info on syntax etc. +--- 6.11 Post-link pass + + If the file arch/xxx/Makefile.postlink exists, this makefile + will be invoked for post-link objects (vmlinux and modules.ko) + for architectures to run post-link passes on. Must also handle + the clean target. + + This pass runs after kallsyms generation. If the architecture + needs to modify symbol locations, rather than manipulate the + kallsyms, it may be easier to add another postlink target for + .tmp_vmlinux? targets to be called from link-vmlinux.sh. + + For example, powerpc uses this to check relocation sanity of + the linked vmlinux file. + === 7 Kbuild syntax for exported headers The kernel includes a set of headers that is exported to userspace. @@ -623,6 +623,11 @@ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) +ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION +KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,) +KBUILD_CFLAGS += $(call cc-option,-fdata-sections,) +endif + ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += -Os else @@ -803,6 +808,10 @@ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\ KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID) LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID) +ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION +LDFLAGS_vmlinux += $(call ld-option, --gc-sections,) +endif + ifeq ($(CONFIG_STRIP_ASM_SYMS),y) LDFLAGS_vmlinux += $(call ld-option, -X,) endif @@ -942,9 +951,12 @@ endif include/generated/autoksyms.h: FORCE $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh true -# Final link of vmlinux - cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -quiet_cmd_link-vmlinux = LINK $@ +ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink) + +# Final link of vmlinux with optional arch pass after final link + cmd_link-vmlinux = \ + $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \ + $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE +$(call if_changed,link-vmlinux) @@ -1271,6 +1283,7 @@ $(clean-dirs): vmlinuxclean: $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean + $(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean) clean: archclean vmlinuxclean diff --git a/arch/Kconfig b/arch/Kconfig index 180ea33164dc..11d349561ece 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -450,6 +450,27 @@ config CC_STACKPROTECTOR_STRONG endchoice +config THIN_ARCHIVES + bool + help + Select this if the architecture wants to use thin archives + instead of ld -r to create the built-in.o files. + +config LD_DEAD_CODE_DATA_ELIMINATION + bool + help + Select this if the architecture wants to do dead code and + data elimination with the linker by compiling with + -ffunction-sections -fdata-sections and linking with + --gc-sections. 
+ + This requires that the arch annotates or otherwise protects + its external entry points from being discarded. Linker scripts + must also merge .text.*, .data.*, and .bss.* correctly into + output sections. Care must be taken not to pull in unrelated + sections (e.g., '.text.init'). Typically '.' in section names + is used to distinguish them from label names / C identifiers. + config HAVE_ARCH_WITHIN_STACK_FRAMES bool help diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index ffd9cf5ec8c4..bf8475ce85ee 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -3,6 +3,7 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h +generic-y += export.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index 3ecac0106c8a..8ce13d7a2ad3 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile @@ -8,7 +8,7 @@ ccflags-y := -Wno-sign-compare obj-y := entry.o traps.o process.o osf_sys.o irq.o \ irq_alpha.o signal.o setup.o ptrace.o time.o \ - alpha_ksyms.o systbls.o err_common.o io.o + systbls.o err_common.o io.o obj-$(CONFIG_VGA_HOSE) += console.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c deleted file mode 100644 index f4c7ab6f43b0..000000000000 --- a/arch/alpha/kernel/alpha_ksyms.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * linux/arch/alpha/kernel/alpha_ksyms.c - * - * Export the alpha-specific functions that are needed for loadable - * modules. - */ - -#include <linux/module.h> -#include <asm/console.h> -#include <asm/uaccess.h> -#include <asm/checksum.h> -#include <asm/fpu.h> -#include <asm/machvec.h> - -#include <linux/syscalls.h> - -/* these are C runtime functions with special calling conventions: */ -extern void __divl (void); -extern void __reml (void); -extern void __divq (void); -extern void __remq (void); -extern void __divlu (void); -extern void __remlu (void); -extern void __divqu (void); -extern void __remqu (void); - -EXPORT_SYMBOL(alpha_mv); -EXPORT_SYMBOL(callback_getenv); -EXPORT_SYMBOL(callback_setenv); -EXPORT_SYMBOL(callback_save_env); - -/* platform dependent support */ -EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strcpy); -EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strncpy); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(__memcpy); -EXPORT_SYMBOL(__memset); -EXPORT_SYMBOL(___memset); -EXPORT_SYMBOL(__memsetw); -EXPORT_SYMBOL(__constant_c_memset); -EXPORT_SYMBOL(copy_page); -EXPORT_SYMBOL(clear_page); - -EXPORT_SYMBOL(alpha_read_fp_reg); -EXPORT_SYMBOL(alpha_read_fp_reg_s); -EXPORT_SYMBOL(alpha_write_fp_reg); -EXPORT_SYMBOL(alpha_write_fp_reg_s); - -/* Networking helper routines. */ -EXPORT_SYMBOL(csum_tcpudp_magic); -EXPORT_SYMBOL(ip_compute_csum); -EXPORT_SYMBOL(ip_fast_csum); -EXPORT_SYMBOL(csum_partial_copy_nocheck); -EXPORT_SYMBOL(csum_partial_copy_from_user); -EXPORT_SYMBOL(csum_ipv6_magic); - -#ifdef CONFIG_MATHEMU_MODULE -extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long); -extern long (*alpha_fp_emul) (unsigned long pc); -EXPORT_SYMBOL(alpha_fp_emul_imprecise); -EXPORT_SYMBOL(alpha_fp_emul); -#endif - -/* - * The following are specially called from the uaccess assembly stubs. - */ -EXPORT_SYMBOL(__copy_user); -EXPORT_SYMBOL(__do_clear_user); - -/* - * SMP-specific symbols. 
- */ - -#ifdef CONFIG_SMP -EXPORT_SYMBOL(_atomic_dec_and_lock); -#endif /* CONFIG_SMP */ - -/* - * The following are special because they're not called - * explicitly (the C compiler or assembler generates them in - * response to division operations). Fortunately, their - * interface isn't gonna change any time soon now, so it's OK - * to leave it out of version control. - */ -# undef memcpy -# undef memset -EXPORT_SYMBOL(__divl); -EXPORT_SYMBOL(__divlu); -EXPORT_SYMBOL(__divq); -EXPORT_SYMBOL(__divqu); -EXPORT_SYMBOL(__reml); -EXPORT_SYMBOL(__remlu); -EXPORT_SYMBOL(__remq); -EXPORT_SYMBOL(__remqu); -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memchr); diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h index d3398f6ab74c..b7d69604b6d2 100644 --- a/arch/alpha/kernel/machvec_impl.h +++ b/arch/alpha/kernel/machvec_impl.h @@ -144,9 +144,11 @@ else beforehand. Fine. We'll do it ourselves. */ #if 0 #define ALIAS_MV(system) \ - struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); + struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); \ + EXPORT_SYMBOL(alpha_mv); #else #define ALIAS_MV(system) \ - asm(".global alpha_mv\nalpha_mv = " #system "_mv"); + asm(".global alpha_mv\nalpha_mv = " #system "_mv"); \ + EXPORT_SYMBOL(alpha_mv); #endif #endif /* GENERIC */ diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index b20af76f12c1..4811e54069fc 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -115,6 +115,7 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE; #ifdef CONFIG_ALPHA_GENERIC struct alpha_machine_vector alpha_mv; +EXPORT_SYMBOL(alpha_mv); #endif #ifndef alpha_using_srm diff --git a/arch/alpha/lib/callback_srm.S b/arch/alpha/lib/callback_srm.S index 8804bec2c644..6093addc931a 100644 --- a/arch/alpha/lib/callback_srm.S +++ b/arch/alpha/lib/callback_srm.S @@ -3,6 +3,7 @@ */ #include <asm/console.h> +#include <asm/export.h> .text #define HWRPB_CRB_OFFSET 0xc0 @@ -92,6 +93,10 @@ CALLBACK(reset_env, CCB_RESET_ENV, 4) CALLBACK(save_env, CCB_SAVE_ENV, 1) CALLBACK(pswitch, CCB_PSWITCH, 3) CALLBACK(bios_emul, CCB_BIOS_EMUL, 5) + +EXPORT_SYMBOL(callback_getenv) +EXPORT_SYMBOL(callback_setenv) +EXPORT_SYMBOL(callback_save_env) .data __alpha_using_srm: # For use by bootpheader diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c index 377f9e34eb97..b57f8007db14 100644 --- a/arch/alpha/lib/checksum.c +++ b/arch/alpha/lib/checksum.c @@ -48,6 +48,7 @@ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, (__force u64)saddr + (__force u64)daddr + (__force u64)sum + ((len + proto) << 8)); } +EXPORT_SYMBOL(csum_tcpudp_magic); __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum) @@ -144,6 +145,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { return (__force __sum16)~do_csum(iph,ihl*4); } +EXPORT_SYMBOL(ip_fast_csum); /* * computes the checksum of a memory block at buff, length len, @@ -178,3 +180,4 @@ __sum16 ip_compute_csum(const void *buff, int len) { return (__force __sum16)~from64to16(do_csum(buff,len)); } +EXPORT_SYMBOL(ip_compute_csum); diff --git a/arch/alpha/lib/clear_page.S b/arch/alpha/lib/clear_page.S index a221ae266e29..263d7393c0e7 100644 --- a/arch/alpha/lib/clear_page.S +++ b/arch/alpha/lib/clear_page.S @@ -3,7 +3,7 @@ * * Zero an entire page. 
*/ - +#include <asm/export.h> .text .align 4 .global clear_page @@ -37,3 +37,4 @@ clear_page: nop .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S index 8860316c1957..bf5b931866ba 100644 --- a/arch/alpha/lib/clear_user.S +++ b/arch/alpha/lib/clear_user.S @@ -24,6 +24,7 @@ * Clobbers: * $1,$2,$3,$4,$5,$6 */ +#include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EX(x,y...) \ @@ -111,3 +112,4 @@ $exception: ret $31, ($28), 1 # .. e1 : .end __do_clear_user + EXPORT_SYMBOL(__do_clear_user) diff --git a/arch/alpha/lib/copy_page.S b/arch/alpha/lib/copy_page.S index 9f3b97459cc6..2ee0bd0508c5 100644 --- a/arch/alpha/lib/copy_page.S +++ b/arch/alpha/lib/copy_page.S @@ -3,7 +3,7 @@ * * Copy an entire page. */ - +#include <asm/export.h> .text .align 4 .global copy_page @@ -47,3 +47,4 @@ copy_page: nop .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S index 6f3fab9eb434..2238068b1b40 100644 --- a/arch/alpha/lib/copy_user.S +++ b/arch/alpha/lib/copy_user.S @@ -26,6 +26,8 @@ * $1,$2,$3,$4,$5,$6,$7 */ +#include <asm/export.h> + /* Allow an exception for an insn; exit if we get one. */ #define EXI(x,y...) \ 99: x,##y; \ @@ -143,3 +145,4 @@ $101: ret $31,($28),1 .end __copy_user +EXPORT_SYMBOL(__copy_user) diff --git a/arch/alpha/lib/csum_ipv6_magic.S b/arch/alpha/lib/csum_ipv6_magic.S index 2c2acb96deb6..e74b4544b0cc 100644 --- a/arch/alpha/lib/csum_ipv6_magic.S +++ b/arch/alpha/lib/csum_ipv6_magic.S @@ -12,6 +12,7 @@ * added by Ivan Kokshaysky <ink@jurassic.park.msu.ru> */ +#include <asm/export.h> .globl csum_ipv6_magic .align 4 .ent csum_ipv6_magic @@ -113,3 +114,4 @@ csum_ipv6_magic: ret # .. e1 : .end csum_ipv6_magic + EXPORT_SYMBOL(csum_ipv6_magic) diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index 5675dca8dbb1..b4ff3b683bcd 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -374,6 +374,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len, } return (__force __wsum)checksum; } +EXPORT_SYMBOL(csum_partial_copy_from_user); __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) @@ -386,3 +387,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) set_fs(oldfs); return checksum; } +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c index f9f5fe830e9f..4221b40167ee 100644 --- a/arch/alpha/lib/dec_and_lock.c +++ b/arch/alpha/lib/dec_and_lock.c @@ -7,6 +7,7 @@ #include <linux/spinlock.h> #include <linux/atomic.h> +#include <linux/export.h> asm (".text \n\ .global _atomic_dec_and_lock \n\ @@ -39,3 +40,4 @@ static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock) spin_unlock(lock); return 0; } +EXPORT_SYMBOL(_atomic_dec_and_lock); diff --git a/arch/alpha/lib/divide.S b/arch/alpha/lib/divide.S index 2d1a0484a99e..1e33bd127621 100644 --- a/arch/alpha/lib/divide.S +++ b/arch/alpha/lib/divide.S @@ -45,6 +45,7 @@ * $28 - compare status */ +#include <asm/export.h> #define halt .long 0 /* @@ -151,6 +152,7 @@ ufunction: addq $30,STACK,$30 ret $31,($23),1 .end ufunction +EXPORT_SYMBOL(ufunction) /* * Uhh.. Ugly signed division. 
I'd rather not have it at all, but @@ -193,3 +195,4 @@ sfunction: addq $30,STACK,$30 ret $31,($23),1 .end sfunction +EXPORT_SYMBOL(sfunction) diff --git a/arch/alpha/lib/ev6-clear_page.S b/arch/alpha/lib/ev6-clear_page.S index adf4f7be0e2b..abe99e69a194 100644 --- a/arch/alpha/lib/ev6-clear_page.S +++ b/arch/alpha/lib/ev6-clear_page.S @@ -3,7 +3,7 @@ * * Zero an entire page. */ - +#include <asm/export.h> .text .align 4 .global clear_page @@ -52,3 +52,4 @@ clear_page: nop .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S index 4f42a16b7f53..05bef6b50598 100644 --- a/arch/alpha/lib/ev6-clear_user.S +++ b/arch/alpha/lib/ev6-clear_user.S @@ -43,6 +43,7 @@ * want to leave a hole (and we also want to avoid repeating lots of work) */ +#include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EX(x,y...) \ 99: x,##y; \ @@ -222,4 +223,4 @@ $exception: # Destination for exception recovery(?) nop # .. E .. .. : ret $31, ($28), 1 # L0 .. .. .. : L U L U .end __do_clear_user - + EXPORT_SYMBOL(__do_clear_user) diff --git a/arch/alpha/lib/ev6-copy_page.S b/arch/alpha/lib/ev6-copy_page.S index b789db192754..77935061bddb 100644 --- a/arch/alpha/lib/ev6-copy_page.S +++ b/arch/alpha/lib/ev6-copy_page.S @@ -56,7 +56,7 @@ destination pages are in the dcache, but it is my guess that this is less important than the dcache miss case. */ - +#include <asm/export.h> .text .align 4 .global copy_page @@ -201,3 +201,4 @@ copy_page: nop .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S index db42ffe9c350..debcc3b6b704 100644 --- a/arch/alpha/lib/ev6-copy_user.S +++ b/arch/alpha/lib/ev6-copy_user.S @@ -37,6 +37,7 @@ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ +#include <asm/export.h> /* Allow an exception for an insn; exit if we get one. */ #define EXI(x,y...) \ 99: x,##y; \ @@ -256,4 +257,4 @@ $101: ret $31,($28),1 # L0 .end __copy_user - + EXPORT_SYMBOL(__copy_user) diff --git a/arch/alpha/lib/ev6-csum_ipv6_magic.S b/arch/alpha/lib/ev6-csum_ipv6_magic.S index fc0bc399f872..de62627ac4fe 100644 --- a/arch/alpha/lib/ev6-csum_ipv6_magic.S +++ b/arch/alpha/lib/ev6-csum_ipv6_magic.S @@ -52,6 +52,7 @@ * may cause additional delay in rare cases (load-load replay traps). */ +#include <asm/export.h> .globl csum_ipv6_magic .align 4 .ent csum_ipv6_magic @@ -148,3 +149,4 @@ csum_ipv6_magic: ret # L0 : L U L U .end csum_ipv6_magic + EXPORT_SYMBOL(csum_ipv6_magic) diff --git a/arch/alpha/lib/ev6-divide.S b/arch/alpha/lib/ev6-divide.S index 2a82b9be93fa..d18dc0e96e3d 100644 --- a/arch/alpha/lib/ev6-divide.S +++ b/arch/alpha/lib/ev6-divide.S @@ -55,6 +55,7 @@ * Try not to change the actual algorithm if possible for consistency. */ +#include <asm/export.h> #define halt .long 0 /* @@ -205,6 +206,7 @@ ufunction: addq $30,STACK,$30 # E : ret $31,($23),1 # L0 : L U U L .end ufunction +EXPORT_SYMBOL(ufunction) /* * Uhh.. Ugly signed division. I'd rather not have it at all, but @@ -257,3 +259,4 @@ sfunction: addq $30,STACK,$30 # E : ret $31,($23),1 # L0 : L U U L .end sfunction +EXPORT_SYMBOL(sfunction) diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S index 1a5f71b9d8b1..419adc53ccb4 100644 --- a/arch/alpha/lib/ev6-memchr.S +++ b/arch/alpha/lib/ev6-memchr.S @@ -27,7 +27,7 @@ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. 
*/ - +#include <asm/export.h> .set noreorder .set noat @@ -189,3 +189,4 @@ $not_found: ret # L0 : .end memchr + EXPORT_SYMBOL(memchr) diff --git a/arch/alpha/lib/ev6-memcpy.S b/arch/alpha/lib/ev6-memcpy.S index 52b37b0f2af5..b19798b2efc0 100644 --- a/arch/alpha/lib/ev6-memcpy.S +++ b/arch/alpha/lib/ev6-memcpy.S @@ -19,7 +19,7 @@ * Temp usage notes: * $1,$2, - scratch */ - +#include <asm/export.h> .set noreorder .set noat @@ -242,6 +242,7 @@ $nomoredata: nop # E : .end memcpy + EXPORT_SYMBOL(memcpy) /* For backwards module compatibility. */ __memcpy = memcpy diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S index 356bb2fdd705..fed21c6893e8 100644 --- a/arch/alpha/lib/ev6-memset.S +++ b/arch/alpha/lib/ev6-memset.S @@ -26,7 +26,7 @@ * as fixes will need to be made in multiple places. The performance gain * is worth it. */ - +#include <asm/export.h> .set noat .set noreorder .text @@ -229,6 +229,7 @@ end_b: nop ret $31,($26),1 # L0 : .end ___memset + EXPORT_SYMBOL(___memset) /* * This is the original body of code, prior to replication and @@ -406,6 +407,7 @@ end: nop ret $31,($26),1 # L0 : .end __constant_c_memset + EXPORT_SYMBOL(__constant_c_memset) /* * This is a replicant of the __constant_c_memset code, rescheduled @@ -594,6 +596,9 @@ end_w: ret $31,($26),1 # L0 : .end __memsetw + EXPORT_SYMBOL(__memsetw) memset = ___memset __memset = ___memset + EXPORT_SYMBOL(memset) + EXPORT_SYMBOL(__memset) diff --git a/arch/alpha/lib/ev67-strcat.S b/arch/alpha/lib/ev67-strcat.S index c426fe3ed72f..b69f60419be1 100644 --- a/arch/alpha/lib/ev67-strcat.S +++ b/arch/alpha/lib/ev67-strcat.S @@ -19,7 +19,7 @@ * string once. */ - +#include <asm/export.h> .text .align 4 @@ -52,3 +52,4 @@ $found: cttz $2, $3 # U0 : br __stxcpy # L0 : .end strcat + EXPORT_SYMBOL(strcat) diff --git a/arch/alpha/lib/ev67-strchr.S b/arch/alpha/lib/ev67-strchr.S index fbb7b4ffade9..ea8f2f35db9c 100644 --- a/arch/alpha/lib/ev67-strchr.S +++ b/arch/alpha/lib/ev67-strchr.S @@ -15,7 +15,7 @@ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ - +#include <asm/export.h> #include <asm/regdef.h> .set noreorder @@ -86,3 +86,4 @@ $found: negq t0, t1 # E : clear all but least set bit ret # L0 : .end strchr + EXPORT_SYMBOL(strchr) diff --git a/arch/alpha/lib/ev67-strlen.S b/arch/alpha/lib/ev67-strlen.S index 503928072523..736fd41884a8 100644 --- a/arch/alpha/lib/ev67-strlen.S +++ b/arch/alpha/lib/ev67-strlen.S @@ -17,7 +17,7 @@ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ - +#include <asm/export.h> .set noreorder .set noat @@ -47,3 +47,4 @@ $found: ret $31, ($26) # L0 : .end strlen + EXPORT_SYMBOL(strlen) diff --git a/arch/alpha/lib/ev67-strncat.S b/arch/alpha/lib/ev67-strncat.S index 4ae716cd2bfb..cd35cbade73a 100644 --- a/arch/alpha/lib/ev67-strncat.S +++ b/arch/alpha/lib/ev67-strncat.S @@ -20,7 +20,7 @@ * Try not to change the actual algorithm if possible for consistency. 
*/ - +#include <asm/export.h> .text .align 4 @@ -92,3 +92,4 @@ $zerocount: ret # L0 : .end strncat + EXPORT_SYMBOL(strncat) diff --git a/arch/alpha/lib/ev67-strrchr.S b/arch/alpha/lib/ev67-strrchr.S index dd0d8c6b9f59..747455f0328c 100644 --- a/arch/alpha/lib/ev67-strrchr.S +++ b/arch/alpha/lib/ev67-strrchr.S @@ -18,7 +18,7 @@ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ - +#include <asm/export.h> #include <asm/regdef.h> .set noreorder @@ -107,3 +107,4 @@ $eos: nop .end strrchr + EXPORT_SYMBOL(strrchr) diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c index 05017ba34c3c..4aa6dbfa14ee 100644 --- a/arch/alpha/lib/fpreg.c +++ b/arch/alpha/lib/fpreg.c @@ -4,6 +4,9 @@ * (C) Copyright 1998 Linus Torvalds */ +#include <linux/compiler.h> +#include <linux/export.h> + #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); #else @@ -52,6 +55,7 @@ alpha_read_fp_reg (unsigned long reg) } return val; } +EXPORT_SYMBOL(alpha_read_fp_reg); #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val)); @@ -97,6 +101,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val) case 31: LDT(31, val); break; } } +EXPORT_SYMBOL(alpha_write_fp_reg); #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val)); @@ -146,6 +151,7 @@ alpha_read_fp_reg_s (unsigned long reg) } return val; } +EXPORT_SYMBOL(alpha_read_fp_reg_s); #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val)); @@ -191,3 +197,4 @@ alpha_write_fp_reg_s (unsigned long reg, unsigned long val) case 31: LDS(31, val); break; } } +EXPORT_SYMBOL(alpha_write_fp_reg_s); diff --git a/arch/alpha/lib/memchr.S b/arch/alpha/lib/memchr.S index 14427eeb555e..c13d3eca2e05 100644 --- a/arch/alpha/lib/memchr.S +++ b/arch/alpha/lib/memchr.S @@ -31,7 +31,7 @@ For correctness consider that: - only minimum number of quadwords may be accessed - the third argument is an unsigned long */ - +#include <asm/export.h> .set noreorder .set noat @@ -162,3 +162,4 @@ $not_found: ret # .. e1 : .end memchr + EXPORT_SYMBOL(memchr) diff --git a/arch/alpha/lib/memcpy.c b/arch/alpha/lib/memcpy.c index 64083fc73238..57d9291ad172 100644 --- a/arch/alpha/lib/memcpy.c +++ b/arch/alpha/lib/memcpy.c @@ -16,6 +16,7 @@ */ #include <linux/types.h> +#include <linux/export.h> /* * This should be done in one go with ldq_u*2/mask/stq_u. Do it @@ -158,6 +159,4 @@ void * memcpy(void * dest, const void *src, size_t n) __memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n); return dest; } - -/* For backward modules compatibility, define __memcpy. */ -asm("__memcpy = memcpy; .globl __memcpy"); +EXPORT_SYMBOL(memcpy); diff --git a/arch/alpha/lib/memmove.S b/arch/alpha/lib/memmove.S index eb3b6e02242f..6872c85cb5e5 100644 --- a/arch/alpha/lib/memmove.S +++ b/arch/alpha/lib/memmove.S @@ -6,7 +6,7 @@ * This is hand-massaged output from the original memcpy.c. We defer to * memcpy whenever possible; the backwards copy loops are not unrolled. 
*/ - +#include <asm/export.h> .set noat .set noreorder .text @@ -179,3 +179,4 @@ $egress: nop .end memmove + EXPORT_SYMBOL(memmove) diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S index 76ccc6d1f364..89a26f5e89de 100644 --- a/arch/alpha/lib/memset.S +++ b/arch/alpha/lib/memset.S @@ -13,7 +13,7 @@ * The scheduling comments are according to the EV5 documentation (and done by * hand, so they might well be incorrect, please do tell me about it..) */ - +#include <asm/export.h> .set noat .set noreorder .text @@ -106,6 +106,8 @@ within_one_quad: end: ret $31,($26),1 /* E1 */ .end ___memset +EXPORT_SYMBOL(___memset) +EXPORT_SYMBOL(__constant_c_memset) .align 5 .ent __memsetw @@ -122,6 +124,9 @@ __memsetw: br __constant_c_memset /* .. E1 */ .end __memsetw +EXPORT_SYMBOL(__memsetw) memset = ___memset __memset = ___memset + EXPORT_SYMBOL(memset) + EXPORT_SYMBOL(__memset) diff --git a/arch/alpha/lib/strcat.S b/arch/alpha/lib/strcat.S index 393f50384878..249837b03d4b 100644 --- a/arch/alpha/lib/strcat.S +++ b/arch/alpha/lib/strcat.S @@ -4,6 +4,7 @@ * * Append a null-terminated string from SRC to DST. */ +#include <asm/export.h> .text @@ -50,3 +51,4 @@ $found: negq $2, $3 # clear all but least set bit br __stxcpy .end strcat +EXPORT_SYMBOL(strcat); diff --git a/arch/alpha/lib/strchr.S b/arch/alpha/lib/strchr.S index 011a175e8329..7412a173ea39 100644 --- a/arch/alpha/lib/strchr.S +++ b/arch/alpha/lib/strchr.S @@ -5,7 +5,7 @@ * Return the address of a given character within a null-terminated * string, or null if it is not found. */ - +#include <asm/export.h> #include <asm/regdef.h> .set noreorder @@ -68,3 +68,4 @@ $retnull: ret # .. e1 : .end strchr + EXPORT_SYMBOL(strchr) diff --git a/arch/alpha/lib/strcpy.S b/arch/alpha/lib/strcpy.S index e0728e4ad21f..98deae1e4d08 100644 --- a/arch/alpha/lib/strcpy.S +++ b/arch/alpha/lib/strcpy.S @@ -5,7 +5,7 @@ * Copy a null-terminated string from SRC to DST. Return a pointer * to the null-terminator in the source. */ - +#include <asm/export.h> .text .align 3 @@ -21,3 +21,4 @@ strcpy: br __stxcpy # do the copy .end strcpy + EXPORT_SYMBOL(strcpy) diff --git a/arch/alpha/lib/strlen.S b/arch/alpha/lib/strlen.S index fe63353de152..79c416f71bac 100644 --- a/arch/alpha/lib/strlen.S +++ b/arch/alpha/lib/strlen.S @@ -11,7 +11,7 @@ * do this instead of the 9 instructions that * binary search needs). */ - +#include <asm/export.h> .set noreorder .set noat @@ -55,3 +55,4 @@ done: subq $0, $16, $0 ret $31, ($26) .end strlen + EXPORT_SYMBOL(strlen) diff --git a/arch/alpha/lib/strncat.S b/arch/alpha/lib/strncat.S index a8278163c972..6c29ea60869a 100644 --- a/arch/alpha/lib/strncat.S +++ b/arch/alpha/lib/strncat.S @@ -9,7 +9,7 @@ * past count, whereas libc may write to count+1. This follows the generic * implementation in lib/string.c and is, IMHO, more sensible. */ - +#include <asm/export.h> .text .align 3 @@ -82,3 +82,4 @@ $zerocount: ret .end strncat + EXPORT_SYMBOL(strncat) diff --git a/arch/alpha/lib/strncpy.S b/arch/alpha/lib/strncpy.S index a46f7f3ad8c7..e102cf1567dd 100644 --- a/arch/alpha/lib/strncpy.S +++ b/arch/alpha/lib/strncpy.S @@ -10,7 +10,7 @@ * version has cropped that bit o' nastiness as well as assuming that * __stxncpy is in range of a branch. 
*/ - +#include <asm/export.h> .set noat .set noreorder @@ -79,3 +79,4 @@ $zerolen: ret .end strncpy + EXPORT_SYMBOL(strncpy) diff --git a/arch/alpha/lib/strrchr.S b/arch/alpha/lib/strrchr.S index 1970dc07cfd1..4bc6cb4b9812 100644 --- a/arch/alpha/lib/strrchr.S +++ b/arch/alpha/lib/strrchr.S @@ -5,7 +5,7 @@ * Return the address of the last occurrence of a given character * within a null-terminated string, or null if it is not found. */ - +#include <asm/export.h> #include <asm/regdef.h> .set noreorder @@ -85,3 +85,4 @@ $retnull: ret # .. e1 : .end strrchr + EXPORT_SYMBOL(strrchr) diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 55e0e3ea9cb6..0745538b26d3 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -8,6 +8,7 @@ generic-y += early_ioremap.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += export.h generic-y += ioctl.h generic-y += ipcbuf.h generic-y += irq_regs.h diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index ad325a8c7e1e..68c2c097cffe 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -33,7 +33,7 @@ endif obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_ISA_DMA_API) += dma.o obj-$(CONFIG_FIQ) += fiq.o fiqasm.o -obj-$(CONFIG_MODULES) += armksyms.o module.o +obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c deleted file mode 100644 index 7e45f69a0ddc..000000000000 --- a/arch/arm/kernel/armksyms.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * linux/arch/arm/kernel/armksyms.c - * - * Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <linux/export.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/cryptohash.h> -#include <linux/delay.h> -#include <linux/in6.h> -#include <linux/syscalls.h> -#include <linux/uaccess.h> -#include <linux/io.h> -#include <linux/arm-smccc.h> - -#include <asm/checksum.h> -#include <asm/ftrace.h> - -/* - * libgcc functions - functions that are used internally by the - * compiler... (prototypes are not correct though, but that - * doesn't really matter since they're not versioned). 
- */ -extern void __ashldi3(void); -extern void __ashrdi3(void); -extern void __divsi3(void); -extern void __lshrdi3(void); -extern void __modsi3(void); -extern void __muldi3(void); -extern void __ucmpdi2(void); -extern void __udivsi3(void); -extern void __umodsi3(void); -extern void __do_div64(void); -extern void __bswapsi2(void); -extern void __bswapdi2(void); - -extern void __aeabi_idiv(void); -extern void __aeabi_idivmod(void); -extern void __aeabi_lasr(void); -extern void __aeabi_llsl(void); -extern void __aeabi_llsr(void); -extern void __aeabi_lmul(void); -extern void __aeabi_uidiv(void); -extern void __aeabi_uidivmod(void); -extern void __aeabi_ulcmp(void); - -extern void fpundefinstr(void); - -void mmioset(void *, unsigned int, size_t); -void mmiocpy(void *, const void *, size_t); - - /* platform dependent support */ -EXPORT_SYMBOL(arm_delay_ops); - - /* networking */ -EXPORT_SYMBOL(csum_partial); -EXPORT_SYMBOL(csum_partial_copy_from_user); -EXPORT_SYMBOL(csum_partial_copy_nocheck); -EXPORT_SYMBOL(__csum_ipv6_magic); - - /* io */ -#ifndef __raw_readsb -EXPORT_SYMBOL(__raw_readsb); -#endif -#ifndef __raw_readsw -EXPORT_SYMBOL(__raw_readsw); -#endif -#ifndef __raw_readsl -EXPORT_SYMBOL(__raw_readsl); -#endif -#ifndef __raw_writesb -EXPORT_SYMBOL(__raw_writesb); -#endif -#ifndef __raw_writesw -EXPORT_SYMBOL(__raw_writesw); -#endif -#ifndef __raw_writesl -EXPORT_SYMBOL(__raw_writesl); -#endif - - /* string / mem functions */ -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memchr); -EXPORT_SYMBOL(__memzero); - -EXPORT_SYMBOL(mmioset); -EXPORT_SYMBOL(mmiocpy); - -#ifdef CONFIG_MMU -EXPORT_SYMBOL(copy_page); - -EXPORT_SYMBOL(arm_copy_from_user); -EXPORT_SYMBOL(arm_copy_to_user); -EXPORT_SYMBOL(arm_clear_user); - -EXPORT_SYMBOL(__get_user_1); -EXPORT_SYMBOL(__get_user_2); -EXPORT_SYMBOL(__get_user_4); -EXPORT_SYMBOL(__get_user_8); - -#ifdef __ARMEB__ -EXPORT_SYMBOL(__get_user_64t_1); -EXPORT_SYMBOL(__get_user_64t_2); -EXPORT_SYMBOL(__get_user_64t_4); -EXPORT_SYMBOL(__get_user_32t_8); -#endif - -EXPORT_SYMBOL(__put_user_1); -EXPORT_SYMBOL(__put_user_2); -EXPORT_SYMBOL(__put_user_4); -EXPORT_SYMBOL(__put_user_8); -#endif - - /* gcc lib functions */ -EXPORT_SYMBOL(__ashldi3); -EXPORT_SYMBOL(__ashrdi3); -EXPORT_SYMBOL(__divsi3); -EXPORT_SYMBOL(__lshrdi3); -EXPORT_SYMBOL(__modsi3); -EXPORT_SYMBOL(__muldi3); -EXPORT_SYMBOL(__ucmpdi2); -EXPORT_SYMBOL(__udivsi3); -EXPORT_SYMBOL(__umodsi3); -EXPORT_SYMBOL(__do_div64); -EXPORT_SYMBOL(__bswapsi2); -EXPORT_SYMBOL(__bswapdi2); - -#ifdef CONFIG_AEABI -EXPORT_SYMBOL(__aeabi_idiv); -EXPORT_SYMBOL(__aeabi_idivmod); -EXPORT_SYMBOL(__aeabi_lasr); -EXPORT_SYMBOL(__aeabi_llsl); -EXPORT_SYMBOL(__aeabi_llsr); -EXPORT_SYMBOL(__aeabi_lmul); -EXPORT_SYMBOL(__aeabi_uidiv); -EXPORT_SYMBOL(__aeabi_uidivmod); -EXPORT_SYMBOL(__aeabi_ulcmp); -#endif - - /* bitops */ -EXPORT_SYMBOL(_set_bit); -EXPORT_SYMBOL(_test_and_set_bit); -EXPORT_SYMBOL(_clear_bit); -EXPORT_SYMBOL(_test_and_clear_bit); -EXPORT_SYMBOL(_change_bit); -EXPORT_SYMBOL(_test_and_change_bit); -EXPORT_SYMBOL(_find_first_zero_bit_le); -EXPORT_SYMBOL(_find_next_zero_bit_le); -EXPORT_SYMBOL(_find_first_bit_le); -EXPORT_SYMBOL(_find_next_bit_le); - -#ifdef __ARMEB__ -EXPORT_SYMBOL(_find_first_zero_bit_be); -EXPORT_SYMBOL(_find_next_zero_bit_be); -EXPORT_SYMBOL(_find_first_bit_be); -EXPORT_SYMBOL(_find_next_bit_be); -#endif - -#ifdef CONFIG_FUNCTION_TRACER -#ifdef CONFIG_OLD_MCOUNT -EXPORT_SYMBOL(mcount); -#endif 
-EXPORT_SYMBOL(__gnu_mcount_nc); -#endif - -#ifdef CONFIG_ARM_PATCH_PHYS_VIRT -EXPORT_SYMBOL(__pv_phys_pfn_offset); -EXPORT_SYMBOL(__pv_offset); -#endif - -#ifdef CONFIG_HAVE_ARM_SMCCC -EXPORT_SYMBOL(arm_smccc_smc); -EXPORT_SYMBOL(arm_smccc_hvc); -#endif diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S index c73c4030ca5d..b629d3f11c3d 100644 --- a/arch/arm/kernel/entry-ftrace.S +++ b/arch/arm/kernel/entry-ftrace.S @@ -7,6 +7,7 @@ #include <asm/assembler.h> #include <asm/ftrace.h> #include <asm/unwind.h> +#include <asm/export.h> #include "entry-header.S" @@ -153,6 +154,7 @@ ENTRY(mcount) __mcount _old #endif ENDPROC(mcount) +EXPORT_SYMBOL(mcount) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(ftrace_caller_old) @@ -205,6 +207,7 @@ UNWIND(.fnstart) #endif UNWIND(.fnend) ENDPROC(__gnu_mcount_nc) +EXPORT_SYMBOL(__gnu_mcount_nc) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(ftrace_caller) diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 04286fd9e09c..f41cee4c5746 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -22,6 +22,7 @@ #include <asm/memory.h> #include <asm/thread_info.h> #include <asm/pgtable.h> +#include <asm/export.h> #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) #include CONFIG_DEBUG_LL_INCLUDE @@ -727,6 +728,8 @@ __pv_phys_pfn_offset: __pv_offset: .quad 0 .size __pv_offset, . -__pv_offset +EXPORT_SYMBOL(__pv_phys_pfn_offset) +EXPORT_SYMBOL(__pv_offset) #endif #include "head-common.S" diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S index 2e48b674aab1..37669e7e13af 100644 --- a/arch/arm/kernel/smccc-call.S +++ b/arch/arm/kernel/smccc-call.S @@ -16,6 +16,7 @@ #include <asm/opcodes-sec.h> #include <asm/opcodes-virt.h> #include <asm/unwind.h> +#include <asm/export.h> /* * Wrap c macros in asm macros to delay expansion until after the @@ -51,6 +52,7 @@ UNWIND( .fnend) ENTRY(arm_smccc_smc) SMCCC SMCCC_SMC ENDPROC(arm_smccc_smc) +EXPORT_SYMBOL(arm_smccc_smc) /* * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, @@ -60,3 +62,4 @@ ENDPROC(arm_smccc_smc) ENTRY(arm_smccc_hvc) SMCCC SMCCC_HVC ENDPROC(arm_smccc_hvc) +EXPORT_SYMBOL(arm_smccc_hvc) diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S index b05e95840651..a7e7de89bd75 100644 --- a/arch/arm/lib/ashldi3.S +++ b/arch/arm/lib/ashldi3.S @@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA. */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> #ifdef __ARMEB__ #define al r1 @@ -52,3 +53,5 @@ ENTRY(__aeabi_llsl) ENDPROC(__ashldi3) ENDPROC(__aeabi_llsl) +EXPORT_SYMBOL(__ashldi3) +EXPORT_SYMBOL(__aeabi_llsl) diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S index 275d7d2341a4..490336e42518 100644 --- a/arch/arm/lib/ashrdi3.S +++ b/arch/arm/lib/ashrdi3.S @@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA. 
*/ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> #ifdef __ARMEB__ #define al r1 @@ -52,3 +53,5 @@ ENTRY(__aeabi_lasr) ENDPROC(__ashrdi3) ENDPROC(__aeabi_lasr) +EXPORT_SYMBOL(__ashrdi3) +EXPORT_SYMBOL(__aeabi_lasr) diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index 7d807cfd8ef5..df06638b327c 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h @@ -1,5 +1,6 @@ #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> #if __LINUX_ARM_ARCH__ >= 6 .macro bitop, name, instr @@ -25,6 +26,7 @@ UNWIND( .fnstart ) bx lr UNWIND( .fnend ) ENDPROC(\name ) +EXPORT_SYMBOL(\name ) .endm .macro testop, name, instr, store @@ -55,6 +57,7 @@ UNWIND( .fnstart ) 2: bx lr UNWIND( .fnend ) ENDPROC(\name ) +EXPORT_SYMBOL(\name ) .endm #else .macro bitop, name, instr @@ -74,6 +77,7 @@ UNWIND( .fnstart ) ret lr UNWIND( .fnend ) ENDPROC(\name ) +EXPORT_SYMBOL(\name ) .endm /** @@ -102,5 +106,6 @@ UNWIND( .fnstart ) ret lr UNWIND( .fnend ) ENDPROC(\name ) +EXPORT_SYMBOL(\name ) .endm #endif diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S index 07cda737bb11..f05f78247304 100644 --- a/arch/arm/lib/bswapsdi2.S +++ b/arch/arm/lib/bswapsdi2.S @@ -1,5 +1,6 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> #if __LINUX_ARM_ARCH__ >= 6 ENTRY(__bswapsi2) @@ -35,3 +36,5 @@ ENTRY(__bswapdi2) ret lr ENDPROC(__bswapdi2) #endif +EXPORT_SYMBOL(__bswapsi2) +EXPORT_SYMBOL(__bswapdi2) diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index e936352ccb00..b566154f5cf4 100644 --- a/arch/arm/lib/clear_user.S +++ b/arch/arm/lib/clear_user.S @@ -10,6 +10,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> .text @@ -50,6 +51,9 @@ USER( strnebt r2, [r0]) UNWIND(.fnend) ENDPROC(arm_clear_user) ENDPROC(__clear_user_std) +#ifndef CONFIG_UACCESS_WITH_MEMCPY +EXPORT_SYMBOL(arm_clear_user) +#endif .pushsection .text.fixup,"ax" .align 0 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 1512bebfbf1b..f549c57ea435 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -13,6 +13,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> /* * Prototype: @@ -94,6 +95,7 @@ ENTRY(arm_copy_from_user) #include "copy_template.S" ENDPROC(arm_copy_from_user) +EXPORT_SYMBOL(arm_copy_from_user) .pushsection .fixup,"ax" .align 0 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S index 6ee2f6706f86..d97851d4af7a 100644 --- a/arch/arm/lib/copy_page.S +++ b/arch/arm/lib/copy_page.S @@ -13,6 +13,7 @@ #include <asm/assembler.h> #include <asm/asm-offsets.h> #include <asm/cache.h> +#include <asm/export.h> #define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 )) @@ -45,3 +46,4 @@ ENTRY(copy_page) PLD( beq 2b ) ldmfd sp!, {r4, pc} @ 3 ENDPROC(copy_page) +EXPORT_SYMBOL(copy_page) diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index caf5019d8161..592c179112d1 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -13,6 +13,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> /* * Prototype: @@ -99,6 +100,9 @@ WEAK(arm_copy_to_user) ENDPROC(arm_copy_to_user) ENDPROC(__copy_to_user_std) +#ifndef CONFIG_UACCESS_WITH_MEMCPY +EXPORT_SYMBOL(arm_copy_to_user) +#endif .pushsection .text.fixup,"ax" .align 0 diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S 
index 3ac6ef01bc43..68603b5ee537 100644 --- a/arch/arm/lib/csumipv6.S +++ b/arch/arm/lib/csumipv6.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .text @@ -30,4 +31,4 @@ ENTRY(__csum_ipv6_magic) adcs r0, r0, #0 ldmfd sp!, {pc} ENDPROC(__csum_ipv6_magic) - +EXPORT_SYMBOL(__csum_ipv6_magic) diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S index 984e0f29d548..830b20e81c37 100644 --- a/arch/arm/lib/csumpartial.S +++ b/arch/arm/lib/csumpartial.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .text @@ -140,3 +141,4 @@ ENTRY(csum_partial) bne 4b b .Lless4 ENDPROC(csum_partial) +EXPORT_SYMBOL(csum_partial) diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S index d03fc71fc88c..9c3383fed129 100644 --- a/arch/arm/lib/csumpartialcopy.S +++ b/arch/arm/lib/csumpartialcopy.S @@ -49,5 +49,6 @@ #define FN_ENTRY ENTRY(csum_partial_copy_nocheck) #define FN_EXIT ENDPROC(csum_partial_copy_nocheck) +#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck) #include "csumpartialcopygeneric.S" diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S index 10b45909610c..8b94d20e51d1 100644 --- a/arch/arm/lib/csumpartialcopygeneric.S +++ b/arch/arm/lib/csumpartialcopygeneric.S @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ #include <asm/assembler.h> +#include <asm/export.h> /* * unsigned int @@ -331,3 +332,4 @@ FN_ENTRY mov r5, r4, get_byte_1 b .Lexit FN_EXIT +FN_EXPORT diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index 1712f132b80d..5d495edf3d83 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S @@ -73,6 +73,7 @@ #define FN_ENTRY ENTRY(csum_partial_copy_from_user) #define FN_EXIT ENDPROC(csum_partial_copy_from_user) +#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user) #include "csumpartialcopygeneric.S" diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 2cef11884857..69aad80a3af4 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -24,6 +24,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/export.h> #include <linux/timex.h> /* @@ -34,6 +35,7 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = { .const_udelay = __loop_const_udelay, .udelay = __loop_udelay, }; +EXPORT_SYMBOL(arm_delay_ops); static const struct delay_timer *delay_timer; static bool delay_calibrated; diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S index a9eafe4981eb..0c9e1c18fc9e 100644 --- a/arch/arm/lib/div64.S +++ b/arch/arm/lib/div64.S @@ -15,6 +15,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> #ifdef __ARMEB__ #define xh r0 @@ -210,3 +211,4 @@ Ldiv0_64: UNWIND(.fnend) ENDPROC(__do_div64) +EXPORT_SYMBOL(__do_div64) diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S index 7848780e8834..26302b8cd38f 100644 --- a/arch/arm/lib/findbit.S +++ b/arch/arm/lib/findbit.S @@ -15,6 +15,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .text /* @@ -37,6 +38,7 @@ ENTRY(_find_first_zero_bit_le) 3: mov r0, r1 @ no free bits ret lr ENDPROC(_find_first_zero_bit_le) +EXPORT_SYMBOL(_find_first_zero_bit_le) /* * Purpose : Find next 'zero' bit @@ -57,6 +59,7 @@ ENTRY(_find_next_zero_bit_le) add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(_find_next_zero_bit_le) 
+EXPORT_SYMBOL(_find_next_zero_bit_le) /* * Purpose : Find a 'one' bit @@ -78,6 +81,7 @@ ENTRY(_find_first_bit_le) 3: mov r0, r1 @ no free bits ret lr ENDPROC(_find_first_bit_le) +EXPORT_SYMBOL(_find_first_bit_le) /* * Purpose : Find next 'one' bit @@ -97,6 +101,7 @@ ENTRY(_find_next_bit_le) add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(_find_next_bit_le) +EXPORT_SYMBOL(_find_next_bit_le) #ifdef __ARMEB__ @@ -116,6 +121,7 @@ ENTRY(_find_first_zero_bit_be) 3: mov r0, r1 @ no free bits ret lr ENDPROC(_find_first_zero_bit_be) +EXPORT_SYMBOL(_find_first_zero_bit_be) ENTRY(_find_next_zero_bit_be) teq r1, #0 @@ -133,6 +139,7 @@ ENTRY(_find_next_zero_bit_be) add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(_find_next_zero_bit_be) +EXPORT_SYMBOL(_find_next_zero_bit_be) ENTRY(_find_first_bit_be) teq r1, #0 @@ -150,6 +157,7 @@ ENTRY(_find_first_bit_be) 3: mov r0, r1 @ no free bits ret lr ENDPROC(_find_first_bit_be) +EXPORT_SYMBOL(_find_first_bit_be) ENTRY(_find_next_bit_be) teq r1, #0 @@ -166,6 +174,7 @@ ENTRY(_find_next_bit_be) add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(_find_next_bit_be) +EXPORT_SYMBOL(_find_next_bit_be) #endif diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 8ecfd15c3a02..9d09a38e73af 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -31,6 +31,7 @@ #include <asm/assembler.h> #include <asm/errno.h> #include <asm/domain.h> +#include <asm/export.h> ENTRY(__get_user_1) check_uaccess r0, 1, r1, r2, __get_user_bad @@ -38,6 +39,7 @@ ENTRY(__get_user_1) mov r0, #0 ret lr ENDPROC(__get_user_1) +EXPORT_SYMBOL(__get_user_1) ENTRY(__get_user_2) check_uaccess r0, 2, r1, r2, __get_user_bad @@ -58,6 +60,7 @@ rb .req r0 mov r0, #0 ret lr ENDPROC(__get_user_2) +EXPORT_SYMBOL(__get_user_2) ENTRY(__get_user_4) check_uaccess r0, 4, r1, r2, __get_user_bad @@ -65,6 +68,7 @@ ENTRY(__get_user_4) mov r0, #0 ret lr ENDPROC(__get_user_4) +EXPORT_SYMBOL(__get_user_4) ENTRY(__get_user_8) check_uaccess r0, 8, r1, r2, __get_user_bad @@ -78,6 +82,7 @@ ENTRY(__get_user_8) mov r0, #0 ret lr ENDPROC(__get_user_8) +EXPORT_SYMBOL(__get_user_8) #ifdef __ARMEB__ ENTRY(__get_user_32t_8) @@ -91,6 +96,7 @@ ENTRY(__get_user_32t_8) mov r0, #0 ret lr ENDPROC(__get_user_32t_8) +EXPORT_SYMBOL(__get_user_32t_8) ENTRY(__get_user_64t_1) check_uaccess r0, 1, r1, r2, __get_user_bad8 @@ -98,6 +104,7 @@ ENTRY(__get_user_64t_1) mov r0, #0 ret lr ENDPROC(__get_user_64t_1) +EXPORT_SYMBOL(__get_user_64t_1) ENTRY(__get_user_64t_2) check_uaccess r0, 2, r1, r2, __get_user_bad8 @@ -114,6 +121,7 @@ rb .req r0 mov r0, #0 ret lr ENDPROC(__get_user_64t_2) +EXPORT_SYMBOL(__get_user_64t_2) ENTRY(__get_user_64t_4) check_uaccess r0, 4, r1, r2, __get_user_bad8 @@ -121,6 +129,7 @@ ENTRY(__get_user_64t_4) mov r0, #0 ret lr ENDPROC(__get_user_64t_4) +EXPORT_SYMBOL(__get_user_64t_4) #endif __get_user_bad8: diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S index c31b2f3153f1..3dff7a3a2aef 100644 --- a/arch/arm/lib/io-readsb.S +++ b/arch/arm/lib/io-readsb.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .Linsb_align: rsb ip, ip, #4 cmp ip, r2 @@ -121,3 +122,4 @@ ENTRY(__raw_readsb) ldmfd sp!, {r4 - r6, pc} ENDPROC(__raw_readsb) +EXPORT_SYMBOL(__raw_readsb) diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S index 2ed86fa5465f..bfd39682325b 100644 --- a/arch/arm/lib/io-readsl.S +++ b/arch/arm/lib/io-readsl.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include 
<asm/assembler.h> +#include <asm/export.h> ENTRY(__raw_readsl) teq r2, #0 @ do we have to check for the zero len? @@ -77,3 +78,4 @@ ENTRY(__raw_readsl) strb r3, [r1, #0] ret lr ENDPROC(__raw_readsl) +EXPORT_SYMBOL(__raw_readsl) diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S index 413da9914529..b3af3db6caac 100644 --- a/arch/arm/lib/io-readsw-armv3.S +++ b/arch/arm/lib/io-readsw-armv3.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .Linsw_bad_alignment: adr r0, .Linsw_bad_align_msg @@ -103,4 +104,4 @@ ENTRY(__raw_readsw) ldmfd sp!, {r4, r5, r6, pc} - +EXPORT_SYMBOL(__raw_readsw) diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S index d9a45e9692ae..3c7a7a40b33e 100644 --- a/arch/arm/lib/io-readsw-armv4.S +++ b/arch/arm/lib/io-readsw-armv4.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .macro pack, rd, hw1, hw2 #ifndef __ARMEB__ @@ -129,3 +130,4 @@ ENTRY(__raw_readsw) strneb ip, [r1] ldmfd sp!, {r4, pc} ENDPROC(__raw_readsw) +EXPORT_SYMBOL(__raw_readsw) diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S index a46bbc9b168b..fa3633594415 100644 --- a/arch/arm/lib/io-writesb.S +++ b/arch/arm/lib/io-writesb.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .macro outword, rd #ifndef __ARMEB__ @@ -92,3 +93,4 @@ ENTRY(__raw_writesb) ldmfd sp!, {r4, r5, pc} ENDPROC(__raw_writesb) +EXPORT_SYMBOL(__raw_writesb) diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S index 4ea2435988c1..98ed6aec0b47 100644 --- a/arch/arm/lib/io-writesl.S +++ b/arch/arm/lib/io-writesl.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> ENTRY(__raw_writesl) teq r2, #0 @ do we have to check for the zero len? @@ -65,3 +66,4 @@ ENTRY(__raw_writesl) bne 6b ret lr ENDPROC(__raw_writesl) +EXPORT_SYMBOL(__raw_writesl) diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S index 121789eb6802..577184c082bb 100644 --- a/arch/arm/lib/io-writesw-armv3.S +++ b/arch/arm/lib/io-writesw-armv3.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .Loutsw_bad_alignment: adr r0, .Loutsw_bad_align_msg @@ -124,3 +125,4 @@ ENTRY(__raw_writesw) strne ip, [r0] ldmfd sp!, {r4, r5, r6, pc} +EXPORT_SYMBOL(__raw_writesw) diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S index 269f90c51ad2..e335f489d1fc 100644 --- a/arch/arm/lib/io-writesw-armv4.S +++ b/arch/arm/lib/io-writesw-armv4.S @@ -9,6 +9,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .macro outword, rd #ifndef __ARMEB__ @@ -98,3 +99,4 @@ ENTRY(__raw_writesw) strneh ip, [r0] ret lr ENDPROC(__raw_writesw) +EXPORT_SYMBOL(__raw_writesw) diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S index 9397b2e532af..f541bc013bff 100644 --- a/arch/arm/lib/lib1funcs.S +++ b/arch/arm/lib/lib1funcs.S @@ -36,6 +36,7 @@ Boston, MA 02111-1307, USA. 
*/ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> .macro ARM_DIV_BODY dividend, divisor, result, curbit @@ -238,6 +239,8 @@ UNWIND(.fnstart) UNWIND(.fnend) ENDPROC(__udivsi3) ENDPROC(__aeabi_uidiv) +EXPORT_SYMBOL(__udivsi3) +EXPORT_SYMBOL(__aeabi_uidiv) ENTRY(__umodsi3) UNWIND(.fnstart) @@ -256,6 +259,7 @@ UNWIND(.fnstart) UNWIND(.fnend) ENDPROC(__umodsi3) +EXPORT_SYMBOL(__umodsi3) #ifdef CONFIG_ARM_PATCH_IDIV .align 3 @@ -303,6 +307,8 @@ UNWIND(.fnstart) UNWIND(.fnend) ENDPROC(__divsi3) ENDPROC(__aeabi_idiv) +EXPORT_SYMBOL(__divsi3) +EXPORT_SYMBOL(__aeabi_idiv) ENTRY(__modsi3) UNWIND(.fnstart) @@ -327,6 +333,7 @@ UNWIND(.fnstart) UNWIND(.fnend) ENDPROC(__modsi3) +EXPORT_SYMBOL(__modsi3) #ifdef CONFIG_AEABI @@ -343,6 +350,7 @@ UNWIND(.save {r0, r1, ip, lr} ) UNWIND(.fnend) ENDPROC(__aeabi_uidivmod) +EXPORT_SYMBOL(__aeabi_uidivmod) ENTRY(__aeabi_idivmod) UNWIND(.fnstart) @@ -356,6 +364,7 @@ UNWIND(.save {r0, r1, ip, lr} ) UNWIND(.fnend) ENDPROC(__aeabi_idivmod) +EXPORT_SYMBOL(__aeabi_idivmod) #endif diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S index 922dcd88b02b..e40833981417 100644 --- a/arch/arm/lib/lshrdi3.S +++ b/arch/arm/lib/lshrdi3.S @@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA. */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> #ifdef __ARMEB__ #define al r1 @@ -52,3 +53,5 @@ ENTRY(__aeabi_llsr) ENDPROC(__lshrdi3) ENDPROC(__aeabi_llsr) +EXPORT_SYMBOL(__lshrdi3) +EXPORT_SYMBOL(__aeabi_llsr) diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S index 74a5bed6d999..44182bf686a5 100644 --- a/arch/arm/lib/memchr.S +++ b/arch/arm/lib/memchr.S @@ -11,6 +11,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .text .align 5 @@ -24,3 +25,4 @@ ENTRY(memchr) 2: movne r0, #0 ret lr ENDPROC(memchr) +EXPORT_SYMBOL(memchr) diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S index 64111bd4440b..1be5b6ddf37c 100644 --- a/arch/arm/lib/memcpy.S +++ b/arch/arm/lib/memcpy.S @@ -13,6 +13,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> #define LDR1W_SHIFT 0 #define STR1W_SHIFT 0 @@ -68,3 +69,5 @@ ENTRY(memcpy) ENDPROC(memcpy) ENDPROC(mmiocpy) +EXPORT_SYMBOL(memcpy) +EXPORT_SYMBOL(mmiocpy) diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S index 69a9d47fc5ab..71dcc5400d02 100644 --- a/arch/arm/lib/memmove.S +++ b/arch/arm/lib/memmove.S @@ -13,6 +13,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> .text @@ -225,3 +226,4 @@ ENTRY(memmove) 18: backward_copy_shift push=24 pull=8 ENDPROC(memmove) +EXPORT_SYMBOL(memmove) diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index 3c65e3bd790f..7b72044cba62 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -12,6 +12,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> .text .align 5 @@ -135,3 +136,5 @@ UNWIND( .fnstart ) UNWIND( .fnend ) ENDPROC(memset) ENDPROC(mmioset) +EXPORT_SYMBOL(memset) +EXPORT_SYMBOL(mmioset) diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S index 0eded952e089..6dec26ed5bcc 100644 --- a/arch/arm/lib/memzero.S +++ b/arch/arm/lib/memzero.S @@ -10,6 +10,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> #include <asm/unwind.h> +#include <asm/export.h> .text .align 5 @@ -135,3 +136,4 @@ UNWIND( .fnstart ) ret lr @ 1 UNWIND( .fnend ) ENDPROC(__memzero) +EXPORT_SYMBOL(__memzero) 
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S index 204305956925..b8f12388ccac 100644 --- a/arch/arm/lib/muldi3.S +++ b/arch/arm/lib/muldi3.S @@ -12,6 +12,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> #ifdef __ARMEB__ #define xh r0 @@ -46,3 +47,5 @@ ENTRY(__aeabi_lmul) ENDPROC(__muldi3) ENDPROC(__aeabi_lmul) +EXPORT_SYMBOL(__muldi3) +EXPORT_SYMBOL(__aeabi_lmul) diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S index 38d660d3705f..11de126e2ed6 100644 --- a/arch/arm/lib/putuser.S +++ b/arch/arm/lib/putuser.S @@ -31,6 +31,7 @@ #include <asm/assembler.h> #include <asm/errno.h> #include <asm/domain.h> +#include <asm/export.h> ENTRY(__put_user_1) check_uaccess r0, 1, r1, ip, __put_user_bad @@ -38,6 +39,7 @@ ENTRY(__put_user_1) mov r0, #0 ret lr ENDPROC(__put_user_1) +EXPORT_SYMBOL(__put_user_1) ENTRY(__put_user_2) check_uaccess r0, 2, r1, ip, __put_user_bad @@ -62,6 +64,7 @@ ENTRY(__put_user_2) mov r0, #0 ret lr ENDPROC(__put_user_2) +EXPORT_SYMBOL(__put_user_2) ENTRY(__put_user_4) check_uaccess r0, 4, r1, ip, __put_user_bad @@ -69,6 +72,7 @@ ENTRY(__put_user_4) mov r0, #0 ret lr ENDPROC(__put_user_4) +EXPORT_SYMBOL(__put_user_4) ENTRY(__put_user_8) check_uaccess r0, 8, r1, ip, __put_user_bad @@ -82,6 +86,7 @@ ENTRY(__put_user_8) mov r0, #0 ret lr ENDPROC(__put_user_8) +EXPORT_SYMBOL(__put_user_8) __put_user_bad: mov r0, #-EFAULT diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S index 013d64c71e8d..7301f6e6046c 100644 --- a/arch/arm/lib/strchr.S +++ b/arch/arm/lib/strchr.S @@ -11,6 +11,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .text .align 5 @@ -25,3 +26,4 @@ ENTRY(strchr) subeq r0, r0, #1 ret lr ENDPROC(strchr) +EXPORT_SYMBOL(strchr) diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S index 3cec1c7482c4..aaf9fd98b754 100644 --- a/arch/arm/lib/strrchr.S +++ b/arch/arm/lib/strrchr.S @@ -11,6 +11,7 @@ */ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> .text .align 5 @@ -24,3 +25,4 @@ ENTRY(strrchr) mov r0, r3 ret lr ENDPROC(strrchr) +EXPORT_SYMBOL(strrchr) diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 6bd1089b07e0..1626e3a551a1 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -19,6 +19,7 @@ #include <linux/gfp.h> #include <linux/highmem.h> #include <linux/hugetlb.h> +#include <linux/export.h> #include <asm/current.h> #include <asm/page.h> @@ -156,6 +157,7 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) } return n; } +EXPORT_SYMBOL(arm_copy_to_user); static unsigned long noinline __clear_user_memset(void __user *addr, unsigned long n) @@ -213,6 +215,7 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n) } return n; } +EXPORT_SYMBOL(arm_clear_user); #if 0 diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S index ad4a6309141a..127a91af46f3 100644 --- a/arch/arm/lib/ucmpdi2.S +++ b/arch/arm/lib/ucmpdi2.S @@ -12,6 +12,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> #ifdef __ARMEB__ #define xh r0 @@ -35,6 +36,7 @@ ENTRY(__ucmpdi2) ret lr ENDPROC(__ucmpdi2) +EXPORT_SYMBOL(__ucmpdi2) #ifdef CONFIG_AEABI @@ -48,6 +50,7 @@ ENTRY(__aeabi_ulcmp) ret lr ENDPROC(__aeabi_ulcmp) +EXPORT_SYMBOL(__aeabi_ulcmp) #endif diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index cab128913e72..737450fe790c 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ 
-32,7 +32,6 @@ endif ifdef CONFIG_SND_IMX_SOC obj-y += ssi-fiq.o -obj-y += ssi-fiq-ksym.o endif # i.MX21 based machines diff --git a/arch/arm/mach-imx/ssi-fiq-ksym.c b/arch/arm/mach-imx/ssi-fiq-ksym.c deleted file mode 100644 index 792090f9a032..000000000000 --- a/arch/arm/mach-imx/ssi-fiq-ksym.c +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Exported ksyms for the SSI FIQ handler - * - * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/module.h> - -#include <linux/platform_data/asoc-imx-ssi.h> - -EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer); -EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer); -EXPORT_SYMBOL(imx_ssi_fiq_start); -EXPORT_SYMBOL(imx_ssi_fiq_end); -EXPORT_SYMBOL(imx_ssi_fiq_base); - diff --git a/arch/arm/mach-imx/ssi-fiq.S b/arch/arm/mach-imx/ssi-fiq.S index a8b93c5f29b5..fd7917f1c204 100644 --- a/arch/arm/mach-imx/ssi-fiq.S +++ b/arch/arm/mach-imx/ssi-fiq.S @@ -8,6 +8,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/export.h> /* * r8 = bit 0-15: tx offset, bit 16-31: tx buffer size @@ -144,4 +145,8 @@ imx_ssi_fiq_tx_buffer: .word 0x0 .L_imx_ssi_fiq_end: imx_ssi_fiq_end: - +EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer) +EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer) +EXPORT_SYMBOL(imx_ssi_fiq_start) +EXPORT_SYMBOL(imx_ssi_fiq_end) +EXPORT_SYMBOL(imx_ssi_fiq_base) diff --git a/arch/ia64/hp/sim/boot/Makefile b/arch/ia64/hp/sim/boot/Makefile index 2e805e0cc560..df6e9968c845 100644 --- a/arch/ia64/hp/sim/boot/Makefile +++ b/arch/ia64/hp/sim/boot/Makefile @@ -33,5 +33,5 @@ $(obj)/vmlinux.bin: vmlinux FORCE LDFLAGS_bootloader = -static -T $(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \ - lib/lib.a arch/ia64/lib/built-in.o arch/ia64/lib/lib.a FORCE + lib/lib.a arch/ia64/lib/lib.a FORCE $(call if_changed,ld) diff --git a/arch/ia64/include/asm/export.h b/arch/ia64/include/asm/export.h new file mode 100644 index 000000000000..ad18c6583252 --- /dev/null +++ b/arch/ia64/include/asm/export.h @@ -0,0 +1,3 @@ +/* EXPORT_DATA_SYMBOL != EXPORT_SYMBOL here */ +#define KSYM_FUNC(name) @fptr(name) +#include <asm-generic/export.h> diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index cfaa7b25084c..6f27a663177c 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -48,6 +48,7 @@ #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/ftrace.h> +#include <asm/export.h> #include "minstate.h" @@ -1345,12 +1346,14 @@ GLOBAL_ENTRY(unw_init_running) mov rp=loc0 br.ret.sptk.many rp END(unw_init_running) +EXPORT_SYMBOL(unw_init_running) #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE GLOBAL_ENTRY(_mcount) br ftrace_stub END(_mcount) +EXPORT_SYMBOL(_mcount) .here: br.ret.sptk.many b0 diff --git a/arch/ia64/kernel/esi_stub.S b/arch/ia64/kernel/esi_stub.S index 6b3d6c1f99b6..2c369bf77c4b 100644 --- a/arch/ia64/kernel/esi_stub.S +++ b/arch/ia64/kernel/esi_stub.S @@ -35,6 +35,7 @@ #include <asm/processor.h> #include <asm/asmmacro.h> +#include <asm/export.h> /* * Inputs: @@ -94,3 +95,4 @@ GLOBAL_ENTRY(esi_call_phys) mov gp=loc2 br.ret.sptk.many rp END(esi_call_phys) +EXPORT_SYMBOL_GPL(esi_call_phys) diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index bb748c596443..c9b5e942f671 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -32,6 +32,7 @@ #include <asm/mca_asm.h> 
#include <linux/init.h> #include <linux/linkage.h> +#include <asm/export.h> #ifdef CONFIG_HOTPLUG_CPU #define SAL_PSR_BITS_TO_SET \ @@ -168,6 +169,7 @@ RestRR: \ __PAGE_ALIGNED_DATA .global empty_zero_page +EXPORT_DATA_SYMBOL_GPL(empty_zero_page) empty_zero_page: .skip PAGE_SIZE diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index 096731049538..d111248af719 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -1,101 +1,11 @@ /* * Architecture-specific kernel symbols - * - * Don't put any exports here unless it's defined in an assembler file. - * All other exports should be put directly after the definition. */ -#include <linux/module.h> - -#include <linux/string.h> -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(strlen); - -#include <asm/pgtable.h> -EXPORT_SYMBOL_GPL(empty_zero_page); - -#include <asm/checksum.h> -EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ -EXPORT_SYMBOL(csum_ipv6_magic); - -#include <asm/page.h> -EXPORT_SYMBOL(clear_page); -EXPORT_SYMBOL(copy_page); - #ifdef CONFIG_VIRTUAL_MEM_MAP +#include <linux/compiler.h> +#include <linux/export.h> #include <linux/bootmem.h> EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ #endif - -#include <asm/processor.h> -EXPORT_SYMBOL(ia64_cpu_info); -#ifdef CONFIG_SMP -EXPORT_SYMBOL(local_per_cpu_offset); -#endif - -#include <asm/uaccess.h> -EXPORT_SYMBOL(__copy_user); -EXPORT_SYMBOL(__do_clear_user); -EXPORT_SYMBOL(__strlen_user); -EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(__strnlen_user); - -/* from arch/ia64/lib */ -extern void __divsi3(void); -extern void __udivsi3(void); -extern void __modsi3(void); -extern void __umodsi3(void); -extern void __divdi3(void); -extern void __udivdi3(void); -extern void __moddi3(void); -extern void __umoddi3(void); - -EXPORT_SYMBOL(__divsi3); -EXPORT_SYMBOL(__udivsi3); -EXPORT_SYMBOL(__modsi3); -EXPORT_SYMBOL(__umodsi3); -EXPORT_SYMBOL(__divdi3); -EXPORT_SYMBOL(__udivdi3); -EXPORT_SYMBOL(__moddi3); -EXPORT_SYMBOL(__umoddi3); - -#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE) -extern void xor_ia64_2(void); -extern void xor_ia64_3(void); -extern void xor_ia64_4(void); -extern void xor_ia64_5(void); - -EXPORT_SYMBOL(xor_ia64_2); -EXPORT_SYMBOL(xor_ia64_3); -EXPORT_SYMBOL(xor_ia64_4); -EXPORT_SYMBOL(xor_ia64_5); -#endif - -#include <asm/pal.h> -EXPORT_SYMBOL(ia64_pal_call_phys_stacked); -EXPORT_SYMBOL(ia64_pal_call_phys_static); -EXPORT_SYMBOL(ia64_pal_call_stacked); -EXPORT_SYMBOL(ia64_pal_call_static); -EXPORT_SYMBOL(ia64_load_scratch_fpregs); -EXPORT_SYMBOL(ia64_save_scratch_fpregs); - -#include <asm/unwind.h> -EXPORT_SYMBOL(unw_init_running); - -#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE) -extern void esi_call_phys (void); -EXPORT_SYMBOL_GPL(esi_call_phys); -#endif -extern char ia64_ivt[]; -EXPORT_SYMBOL(ia64_ivt); - -#include <asm/ftrace.h> -#ifdef CONFIG_FUNCTION_TRACER -/* mcount is defined in assembly */ -EXPORT_SYMBOL(_mcount); -#endif - -#include <asm/cacheflush.h> -EXPORT_SYMBOL_GPL(flush_icache_range); diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index b1c3cfc93e71..44a103a5de2b 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -57,6 +57,7 @@ #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/errno.h> +#include <asm/export.h> #if 0 # define PSR_DEFAULT_BITS psr.ac @@ -85,6 +86,7 @@ .align 32768 
// align on 32KB boundary .global ia64_ivt + EXPORT_DATA_SYMBOL(ia64_ivt) ia64_ivt: ///////////////////////////////////////////////////////////////////////////////////////// // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S index 0b533441c3c9..94fb2e395498 100644 --- a/arch/ia64/kernel/pal.S +++ b/arch/ia64/kernel/pal.S @@ -14,6 +14,7 @@ #include <asm/asmmacro.h> #include <asm/processor.h> +#include <asm/export.h> .data pal_entry_point: @@ -87,6 +88,7 @@ GLOBAL_ENTRY(ia64_pal_call_static) srlz.d // seralize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_static) +EXPORT_SYMBOL(ia64_pal_call_static) /* * Make a PAL call using the stacked registers calling convention. @@ -122,6 +124,7 @@ GLOBAL_ENTRY(ia64_pal_call_stacked) srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_stacked) +EXPORT_SYMBOL(ia64_pal_call_stacked) /* * Make a physical mode PAL call using the static registers calling convention. @@ -193,6 +196,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static) srlz.d // seralize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_static) +EXPORT_SYMBOL(ia64_pal_call_phys_static) /* * Make a PAL call using the stacked registers in physical mode. @@ -250,6 +254,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked) srlz.d // seralize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_stacked) +EXPORT_SYMBOL(ia64_pal_call_phys_stacked) /* * Save scratch fp scratch regs which aren't saved in pt_regs already @@ -275,6 +280,7 @@ GLOBAL_ENTRY(ia64_save_scratch_fpregs) stf.spill [r2] = f15,32 br.ret.sptk.many rp END(ia64_save_scratch_fpregs) +EXPORT_SYMBOL(ia64_save_scratch_fpregs) /* * Load scratch fp scratch regs (fp10-fp15) @@ -296,3 +302,4 @@ GLOBAL_ENTRY(ia64_load_scratch_fpregs) ldf.fill f15 = [r2],32 br.ret.sptk.many rp END(ia64_load_scratch_fpregs) +EXPORT_SYMBOL(ia64_load_scratch_fpregs) diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index afddb3e80a29..7ec7acc844c2 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -71,7 +71,11 @@ EXPORT_SYMBOL(__per_cpu_offset); #endif DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); +EXPORT_SYMBOL(ia64_cpu_info); DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); +#ifdef CONFIG_SMP +EXPORT_SYMBOL(local_per_cpu_offset); +#endif unsigned long ia64_cycles_per_usec; struct ia64_boot_param *ia64_boot_param; struct screen_info screen_info; diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile index 98771e2a78af..1f3d3877618f 100644 --- a/arch/ia64/lib/Makefile +++ b/arch/ia64/lib/Makefile @@ -2,17 +2,15 @@ # Makefile for ia64-specific library routines.. 
# -obj-y := io.o - -lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ +lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ checksum.o clear_page.o csum_partial_copy.o \ clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ flush.o ip_fast_csum.o do_csum.o \ memset.o strlen.o xor.o -obj-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o -obj-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o +lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o +lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o lib-$(CONFIG_PERFMON) += carta_random.o AFLAGS___divdi3.o = diff --git a/arch/ia64/lib/clear_page.S b/arch/ia64/lib/clear_page.S index 2d814e7ed191..3cf5b76e587f 100644 --- a/arch/ia64/lib/clear_page.S +++ b/arch/ia64/lib/clear_page.S @@ -11,6 +11,7 @@ #include <asm/asmmacro.h> #include <asm/page.h> +#include <asm/export.h> #ifdef CONFIG_ITANIUM # define L3_LINE_SIZE 64 // Itanium L3 line size @@ -74,3 +75,4 @@ GLOBAL_ENTRY(clear_page) mov ar.lc = saved_lc // restore lc br.ret.sptk.many rp END(clear_page) +EXPORT_SYMBOL(clear_page) diff --git a/arch/ia64/lib/clear_user.S b/arch/ia64/lib/clear_user.S index eecd8577b209..7b40731ee5d8 100644 --- a/arch/ia64/lib/clear_user.S +++ b/arch/ia64/lib/clear_user.S @@ -12,6 +12,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> // // arguments @@ -207,3 +208,4 @@ GLOBAL_ENTRY(__do_clear_user) mov ar.lc=saved_lc br.ret.sptk.many rp END(__do_clear_user) +EXPORT_SYMBOL(__do_clear_user) diff --git a/arch/ia64/lib/copy_page.S b/arch/ia64/lib/copy_page.S index 127d1d050d78..cbdb9e323ffb 100644 --- a/arch/ia64/lib/copy_page.S +++ b/arch/ia64/lib/copy_page.S @@ -16,6 +16,7 @@ */ #include <asm/asmmacro.h> #include <asm/page.h> +#include <asm/export.h> #define PIPE_DEPTH 3 #define EPI p[PIPE_DEPTH-1] @@ -96,3 +97,4 @@ GLOBAL_ENTRY(copy_page) mov ar.lc=saved_lc br.ret.sptk.many rp END(copy_page) +EXPORT_SYMBOL(copy_page) diff --git a/arch/ia64/lib/copy_page_mck.S b/arch/ia64/lib/copy_page_mck.S index 3c45d60a81b4..c13f69036876 100644 --- a/arch/ia64/lib/copy_page_mck.S +++ b/arch/ia64/lib/copy_page_mck.S @@ -61,6 +61,7 @@ */ #include <asm/asmmacro.h> #include <asm/page.h> +#include <asm/export.h> #define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st) @@ -183,3 +184,4 @@ GLOBAL_ENTRY(copy_page) mov pr = saved_pr, -1 br.ret.sptk.many rp END(copy_page) +EXPORT_SYMBOL(copy_page) diff --git a/arch/ia64/lib/copy_user.S b/arch/ia64/lib/copy_user.S index c952bdc6a093..66facd52e8d0 100644 --- a/arch/ia64/lib/copy_user.S +++ b/arch/ia64/lib/copy_user.S @@ -30,6 +30,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> // // Tuneable parameters @@ -608,3 +609,4 @@ GLOBAL_ENTRY(__copy_user) mov ar.pfs=saved_pfs br.ret.sptk.many rp END(__copy_user) +EXPORT_SYMBOL(__copy_user) diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S index 1d8c88860063..9a5a2f9fad13 100644 --- a/arch/ia64/lib/flush.S +++ b/arch/ia64/lib/flush.S @@ -8,6 +8,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> /* @@ -60,6 +61,7 @@ GLOBAL_ENTRY(flush_icache_range) mov ar.lc=r3 // restore ar.lc br.ret.sptk.many rp END(flush_icache_range) +EXPORT_SYMBOL_GPL(flush_icache_range) /* * clflush_cache_range(start,size) diff --git a/arch/ia64/lib/idiv32.S b/arch/ia64/lib/idiv32.S index c91b5b0129ff..715aed79a9ce 100644 --- a/arch/ia64/lib/idiv32.S +++ b/arch/ia64/lib/idiv32.S @@ -15,6 +15,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> #ifdef MODULO # define OP mod @@ 
-81,3 +82,4 @@ GLOBAL_ENTRY(NAME) getf.sig r8 = f6 // transfer result to result register br.ret.sptk.many rp END(NAME) +EXPORT_SYMBOL(NAME) diff --git a/arch/ia64/lib/idiv64.S b/arch/ia64/lib/idiv64.S index 627573c4ceb1..25840f697753 100644 --- a/arch/ia64/lib/idiv64.S +++ b/arch/ia64/lib/idiv64.S @@ -15,6 +15,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> #ifdef MODULO # define OP mod @@ -78,3 +79,4 @@ GLOBAL_ENTRY(NAME) getf.sig r8 = f11 // transfer result to result register br.ret.sptk.many rp END(NAME) +EXPORT_SYMBOL(NAME) diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S index 620d9dc5220f..648e0d4a4839 100644 --- a/arch/ia64/lib/ip_fast_csum.S +++ b/arch/ia64/lib/ip_fast_csum.S @@ -13,6 +13,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> /* * Since we know that most likely this function is called with buf aligned @@ -92,6 +93,7 @@ GLOBAL_ENTRY(ip_fast_csum) mov b0=r34 br.ret.sptk.many b0 END(ip_fast_csum) +EXPORT_SYMBOL(ip_fast_csum) GLOBAL_ENTRY(csum_ipv6_magic) ld4 r20=[in0],4 @@ -142,3 +144,4 @@ GLOBAL_ENTRY(csum_ipv6_magic) andcm r8=r9,r8 br.ret.sptk.many b0 END(csum_ipv6_magic) +EXPORT_SYMBOL(csum_ipv6_magic) diff --git a/arch/ia64/lib/memcpy.S b/arch/ia64/lib/memcpy.S index 448908d80b69..ba172fd6acf4 100644 --- a/arch/ia64/lib/memcpy.S +++ b/arch/ia64/lib/memcpy.S @@ -14,6 +14,7 @@ * David Mosberger-Tang <davidm@hpl.hp.com> */ #include <asm/asmmacro.h> +#include <asm/export.h> GLOBAL_ENTRY(memcpy) @@ -299,3 +300,4 @@ GLOBAL_ENTRY(memcpy) COPY(56, 0) END(memcpy) +EXPORT_SYMBOL(memcpy) diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S index ab0f87639729..b264b6a7967b 100644 --- a/arch/ia64/lib/memcpy_mck.S +++ b/arch/ia64/lib/memcpy_mck.S @@ -15,6 +15,7 @@ */ #include <asm/asmmacro.h> #include <asm/page.h> +#include <asm/export.h> #define EK(y...) EX(y) @@ -78,6 +79,7 @@ GLOBAL_ENTRY(memcpy) br.cond.sptk .common_code ;; END(memcpy) +EXPORT_SYMBOL(memcpy) GLOBAL_ENTRY(__copy_user) .prologue // check dest alignment @@ -664,3 +666,4 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ /* end of McKinley specific optimization */ END(__copy_user) +EXPORT_SYMBOL(__copy_user) diff --git a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S index f26c16aefb1c..87b974704075 100644 --- a/arch/ia64/lib/memset.S +++ b/arch/ia64/lib/memset.S @@ -18,6 +18,7 @@ to get peak speed when value = 0. 
*/ #include <asm/asmmacro.h> +#include <asm/export.h> #undef ret #define dest in0 @@ -360,3 +361,4 @@ GLOBAL_ENTRY(memset) br.ret.sptk.many rp } END(memset) +EXPORT_SYMBOL(memset) diff --git a/arch/ia64/lib/strlen.S b/arch/ia64/lib/strlen.S index e0cdac0a85b8..1a6e17c657b4 100644 --- a/arch/ia64/lib/strlen.S +++ b/arch/ia64/lib/strlen.S @@ -17,6 +17,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> // // @@ -190,3 +191,4 @@ GLOBAL_ENTRY(strlen) mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what br.ret.sptk.many rp // end of successful recovery code END(strlen) +EXPORT_SYMBOL(strlen) diff --git a/arch/ia64/lib/strlen_user.S b/arch/ia64/lib/strlen_user.S index c71eded4285e..9d257684e733 100644 --- a/arch/ia64/lib/strlen_user.S +++ b/arch/ia64/lib/strlen_user.S @@ -16,6 +16,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> // // int strlen_user(char *) @@ -196,3 +197,4 @@ GLOBAL_ENTRY(__strlen_user) mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what br.ret.sptk.many rp END(__strlen_user) +EXPORT_SYMBOL(__strlen_user) diff --git a/arch/ia64/lib/strncpy_from_user.S b/arch/ia64/lib/strncpy_from_user.S index a504381f31eb..ca9ccf280e2e 100644 --- a/arch/ia64/lib/strncpy_from_user.S +++ b/arch/ia64/lib/strncpy_from_user.S @@ -17,6 +17,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> GLOBAL_ENTRY(__strncpy_from_user) alloc r2=ar.pfs,3,0,0,0 @@ -42,3 +43,4 @@ GLOBAL_ENTRY(__strncpy_from_user) [.Lexit:] br.ret.sptk.many rp END(__strncpy_from_user) +EXPORT_SYMBOL(__strncpy_from_user) diff --git a/arch/ia64/lib/strnlen_user.S b/arch/ia64/lib/strnlen_user.S index d09066b1e49d..80a5dfd1d402 100644 --- a/arch/ia64/lib/strnlen_user.S +++ b/arch/ia64/lib/strnlen_user.S @@ -13,6 +13,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> GLOBAL_ENTRY(__strnlen_user) .prologue @@ -43,3 +44,4 @@ GLOBAL_ENTRY(__strnlen_user) mov ar.lc=r16 // restore ar.lc br.ret.sptk.many rp END(__strnlen_user) +EXPORT_SYMBOL(__strnlen_user) diff --git a/arch/ia64/lib/xor.S b/arch/ia64/lib/xor.S index 54e3f7eab8e9..c83f1c410691 100644 --- a/arch/ia64/lib/xor.S +++ b/arch/ia64/lib/xor.S @@ -14,6 +14,7 @@ */ #include <asm/asmmacro.h> +#include <asm/export.h> GLOBAL_ENTRY(xor_ia64_2) .prologue @@ -51,6 +52,7 @@ GLOBAL_ENTRY(xor_ia64_2) mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_2) +EXPORT_SYMBOL(xor_ia64_2) GLOBAL_ENTRY(xor_ia64_3) .prologue @@ -91,6 +93,7 @@ GLOBAL_ENTRY(xor_ia64_3) mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_3) +EXPORT_SYMBOL(xor_ia64_3) GLOBAL_ENTRY(xor_ia64_4) .prologue @@ -134,6 +137,7 @@ GLOBAL_ENTRY(xor_ia64_4) mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_4) +EXPORT_SYMBOL(xor_ia64_4) GLOBAL_ENTRY(xor_ia64_5) .prologue @@ -182,3 +186,4 @@ GLOBAL_ENTRY(xor_ia64_5) mov pr = r29, -1 br.ret.sptk.few rp END(xor_ia64_5) +EXPORT_SYMBOL(xor_ia64_5) diff --git a/arch/m68k/include/asm/export.h b/arch/m68k/include/asm/export.h new file mode 100644 index 000000000000..0af20f48bd07 --- /dev/null +++ b/arch/m68k/include/asm/export.h @@ -0,0 +1,3 @@ +#define KSYM_ALIGN 2 +#define KCRC_ALIGN 2 +#include <asm-generic/export.h> diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile index 8a1c4d3f91c8..74c898ced8cc 100644 --- a/arch/m68k/kernel/Makefile +++ b/arch/m68k/kernel/Makefile @@ -13,7 +13,7 @@ extra-$(CONFIG_SUN3X) := head.o extra-$(CONFIG_SUN3) := sun3-head.o extra-y += vmlinux.lds -obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o +obj-y := entry.o irq.o module.o process.o ptrace.o obj-y += setup.o signal.o sys_m68k.o 
syscalltable.o time.o traps.o obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c deleted file mode 100644 index 774c1bd59c36..000000000000 --- a/arch/m68k/kernel/m68k_ksyms.c +++ /dev/null @@ -1,32 +0,0 @@ -#include <linux/module.h> - -asmlinkage long long __ashldi3 (long long, int); -asmlinkage long long __ashrdi3 (long long, int); -asmlinkage long long __lshrdi3 (long long, int); -asmlinkage long long __muldi3 (long long, long long); - -/* The following are special because they're not called - explicitly (the C compiler generates them). Fortunately, - their interface isn't gonna change any time soon now, so - it's OK to leave it out of version control. */ -EXPORT_SYMBOL(__ashldi3); -EXPORT_SYMBOL(__ashrdi3); -EXPORT_SYMBOL(__lshrdi3); -EXPORT_SYMBOL(__muldi3); - -#if defined(CONFIG_CPU_HAS_NO_MULDIV64) -/* - * Simpler 68k and ColdFire parts also need a few other gcc functions. - */ -extern long long __divsi3(long long, long long); -extern long long __modsi3(long long, long long); -extern long long __mulsi3(long long, long long); -extern long long __udivsi3(long long, long long); -extern long long __umodsi3(long long, long long); - -EXPORT_SYMBOL(__divsi3); -EXPORT_SYMBOL(__modsi3); -EXPORT_SYMBOL(__mulsi3); -EXPORT_SYMBOL(__udivsi3); -EXPORT_SYMBOL(__umodsi3); -#endif diff --git a/arch/m68k/lib/ashldi3.c b/arch/m68k/lib/ashldi3.c index 37234c2df47f..8dffd36ec4f2 100644 --- a/arch/m68k/lib/ashldi3.c +++ b/arch/m68k/lib/ashldi3.c @@ -13,6 +13,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ +#include <linux/compiler.h> +#include <linux/export.h> + #define BITS_PER_UNIT 8 typedef int SItype __attribute__ ((mode (SI))); @@ -55,3 +58,4 @@ __ashldi3 (DItype u, word_type b) return w.ll; } +EXPORT_SYMBOL(__ashldi3); diff --git a/arch/m68k/lib/ashrdi3.c b/arch/m68k/lib/ashrdi3.c index 1d59345f36c6..e6565a3ee2c3 100644 --- a/arch/m68k/lib/ashrdi3.c +++ b/arch/m68k/lib/ashrdi3.c @@ -13,6 +13,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ +#include <linux/compiler.h> +#include <linux/export.h> + #define BITS_PER_UNIT 8 typedef int SItype __attribute__ ((mode (SI))); @@ -56,3 +59,4 @@ __ashrdi3 (DItype u, word_type b) return w.ll; } +EXPORT_SYMBOL(__ashrdi3); diff --git a/arch/m68k/lib/divsi3.S b/arch/m68k/lib/divsi3.S index 2c0ec85ac661..3a2143f51631 100644 --- a/arch/m68k/lib/divsi3.S +++ b/arch/m68k/lib/divsi3.S @@ -33,6 +33,8 @@ General Public License for more details. */ D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 */ +#include <asm/export.h> + /* These are predefined by new versions of GNU cpp. */ #ifndef __USER_LABEL_PREFIX__ @@ -118,3 +120,4 @@ L2: movel d1, sp@- L3: movel sp@+, d2 rts + EXPORT_SYMBOL(__divsi3) diff --git a/arch/m68k/lib/lshrdi3.c b/arch/m68k/lib/lshrdi3.c index 49e1ec8f2cc2..039779737c7d 100644 --- a/arch/m68k/lib/lshrdi3.c +++ b/arch/m68k/lib/lshrdi3.c @@ -13,6 +13,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
*/ +#include <linux/compiler.h> +#include <linux/export.h> + #define BITS_PER_UNIT 8 typedef int SItype __attribute__ ((mode (SI))); @@ -55,3 +58,4 @@ __lshrdi3 (DItype u, word_type b) return w.ll; } +EXPORT_SYMBOL(__lshrdi3); diff --git a/arch/m68k/lib/modsi3.S b/arch/m68k/lib/modsi3.S index 1d9e0efdf31d..1c967649a4e0 100644 --- a/arch/m68k/lib/modsi3.S +++ b/arch/m68k/lib/modsi3.S @@ -33,6 +33,8 @@ General Public License for more details. */ D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 */ +#include <asm/export.h> + /* These are predefined by new versions of GNU cpp. */ #ifndef __USER_LABEL_PREFIX__ @@ -106,3 +108,4 @@ SYM (__modsi3): movel d1, d0 rts + EXPORT_SYMBOL(__modsi3) diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c index 9006d15b8721..6459af5b2af0 100644 --- a/arch/m68k/lib/muldi3.c +++ b/arch/m68k/lib/muldi3.c @@ -14,6 +14,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. */ +#include <linux/compiler.h> +#include <linux/export.h> + #ifdef CONFIG_CPU_HAS_NO_MULDIV64 #define SI_TYPE_SIZE 32 @@ -90,3 +93,4 @@ __muldi3 (DItype u, DItype v) return w.ll; } +EXPORT_SYMBOL(__muldi3); diff --git a/arch/m68k/lib/mulsi3.S b/arch/m68k/lib/mulsi3.S index c39ad4e738e9..855675e69a8a 100644 --- a/arch/m68k/lib/mulsi3.S +++ b/arch/m68k/lib/mulsi3.S @@ -32,7 +32,7 @@ General Public License for more details. */ Some of this code comes from MINIX, via the folks at ericsson. D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 */ - +#include <asm/export.h> /* These are predefined by new versions of GNU cpp. */ #ifndef __USER_LABEL_PREFIX__ @@ -102,4 +102,4 @@ SYM (__mulsi3): addl d1, d0 rts - + EXPORT_SYMBOL(__mulsi3) diff --git a/arch/m68k/lib/udivsi3.S b/arch/m68k/lib/udivsi3.S index 35a5446572a5..78440ae513bf 100644 --- a/arch/m68k/lib/udivsi3.S +++ b/arch/m68k/lib/udivsi3.S @@ -32,7 +32,7 @@ General Public License for more details. */ Some of this code comes from MINIX, via the folks at ericsson. D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 */ - +#include <asm/export.h> /* These are predefined by new versions of GNU cpp. */ #ifndef __USER_LABEL_PREFIX__ @@ -154,4 +154,4 @@ L2: subql IMM (1),d4 unlk a6 | and return rts #endif /* __mcf5200__ || __mcoldfire__ */ - + EXPORT_SYMBOL(__udivsi3) diff --git a/arch/m68k/lib/umodsi3.S b/arch/m68k/lib/umodsi3.S index 099da514a8fd..b6fd11f58948 100644 --- a/arch/m68k/lib/umodsi3.S +++ b/arch/m68k/lib/umodsi3.S @@ -32,7 +32,7 @@ General Public License for more details. */ Some of this code comes from MINIX, via the folks at ericsson. D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992 */ - +#include <asm/export.h> /* These are predefined by new versions of GNU cpp. 
*/ #ifndef __USER_LABEL_PREFIX__ @@ -105,4 +105,4 @@ SYM (__umodsi3): subl d0, d1 /* d1 = a - (a/b)*b */ movel d1, d0 rts - + EXPORT_SYMBOL(__umodsi3) diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index ab9f4e0ed4cf..5c4fbc80dc6c 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -1,5 +1,6 @@ generic-y += clkdev.h generic-y += div64.h +generic-y += export.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += local64.h diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index aded29ad2e8f..6913f6725ce1 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -90,10 +90,6 @@ obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o obj-$(CONFIG_PPC32) += entry_32.o setup_32.o obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o obj-$(CONFIG_KGDB) += kgdb.o -obj-$(CONFIG_MODULES) += ppc_ksyms.o -ifeq ($(CONFIG_PPC32),y) -obj-$(CONFIG_MODULES) += ppc_ksyms_32.o -endif obj-$(CONFIG_BOOTX_TEXT) += btext.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_KPROBES) += kprobes.o diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 83428a283fa0..3841d749a430 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -33,6 +33,7 @@ #include <asm/unistd.h> #include <asm/ftrace.h> #include <asm/ptrace.h> +#include <asm/export.h> /* * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE. @@ -1358,6 +1359,7 @@ _GLOBAL(_mcount) MCOUNT_RESTORE_FRAME bctr #endif +EXPORT_SYMBOL(_mcount) _GLOBAL(ftrace_stub) blr diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 51df82b61084..6432d4bf08c8 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -38,6 +38,7 @@ #include <asm/context_tracking.h> #include <asm/tm.h> #include <asm/ppc-opcode.h> +#include <asm/export.h> /* * System calls. @@ -1177,6 +1178,7 @@ _GLOBAL(enter_prom) #ifdef CONFIG_DYNAMIC_FTRACE _GLOBAL(mcount) _GLOBAL(_mcount) +EXPORT_SYMBOL(_mcount) mflr r12 mtctr r12 mtlr r0 @@ -1413,6 +1415,7 @@ livepatch_handler: #else _GLOBAL_TOC(_mcount) +EXPORT_SYMBOL(_mcount) /* Taken from output of objdump from lib64/glibc */ mflr r3 ld r11, 0(r1) diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S index 9f1ebf7338f1..52ca2471ee1a 100644 --- a/arch/powerpc/kernel/epapr_hcalls.S +++ b/arch/powerpc/kernel/epapr_hcalls.S @@ -16,6 +16,7 @@ #include <asm/ppc_asm.h> #include <asm/asm-compat.h> #include <asm/asm-offsets.h> +#include <asm/export.h> #ifndef CONFIG_PPC64 /* epapr_ev_idle() was derived from e500_idle() */ @@ -53,3 +54,4 @@ epapr_hypercall_start: nop nop blr +EXPORT_SYMBOL(epapr_hypercall_start) diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index 08d14b096eb9..6c509f39bbde 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -24,6 +24,7 @@ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> +#include <asm/export.h> #ifdef CONFIG_VSX #define __REST_32FPVSRS(n,c,base) \ @@ -59,6 +60,7 @@ _GLOBAL(load_fp_state) MTFSF_L(fr0) REST_32FPVSRS(0, R4, R3) blr +EXPORT_SYMBOL(load_fp_state) /* * Store FP state into memory, including FPSCR @@ -69,6 +71,7 @@ _GLOBAL(store_fp_state) mffs fr0 stfd fr0,FPSTATE_FPSCR(r3) blr +EXPORT_SYMBOL(store_fp_state) /* * This task wants to use the FPU now. 
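Every conversion in the hunks above and below follows the same recipe: the EXPORT_SYMBOL() that used to live in a separate *_ksyms.c collector file moves next to the symbol's definition in the .S source, which only has to include <asm/export.h> (the asm-side counterpart of <linux/export.h>, normally just the generic asm-generic/export.h). A minimal sketch of a converted assembly file is shown below; the file and symbol names are made up for illustration and do not appear in this series.

	/* hypothetical arch/xxx/lib/helper.S, after conversion */
	#include <linux/linkage.h>
	#include <asm/export.h>		/* asm-side EXPORT_SYMBOL()/EXPORT_SYMBOL_GPL() */

	ENTRY(my_helper)		/* made-up exported function */
		/* ... function body and return ... */
	ENDPROC(my_helper)
	EXPORT_SYMBOL(my_helper)	/* emits the export table entries from the .S file itself */

With the exports generated at the point of definition, the collector files (arm's ssi-fiq-ksym.c, ia64_ksyms.c, m68k_ksyms.c, the powerpc ppc_ksyms*.c files and so on) can simply be deleted, which is what the removal hunks in this series do.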
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index a3f821eb7e9a..9d963547d243 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -34,6 +34,7 @@ #include <asm/ptrace.h> #include <asm/bug.h> #include <asm/kvm_book3s_asm.h> +#include <asm/export.h> /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ #define LOAD_BAT(n, reg, RA, RB) \ @@ -738,6 +739,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) .globl mol_trampoline .set mol_trampoline, i0x2f00 + EXPORT_SYMBOL(mol_trampoline) . = 0x3000 @@ -1045,6 +1047,7 @@ _ENTRY(switch_mmu_context) 4: trap EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0 blr +EXPORT_SYMBOL(switch_mmu_context) /* * An undocumented "feature" of 604e requires that the v bit @@ -1272,6 +1275,7 @@ sdata: .globl empty_zero_page empty_zero_page: .space 4096 +EXPORT_SYMBOL(empty_zero_page) .globl swapper_pg_dir swapper_pg_dir: @@ -1285,6 +1289,7 @@ intercept_table: .long 0, 0, 0, 0, 0, 0, 0, 0 .long 0, 0, 0, 0, 0, 0, 0, 0 .long 0, 0, 0, 0, 0, 0, 0, 0 +EXPORT_SYMBOL(intercept_table) /* Room for two PTE pointers, usually the kernel and current user pointers * to their respective root page table. diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 7d7d8635227a..41374a468d1c 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -41,6 +41,7 @@ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/ptrace.h> +#include <asm/export.h> /* As with the other PowerPC ports, it is expected that when code * execution begins here, the following registers contain valid, yet @@ -971,6 +972,7 @@ sdata: .globl empty_zero_page empty_zero_page: .space 4096 +EXPORT_SYMBOL(empty_zero_page) .globl swapper_pg_dir swapper_pg_dir: .space PGD_TABLE_SIZE diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 9cdf5c71e426..37e4a7cf0065 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -39,6 +39,7 @@ #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/synch.h> +#include <asm/export.h> #include "head_booke.h" @@ -1254,6 +1255,7 @@ sdata: .globl empty_zero_page empty_zero_page: .space PAGE_SIZE +EXPORT_SYMBOL(empty_zero_page) /* * To support >32-bit physical addresses, we use an 8KB pgdir. diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 79da0641bae2..04c546e20cc0 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -43,6 +43,7 @@ #include <asm/hw_irq.h> #include <asm/cputhreads.h> #include <asm/ppc-opcode.h> +#include <asm/export.h> /* The physical memory is laid out such that the secondary processor * spin code sits at 0x0000...0x00ff. On server, the vectors follow @@ -1002,3 +1003,4 @@ swapper_pg_dir: .globl empty_zero_page empty_zero_page: .space PAGE_SIZE +EXPORT_SYMBOL(empty_zero_page) diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 033a6b735487..fb133a163263 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -31,6 +31,7 @@ #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/fixmap.h> +#include <asm/export.h> /* Macro to make the code more readable. 
*/ #ifdef CONFIG_8xx_CPU6 @@ -884,6 +885,7 @@ sdata: .align PAGE_SHIFT empty_zero_page: .space PAGE_SIZE +EXPORT_SYMBOL(empty_zero_page) .globl swapper_pg_dir swapper_pg_dir: diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 3bfa3150911f..bf4c6021515f 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -42,6 +42,7 @@ #include <asm/asm-offsets.h> #include <asm/cache.h> #include <asm/ptrace.h> +#include <asm/export.h> #include "head_booke.h" /* As with the other PowerPC ports, it is expected that when code @@ -1223,6 +1224,7 @@ sdata: .globl empty_zero_page empty_zero_page: .space 4096 +EXPORT_SYMBOL(empty_zero_page) .globl swapper_pg_dir swapper_pg_dir: .space PGD_TABLE_SIZE diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S index 0d432194c018..384357cb8bc0 100644 --- a/arch/powerpc/kernel/misc.S +++ b/arch/powerpc/kernel/misc.S @@ -18,6 +18,7 @@ #include <asm/unistd.h> #include <asm/asm-compat.h> #include <asm/asm-offsets.h> +#include <asm/export.h> .text @@ -118,3 +119,4 @@ _GLOBAL(longjmp) _GLOBAL(current_stack_pointer) PPC_LL r3,0(r1) blr +EXPORT_SYMBOL(current_stack_pointer) diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 03756ffdcd71..93cf7a5846a6 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -33,6 +33,7 @@ #include <asm/kexec.h> #include <asm/bug.h> #include <asm/ptrace.h> +#include <asm/export.h> .text @@ -319,6 +320,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE) #endif /* CONFIG_4xx */ isync blr +EXPORT_SYMBOL(flush_instruction_cache) #endif /* CONFIG_PPC_8xx */ /* @@ -359,6 +361,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) isync blr _ASM_NOKPROBE_SYMBOL(flush_icache_range) +EXPORT_SYMBOL(flush_icache_range) /* * Flush a particular page from the data cache to RAM. @@ -497,6 +500,7 @@ _GLOBAL(copy_page) li r0,MAX_COPY_PREFETCH li r11,4 b 2b +EXPORT_SYMBOL(copy_page) /* * Extended precision shifts. @@ -524,6 +528,7 @@ _GLOBAL(__ashrdi3) sraw r3,r3,r5 # MSW = MSW >> count or r4,r4,r7 # LSW |= t2 blr +EXPORT_SYMBOL(__ashrdi3) _GLOBAL(__ashldi3) subfic r6,r5,32 @@ -535,6 +540,7 @@ _GLOBAL(__ashldi3) slw r4,r4,r5 # LSW = LSW << count or r3,r3,r7 # MSW |= t2 blr +EXPORT_SYMBOL(__ashldi3) _GLOBAL(__lshrdi3) subfic r6,r5,32 @@ -546,6 +552,7 @@ _GLOBAL(__lshrdi3) srw r3,r3,r5 # MSW = MSW >> count or r4,r4,r7 # LSW |= t2 blr +EXPORT_SYMBOL(__lshrdi3) /* * 64-bit comparison: __cmpdi2(s64 a, s64 b) @@ -561,6 +568,7 @@ _GLOBAL(__cmpdi2) bltlr li r3,2 blr +EXPORT_SYMBOL(__cmpdi2) /* * 64-bit comparison: __ucmpdi2(u64 a, u64 b) * Returns 0 if a < b, 1 if a == b, 2 if a > b. @@ -575,6 +583,7 @@ _GLOBAL(__ucmpdi2) bltlr li r3,2 blr +EXPORT_SYMBOL(__ucmpdi2) _GLOBAL(__bswapdi2) rotlwi r9,r4,8 @@ -586,6 +595,7 @@ _GLOBAL(__bswapdi2) mr r3,r9 mr r4,r10 blr +EXPORT_SYMBOL(__bswapdi2) #ifdef CONFIG_SMP _GLOBAL(start_secondary_resume) diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 9f0bed214bcb..4f178671f230 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -27,6 +27,7 @@ #include <asm/kexec.h> #include <asm/ptrace.h> #include <asm/mmu.h> +#include <asm/export.h> .text @@ -110,6 +111,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) isync blr _ASM_NOKPROBE_SYMBOL(flush_icache_range) +EXPORT_SYMBOL(flush_icache_range) /* * Like above, but only do the D-cache. 
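Data symbols defined in assembly are handled the same way: the export macro follows the storage directive. On most architectures plain EXPORT_SYMBOL() is fine for data, as in the powerpc head_*.S hunks above; the lines below are condensed from the head_64.S hunk as a reminder of the pattern.

	.globl empty_zero_page			/* page of zeroes defined directly in the .S file */
	empty_zero_page:
		.space PAGE_SIZE
	EXPORT_SYMBOL(empty_zero_page)		/* data export placed right after the definition */

ia64 is the exception and uses the EXPORT_DATA_SYMBOL*() variants seen earlier, because its <asm/export.h> defines KSYM_FUNC(name) as @fptr(name): a plain EXPORT_SYMBOL() there goes through a function descriptor, which only makes sense for code, not for data like empty_zero_page or ia64_ivt.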
@@ -140,6 +142,7 @@ _GLOBAL(flush_dcache_range) bdnz 0b sync blr +EXPORT_SYMBOL(flush_dcache_range) /* * Like above, but works on non-mapped physical addresses. @@ -243,6 +246,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) blr _GLOBAL(__bswapdi2) +EXPORT_SYMBOL(__bswapdi2) srdi r8,r3,32 rlwinm r7,r3,8,0xffffffff rlwimi r7,r3,24,0,7 diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 95d3769a2e26..74bec5498972 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -56,6 +56,7 @@ static DECLARE_BITMAP(phb_bitmap, MAX_PHBS); /* ISA Memory physical address */ resource_size_t isa_mem_base; +EXPORT_SYMBOL(isa_mem_base); static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 1f7930037cb7..678f87a63645 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -32,6 +32,8 @@ unsigned long isa_io_base = 0; unsigned long pci_dram_offset = 0; int pcibios_assign_bus_offset = 1; +EXPORT_SYMBOL(isa_io_base); +EXPORT_SYMBOL(pci_dram_offset); void pcibios_make_OF_bus_map(void); diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c deleted file mode 100644 index 9f01e28ecef3..000000000000 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ /dev/null @@ -1,37 +0,0 @@ -#include <linux/ftrace.h> -#include <linux/mm.h> - -#include <asm/processor.h> -#include <asm/switch_to.h> -#include <asm/cacheflush.h> -#include <asm/epapr_hcalls.h> - -#ifdef CONFIG_PPC64 -EXPORT_SYMBOL(flush_dcache_range); -#endif -EXPORT_SYMBOL(flush_icache_range); - -EXPORT_SYMBOL(empty_zero_page); - -long long __bswapdi2(long long); -EXPORT_SYMBOL(__bswapdi2); - -#ifdef CONFIG_FUNCTION_TRACER -EXPORT_SYMBOL(_mcount); -#endif - -#ifdef CONFIG_PPC_FPU -EXPORT_SYMBOL(load_fp_state); -EXPORT_SYMBOL(store_fp_state); -#endif - -#ifdef CONFIG_ALTIVEC -EXPORT_SYMBOL(load_vr_state); -EXPORT_SYMBOL(store_vr_state); -#endif - -#ifdef CONFIG_EPAPR_PARAVIRT -EXPORT_SYMBOL(epapr_hypercall_start); -#endif - -EXPORT_SYMBOL(current_stack_pointer); diff --git a/arch/powerpc/kernel/ppc_ksyms_32.c b/arch/powerpc/kernel/ppc_ksyms_32.c deleted file mode 100644 index 2bfaafe5be99..000000000000 --- a/arch/powerpc/kernel/ppc_ksyms_32.c +++ /dev/null @@ -1,60 +0,0 @@ -#include <linux/export.h> -#include <linux/smp.h> - -#include <asm/page.h> -#include <asm/dma.h> -#include <asm/io.h> -#include <asm/hw_irq.h> -#include <asm/time.h> -#include <asm/mmu_context.h> -#include <asm/pgtable.h> -#include <asm/dcr.h> - -EXPORT_SYMBOL(ISA_DMA_THRESHOLD); -EXPORT_SYMBOL(DMA_MODE_READ); -EXPORT_SYMBOL(DMA_MODE_WRITE); - -#if defined(CONFIG_PCI) -EXPORT_SYMBOL(isa_io_base); -EXPORT_SYMBOL(isa_mem_base); -EXPORT_SYMBOL(pci_dram_offset); -#endif - -#ifdef CONFIG_SMP -EXPORT_SYMBOL(smp_hw_index); -#endif - -long long __ashrdi3(long long, int); -long long __ashldi3(long long, int); -long long __lshrdi3(long long, int); -int __ucmpdi2(unsigned long long, unsigned long long); -int __cmpdi2(long long, long long); -EXPORT_SYMBOL(__ashrdi3); -EXPORT_SYMBOL(__ashldi3); -EXPORT_SYMBOL(__lshrdi3); -EXPORT_SYMBOL(__ucmpdi2); -EXPORT_SYMBOL(__cmpdi2); - -EXPORT_SYMBOL(timer_interrupt); -EXPORT_SYMBOL(tb_ticks_per_jiffy); - -EXPORT_SYMBOL(switch_mmu_context); - -#ifdef CONFIG_PPC_STD_MMU_32 -extern long mol_trampoline; -EXPORT_SYMBOL(mol_trampoline); /* For MOL */ -EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ -#ifdef CONFIG_SMP -extern int mmu_hash_lock; -EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ 
-#endif /* CONFIG_SMP */ -extern long *intercept_table; -EXPORT_SYMBOL(intercept_table); -#endif /* CONFIG_PPC_STD_MMU_32 */ - -#ifdef CONFIG_PPC_DCR_NATIVE -EXPORT_SYMBOL(__mtdcr); -EXPORT_SYMBOL(__mfdcr); -#endif - -EXPORT_SYMBOL(flush_instruction_cache); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 24ec3ea4b3a2..5fe79182f0fa 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -16,6 +16,7 @@ #include <linux/cpu.h> #include <linux/console.h> #include <linux/memblock.h> +#include <linux/export.h> #include <asm/io.h> #include <asm/prom.h> @@ -47,11 +48,16 @@ int boot_cpuid_phys; EXPORT_SYMBOL_GPL(boot_cpuid_phys); int smp_hw_index[NR_CPUS]; +EXPORT_SYMBOL(smp_hw_index); unsigned long ISA_DMA_THRESHOLD; unsigned int DMA_MODE_READ; unsigned int DMA_MODE_WRITE; +EXPORT_SYMBOL(ISA_DMA_THRESHOLD); +EXPORT_SYMBOL(DMA_MODE_READ); +EXPORT_SYMBOL(DMA_MODE_WRITE); + /* * These are used in binfmt_elf.c to put aux entries on the stack * for each elf executable being started. diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 67859b7d1c97..bc3f7d0d7b79 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -596,6 +596,7 @@ void timer_interrupt(struct pt_regs * regs) irq_exit(); set_irq_regs(old_regs); } +EXPORT_SYMBOL(timer_interrupt); /* * Hypervisor decrementer interrupts shouldn't occur but are sometimes diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index bc85bdff4e01..0c123f3406cd 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S @@ -6,6 +6,7 @@ #include <asm/thread_info.h> #include <asm/page.h> #include <asm/ptrace.h> +#include <asm/export.h> /* * Load state from memory into VMX registers including VSCR. @@ -17,6 +18,7 @@ _GLOBAL(load_vr_state) mtvscr v0 REST_32VRS(0,r4,r3) blr +EXPORT_SYMBOL(load_vr_state) /* * Store VMX state into memory, including VSCR. 
@@ -28,6 +30,7 @@ _GLOBAL(store_vr_state) li r4, VRSTATE_VSCR stvx v0, r4, r3 blr +EXPORT_SYMBOL(store_vr_state) /* * Disable VMX for the task which had it previously, diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index ad5290005ca4..309361e86523 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -9,7 +9,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE) -obj-y += string.o alloc.o crtsavres.o ppc_ksyms.o code-patching.o \ +obj-y += string.o alloc.o crtsavres.o code-patching.o \ feature-fixups.o obj-$(CONFIG_PPC32) += div64.o copy_32.o diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S index aa8214f30c92..ea29a5d67743 100644 --- a/arch/powerpc/lib/checksum_32.S +++ b/arch/powerpc/lib/checksum_32.S @@ -17,6 +17,7 @@ #include <asm/cache.h> #include <asm/errno.h> #include <asm/ppc_asm.h> +#include <asm/export.h> .text @@ -68,6 +69,7 @@ _GLOBAL(__csum_partial) adde r5,r5,r0 5: addze r3,r5 /* add in final carry */ blr +EXPORT_SYMBOL(__csum_partial) /* * Computes the checksum of a memory block at src, length len, @@ -297,3 +299,4 @@ dst_error: .long 41b,dst_error .long 50b,src_error .long 51b,dst_error +EXPORT_SYMBOL(csum_partial_copy_generic) diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S index fdec6e613e95..fd9176671f9f 100644 --- a/arch/powerpc/lib/checksum_64.S +++ b/arch/powerpc/lib/checksum_64.S @@ -16,6 +16,7 @@ #include <asm/processor.h> #include <asm/errno.h> #include <asm/ppc_asm.h> +#include <asm/export.h> /* * Computes the checksum of a memory block at buff, length len, @@ -176,6 +177,7 @@ _GLOBAL(__csum_partial) add r3,r4,r0 srdi r3,r3,32 blr +EXPORT_SYMBOL(__csum_partial) .macro srcnr @@ -430,3 +432,4 @@ dstnr; stb r6,0(r4) li r6,-EFAULT stw r6,0(r8) blr +EXPORT_SYMBOL(csum_partial_copy_generic) diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S index 99f37f24185c..40cce33b08d6 100644 --- a/arch/powerpc/lib/copy_32.S +++ b/arch/powerpc/lib/copy_32.S @@ -12,6 +12,7 @@ #include <asm/cache.h> #include <asm/errno.h> #include <asm/ppc_asm.h> +#include <asm/export.h> #define COPY_16_BYTES \ lwz r7,4(r4); \ @@ -92,6 +93,7 @@ _GLOBAL(memset) subf r6,r0,r6 cmplwi 0,r4,0 bne 2f /* Use normal procedure if r4 is not zero */ +EXPORT_SYMBOL(memset) _GLOBAL(memset_nocache_branch) b 2f /* Skip optimised bloc until cache is enabled */ @@ -216,6 +218,8 @@ _GLOBAL(memcpy) stbu r0,1(r6) bdnz 40b 65: blr +EXPORT_SYMBOL(memcpy) +EXPORT_SYMBOL(memmove) generic_memcpy: srwi. 
r7,r5,3 @@ -507,3 +511,4 @@ _GLOBAL(__copy_tofrom_user) .long 112b,120b .long 114b,120b .text +EXPORT_SYMBOL(__copy_tofrom_user) diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index a3c4dc4defdd..21367b3a8146 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S @@ -10,6 +10,7 @@ #include <asm/processor.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> +#include <asm/export.h> .section ".toc","aw" PPC64_CACHES: @@ -110,3 +111,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ) std r11,120(r3) std r12,128(r3) blr +EXPORT_SYMBOL(copy_page) diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index 7b22624f332c..60386b2c99bb 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -8,6 +8,7 @@ */ #include <asm/processor.h> #include <asm/ppc_asm.h> +#include <asm/export.h> #ifdef __BIG_ENDIAN__ #define sLd sld /* Shift towards low-numbered address. */ @@ -671,3 +672,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) .llong 89b,100b .llong 90b,100b .llong 91b,100b +EXPORT_SYMBOL(__copy_tofrom_user) diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S index 19e66001a4f9..3de7ac154f24 100644 --- a/arch/powerpc/lib/hweight_64.S +++ b/arch/powerpc/lib/hweight_64.S @@ -19,6 +19,7 @@ */ #include <asm/processor.h> #include <asm/ppc_asm.h> +#include <asm/export.h> /* Note: This code relies on -mminimal-toc */ @@ -32,6 +33,7 @@ FTR_SECTION_ELSE clrldi r3,r3,64-8 blr ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) +EXPORT_SYMBOL(__arch_hweight8) _GLOBAL(__arch_hweight16) BEGIN_FTR_SECTION @@ -54,6 +56,7 @@ FTR_SECTION_ELSE blr ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50) ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) +EXPORT_SYMBOL(__arch_hweight16) _GLOBAL(__arch_hweight32) BEGIN_FTR_SECTION @@ -79,6 +82,7 @@ FTR_SECTION_ELSE blr ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51) ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) +EXPORT_SYMBOL(__arch_hweight32) _GLOBAL(__arch_hweight64) BEGIN_FTR_SECTION @@ -108,3 +112,4 @@ FTR_SECTION_ELSE blr ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52) ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) +EXPORT_SYMBOL(__arch_hweight64) diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index eda7a96161ab..85fa9869aec5 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S @@ -11,6 +11,7 @@ #include <asm/processor.h> #include <asm/errno.h> #include <asm/ppc_asm.h> +#include <asm/export.h> _GLOBAL(memset) neg r0,r3 @@ -77,6 +78,7 @@ _GLOBAL(memset) 10: bflr 31 stb r4,0(r6) blr +EXPORT_SYMBOL(memset) _GLOBAL_TOC(memmove) cmplw 0,r3,r4 @@ -119,3 +121,4 @@ _GLOBAL(backwards_memcpy) beq 2b mtctr r7 b 1b +EXPORT_SYMBOL(memmove) diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S index 8953d2382a65..d75d18b7bd55 100644 --- a/arch/powerpc/lib/memcmp_64.S +++ b/arch/powerpc/lib/memcmp_64.S @@ -8,6 +8,7 @@ * 2 of the License, or (at your option) any later version. 
*/ #include <asm/ppc_asm.h> +#include <asm/export.h> #define off8 r6 #define off16 r7 @@ -231,3 +232,4 @@ _GLOBAL(memcmp) ld r28,-32(r1) ld r27,-40(r1) blr +EXPORT_SYMBOL(memcmp) diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index 32a06ec395d2..f4d6088e2d53 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S @@ -8,6 +8,7 @@ */ #include <asm/processor.h> #include <asm/ppc_asm.h> +#include <asm/export.h> .align 7 _GLOBAL_TOC(memcpy) @@ -219,3 +220,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr #endif +EXPORT_SYMBOL(memcpy) diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c deleted file mode 100644 index ae69d846a841..000000000000 --- a/arch/powerpc/lib/ppc_ksyms.c +++ /dev/null @@ -1,29 +0,0 @@ -#include <linux/string.h> -#include <linux/uaccess.h> -#include <linux/bitops.h> -#include <net/checksum.h> - -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memcmp); -EXPORT_SYMBOL(memchr); - -EXPORT_SYMBOL(strncpy); -EXPORT_SYMBOL(strncmp); - -#ifndef CONFIG_GENERIC_CSUM -EXPORT_SYMBOL(__csum_partial); -EXPORT_SYMBOL(csum_partial_copy_generic); -#endif - -EXPORT_SYMBOL(__copy_tofrom_user); -EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(copy_page); - -#ifdef CONFIG_PPC64 -EXPORT_SYMBOL(__arch_hweight8); -EXPORT_SYMBOL(__arch_hweight16); -EXPORT_SYMBOL(__arch_hweight32); -EXPORT_SYMBOL(__arch_hweight64); -#endif diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S index beabc68d9a1e..d13e07603519 100644 --- a/arch/powerpc/lib/string.S +++ b/arch/powerpc/lib/string.S @@ -11,6 +11,7 @@ #include <asm/processor.h> #include <asm/errno.h> #include <asm/ppc_asm.h> +#include <asm/export.h> .section __ex_table,"a" PPC_LONG_ALIGN @@ -36,6 +37,7 @@ _GLOBAL(strncpy) 2: stbu r0,1(r6) /* clear it out if so */ bdnz 2b blr +EXPORT_SYMBOL(strncpy) _GLOBAL(strncmp) PPC_LCMPI 0,r5,0 @@ -53,6 +55,7 @@ _GLOBAL(strncmp) blr 2: li r3,0 blr +EXPORT_SYMBOL(strncmp) #ifdef CONFIG_PPC32 _GLOBAL(memcmp) @@ -68,6 +71,7 @@ _GLOBAL(memcmp) blr 2: li r3,0 blr +EXPORT_SYMBOL(memcmp) #endif _GLOBAL(memchr) @@ -82,6 +86,7 @@ _GLOBAL(memchr) beqlr 2: li r3,0 blr +EXPORT_SYMBOL(memchr) #ifdef CONFIG_PPC32 _GLOBAL(__clear_user) @@ -125,4 +130,5 @@ _GLOBAL(__clear_user) PPC_LONG 1b,91b PPC_LONG 8b,92b .text +EXPORT_SYMBOL(__clear_user) #endif diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S index 7bd9549a90a2..57ace356c949 100644 --- a/arch/powerpc/lib/string_64.S +++ b/arch/powerpc/lib/string_64.S @@ -20,6 +20,7 @@ #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> +#include <asm/export.h> .section ".toc","aw" PPC64_CACHES: @@ -200,3 +201,4 @@ err1; dcbz r0,r3 cmpdi r4,32 blt .Lshort_clear b .Lmedium_clear +EXPORT_SYMBOL(__clear_user) diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S index 115347f74ce5..09cc50c8dace 100644 --- a/arch/powerpc/mm/hash_low_32.S +++ b/arch/powerpc/mm/hash_low_32.S @@ -26,6 +26,7 @@ #include <asm/ppc_asm.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> +#include <asm/export.h> #ifdef CONFIG_SMP .section .bss @@ -33,6 +34,7 @@ .globl mmu_hash_lock mmu_hash_lock: .space 4 +EXPORT_SYMBOL(mmu_hash_lock) #endif /* CONFIG_SMP */ /* @@ -575,6 +577,7 @@ _GLOBAL(flush_hash_pages) rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */ stwcx. 
r8,0,r5 /* update the pte */ bne- 33b +EXPORT_SYMBOL(flush_hash_pages) /* Get the address of the primary PTE group in the hash table (r3) */ _GLOBAL(flush_hash_patch_A) diff --git a/arch/powerpc/relocs_check.sh b/arch/powerpc/relocs_check.sh index 2e4ebd0e25b3..ec2d5c835170 100755 --- a/arch/powerpc/relocs_check.sh +++ b/arch/powerpc/relocs_check.sh @@ -30,6 +30,7 @@ bad_relocs=$( # On PPC64: # R_PPC64_RELATIVE, R_PPC64_NONE # R_PPC64_ADDR64 mach_<name> + # R_PPC64_ADDR64 __crc_<name> # On PPC: # R_PPC_RELATIVE, R_PPC_ADDR16_HI, # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO, @@ -41,7 +42,8 @@ R_PPC_ADDR16_HI R_PPC_ADDR16_HA R_PPC_RELATIVE R_PPC_NONE' | - grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' + grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' | + grep -E -v '\<R_PPC64_ADDR64[[:space:]]+__crc_' ) if [ -z "$bad_relocs" ]; then diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S index d3098ef1404a..e687bb2003ff 100644 --- a/arch/powerpc/sysdev/dcr-low.S +++ b/arch/powerpc/sysdev/dcr-low.S @@ -12,6 +12,7 @@ #include <asm/ppc_asm.h> #include <asm/processor.h> #include <asm/bug.h> +#include <asm/export.h> #define DCR_ACCESS_PROLOG(table) \ cmpli cr0,r3,1024; \ @@ -28,9 +29,11 @@ _GLOBAL(__mfdcr) DCR_ACCESS_PROLOG(__mfdcr_table) +EXPORT_SYMBOL(__mfdcr) _GLOBAL(__mtdcr) DCR_ACCESS_PROLOG(__mtdcr_table) +EXPORT_SYMBOL(__mtdcr) __mfdcr_table: mfdcr r3,0; blr diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 9043d2e1e2ae..20f196b82a6e 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -1,6 +1,7 @@ generic-y += clkdev.h +generic-y += export.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 72ccc41444dc..1f0fe98f6db9 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -61,7 +61,7 @@ obj-y += entry.o reipl.o relocate_kernel.o extra-y += head.o head64.o vmlinux.lds -obj-$(CONFIG_MODULES) += s390_ksyms.o module.o +obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index c51650a1ed16..49a30737adde 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -23,6 +23,7 @@ #include <asm/vx-insn.h> #include <asm/setup.h> #include <asm/nmi.h> +#include <asm/export.h> __PT_R0 = __PT_GPRS __PT_R1 = __PT_GPRS + 8 @@ -259,6 +260,8 @@ sie_exit: EX_TABLE(.Lrewind_pad,.Lsie_fault) EX_TABLE(sie_exit,.Lsie_fault) +EXPORT_SYMBOL(sie64a) +EXPORT_SYMBOL(sie_exit) #endif /* @@ -825,6 +828,9 @@ ENTRY(save_fpu_regs) oi __LC_CPU_FLAGS+7,_CIF_FPU br %r14 .Lsave_fpu_regs_end: +#if IS_ENABLED(CONFIG_KVM) +EXPORT_SYMBOL(save_fpu_regs) +#endif /* * Load floating-point controls and floating-point or vector registers. 
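Two details in the hunks above are worth spelling out. First, because <asm/export.h> provides ordinary preprocessor/assembler macros, an export can sit under the same conditionals as in C, as the s390 entry.S hunk does with #if IS_ENABLED(CONFIG_KVM). Second, the relocs_check.sh change whitelists R_PPC64_ADDR64 relocations against __crc_<name>: with CONFIG_MODVERSIONS the export macros also reference a __crc_<symbol> version checksum, and on a relocatable vmlinux that reference shows up as a harmless absolute relocation. The sketch below gives a rough, simplified idea of what the generic asm EXPORT_SYMBOL() emits for a symbol called name; alignment, the KSYM()/KSYM_FUNC() hooks and the exact CRC handling are omitted or simplified here.

	/* very roughly, EXPORT_SYMBOL(name) from asm-generic/export.h emits: */
		.section ___ksymtab+name, "a"	/* one struct kernel_symbol entry */
	__ksymtab_name:
		.quad	name			/* address of the exported symbol (.long on 32-bit) */
		.quad	__kstrtab_name		/* pointer to its name string */
		.previous
		.section __ksymtab_strings, "a"
	__kstrtab_name:
		.asciz	"name"
		.previous
	/* with CONFIG_MODVERSIONS a further table entry referencing __crc_name is emitted,
	 * which is where the __crc_<name> relocations now tolerated by relocs_check.sh come from */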
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index e499370fbccb..9a17e4475d27 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -9,6 +9,7 @@ #include <asm/asm-offsets.h> #include <asm/ftrace.h> #include <asm/ptrace.h> +#include <asm/export.h> .section .kprobes.text, "ax" @@ -23,6 +24,8 @@ ENTRY(ftrace_stub) ENTRY(_mcount) br %r14 +EXPORT_SYMBOL(_mcount) + ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c deleted file mode 100644 index e67453b73c3c..000000000000 --- a/arch/s390/kernel/s390_ksyms.c +++ /dev/null @@ -1,15 +0,0 @@ -#include <linux/module.h> -#include <linux/kvm_host.h> -#include <asm/fpu/api.h> -#include <asm/ftrace.h> - -#ifdef CONFIG_FUNCTION_TRACER -EXPORT_SYMBOL(_mcount); -#endif -#if IS_ENABLED(CONFIG_KVM) -EXPORT_SYMBOL(sie64a); -EXPORT_SYMBOL(sie_exit); -EXPORT_SYMBOL(save_fpu_regs); -#endif -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memset); diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S index c6d553e85ab1..be9fa65bfac4 100644 --- a/arch/s390/lib/mem.S +++ b/arch/s390/lib/mem.S @@ -5,6 +5,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> /* * memset implementation @@ -60,6 +61,7 @@ ENTRY(memset) xc 0(1,%r1),0(%r1) .Lmemset_mvc: mvc 1(1,%r1),0(%r1) +EXPORT_SYMBOL(memset) /* * memcpy implementation @@ -86,3 +88,4 @@ ENTRY(memcpy) j .Lmemcpy_rest .Lmemcpy_mvc: mvc 0(1,%r1),0(%r3) +EXPORT_SYMBOL(memcpy) diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 6024c26c0585..cfc918067f80 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -6,6 +6,7 @@ generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h +generic-y += export.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += linkage.h diff --git a/arch/sparc/include/asm/string.h b/arch/sparc/include/asm/string.h index 98b72a0c8e6e..86f34be14ce0 100644 --- a/arch/sparc/include/asm/string.h +++ b/arch/sparc/include/asm/string.h @@ -5,4 +5,38 @@ #else #include <asm/string_32.h> #endif + +/* First the mem*() things. */ +#define __HAVE_ARCH_MEMMOVE +void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMCPY +#define memcpy(t, f, n) __builtin_memcpy(t, f, n) + +#define __HAVE_ARCH_MEMSET +#define memset(s, c, count) __builtin_memset(s, c, count) + +#define __HAVE_ARCH_MEMSCAN + +#define memscan(__arg0, __char, __arg2) \ +({ \ + void *__memscan_zero(void *, size_t); \ + void *__memscan_generic(void *, int, size_t); \ + void *__retval, *__addr = (__arg0); \ + size_t __size = (__arg2); \ + \ + if(__builtin_constant_p(__char) && !(__char)) \ + __retval = __memscan_zero(__addr, __size); \ + else \ + __retval = __memscan_generic(__addr, (__char), __size); \ + \ + __retval; \ +}) + +#define __HAVE_ARCH_MEMCMP +int memcmp(const void *,const void *,__kernel_size_t); + +#define __HAVE_ARCH_STRNCMP +int strncmp(const char *, const char *, __kernel_size_t); + #endif diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h index 69974e924611..649412476a69 100644 --- a/arch/sparc/include/asm/string_32.h +++ b/arch/sparc/include/asm/string_32.h @@ -11,60 +11,4 @@ #include <asm/page.h> -/* Really, userland/ksyms should not see any of this stuff. */ - -#ifdef __KERNEL__ - -void __memmove(void *,const void *,__kernel_size_t); - -#ifndef EXPORT_SYMTAB_STROPS - -/* First the mem*() things. 
*/ -#define __HAVE_ARCH_MEMMOVE -#undef memmove -#define memmove(_to, _from, _n) \ -({ \ - void *_t = (_to); \ - __memmove(_t, (_from), (_n)); \ - _t; \ -}) - -#define __HAVE_ARCH_MEMCPY -#define memcpy(t, f, n) __builtin_memcpy(t, f, n) - -#define __HAVE_ARCH_MEMSET -#define memset(s, c, count) __builtin_memset(s, c, count) - -#define __HAVE_ARCH_MEMSCAN - -#undef memscan -#define memscan(__arg0, __char, __arg2) \ -({ \ - void *__memscan_zero(void *, size_t); \ - void *__memscan_generic(void *, int, size_t); \ - void *__retval, *__addr = (__arg0); \ - size_t __size = (__arg2); \ - \ - if(__builtin_constant_p(__char) && !(__char)) \ - __retval = __memscan_zero(__addr, __size); \ - else \ - __retval = __memscan_generic(__addr, (__char), __size); \ - \ - __retval; \ -}) - -#define __HAVE_ARCH_MEMCMP -int memcmp(const void *,const void *,__kernel_size_t); - -/* Now the str*() stuff... */ -#define __HAVE_ARCH_STRLEN -__kernel_size_t strlen(const char *); - -#define __HAVE_ARCH_STRNCMP -int strncmp(const char *, const char *, __kernel_size_t); - -#endif /* !EXPORT_SYMTAB_STROPS */ - -#endif /* __KERNEL__ */ - #endif /* !(__SPARC_STRING_H__) */ diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h index 5936b8ff3c05..6b9ccb308605 100644 --- a/arch/sparc/include/asm/string_64.h +++ b/arch/sparc/include/asm/string_64.h @@ -9,54 +9,10 @@ #ifndef __SPARC64_STRING_H__ #define __SPARC64_STRING_H__ -/* Really, userland/ksyms should not see any of this stuff. */ - -#ifdef __KERNEL__ - #include <asm/asi.h> -#ifndef EXPORT_SYMTAB_STROPS - -/* First the mem*() things. */ -#define __HAVE_ARCH_MEMMOVE -void *memmove(void *, const void *, __kernel_size_t); - -#define __HAVE_ARCH_MEMCPY -#define memcpy(t, f, n) __builtin_memcpy(t, f, n) - -#define __HAVE_ARCH_MEMSET -#define memset(s, c, count) __builtin_memset(s, c, count) - -#define __HAVE_ARCH_MEMSCAN - -#undef memscan -#define memscan(__arg0, __char, __arg2) \ -({ \ - void *__memscan_zero(void *, size_t); \ - void *__memscan_generic(void *, int, size_t); \ - void *__retval, *__addr = (__arg0); \ - size_t __size = (__arg2); \ - \ - if(__builtin_constant_p(__char) && !(__char)) \ - __retval = __memscan_zero(__addr, __size); \ - else \ - __retval = __memscan_generic(__addr, (__char), __size); \ - \ - __retval; \ -}) - -#define __HAVE_ARCH_MEMCMP -int memcmp(const void *,const void *,__kernel_size_t); - /* Now the str*() stuff... 
*/ #define __HAVE_ARCH_STRLEN __kernel_size_t strlen(const char *); -#define __HAVE_ARCH_STRNCMP -int strncmp(const char *, const char *, __kernel_size_t); - -#endif /* !EXPORT_SYMTAB_STROPS */ - -#endif /* __KERNEL__ */ - #endif /* !(__SPARC64_STRING_H__) */ diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index fdb13327fded..fa3c02d41138 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -86,7 +86,7 @@ obj-y += auxio_$(BITS).o obj-$(CONFIG_SUN_PM) += apc.o pmc.o obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o +obj-$(CONFIG_MODULES) += sparc_ksyms.o obj-$(CONFIG_SPARC_LED) += led.o obj-$(CONFIG_KGDB) += kgdb_$(BITS).o diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 07918ab3062e..d85bdb999819 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -29,6 +29,7 @@ #include <asm/unistd.h> #include <asm/asmmacro.h> +#include <asm/export.h> #define curptr g6 @@ -1207,6 +1208,8 @@ delay_continue: ret restore +EXPORT_SYMBOL(__udelay) +EXPORT_SYMBOL(__ndelay) /* Handle a software breakpoint */ /* We have to inform parent that child has stopped */ diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index 3d92c0a8f6c4..7bb317b87dde 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S @@ -24,6 +24,7 @@ #include <asm/thread_info.h> /* TI_UWINMASK */ #include <asm/errno.h> #include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */ +#include <asm/export.h> .data /* The following are used with the prom_vector node-ops to figure out @@ -60,6 +61,7 @@ sun4e_notsup: */ .globl empty_zero_page empty_zero_page: .skip PAGE_SIZE +EXPORT_SYMBOL(empty_zero_page) .global root_flags .global ram_flags @@ -813,3 +815,4 @@ lvl14_save: __ret_efault: ret restore %g0, -EFAULT, %o0 +EXPORT_SYMBOL(__ret_efault) diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index a076b4249e62..beba6c11554c 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -32,7 +32,8 @@ #include <asm/estate.h> #include <asm/sfafsr.h> #include <asm/unistd.h> - +#include <asm/export.h> + /* This section from from _start to sparc64_boot_end should fit into * 0x0000000000404000 to 0x0000000000408000. */ @@ -143,6 +144,7 @@ prom_cpu_compatible: .skip 64 prom_root_node: .word 0 +EXPORT_SYMBOL(prom_root_node) prom_mmu_ihandle_cache: .word 0 prom_boot_mapped_pc: @@ -158,6 +160,7 @@ is_sun4v: .word 0 sun4v_chip_type: .word SUN4V_CHIP_INVALID +EXPORT_SYMBOL(sun4v_chip_type) 1: rd %pc, %l0 @@ -920,6 +923,7 @@ swapper_4m_tsb: .globl prom_tba, tlb_type prom_tba: .xword 0 tlb_type: .word 0 /* Must NOT end up in BSS */ +EXPORT_SYMBOL(tlb_type) .section ".fixup",#alloc,#execinstr .globl __ret_efault, __retl_efault, __ret_one, __retl_one @@ -927,6 +931,7 @@ ENTRY(__ret_efault) ret restore %g0, -EFAULT, %o0 ENDPROC(__ret_efault) +EXPORT_SYMBOL(__ret_efault) ENTRY(__retl_efault) retl diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S index 314dd0c9fc5b..e4e5b832fcb6 100644 --- a/arch/sparc/kernel/helpers.S +++ b/arch/sparc/kernel/helpers.S @@ -15,6 +15,7 @@ __flushw_user: 2: retl nop .size __flushw_user,.-__flushw_user +EXPORT_SYMBOL(__flushw_user) /* Flush %fp and %i7 to the stack for all register * windows active inside of the cpu. 
This allows @@ -61,3 +62,4 @@ real_hard_smp_processor_id: .size hard_smp_processor_id,.-hard_smp_processor_id #endif .size real_hard_smp_processor_id,.-real_hard_smp_processor_id +EXPORT_SYMBOL_GPL(real_hard_smp_processor_id) diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index d127130bf424..4116ee5c7791 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -343,6 +343,7 @@ ENTRY(sun4v_mach_set_watchdog) 0: retl nop ENDPROC(sun4v_mach_set_watchdog) +EXPORT_SYMBOL(sun4v_mach_set_watchdog) /* No inputs and does not return. */ ENTRY(sun4v_mach_sir) @@ -776,6 +777,7 @@ ENTRY(sun4v_niagara_getperf) retl nop ENDPROC(sun4v_niagara_getperf) +EXPORT_SYMBOL(sun4v_niagara_getperf) ENTRY(sun4v_niagara_setperf) mov HV_FAST_SET_PERFREG, %o5 @@ -783,6 +785,7 @@ ENTRY(sun4v_niagara_setperf) retl nop ENDPROC(sun4v_niagara_setperf) +EXPORT_SYMBOL(sun4v_niagara_setperf) ENTRY(sun4v_niagara2_getperf) mov %o0, %o4 @@ -792,6 +795,7 @@ ENTRY(sun4v_niagara2_getperf) retl nop ENDPROC(sun4v_niagara2_getperf) +EXPORT_SYMBOL(sun4v_niagara2_getperf) ENTRY(sun4v_niagara2_setperf) mov HV_FAST_N2_SET_PERFREG, %o5 @@ -799,6 +803,7 @@ ENTRY(sun4v_niagara2_setperf) retl nop ENDPROC(sun4v_niagara2_setperf) +EXPORT_SYMBOL(sun4v_niagara2_setperf) ENTRY(sun4v_reboot_data_set) mov HV_FAST_REBOOT_DATA_SET, %o5 diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c new file mode 100644 index 000000000000..09aa69e422e5 --- /dev/null +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -0,0 +1,12 @@ +/* + * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) + */ + +#include <linux/init.h> +#include <linux/export.h> + +/* This is needed only for drivers/sbus/char/openprom.c */ +EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c deleted file mode 100644 index bf4ccb10a78c..000000000000 --- a/arch/sparc/kernel/sparc_ksyms_32.c +++ /dev/null @@ -1,31 +0,0 @@ -/* - * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. - * - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) - */ - -#include <linux/module.h> - -#include <asm/pgtable.h> -#include <asm/uaccess.h> -#include <asm/delay.h> -#include <asm/head.h> -#include <asm/dma.h> - -struct poll { - int fd; - short events; - short revents; -}; - -/* from entry.S */ -EXPORT_SYMBOL(__udelay); -EXPORT_SYMBOL(__ndelay); - -/* from head_32.S */ -EXPORT_SYMBOL(__ret_efault); -EXPORT_SYMBOL(empty_zero_page); - -/* Exporting a symbol from /init/main.c */ -EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c deleted file mode 100644 index 9e034f29dcc5..000000000000 --- a/arch/sparc/kernel/sparc_ksyms_64.c +++ /dev/null @@ -1,53 +0,0 @@ -/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support. - * - * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net) - * Copyright (C) 1996 Eddie C. 
Dost (ecd@skynet.be) - * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) - */ - -#include <linux/export.h> -#include <linux/pci.h> -#include <linux/bitops.h> - -#include <asm/cpudata.h> -#include <asm/uaccess.h> -#include <asm/spitfire.h> -#include <asm/oplib.h> -#include <asm/hypervisor.h> -#include <asm/cacheflush.h> - -struct poll { - int fd; - short events; - short revents; -}; - -/* from helpers.S */ -EXPORT_SYMBOL(__flushw_user); -EXPORT_SYMBOL_GPL(real_hard_smp_processor_id); - -/* from head_64.S */ -EXPORT_SYMBOL(__ret_efault); -EXPORT_SYMBOL(tlb_type); -EXPORT_SYMBOL(sun4v_chip_type); -EXPORT_SYMBOL(prom_root_node); - -/* from hvcalls.S */ -EXPORT_SYMBOL(sun4v_niagara_getperf); -EXPORT_SYMBOL(sun4v_niagara_setperf); -EXPORT_SYMBOL(sun4v_niagara2_getperf); -EXPORT_SYMBOL(sun4v_niagara2_setperf); -EXPORT_SYMBOL(sun4v_mach_set_watchdog); - -/* from hweight.S */ -EXPORT_SYMBOL(__arch_hweight8); -EXPORT_SYMBOL(__arch_hweight16); -EXPORT_SYMBOL(__arch_hweight32); -EXPORT_SYMBOL(__arch_hweight64); - -/* from ffs_ffz.S */ -EXPORT_SYMBOL(ffs); -EXPORT_SYMBOL(__ffs); - -/* Exporting a symbol from /init/main.c */ -EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 3269b0234093..885f00e81d1a 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -43,5 +43,4 @@ lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o obj-$(CONFIG_SPARC64) += iomap.o obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o -obj-y += ksyms.o obj-$(CONFIG_SPARC64) += PeeCeeI.o diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S index 3e6209ebb7d7..97e1b211090c 100644 --- a/arch/sparc/lib/U1memcpy.S +++ b/arch/sparc/lib/U1memcpy.S @@ -7,6 +7,7 @@ #ifdef __KERNEL__ #include <asm/visasm.h> #include <asm/asi.h> +#include <asm/export.h> #define GLOBAL_SPARE g7 #else #define GLOBAL_SPARE g5 @@ -567,3 +568,4 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ mov EX_RETVAL(%o4), %o0 .size FUNC_NAME, .-FUNC_NAME +EXPORT_SYMBOL(FUNC_NAME) diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S index 62c2647bd5ce..1c7b6a39b942 100644 --- a/arch/sparc/lib/VISsave.S +++ b/arch/sparc/lib/VISsave.S @@ -13,6 +13,7 @@ #include <asm/ptrace.h> #include <asm/visasm.h> #include <asm/thread_info.h> +#include <asm/export.h> /* On entry: %o5=current FPRS value, %g7 is callers address */ /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */ @@ -79,3 +80,4 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 80: jmpl %g7 + %g0, %g0 nop ENDPROC(VISenter) +EXPORT_SYMBOL(VISenter) diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S index 86f60de07b0a..c8b1cf71bc73 100644 --- a/arch/sparc/lib/ashldi3.S +++ b/arch/sparc/lib/ashldi3.S @@ -6,6 +6,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(__ashldi3) @@ -33,3 +34,4 @@ ENTRY(__ashldi3) retl nop ENDPROC(__ashldi3) +EXPORT_SYMBOL(__ashldi3) diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S index 6eb8ba2dd50e..4310256e7964 100644 --- a/arch/sparc/lib/ashrdi3.S +++ b/arch/sparc/lib/ashrdi3.S @@ -6,6 +6,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(__ashrdi3) @@ -35,3 +36,4 @@ ENTRY(__ashrdi3) jmpl %o7 + 8, %g0 nop ENDPROC(__ashrdi3) +EXPORT_SYMBOL(__ashrdi3) diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index a5c5a0279ccc..1c6a1bde5138 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/asi.h> #include <asm/backoff.h> +#include <asm/export.h> .text 
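The sparc hunks before and after this point all follow one pattern: the big per-architecture ksyms translation units go away (only a tiny sparc_ksyms.c survives, for saved_command_line), and every assembler-defined symbol is instead exported right next to its definition with the asm-level EXPORT_SYMBOL() pulled in via <asm/export.h>. For contrast, here is a condensed sketch of the arrangement being deleted: a C file whose only job is to export assembler routines. The symbol names are taken from the removed sparc_ksyms_64.c above; the prototypes are illustrative stand-ins, since the real file picked its declarations up from asm headers such as <asm/hypervisor.h> and <asm/cacheflush.h>.

#include <linux/export.h>

/* illustrative prototypes for entry points defined in assembler */
extern void __flushw_user(void);                /* helpers.S */
extern int real_hard_smp_processor_id(void);    /* helpers.S */
extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
                                             unsigned long *orig_timeout);
                                                /* hvcalls.S */

EXPORT_SYMBOL(__flushw_user);
EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
EXPORT_SYMBOL(sun4v_mach_set_watchdog);

After the conversion each of those EXPORT_SYMBOL() lines lives in the .S file, directly under the routine it exports, which is exactly what the helpers.S and hvcalls.S hunks above add.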
@@ -29,6 +30,7 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic_##op); \ +EXPORT_SYMBOL(atomic_##op); #define ATOMIC_OP_RETURN(op) \ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -42,7 +44,8 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic_##op##_return); +ENDPROC(atomic_##op##_return); \ +EXPORT_SYMBOL(atomic_##op##_return); #define ATOMIC_FETCH_OP(op) \ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -56,7 +59,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic_fetch_##op); +ENDPROC(atomic_fetch_##op); \ +EXPORT_SYMBOL(atomic_fetch_##op); #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) @@ -88,6 +92,7 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic64_##op); \ +EXPORT_SYMBOL(atomic64_##op); #define ATOMIC64_OP_RETURN(op) \ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -101,7 +106,8 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ op %g1, %o0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic64_##op##_return); +ENDPROC(atomic64_##op##_return); \ +EXPORT_SYMBOL(atomic64_##op##_return); #define ATOMIC64_FETCH_OP(op) \ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -115,7 +121,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ mov %g1, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic64_fetch_##op); +ENDPROC(atomic64_fetch_##op); \ +EXPORT_SYMBOL(atomic64_fetch_##op); #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) @@ -147,3 +154,4 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ sub %g1, 1, %o0 2: BACKOFF_SPIN(%o2, %o3, 1b) ENDPROC(atomic64_dec_if_positive) +EXPORT_SYMBOL(atomic64_dec_if_positive) diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S index 36f72cc0e67e..7031bf1587cb 100644 --- a/arch/sparc/lib/bitops.S +++ b/arch/sparc/lib/bitops.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/asi.h> #include <asm/backoff.h> +#include <asm/export.h> .text @@ -29,6 +30,7 @@ ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(test_and_set_bit) +EXPORT_SYMBOL(test_and_set_bit) ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -50,6 +52,7 @@ ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(test_and_clear_bit) +EXPORT_SYMBOL(test_and_clear_bit) ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -71,6 +74,7 @@ ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(test_and_change_bit) +EXPORT_SYMBOL(test_and_change_bit) ENTRY(set_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -90,6 +94,7 @@ ENTRY(set_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(set_bit) +EXPORT_SYMBOL(set_bit) ENTRY(clear_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -109,6 +114,7 @@ ENTRY(clear_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(clear_bit) +EXPORT_SYMBOL(clear_bit) ENTRY(change_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -128,3 +134,4 @@ ENTRY(change_bit) /* %o0=nr, %o1=addr */ nop 2: 
BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(change_bit) +EXPORT_SYMBOL(change_bit) diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S index 3c771011ff4b..1f2692d59d18 100644 --- a/arch/sparc/lib/blockops.S +++ b/arch/sparc/lib/blockops.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/page.h> +#include <asm/export.h> /* Zero out 64 bytes of memory at (buf + offset). * Assumes %g1 contains zero. @@ -64,6 +65,7 @@ ENTRY(bzero_1page) retl nop ENDPROC(bzero_1page) +EXPORT_SYMBOL(bzero_1page) ENTRY(__copy_1page) /* NOTE: If you change the number of insns of this routine, please check @@ -87,3 +89,4 @@ ENTRY(__copy_1page) retl nop ENDPROC(__copy_1page) +EXPORT_SYMBOL(__copy_1page) diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S index 8c058114b649..3bb1914c4fa4 100644 --- a/arch/sparc/lib/bzero.S +++ b/arch/sparc/lib/bzero.S @@ -5,6 +5,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text @@ -78,6 +79,8 @@ __bzero_done: mov %o3, %o0 ENDPROC(__bzero) ENDPROC(memset) +EXPORT_SYMBOL(__bzero) +EXPORT_SYMBOL(memset) #define EX_ST(x,y) \ 98: x,y; \ @@ -143,3 +146,4 @@ __clear_user_done: retl clr %o0 ENDPROC(__clear_user) +EXPORT_SYMBOL(__clear_user) diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 0084c3361e15..c9d8b6232111 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S @@ -14,6 +14,7 @@ */ #include <asm/errno.h> +#include <asm/export.h> #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \ ldd [buf + offset + 0x00], t0; \ @@ -104,6 +105,7 @@ csum_partial_fix_alignment: * buffer of size 0x20. Follow the code path for that case. */ .globl csum_partial + EXPORT_SYMBOL(csum_partial) csum_partial: /* %o0=buf, %o1=len, %o2=sum */ andcc %o0, 0x7, %g0 ! alignment problems? bne csum_partial_fix_alignment ! yep, handle it @@ -335,6 +337,7 @@ cc_dword_align: */ .align 8 .globl __csum_partial_copy_sparc_generic + EXPORT_SYMBOL(__csum_partial_copy_sparc_generic) __csum_partial_copy_sparc_generic: /* %o0=src, %o1=dest, %g1=len, %g7=sum */ xor %o0, %o1, %o4 ! 
get changing bits diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S index 1d230f693dc4..f6732174fe6b 100644 --- a/arch/sparc/lib/checksum_64.S +++ b/arch/sparc/lib/checksum_64.S @@ -13,6 +13,7 @@ * BSD4.4 portable checksum routine */ +#include <asm/export.h> .text csum_partial_fix_alignment: @@ -37,6 +38,7 @@ csum_partial_fix_alignment: .align 32 .globl csum_partial + EXPORT_SYMBOL(csum_partial) csum_partial: /* %o0=buff, %o1=len, %o2=sum */ prefetch [%o0 + 0x000], #n_reads clr %o4 diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S index 46272dfc26e8..f30d6b78afbd 100644 --- a/arch/sparc/lib/clear_page.S +++ b/arch/sparc/lib/clear_page.S @@ -10,6 +10,7 @@ #include <asm/pgtable.h> #include <asm/spitfire.h> #include <asm/head.h> +#include <asm/export.h> /* What we used to do was lock a TLB entry into a specific * TLB slot, clear the page with interrupts disabled, then @@ -26,6 +27,7 @@ .text .globl _clear_page + EXPORT_SYMBOL(_clear_page) _clear_page: /* %o0=dest */ ba,pt %xcc, clear_page_common clr %o4 @@ -35,6 +37,7 @@ _clear_page: /* %o0=dest */ */ .align 32 .globl clear_user_page + EXPORT_SYMBOL(clear_user_page) clear_user_page: /* %o0=dest, %o1=vaddr */ lduw [%g6 + TI_PRE_COUNT], %o2 sethi %hi(PAGE_OFFSET), %g2 diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S index 302c0e60dc2c..482de093bdae 100644 --- a/arch/sparc/lib/copy_in_user.S +++ b/arch/sparc/lib/copy_in_user.S @@ -5,6 +5,7 @@ #include <linux/linkage.h> #include <asm/asi.h> +#include <asm/export.h> #define XCC xcc @@ -90,3 +91,4 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ retl clr %o0 ENDPROC(___copy_in_user) +EXPORT_SYMBOL(___copy_in_user) diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S index dd16c61f3263..7197b7250895 100644 --- a/arch/sparc/lib/copy_page.S +++ b/arch/sparc/lib/copy_page.S @@ -10,6 +10,7 @@ #include <asm/pgtable.h> #include <asm/spitfire.h> #include <asm/head.h> +#include <asm/export.h> /* What we used to do was lock a TLB entry into a specific * TLB slot, clear the page with interrupts disabled, then @@ -44,6 +45,7 @@ .align 32 .globl copy_user_page .type copy_user_page,#function + EXPORT_SYMBOL(copy_user_page) copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ lduw [%g6 + TI_PRE_COUNT], %o4 sethi %hi(PAGE_OFFSET), %g2 diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S index ef095b6c43b1..cea644dc67a6 100644 --- a/arch/sparc/lib/copy_user.S +++ b/arch/sparc/lib/copy_user.S @@ -15,6 +15,7 @@ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/thread_info.h> +#include <asm/export.h> /* Work around cpp -rob */ #define ALLOC #alloc @@ -119,6 +120,7 @@ __copy_user_begin: .globl __copy_user + EXPORT_SYMBOL(__copy_user) dword_align: andcc %o1, 1, %g0 be 4f diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S index e566c770a0f6..0ecbafc30fd0 100644 --- a/arch/sparc/lib/csum_copy.S +++ b/arch/sparc/lib/csum_copy.S @@ -3,6 +3,8 @@ * Copyright (C) 2005 David S. Miller <davem@davemloft.net> */ +#include <asm/export.h> + #ifdef __KERNEL__ #define GLOBAL_SPARE %g7 #else @@ -63,6 +65,7 @@ add %o5, %o4, %o4 .globl FUNC_NAME + EXPORT_SYMBOL(FUNC_NAME) FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */ LOAD(prefetch, %o0 + 0x000, #n_reads) xor %o0, %o1, %g1 diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S index 9614b48b6ef8..a2b5a976be33 100644 --- a/arch/sparc/lib/divdi3.S +++ b/arch/sparc/lib/divdi3.S @@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. 
If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ +#include <asm/export.h> .text .align 4 .globl __divdi3 @@ -279,3 +280,4 @@ __divdi3: .LL81: ret restore +EXPORT_SYMBOL(__divdi3) diff --git a/arch/sparc/lib/ffs.S b/arch/sparc/lib/ffs.S index b39389f69899..23aab144d28e 100644 --- a/arch/sparc/lib/ffs.S +++ b/arch/sparc/lib/ffs.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm/export.h> .register %g2,#scratch @@ -65,6 +66,8 @@ ENTRY(__ffs) add %o2, %g1, %o0 ENDPROC(ffs) ENDPROC(__ffs) +EXPORT_SYMBOL(__ffs) +EXPORT_SYMBOL(ffs) .section .popc_6insn_patch, "ax" .word ffs diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S index 95414e0a6808..f9985f129fb6 100644 --- a/arch/sparc/lib/hweight.S +++ b/arch/sparc/lib/hweight.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm/export.h> .text .align 32 @@ -7,6 +8,7 @@ ENTRY(__arch_hweight8) nop nop ENDPROC(__arch_hweight8) +EXPORT_SYMBOL(__arch_hweight8) .section .popc_3insn_patch, "ax" .word __arch_hweight8 sllx %o0, 64-8, %g1 @@ -19,6 +21,7 @@ ENTRY(__arch_hweight16) nop nop ENDPROC(__arch_hweight16) +EXPORT_SYMBOL(__arch_hweight16) .section .popc_3insn_patch, "ax" .word __arch_hweight16 sllx %o0, 64-16, %g1 @@ -31,6 +34,7 @@ ENTRY(__arch_hweight32) nop nop ENDPROC(__arch_hweight32) +EXPORT_SYMBOL(__arch_hweight32) .section .popc_3insn_patch, "ax" .word __arch_hweight32 sllx %o0, 64-32, %g1 @@ -43,6 +47,7 @@ ENTRY(__arch_hweight64) nop nop ENDPROC(__arch_hweight64) +EXPORT_SYMBOL(__arch_hweight64) .section .popc_3insn_patch, "ax" .word __arch_hweight64 retl diff --git a/arch/sparc/lib/ipcsum.S b/arch/sparc/lib/ipcsum.S index 4742d59029ee..5d61648b53dd 100644 --- a/arch/sparc/lib/ipcsum.S +++ b/arch/sparc/lib/ipcsum.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */ @@ -31,3 +32,4 @@ ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */ retl and %o2, %o1, %o0 ENDPROC(ip_fast_csum) +EXPORT_SYMBOL(ip_fast_csum) diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c deleted file mode 100644 index de5e97817bdb..000000000000 --- a/arch/sparc/lib/ksyms.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Export of symbols defined in assembler - */ - -/* Tell string.h we don't want memcpy etc. as cpp defines */ -#define EXPORT_SYMTAB_STROPS - -#include <linux/module.h> -#include <linux/string.h> -#include <linux/types.h> - -#include <asm/checksum.h> -#include <asm/uaccess.h> -#include <asm/ftrace.h> - -/* string functions */ -EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strncmp); - -/* mem* functions */ -extern void *__memscan_zero(void *, size_t); -extern void *__memscan_generic(void *, int, size_t); -extern void *__bzero(void *, size_t); - -EXPORT_SYMBOL(memscan); -EXPORT_SYMBOL(__memscan_zero); -EXPORT_SYMBOL(__memscan_generic); -EXPORT_SYMBOL(memcmp); -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(__bzero); - -/* Networking helper routines. 
*/ -EXPORT_SYMBOL(csum_partial); - -#ifdef CONFIG_MCOUNT -EXPORT_SYMBOL(_mcount); -#endif - -/* - * sparc - */ -#ifdef CONFIG_SPARC32 -extern int __ashrdi3(int, int); -extern int __ashldi3(int, int); -extern int __lshrdi3(int, int); -extern int __muldi3(int, int); -extern int __divdi3(int, int); - -extern void (*__copy_1page)(void *, const void *); -extern void (*bzero_1page)(void *); - -extern void ___rw_read_enter(void); -extern void ___rw_read_try(void); -extern void ___rw_read_exit(void); -extern void ___rw_write_enter(void); - -/* Networking helper routines. */ -EXPORT_SYMBOL(__csum_partial_copy_sparc_generic); - -/* Special internal versions of library functions. */ -EXPORT_SYMBOL(__copy_1page); -EXPORT_SYMBOL(__memmove); -EXPORT_SYMBOL(bzero_1page); - -/* Moving data to/from/in userspace. */ -EXPORT_SYMBOL(__copy_user); - -/* Used by asm/spinlock.h */ -#ifdef CONFIG_SMP -EXPORT_SYMBOL(___rw_read_enter); -EXPORT_SYMBOL(___rw_read_try); -EXPORT_SYMBOL(___rw_read_exit); -EXPORT_SYMBOL(___rw_write_enter); -#endif - -EXPORT_SYMBOL(__ashrdi3); -EXPORT_SYMBOL(__ashldi3); -EXPORT_SYMBOL(__lshrdi3); -EXPORT_SYMBOL(__muldi3); -EXPORT_SYMBOL(__divdi3); -#endif - -/* - * sparc64 - */ -#ifdef CONFIG_SPARC64 -/* Networking helper routines. */ -EXPORT_SYMBOL(csum_partial_copy_nocheck); -EXPORT_SYMBOL(__csum_partial_copy_from_user); -EXPORT_SYMBOL(__csum_partial_copy_to_user); -EXPORT_SYMBOL(ip_fast_csum); - -/* Moving data to/from/in userspace. */ -EXPORT_SYMBOL(___copy_to_user); -EXPORT_SYMBOL(___copy_from_user); -EXPORT_SYMBOL(___copy_in_user); -EXPORT_SYMBOL(__clear_user); - -/* Atomic counter implementation. */ -#define ATOMIC_OP(op) \ -EXPORT_SYMBOL(atomic_##op); \ -EXPORT_SYMBOL(atomic64_##op); - -#define ATOMIC_OP_RETURN(op) \ -EXPORT_SYMBOL(atomic_##op##_return); \ -EXPORT_SYMBOL(atomic64_##op##_return); - -#define ATOMIC_FETCH_OP(op) \ -EXPORT_SYMBOL(atomic_fetch_##op); \ -EXPORT_SYMBOL(atomic64_fetch_##op); - -#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) - -ATOMIC_OPS(add) -ATOMIC_OPS(sub) - -#undef ATOMIC_OPS -#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) - -ATOMIC_OPS(and) -ATOMIC_OPS(or) -ATOMIC_OPS(xor) - -#undef ATOMIC_OPS -#undef ATOMIC_FETCH_OP -#undef ATOMIC_OP_RETURN -#undef ATOMIC_OP - -EXPORT_SYMBOL(atomic64_dec_if_positive); - -/* Atomic bit operations. */ -EXPORT_SYMBOL(test_and_set_bit); -EXPORT_SYMBOL(test_and_clear_bit); -EXPORT_SYMBOL(test_and_change_bit); -EXPORT_SYMBOL(set_bit); -EXPORT_SYMBOL(clear_bit); -EXPORT_SYMBOL(change_bit); - -/* Special internal versions of library functions. 
*/ -EXPORT_SYMBOL(_clear_page); -EXPORT_SYMBOL(clear_user_page); -EXPORT_SYMBOL(copy_user_page); - -/* RAID code needs this */ -void VISenter(void); -EXPORT_SYMBOL(VISenter); - -extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); -extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, - unsigned long *); -extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); -extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); -EXPORT_SYMBOL(xor_vis_2); -EXPORT_SYMBOL(xor_vis_3); -EXPORT_SYMBOL(xor_vis_4); -EXPORT_SYMBOL(xor_vis_5); - -extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); -extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, - unsigned long *); -extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); -extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); - -EXPORT_SYMBOL(xor_niagara_2); -EXPORT_SYMBOL(xor_niagara_3); -EXPORT_SYMBOL(xor_niagara_4); -EXPORT_SYMBOL(xor_niagara_5); -#endif diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S index 64f53f2b673d..f38c4e59d078 100644 --- a/arch/sparc/lib/locks.S +++ b/arch/sparc/lib/locks.S @@ -10,6 +10,7 @@ #include <asm/psr.h> #include <asm/smp.h> #include <asm/spinlock.h> +#include <asm/export.h> .text .align 4 @@ -48,6 +49,7 @@ ___rw_write_enter_spin_on_wlock: ld [%g1], %g2 .globl ___rw_read_enter +EXPORT_SYMBOL(___rw_read_enter) ___rw_read_enter: orcc %g2, 0x0, %g0 bne,a ___rw_read_enter_spin_on_wlock @@ -59,6 +61,7 @@ ___rw_read_enter: mov %g4, %o7 .globl ___rw_read_exit +EXPORT_SYMBOL(___rw_read_exit) ___rw_read_exit: orcc %g2, 0x0, %g0 bne,a ___rw_read_exit_spin_on_wlock @@ -70,6 +73,7 @@ ___rw_read_exit: mov %g4, %o7 .globl ___rw_read_try +EXPORT_SYMBOL(___rw_read_try) ___rw_read_try: orcc %g2, 0x0, %g0 bne ___rw_read_try_spin_on_wlock @@ -81,6 +85,7 @@ ___rw_read_try: mov %g4, %o7 .globl ___rw_write_enter +EXPORT_SYMBOL(___rw_write_enter) ___rw_write_enter: orcc %g2, 0x0, %g0 bne ___rw_write_enter_spin_on_wlock diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S index 60ebc7cdbee0..c9b9373f8d81 100644 --- a/arch/sparc/lib/lshrdi3.S +++ b/arch/sparc/lib/lshrdi3.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm/export.h> ENTRY(__lshrdi3) cmp %o2, 0 @@ -25,3 +26,4 @@ ENTRY(__lshrdi3) retl nop ENDPROC(__lshrdi3) +EXPORT_SYMBOL(__lshrdi3) diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S index 0b0ed4d34219..194f383611c0 100644 --- a/arch/sparc/lib/mcount.S +++ b/arch/sparc/lib/mcount.S @@ -6,6 +6,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> /* * This is the main variant and is called by C code. 
GCC's -pg option @@ -16,6 +17,7 @@ .align 32 .globl _mcount .type _mcount,#function + EXPORT_SYMBOL(_mcount) .globl mcount .type mcount,#function _mcount: diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S index efa106c41ed0..cee7f30dbb61 100644 --- a/arch/sparc/lib/memcmp.S +++ b/arch/sparc/lib/memcmp.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/asm.h> +#include <asm/export.h> .text ENTRY(memcmp) @@ -25,3 +26,4 @@ ENTRY(memcmp) 2: retl mov 0, %o0 ENDPROC(memcmp) +EXPORT_SYMBOL(memcmp) diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S index 4d8c497517bd..8913feaa7ac7 100644 --- a/arch/sparc/lib/memcpy.S +++ b/arch/sparc/lib/memcpy.S @@ -7,6 +7,7 @@ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ +#include <asm/export.h> #define FUNC(x) \ .globl x; \ .type x,@function; \ @@ -58,93 +59,11 @@ x: stb %t0, [%dst - (offset) - 0x02]; \ stb %t1, [%dst - (offset) - 0x01]; -/* Both these macros have to start with exactly the same insn */ -#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [%src - (offset) - 0x20], %t0; \ - ldd [%src - (offset) - 0x18], %t2; \ - ldd [%src - (offset) - 0x10], %t4; \ - ldd [%src - (offset) - 0x08], %t6; \ - st %t0, [%dst - (offset) - 0x20]; \ - st %t1, [%dst - (offset) - 0x1c]; \ - st %t2, [%dst - (offset) - 0x18]; \ - st %t3, [%dst - (offset) - 0x14]; \ - st %t4, [%dst - (offset) - 0x10]; \ - st %t5, [%dst - (offset) - 0x0c]; \ - st %t6, [%dst - (offset) - 0x08]; \ - st %t7, [%dst - (offset) - 0x04]; - -#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [%src - (offset) - 0x20], %t0; \ - ldd [%src - (offset) - 0x18], %t2; \ - ldd [%src - (offset) - 0x10], %t4; \ - ldd [%src - (offset) - 0x08], %t6; \ - std %t0, [%dst - (offset) - 0x20]; \ - std %t2, [%dst - (offset) - 0x18]; \ - std %t4, [%dst - (offset) - 0x10]; \ - std %t6, [%dst - (offset) - 0x08]; - -#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldd [%src + (offset) + 0x00], %t0; \ - ldd [%src + (offset) + 0x08], %t2; \ - st %t0, [%dst + (offset) + 0x00]; \ - st %t1, [%dst + (offset) + 0x04]; \ - st %t2, [%dst + (offset) + 0x08]; \ - st %t3, [%dst + (offset) + 0x0c]; - -#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \ - ldub [%src + (offset) + 0x00], %t0; \ - ldub [%src + (offset) + 0x01], %t1; \ - stb %t0, [%dst + (offset) + 0x00]; \ - stb %t1, [%dst + (offset) + 0x01]; - -#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \ - ldd [%src + (offset) + 0x00], %t0; \ - ldd [%src + (offset) + 0x08], %t2; \ - srl %t0, shir, %t5; \ - srl %t1, shir, %t6; \ - sll %t0, shil, %t0; \ - or %t5, %prev, %t5; \ - sll %t1, shil, %prev; \ - or %t6, %t0, %t0; \ - srl %t2, shir, %t1; \ - srl %t3, shir, %t6; \ - sll %t2, shil, %t2; \ - or %t1, %prev, %t1; \ - std %t4, [%dst + (offset) + (offset2) - 0x04]; \ - std %t0, [%dst + (offset) + (offset2) + 0x04]; \ - sll %t3, shil, %prev; \ - or %t6, %t2, %t4; - -#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \ - ldd [%src + (offset) + 0x00], %t0; \ - ldd [%src + (offset) + 0x08], %t2; \ - srl %t0, shir, %t4; \ - srl %t1, shir, %t5; \ - sll %t0, shil, %t6; \ - or %t4, %prev, %t0; \ - sll %t1, shil, %prev; \ - or %t5, %t6, %t1; \ - srl %t2, shir, %t4; \ - srl %t3, shir, %t5; \ - sll %t2, shil, %t6; \ - or %t4, %prev, %t2; \ - sll %t3, shil, %prev; \ - or %t5, %t6, %t3; \ - std %t0, [%dst + (offset) + (offset2) + 0x00]; \ - std %t2, [%dst + (offset) + (offset2) + 
0x08]; - .text .align 4 -0: - retl - nop ! Only bcopy returns here and it retuns void... - -#ifdef __KERNEL__ -FUNC(amemmove) -FUNC(__memmove) -#endif FUNC(memmove) +EXPORT_SYMBOL(memmove) cmp %o0, %o1 mov %o0, %g7 bleu 9f @@ -202,6 +121,7 @@ FUNC(memmove) add %o0, 2, %o0 FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ +EXPORT_SYMBOL(memcpy) sub %o0, %o1, %o4 mov %o0, %g7 diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S index 857ad4f8905f..012cdb6ca467 100644 --- a/arch/sparc/lib/memmove.S +++ b/arch/sparc/lib/memmove.S @@ -5,6 +5,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(memmove) /* o0=dst o1=src o2=len */ @@ -57,3 +58,4 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */ stb %g7, [%o0 - 0x1] ba,a,pt %xcc, 99b ENDPROC(memmove) +EXPORT_SYMBOL(memmove) diff --git a/arch/sparc/lib/memscan_32.S b/arch/sparc/lib/memscan_32.S index 4ff1657dfc24..51ce690c42a8 100644 --- a/arch/sparc/lib/memscan_32.S +++ b/arch/sparc/lib/memscan_32.S @@ -4,6 +4,8 @@ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) */ +#include <asm/export.h> + /* In essence, this is just a fancy strlen. */ #define LO_MAGIC 0x01010101 @@ -13,6 +15,8 @@ .align 4 .globl __memscan_zero, __memscan_generic .globl memscan +EXPORT_SYMBOL(__memscan_zero) +EXPORT_SYMBOL(__memscan_generic) __memscan_zero: /* %o0 = addr, %o1 = size */ cmp %o1, 0 diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S index 5686dfa5dc15..daa96f4b03e6 100644 --- a/arch/sparc/lib/memscan_64.S +++ b/arch/sparc/lib/memscan_64.S @@ -5,6 +5,8 @@ * Copyright (C) 1998 David S. Miller (davem@redhat.com) */ + #include <asm/export.h> + #define HI_MAGIC 0x8080808080808080 #define LO_MAGIC 0x0101010101010101 #define ASI_PL 0x88 @@ -13,6 +15,8 @@ .align 32 .globl __memscan_zero, __memscan_generic .globl memscan + EXPORT_SYMBOL(__memscan_zero) + EXPORT_SYMBOL(__memscan_generic) __memscan_zero: /* %o0 = bufp, %o1 = size */ diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index f75e6906df14..bb539b42b088 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -9,6 +9,7 @@ */ #include <asm/ptrace.h> +#include <asm/export.h> /* Work around cpp -rob */ #define ALLOC #alloc @@ -63,6 +64,8 @@ __bzero_begin: .globl __bzero .globl memset + EXPORT_SYMBOL(__bzero) + EXPORT_SYMBOL(memset) .globl __memset_start, __memset_end __memset_start: memset: diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S index 9794939d1c12..17a0f49aef3c 100644 --- a/arch/sparc/lib/muldi3.S +++ b/arch/sparc/lib/muldi3.S @@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ +#include <asm/export.h> .text .align 4 .globl __muldi3 @@ -74,3 +75,4 @@ __muldi3: add %l2, %l0, %i0 ret restore %g0, %l3, %o1 +EXPORT_SYMBOL(__muldi3) diff --git a/arch/sparc/lib/strlen.S b/arch/sparc/lib/strlen.S index 536f83507fbf..ca0e7077e871 100644 --- a/arch/sparc/lib/strlen.S +++ b/arch/sparc/lib/strlen.S @@ -7,6 +7,7 @@ #include <linux/linkage.h> #include <asm/asm.h> +#include <asm/export.h> #define LO_MAGIC 0x01010101 #define HI_MAGIC 0x80808080 @@ -78,3 +79,4 @@ ENTRY(strlen) retl mov 2, %o0 ENDPROC(strlen) +EXPORT_SYMBOL(strlen) diff --git a/arch/sparc/lib/strncmp_32.S b/arch/sparc/lib/strncmp_32.S index c0d1b568c1c5..e3fe014813af 100644 --- a/arch/sparc/lib/strncmp_32.S +++ b/arch/sparc/lib/strncmp_32.S @@ -4,6 +4,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(strncmp) @@ -116,3 +117,4 @@ ENTRY(strncmp) retl sub %o3, %o0, %o0 ENDPROC(strncmp) +EXPORT_SYMBOL(strncmp) diff --git a/arch/sparc/lib/strncmp_64.S b/arch/sparc/lib/strncmp_64.S index 0656627166f3..efb5f884330d 100644 --- a/arch/sparc/lib/strncmp_64.S +++ b/arch/sparc/lib/strncmp_64.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/asi.h> +#include <asm/export.h> .text ENTRY(strncmp) @@ -28,3 +29,4 @@ ENTRY(strncmp) retl clr %o0 ENDPROC(strncmp) +EXPORT_SYMBOL(strncmp) diff --git a/arch/sparc/lib/xor.S b/arch/sparc/lib/xor.S index 2c05641c3263..45a49cb618b5 100644 --- a/arch/sparc/lib/xor.S +++ b/arch/sparc/lib/xor.S @@ -13,6 +13,7 @@ #include <asm/asi.h> #include <asm/dcu.h> #include <asm/spitfire.h> +#include <asm/export.h> /* * Requirements: @@ -90,6 +91,7 @@ ENTRY(xor_vis_2) retl wr %g0, 0, %fprs ENDPROC(xor_vis_2) +EXPORT_SYMBOL(xor_vis_2) ENTRY(xor_vis_3) rd %fprs, %o5 @@ -156,6 +158,7 @@ ENTRY(xor_vis_3) retl wr %g0, 0, %fprs ENDPROC(xor_vis_3) +EXPORT_SYMBOL(xor_vis_3) ENTRY(xor_vis_4) rd %fprs, %o5 @@ -241,6 +244,7 @@ ENTRY(xor_vis_4) retl wr %g0, 0, %fprs ENDPROC(xor_vis_4) +EXPORT_SYMBOL(xor_vis_4) ENTRY(xor_vis_5) save %sp, -192, %sp @@ -347,6 +351,7 @@ ENTRY(xor_vis_5) ret restore ENDPROC(xor_vis_5) +EXPORT_SYMBOL(xor_vis_5) /* Niagara versions. 
*/ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */ @@ -393,6 +398,7 @@ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */ ret restore ENDPROC(xor_niagara_2) +EXPORT_SYMBOL(xor_niagara_2) ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ save %sp, -192, %sp @@ -454,6 +460,7 @@ ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ ret restore ENDPROC(xor_niagara_3) +EXPORT_SYMBOL(xor_niagara_3) ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ save %sp, -192, %sp @@ -536,6 +543,7 @@ ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ ret restore ENDPROC(xor_niagara_4) +EXPORT_SYMBOL(xor_niagara_4) ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */ save %sp, -192, %sp @@ -634,3 +642,4 @@ ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=s ret restore ENDPROC(xor_niagara_5) +EXPORT_SYMBOL(xor_niagara_5) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index b75a8bcd2d23..21b352a11b49 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -44,6 +44,7 @@ #include <asm/alternative-asm.h> #include <asm/asm.h> #include <asm/smap.h> +#include <asm/export.h> .section .entry.text, "ax" @@ -991,6 +992,7 @@ trace: jmp ftrace_stub END(mcount) #endif /* CONFIG_DYNAMIC_FTRACE */ +EXPORT_SYMBOL(mcount) #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index c98ec2efd750..ef766a358b37 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -35,6 +35,7 @@ #include <asm/asm.h> #include <asm/smap.h> #include <asm/pgtable_types.h> +#include <asm/export.h> #include <linux/err.h> /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ @@ -875,6 +876,7 @@ ENTRY(native_load_gs_index) popfq ret END(native_load_gs_index) +EXPORT_SYMBOL(native_load_gs_index) _ASM_EXTABLE(.Lgs_change, bad_gs) .section .fixup, "ax" diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S index e5a17114a8c4..fee6bc79b987 100644 --- a/arch/x86/entry/thunk_32.S +++ b/arch/x86/entry/thunk_32.S @@ -6,6 +6,7 @@ */ #include <linux/linkage.h> #include <asm/asm.h> + #include <asm/export.h> /* put return address in eax (arg1) */ .macro THUNK name, func, put_ret_addr_in_eax=0 @@ -36,5 +37,7 @@ #ifdef CONFIG_PREEMPT THUNK ___preempt_schedule, preempt_schedule THUNK ___preempt_schedule_notrace, preempt_schedule_notrace + EXPORT_SYMBOL(___preempt_schedule) + EXPORT_SYMBOL(___preempt_schedule_notrace) #endif diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S index 627ecbcb2e62..be36bf4e0957 100644 --- a/arch/x86/entry/thunk_64.S +++ b/arch/x86/entry/thunk_64.S @@ -8,6 +8,7 @@ #include <linux/linkage.h> #include "calling.h" #include <asm/asm.h> +#include <asm/export.h> /* rdi: arg1 ... normal C conventions. rax is saved/restored. 
*/ .macro THUNK name, func, put_ret_addr_in_rdi=0 @@ -49,6 +50,8 @@ #ifdef CONFIG_PREEMPT THUNK ___preempt_schedule, preempt_schedule THUNK ___preempt_schedule_notrace, preempt_schedule_notrace + EXPORT_SYMBOL(___preempt_schedule) + EXPORT_SYMBOL(___preempt_schedule_notrace) #endif #if defined(CONFIG_TRACE_IRQFLAGS) \ diff --git a/arch/x86/include/asm/export.h b/arch/x86/include/asm/export.h new file mode 100644 index 000000000000..138de56b13eb --- /dev/null +++ b/arch/x86/include/asm/export.h @@ -0,0 +1,4 @@ +#ifdef CONFIG_64BIT +#define KSYM_ALIGN 16 +#endif +#include <asm-generic/export.h> diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 4dd5d500eb60..79076d75bdbf 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -46,9 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-y += probe_roms.o -obj-$(CONFIG_X86_32) += i386_ksyms_32.o -obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o -obj-$(CONFIG_X86_64) += mcount_64.o +obj-$(CONFIG_X86_64) += sys_x86_64.o mcount_64.o obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o obj-$(CONFIG_SYSFS) += ksysfs.o obj-y += bootflag.o e820.o diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 5f401262f12d..b6b2f0264af3 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -23,6 +23,7 @@ #include <asm/percpu.h> #include <asm/nops.h> #include <asm/bootparam.h> +#include <asm/export.h> /* Physical address */ #define pa(X) ((X) - __PAGE_OFFSET) @@ -673,6 +674,7 @@ ENTRY(empty_zero_page) .fill 4096,1,0 ENTRY(swapper_pg_dir) .fill 1024,4,0 +EXPORT_SYMBOL(empty_zero_page) /* * This starts the data section. diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index c98a559c346e..b4421cc191b0 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -21,6 +21,7 @@ #include <asm/percpu.h> #include <asm/nops.h> #include "../entry/calling.h" +#include <asm/export.h> #ifdef CONFIG_PARAVIRT #include <asm/asm-offsets.h> @@ -486,10 +487,12 @@ early_gdt_descr_base: ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ .quad 0x0000000000000000 +EXPORT_SYMBOL(phys_base) #include "../../x86/xen/xen-head.S" __PAGE_ALIGNED_BSS NEXT_PAGE(empty_zero_page) .skip PAGE_SIZE +EXPORT_SYMBOL(empty_zero_page) diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c deleted file mode 100644 index 1f9b878ef5ef..000000000000 --- a/arch/x86/kernel/i386_ksyms_32.c +++ /dev/null @@ -1,47 +0,0 @@ -#include <linux/export.h> -#include <linux/spinlock_types.h> - -#include <asm/checksum.h> -#include <asm/pgtable.h> -#include <asm/desc.h> -#include <asm/ftrace.h> - -#ifdef CONFIG_FUNCTION_TRACER -/* mcount is defined in assembly */ -EXPORT_SYMBOL(mcount); -#endif - -/* - * Note, this is a prototype to get at the symbol for - * the export, but dont use it from C code, it is used - * by assembly code and is not using C calling convention! - */ -#ifndef CONFIG_X86_CMPXCHG64 -extern void cmpxchg8b_emu(void); -EXPORT_SYMBOL(cmpxchg8b_emu); -#endif - -/* Networking helper routines. 
*/ -EXPORT_SYMBOL(csum_partial_copy_generic); - -EXPORT_SYMBOL(__get_user_1); -EXPORT_SYMBOL(__get_user_2); -EXPORT_SYMBOL(__get_user_4); -EXPORT_SYMBOL(__get_user_8); - -EXPORT_SYMBOL(__put_user_1); -EXPORT_SYMBOL(__put_user_2); -EXPORT_SYMBOL(__put_user_4); -EXPORT_SYMBOL(__put_user_8); - -EXPORT_SYMBOL(strstr); - -EXPORT_SYMBOL(csum_partial); -EXPORT_SYMBOL(empty_zero_page); - -#ifdef CONFIG_PREEMPT -EXPORT_SYMBOL(___preempt_schedule); -EXPORT_SYMBOL(___preempt_schedule_notrace); -#endif - -EXPORT_SYMBOL(__sw_hweight32); diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S index 61924222a9e1..efe73aacf966 100644 --- a/arch/x86/kernel/mcount_64.S +++ b/arch/x86/kernel/mcount_64.S @@ -7,6 +7,7 @@ #include <linux/linkage.h> #include <asm/ptrace.h> #include <asm/ftrace.h> +#include <asm/export.h> .code64 @@ -294,6 +295,7 @@ trace: jmp fgraph_trace END(function_hook) #endif /* CONFIG_DYNAMIC_FTRACE */ +EXPORT_SYMBOL(function_hook) #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c deleted file mode 100644 index b2cee3d19477..000000000000 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ /dev/null @@ -1,85 +0,0 @@ -/* Exports for assembly files. - All C exports should go in the respective C files. */ - -#include <linux/export.h> -#include <linux/spinlock_types.h> -#include <linux/smp.h> - -#include <net/checksum.h> - -#include <asm/processor.h> -#include <asm/pgtable.h> -#include <asm/uaccess.h> -#include <asm/desc.h> -#include <asm/ftrace.h> - -#ifdef CONFIG_FUNCTION_TRACER -/* mcount and __fentry__ are defined in assembly */ -#ifdef CC_USING_FENTRY -EXPORT_SYMBOL(__fentry__); -#else -EXPORT_SYMBOL(mcount); -#endif -#endif - -EXPORT_SYMBOL(__get_user_1); -EXPORT_SYMBOL(__get_user_2); -EXPORT_SYMBOL(__get_user_4); -EXPORT_SYMBOL(__get_user_8); -EXPORT_SYMBOL(__put_user_1); -EXPORT_SYMBOL(__put_user_2); -EXPORT_SYMBOL(__put_user_4); -EXPORT_SYMBOL(__put_user_8); - -EXPORT_SYMBOL(copy_user_generic_string); -EXPORT_SYMBOL(copy_user_generic_unrolled); -EXPORT_SYMBOL(copy_user_enhanced_fast_string); -EXPORT_SYMBOL(__copy_user_nocache); -EXPORT_SYMBOL(_copy_from_user); -EXPORT_SYMBOL(_copy_to_user); - -EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled); - -EXPORT_SYMBOL(copy_page); -EXPORT_SYMBOL(clear_page); - -EXPORT_SYMBOL(csum_partial); - -EXPORT_SYMBOL(__sw_hweight32); -EXPORT_SYMBOL(__sw_hweight64); - -/* - * Export string functions. We normally rely on gcc builtin for most of these, - * but gcc sometimes decides not to inline them. 
- */ -#undef memcpy -#undef memset -#undef memmove - -extern void *__memset(void *, int, __kernel_size_t); -extern void *__memcpy(void *, const void *, __kernel_size_t); -extern void *__memmove(void *, const void *, __kernel_size_t); -extern void *memset(void *, int, __kernel_size_t); -extern void *memcpy(void *, const void *, __kernel_size_t); -extern void *memmove(void *, const void *, __kernel_size_t); - -EXPORT_SYMBOL(__memset); -EXPORT_SYMBOL(__memcpy); -EXPORT_SYMBOL(__memmove); - -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memmove); - -#ifndef CONFIG_DEBUG_VIRTUAL -EXPORT_SYMBOL(phys_base); -#endif -EXPORT_SYMBOL(empty_zero_page); -#ifndef CONFIG_PARAVIRT -EXPORT_SYMBOL(native_load_gs_index); -#endif - -#ifdef CONFIG_PREEMPT -EXPORT_SYMBOL(___preempt_schedule); -EXPORT_SYMBOL(___preempt_schedule_notrace); -#endif diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index c1e623209853..4d34bb548b41 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -28,6 +28,7 @@ #include <linux/linkage.h> #include <asm/errno.h> #include <asm/asm.h> +#include <asm/export.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments @@ -251,6 +252,7 @@ ENTRY(csum_partial) ENDPROC(csum_partial) #endif +EXPORT_SYMBOL(csum_partial) /* unsigned int csum_partial_copy_generic (const char *src, char *dst, @@ -490,3 +492,4 @@ ENDPROC(csum_partial_copy_generic) #undef ROUND1 #endif +EXPORT_SYMBOL(csum_partial_copy_generic) diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index 65be7cfaf947..5e2af3a88cf5 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S @@ -1,6 +1,7 @@ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> +#include <asm/export.h> /* * Most CPUs support enhanced REP MOVSB/STOSB instructions. It is @@ -23,6 +24,7 @@ ENTRY(clear_page) rep stosq ret ENDPROC(clear_page) +EXPORT_SYMBOL(clear_page) ENTRY(clear_page_orig) diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S index ad5349778490..03a186fc06ea 100644 --- a/arch/x86/lib/cmpxchg8b_emu.S +++ b/arch/x86/lib/cmpxchg8b_emu.S @@ -7,6 +7,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text @@ -48,3 +49,4 @@ ENTRY(cmpxchg8b_emu) ret ENDPROC(cmpxchg8b_emu) +EXPORT_SYMBOL(cmpxchg8b_emu) diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S index 24ef1c2104d4..e8508156c99d 100644 --- a/arch/x86/lib/copy_page_64.S +++ b/arch/x86/lib/copy_page_64.S @@ -3,6 +3,7 @@ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> +#include <asm/export.h> /* * Some CPUs run faster using the string copy instructions (sane microcode). 
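Two details are worth calling out in the x86 conversion around this point. First, only assembler-defined symbols need the new asm/export.h macro; exports of C-defined helpers simply move next to their definitions, as the csum-partial_64.c hunk below does for csum_partial(). Second, mcount_64.S exports function_hook, the preprocessor alias that file already uses for mcount/__fentry__, so a single line in the assembler source covers the CC_USING_FENTRY distinction that the deleted x8664_ksyms_64.c handled with an #ifdef. A minimal sketch of the C-side placement follows, with a made-up helper purely for illustration:

#include <linux/export.h>
#include <linux/types.h>

/*
 * Hypothetical helper; not part of this patch.  The point is the
 * placement: the export annotation follows the definition directly,
 * so no separate *_ksyms.c file is needed to make the symbol visible
 * to modules.
 */
u32 example_csum_fold64(u64 sum)
{
        /* fold a 64-bit running sum down to 32 bits, absorbing carries */
        sum = (sum & 0xffffffffULL) + (sum >> 32);
        sum = (sum & 0xffffffffULL) + (sum >> 32);
        return (u32)sum;
}
EXPORT_SYMBOL(example_csum_fold64);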
@@ -17,6 +18,7 @@ ENTRY(copy_page) rep movsq ret ENDPROC(copy_page) +EXPORT_SYMBOL(copy_page) ENTRY(copy_page_regs) subq $2*8, %rsp diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index bf603ebbfd8e..d376e4b48f88 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -14,6 +14,7 @@ #include <asm/alternative-asm.h> #include <asm/asm.h> #include <asm/smap.h> +#include <asm/export.h> /* Standard copy_to_user with segment limit checking */ ENTRY(_copy_to_user) @@ -29,6 +30,7 @@ ENTRY(_copy_to_user) "jmp copy_user_enhanced_fast_string", \ X86_FEATURE_ERMS ENDPROC(_copy_to_user) +EXPORT_SYMBOL(_copy_to_user) /* Standard copy_from_user with segment limit checking */ ENTRY(_copy_from_user) @@ -44,6 +46,8 @@ ENTRY(_copy_from_user) "jmp copy_user_enhanced_fast_string", \ X86_FEATURE_ERMS ENDPROC(_copy_from_user) +EXPORT_SYMBOL(_copy_from_user) + .section .fixup,"ax" /* must zero dest */ @@ -155,6 +159,7 @@ ENTRY(copy_user_generic_unrolled) _ASM_EXTABLE(21b,50b) _ASM_EXTABLE(22b,50b) ENDPROC(copy_user_generic_unrolled) +EXPORT_SYMBOL(copy_user_generic_unrolled) /* Some CPUs run faster using the string copy instructions. * This is also a lot simpler. Use them when possible. @@ -200,6 +205,7 @@ ENTRY(copy_user_generic_string) _ASM_EXTABLE(1b,11b) _ASM_EXTABLE(3b,12b) ENDPROC(copy_user_generic_string) +EXPORT_SYMBOL(copy_user_generic_string) /* * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. @@ -229,6 +235,7 @@ ENTRY(copy_user_enhanced_fast_string) _ASM_EXTABLE(1b,12b) ENDPROC(copy_user_enhanced_fast_string) +EXPORT_SYMBOL(copy_user_enhanced_fast_string) /* * copy_user_nocache - Uncached memory copy with exception handling @@ -379,3 +386,4 @@ ENTRY(__copy_user_nocache) _ASM_EXTABLE(40b,.L_fixup_1b_copy) _ASM_EXTABLE(41b,.L_fixup_1b_copy) ENDPROC(__copy_user_nocache) +EXPORT_SYMBOL(__copy_user_nocache) diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c index 9a7fe6a70491..378e5d5bf9b1 100644 --- a/arch/x86/lib/csum-partial_64.c +++ b/arch/x86/lib/csum-partial_64.c @@ -135,6 +135,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum) return (__force __wsum)add32_with_carry(do_csum(buff, len), (__force u32)sum); } +EXPORT_SYMBOL(csum_partial); /* * this routine is used for miscellaneous IP-like checksums, mainly diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 0ef5128c2de8..37b62d412148 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -32,6 +32,7 @@ #include <asm/thread_info.h> #include <asm/asm.h> #include <asm/smap.h> +#include <asm/export.h> .text ENTRY(__get_user_1) @@ -44,6 +45,7 @@ ENTRY(__get_user_1) ASM_CLAC ret ENDPROC(__get_user_1) +EXPORT_SYMBOL(__get_user_1) ENTRY(__get_user_2) add $1,%_ASM_AX @@ -57,6 +59,7 @@ ENTRY(__get_user_2) ASM_CLAC ret ENDPROC(__get_user_2) +EXPORT_SYMBOL(__get_user_2) ENTRY(__get_user_4) add $3,%_ASM_AX @@ -70,6 +73,7 @@ ENTRY(__get_user_4) ASM_CLAC ret ENDPROC(__get_user_4) +EXPORT_SYMBOL(__get_user_4) ENTRY(__get_user_8) #ifdef CONFIG_X86_64 @@ -97,6 +101,7 @@ ENTRY(__get_user_8) ret #endif ENDPROC(__get_user_8) +EXPORT_SYMBOL(__get_user_8) bad_get_user: diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S index 8a602a1e404a..23d893cbc200 100644 --- a/arch/x86/lib/hweight.S +++ b/arch/x86/lib/hweight.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm/export.h> #include <asm/asm.h> @@ -32,6 +33,7 @@ ENTRY(__sw_hweight32) __ASM_SIZE(pop,) %__ASM_REG(dx) ret ENDPROC(__sw_hweight32) +EXPORT_SYMBOL(__sw_hweight32) 
ENTRY(__sw_hweight64) #ifdef CONFIG_X86_64 @@ -77,3 +79,4 @@ ENTRY(__sw_hweight64) ret #endif ENDPROC(__sw_hweight64) +EXPORT_SYMBOL(__sw_hweight64) diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 49e6ebac7e73..779782f58324 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -4,6 +4,7 @@ #include <asm/errno.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> +#include <asm/export.h> /* * We build a jump to memcpy_orig by default which gets NOPped out on @@ -40,6 +41,8 @@ ENTRY(memcpy) ret ENDPROC(memcpy) ENDPROC(__memcpy) +EXPORT_SYMBOL(memcpy) +EXPORT_SYMBOL(__memcpy) /* * memcpy_erms() - enhanced fast string memcpy. This is faster and @@ -274,6 +277,7 @@ ENTRY(memcpy_mcsafe_unrolled) xorq %rax, %rax ret ENDPROC(memcpy_mcsafe_unrolled) +EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled) .section .fixup, "ax" /* Return -EFAULT for any failure */ diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index 90ce01bee00c..15de86cd15b0 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -8,6 +8,7 @@ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> +#include <asm/export.h> #undef memmove @@ -207,3 +208,5 @@ ENTRY(__memmove) retq ENDPROC(__memmove) ENDPROC(memmove) +EXPORT_SYMBOL(__memmove) +EXPORT_SYMBOL(memmove) diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index e1229ecd2a82..55b95db30a61 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -3,6 +3,7 @@ #include <linux/linkage.h> #include <asm/cpufeatures.h> #include <asm/alternative-asm.h> +#include <asm/export.h> .weak memset @@ -43,6 +44,8 @@ ENTRY(__memset) ret ENDPROC(memset) ENDPROC(__memset) +EXPORT_SYMBOL(memset) +EXPORT_SYMBOL(__memset) /* * ISO C memset - set a memory block to a byte value. 
This function uses diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index c891ece81e5b..cd5d716d2897 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -15,6 +15,7 @@ #include <asm/errno.h> #include <asm/asm.h> #include <asm/smap.h> +#include <asm/export.h> /* @@ -43,6 +44,7 @@ ENTRY(__put_user_1) xor %eax,%eax EXIT ENDPROC(__put_user_1) +EXPORT_SYMBOL(__put_user_1) ENTRY(__put_user_2) ENTER @@ -55,6 +57,7 @@ ENTRY(__put_user_2) xor %eax,%eax EXIT ENDPROC(__put_user_2) +EXPORT_SYMBOL(__put_user_2) ENTRY(__put_user_4) ENTER @@ -67,6 +70,7 @@ ENTRY(__put_user_4) xor %eax,%eax EXIT ENDPROC(__put_user_4) +EXPORT_SYMBOL(__put_user_4) ENTRY(__put_user_8) ENTER @@ -82,6 +86,7 @@ ENTRY(__put_user_8) xor %eax,%eax EXIT ENDPROC(__put_user_8) +EXPORT_SYMBOL(__put_user_8) bad_put_user: movl $-EFAULT,%eax diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c index 8e2d55f754bf..a03b1c750bfe 100644 --- a/arch/x86/lib/strstr_32.c +++ b/arch/x86/lib/strstr_32.c @@ -1,4 +1,5 @@ #include <linux/string.h> +#include <linux/export.h> char *strstr(const char *cs, const char *ct) { @@ -28,4 +29,4 @@ __asm__ __volatile__( : "dx", "di"); return __res; } - +EXPORT_SYMBOL(strstr); diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile index 3ee2bb6b440b..e7e7055a8658 100644 --- a/arch/x86/um/Makefile +++ b/arch/x86/um/Makefile @@ -8,7 +8,7 @@ else BITS := 64 endif -obj-y = bug.o bugs_$(BITS).o delay.o fault.o ksyms.o ldt.o \ +obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \ ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \ stub_$(BITS).o stub_segv.o \ sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \ diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S index fa4b8b9841ff..b9933eb9274a 100644 --- a/arch/x86/um/checksum_32.S +++ b/arch/x86/um/checksum_32.S @@ -27,6 +27,7 @@ #include <asm/errno.h> #include <asm/asm.h> +#include <asm/export.h> /* * computes a partial checksum, e.g. for TCP/UDP fragments @@ -214,3 +215,4 @@ csum_partial: ret #endif + EXPORT_SYMBOL(csum_partial) diff --git a/arch/x86/um/ksyms.c b/arch/x86/um/ksyms.c deleted file mode 100644 index 2e8f43ec6214..000000000000 --- a/arch/x86/um/ksyms.c +++ /dev/null @@ -1,13 +0,0 @@ -#include <linux/module.h> -#include <asm/string.h> -#include <asm/checksum.h> - -#ifndef CONFIG_X86_32 -/*XXX: we need them because they would be exported by x86_64 */ -#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 -EXPORT_SYMBOL(memcpy); -#else -EXPORT_SYMBOL(__memcpy); -#endif -#endif -EXPORT_SYMBOL(csum_partial); diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h new file mode 100644 index 000000000000..43199a049da5 --- /dev/null +++ b/include/asm-generic/export.h @@ -0,0 +1,94 @@ +#ifndef __ASM_GENERIC_EXPORT_H +#define __ASM_GENERIC_EXPORT_H + +#ifndef KSYM_FUNC +#define KSYM_FUNC(x) x +#endif +#ifdef CONFIG_64BIT +#define __put .quad +#ifndef KSYM_ALIGN +#define KSYM_ALIGN 8 +#endif +#ifndef KCRC_ALIGN +#define KCRC_ALIGN 8 +#endif +#else +#define __put .long +#ifndef KSYM_ALIGN +#define KSYM_ALIGN 4 +#endif +#ifndef KCRC_ALIGN +#define KCRC_ALIGN 4 +#endif +#endif + +#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX +#define KSYM(name) _##name +#else +#define KSYM(name) name +#endif + +/* + * note on .section use: @progbits vs %progbits nastiness doesn't matter, + * since we immediately emit into those sections anyway. 
+ */ +.macro ___EXPORT_SYMBOL name,val,sec +#ifdef CONFIG_MODULES + .globl KSYM(__ksymtab_\name) + .section ___ksymtab\sec+\name,"a" + .balign KSYM_ALIGN +KSYM(__ksymtab_\name): + __put \val, KSYM(__kstrtab_\name) + .previous + .section __ksymtab_strings,"a" +KSYM(__kstrtab_\name): +#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX + .asciz "_\name" +#else + .asciz "\name" +#endif + .previous +#ifdef CONFIG_MODVERSIONS + .section ___kcrctab\sec+\name,"a" + .balign KCRC_ALIGN +KSYM(__kcrctab_\name): + __put KSYM(__crc_\name) + .weak KSYM(__crc_\name) + .previous +#endif +#endif +.endm +#undef __put + +#if defined(__KSYM_DEPS__) + +#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym === + +#elif defined(CONFIG_TRIM_UNUSED_KSYMS) + +#include <linux/kconfig.h> +#include <generated/autoksyms.h> + +#define __EXPORT_SYMBOL(sym, val, sec) \ + __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym)) +#define __cond_export_sym(sym, val, sec, conf) \ + ___cond_export_sym(sym, val, sec, conf) +#define ___cond_export_sym(sym, val, sec, enabled) \ + __cond_export_sym_##enabled(sym, val, sec) +#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec +#define __cond_export_sym_0(sym, val, sec) /* nothing */ + +#else +#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec +#endif + +#define EXPORT_SYMBOL(name) \ + __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),) +#define EXPORT_SYMBOL_GPL(name) \ + __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl) +#define EXPORT_DATA_SYMBOL(name) \ + __EXPORT_SYMBOL(name, KSYM(name),) +#define EXPORT_DATA_SYMBOL_GPL(name) \ + __EXPORT_SYMBOL(name, KSYM(name),_gpl) + +#endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 3e42bcdd014b..30747960bc54 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -196,9 +196,14 @@ *(.dtb.init.rodata) \ VMLINUX_SYMBOL(__dtb_end) = .; -/* .data section */ +/* + * .data section + * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates + * .data.identifier which needs to be pulled in with .data, but don't want to + * pull in .data..stuff which has its own requirements. Same for bss. 
+ */ #define DATA_DATA \ - *(.data) \ + *(.data .data.[0-9a-zA-Z_]*) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data) \ @@ -320,76 +325,76 @@ /* Kernel symbol table: Normal symbols */ \ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab) = .; \ - *(SORT(___ksymtab+*)) \ + KEEP(*(SORT(___ksymtab+*))) \ VMLINUX_SYMBOL(__stop___ksymtab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ - *(SORT(___ksymtab_gpl+*)) \ + KEEP(*(SORT(___ksymtab_gpl+*))) \ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ - *(SORT(___ksymtab_unused+*)) \ + KEEP(*(SORT(___ksymtab_unused+*))) \ VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ - *(SORT(___ksymtab_unused_gpl+*)) \ + KEEP(*(SORT(___ksymtab_unused_gpl+*))) \ VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ - *(SORT(___ksymtab_gpl_future+*)) \ + KEEP(*(SORT(___ksymtab_gpl_future+*))) \ VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ } \ \ /* Kernel symbol table: Normal symbols */ \ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab) = .; \ - *(SORT(___kcrctab+*)) \ + KEEP(*(SORT(___kcrctab+*))) \ VMLINUX_SYMBOL(__stop___kcrctab) = .; \ } \ \ /* Kernel symbol table: GPL-only symbols */ \ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ - *(SORT(___kcrctab_gpl+*)) \ + KEEP(*(SORT(___kcrctab_gpl+*))) \ VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ } \ \ /* Kernel symbol table: Normal unused symbols */ \ __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ - *(SORT(___kcrctab_unused+*)) \ + KEEP(*(SORT(___kcrctab_unused+*))) \ VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ } \ \ /* Kernel symbol table: GPL-only unused symbols */ \ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ - *(SORT(___kcrctab_unused_gpl+*)) \ + KEEP(*(SORT(___kcrctab_unused_gpl+*))) \ VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ } \ \ /* Kernel symbol table: GPL-future-only symbols */ \ __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ - *(SORT(___kcrctab_gpl_future+*)) \ + KEEP(*(SORT(___kcrctab_gpl_future+*))) \ VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ } \ \ /* Kernel symbol table: strings */ \ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ - *(__ksymtab_strings) \ + KEEP(*(__ksymtab_strings)) \ } \ \ /* __*init sections */ \ @@ -424,12 +429,17 @@ #define SECURITY_INIT \ .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__security_initcall_start) = .; \ - *(.security_initcall.init) \ + KEEP(*(.security_initcall.init)) \ VMLINUX_SYMBOL(__security_initcall_end) = .; \ } /* .text section. 
Map to function alignment to avoid address changes - * during second ld run in second ld pass when generating System.map */ + * during second ld run in second ld pass when generating System.map + * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates + * .text.identifier which needs to be pulled in with .text , but some + * architectures define .text.foo which is not intended to be pulled in here. + * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have + * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ *(.text.hot .text .text.fixup .text.unlikely) \ @@ -533,6 +543,7 @@ /* init and exit section handling */ #define INIT_DATA \ + KEEP(*(SORT(___kentry+*))) \ *(.init.data) \ MEM_DISCARD(init.data) \ KERNEL_CTORS() \ @@ -599,7 +610,7 @@ BSS_FIRST_SECTIONS \ *(.bss..page_aligned) \ *(.dynbss) \ - *(.bss) \ + *(.bss .bss.[0-9a-zA-Z_]*) \ *(COMMON) \ } @@ -682,12 +693,12 @@ #define INIT_CALLS_LEVEL(level) \ VMLINUX_SYMBOL(__initcall##level##_start) = .; \ - *(.initcall##level##.init) \ - *(.initcall##level##s.init) \ + KEEP(*(.initcall##level##.init)) \ + KEEP(*(.initcall##level##s.init)) \ #define INIT_CALLS \ VMLINUX_SYMBOL(__initcall_start) = .; \ - *(.initcallearly.init) \ + KEEP(*(.initcallearly.init)) \ INIT_CALLS_LEVEL(0) \ INIT_CALLS_LEVEL(1) \ INIT_CALLS_LEVEL(2) \ @@ -701,21 +712,21 @@ #define CON_INITCALL \ VMLINUX_SYMBOL(__con_initcall_start) = .; \ - *(.con_initcall.init) \ + KEEP(*(.con_initcall.init)) \ VMLINUX_SYMBOL(__con_initcall_end) = .; #define SECURITY_INITCALL \ VMLINUX_SYMBOL(__security_initcall_start) = .; \ - *(.security_initcall.init) \ + KEEP(*(.security_initcall.init)) \ VMLINUX_SYMBOL(__security_initcall_end) = .; #ifdef CONFIG_BLK_DEV_INITRD #define INIT_RAM_FS \ . = ALIGN(4); \ VMLINUX_SYMBOL(__initramfs_start) = .; \ - *(.init.ramfs) \ + KEEP(*(.init.ramfs)) \ . = ALIGN(8); \ - *(.init.ramfs.info) + KEEP(*(.init.ramfs.info)) #else #define INIT_RAM_FS #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 668569844d37..f1bfa15b6f9b 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -182,6 +182,29 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); # define unreachable() do { } while (1) #endif +/* + * KENTRY - kernel entry point + * This can be used to annotate symbols (functions or data) that are used + * without their linker symbol being referenced explicitly. For example, + * interrupt vector handlers, or functions in the kernel image that are found + * programatically. + * + * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those + * are handled in their own way (with KEEP() in linker scripts). + * + * KENTRY can be avoided if the symbols in question are marked as KEEP() in the + * linker script. For example an architecture could KEEP() its entire + * boot/exception vector code rather than annotate each function and data. 
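As a rough sketch of what the macro defined just below produces (the handler name is made up): KENTRY(my_vector_handler) emits an anchor pointer into its own ___kentry+my_vector_handler section. The KEEP(*(SORT(___kentry+*))) entry added to INIT_DATA above preserves that anchor across --gc-sections, and because the anchor holds the handler's address, the handler's own per-function section is treated as live as well.

	/* approximately the expansion of KENTRY(my_vector_handler); the bare
	 * __used of the real macro is folded into the `used' attribute here
	 * so the snippet stands on its own                                   */
	extern void my_vector_handler(void);	/* typically provided by asm code */

	static const unsigned long __kentry_my_vector_handler
		__attribute__((section("___kentry" "+" "my_vector_handler"), used))
		= (unsigned long)&my_vector_handler;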
+ */ +#ifndef KENTRY +# define KENTRY(sym) \ + extern typeof(sym) sym; \ + static const unsigned long __kentry_##sym \ + __used \ + __attribute__((section("___kentry" "+" #sym ), used)) \ + = (unsigned long)&sym; +#endif + #ifndef RELOC_HIDE # define RELOC_HIDE(ptr, off) \ ({ unsigned long __ptr; \ diff --git a/include/linux/export.h b/include/linux/export.h index d7df4922da1d..2a0f61fbc731 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -1,5 +1,6 @@ #ifndef _LINUX_EXPORT_H #define _LINUX_EXPORT_H + /* * Export symbols from the kernel to modules. Forked from module.h * to reduce the amount of pointless cruft we feed to gcc when only @@ -42,27 +43,26 @@ extern struct module __this_module; #ifdef CONFIG_MODVERSIONS /* Mark the CRC weak since genksyms apparently decides not to * generate a checksums for some symbols */ -#define __CRC_SYMBOL(sym, sec) \ - extern __visible void *__crc_##sym __attribute__((weak)); \ - static const unsigned long __kcrctab_##sym \ - __used \ - __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ +#define __CRC_SYMBOL(sym, sec) \ + extern __visible void *__crc_##sym __attribute__((weak)); \ + static const unsigned long __kcrctab_##sym \ + __used \ + __attribute__((section("___kcrctab" sec "+" #sym), used)) \ = (unsigned long) &__crc_##sym; #else #define __CRC_SYMBOL(sym, sec) #endif /* For every exported symbol, place a struct in the __ksymtab section */ -#define ___EXPORT_SYMBOL(sym, sec) \ - extern typeof(sym) sym; \ - __CRC_SYMBOL(sym, sec) \ - static const char __kstrtab_##sym[] \ - __attribute__((section("__ksymtab_strings"), aligned(1))) \ - = VMLINUX_SYMBOL_STR(sym); \ - extern const struct kernel_symbol __ksymtab_##sym; \ - __visible const struct kernel_symbol __ksymtab_##sym \ - __used \ - __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ +#define ___EXPORT_SYMBOL(sym, sec) \ + extern typeof(sym) sym; \ + __CRC_SYMBOL(sym, sec) \ + static const char __kstrtab_##sym[] \ + __attribute__((section("__ksymtab_strings"), aligned(1))) \ + = VMLINUX_SYMBOL_STR(sym); \ + static const struct kernel_symbol __ksymtab_##sym \ + __used \ + __attribute__((section("___ksymtab" sec "+" #sym), used)) \ = { (unsigned long)&sym, __kstrtab_##sym } #if defined(__KSYM_DEPS__) diff --git a/include/linux/init.h b/include/linux/init.h index 5a3321a7909b..024a0b5b3ed0 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -139,24 +139,8 @@ extern bool initcall_debug; #ifndef __ASSEMBLY__ -#ifdef CONFIG_LTO -/* Work around a LTO gcc problem: when there is no reference to a variable - * in a module it will be moved to the end of the program. This causes - * reordering of initcalls which the kernel does not like. - * Add a dummy reference function to avoid this. The function is - * deleted by the linker. - */ -#define LTO_REFERENCE_INITCALL(x) \ - ; /* yes this is needed */ \ - static __used __exit void *reference_##x(void) \ - { \ - return &x; \ - } -#else -#define LTO_REFERENCE_INITCALL(x) -#endif - -/* initcalls are now grouped by functionality into separate +/* + * initcalls are now grouped by functionality into separate * subsections. Ordering inside the subsections is determined * by link order. * For backwards compatibility, initcall() puts the call in @@ -164,12 +148,16 @@ extern bool initcall_debug; * * The `id' arg to __define_initcall() is needed so that multiple initcalls * can point at the same handler without causing duplicate-symbol build errors. 
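For example (an invented setup function, used only to show the naming): registering the same handler at two levels produces two differently named pointer variables instead of a duplicate symbol, and both land in .initcall*.init sections, which is why the linker script now wraps those sections in KEEP(), as the comment goes on to explain.

	#include <linux/init.h>

	static int __init my_common_setup(void)
	{
		return 0;
	}

	/* -> static initcall_t __initcall_my_common_setup1 in .initcall1.init */
	core_initcall(my_common_setup);

	/* -> static initcall_t __initcall_my_common_setup6 in .initcall6.init */
	device_initcall(my_common_setup);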
+ * + * Initcalls are run by placing pointers in initcall sections that the + * kernel iterates at runtime. The linker can do dead code / data elimination + * and remove that completely, so the initcall sections have to be marked + * as KEEP() in the linker script. */ #define __define_initcall(fn, id) \ static initcall_t __initcall_##fn##id __used \ - __attribute__((__section__(".initcall" #id ".init"))) = fn; \ - LTO_REFERENCE_INITCALL(__initcall_##fn##id) + __attribute__((__section__(".initcall" #id ".init"))) = fn; /* * Early initcalls run before initializing SMP. @@ -205,15 +193,15 @@ extern bool initcall_debug; #define __initcall(fn) device_initcall(fn) -#define __exitcall(fn) \ +#define __exitcall(fn) \ static exitcall_t __exitcall_##fn __exit_call = fn -#define console_initcall(fn) \ - static initcall_t __initcall_##fn \ +#define console_initcall(fn) \ + static initcall_t __initcall_##fn \ __used __section(.con_initcall.init) = fn -#define security_initcall(fn) \ - static initcall_t __initcall_##fn \ +#define security_initcall(fn) \ + static initcall_t __initcall_##fn \ __used __section(.security_initcall.init) = fn struct obs_kernel_param { diff --git a/init/Makefile b/init/Makefile index 7bc47ee31c36..c4fb45525d08 100644 --- a/init/Makefile +++ b/init/Makefile @@ -2,6 +2,8 @@ # Makefile for the linux kernel. # +ccflags-y := -fno-function-sections -fno-data-sections + obj-y := main.o version.o mounts.o ifneq ($(CONFIG_BLK_DEV_INITRD),y) obj-y += noinitramfs.o diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 11602e5efb3b..de46ab03f063 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -81,6 +81,7 @@ endif ifneq ($(strip $(lib-y) $(lib-m) $(lib-)),) lib-target := $(obj)/lib.a +obj-y += $(obj)/lib-ksyms.o endif ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),) @@ -358,12 +359,22 @@ $(sort $(subdir-obj-y)): $(subdir-ym) ; # Rule to compile a set of .o files into one .o file # ifdef builtin-target -quiet_cmd_link_o_target = LD $@ + +ifdef CONFIG_THIN_ARCHIVES + cmd_make_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS) + cmd_make_empty_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS) + quiet_cmd_link_o_target = AR $@ +else + cmd_make_builtin = $(LD) $(ld_flags) -r -o + cmd_make_empty_builtin = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) + quiet_cmd_link_o_target = LD $@ +endif + # If the list of objects to link is empty, just create an empty built-in.o cmd_link_o_target = $(if $(strip $(obj-y)),\ - $(LD) $(ld_flags) -r -o $@ $(filter $(obj-y), $^) \ + $(cmd_make_builtin) $@ $(filter $(obj-y), $^) \ $(cmd_secanalysis),\ - rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@) + $(cmd_make_empty_builtin) $@) $(builtin-target): $(obj-y) FORCE $(call if_changed,link_o_target) @@ -389,12 +400,36 @@ $(modorder-target): $(subdir-ym) FORCE # ifdef lib-target quiet_cmd_link_l_target = AR $@ -cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y) + +ifdef CONFIG_THIN_ARCHIVES + cmd_link_l_target = rm -f $@; $(AR) rcsT$(KBUILD_ARFLAGS) $@ $(lib-y) +else + cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y) +endif $(lib-target): $(lib-y) FORCE $(call if_changed,link_l_target) targets += $(lib-target) + +dummy-object = $(obj)/.lib_exports.o +ksyms-lds = $(dot-target).lds +ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX +ref_prefix = EXTERN(_ +else +ref_prefix = EXTERN( +endif + +quiet_cmd_export_list = EXPORTS $@ +cmd_export_list = $(OBJDUMP) -h $< | \ + sed -ne '/___ksymtab/{s/.*+/$(ref_prefix)/;s/ .*/)/;p}' >$(ksyms-lds);\ + rm -f 
$(dummy-object);\ + $(AR) rcs$(KBUILD_ARFLAGS) $(dummy-object);\ + $(LD) $(ld_flags) -r -o $@ -T $(ksyms-lds) $(dummy-object);\ + rm $(dummy-object) $(ksyms-lds) + +$(obj)/lib-ksyms.o: $(lib-target) FORCE + $(call if_changed,export_list) endif # diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 1366a94b6c39..16923ba4b5b1 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -115,14 +115,18 @@ $(modules:.ko=.mod.o): %.mod.o: %.mod.c FORCE targets += $(modules:.ko=.mod.o) -# Step 6), final link of the modules +ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink) + +# Step 6), final link of the modules with optional arch pass after final link quiet_cmd_ld_ko_o = LD [M] $@ - cmd_ld_ko_o = $(LD) -r $(LDFLAGS) \ - $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \ - -o $@ $(filter-out FORCE,$^) + cmd_ld_ko_o = \ + $(LD) -r $(LDFLAGS) \ + $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \ + -o $@ $(filter-out FORCE,$^) ; \ + $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) $(modules): %.ko :%.o %.mod.o FORCE - $(call if_changed,ld_ko_o) + +$(call if_changed,ld_ko_o) targets += $(modules) diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c index 746ec1ece614..fff818b92acb 100644 --- a/scripts/basic/fixdep.c +++ b/scripts/basic/fixdep.c @@ -82,8 +82,7 @@ * to date before even starting the recursive build, so it's too late * at this point anyway. * - * The algorithm to grep for "CONFIG_..." is bit unusual, but should - * be fast ;-) We don't even try to really parse the header files, but + * We don't even try to really parse the header files, but * merely grep, i.e. if CONFIG_FOO is mentioned in a comment, it will * be picked up as well. It's not a problem with respect to * correctness, since that can only give too many dependencies, thus @@ -115,11 +114,6 @@ #include <ctype.h> #include <arpa/inet.h> -#define INT_CONF ntohl(0x434f4e46) -#define INT_ONFI ntohl(0x4f4e4649) -#define INT_NFIG ntohl(0x4e464947) -#define INT_FIG_ ntohl(0x4649475f) - int insert_extra_deps; char *target; char *depfile; @@ -241,37 +235,22 @@ static void use_config(const char *m, int slen) print_config(m, slen); } -static void parse_config_file(const char *map, size_t len) +static void parse_config_file(const char *p) { - const int *end = (const int *) (map + len); - /* start at +1, so that p can never be < map */ - const int *m = (const int *) map + 1; - const char *p, *q; - - for (; m < end; m++) { - if (*m == INT_CONF) { p = (char *) m ; goto conf; } - if (*m == INT_ONFI) { p = (char *) m-1; goto conf; } - if (*m == INT_NFIG) { p = (char *) m-2; goto conf; } - if (*m == INT_FIG_) { p = (char *) m-3; goto conf; } - continue; - conf: - if (p > map + len - 7) - continue; - if (memcmp(p, "CONFIG_", 7)) - continue; + const char *q, *r; + + while ((p = strstr(p, "CONFIG_"))) { p += 7; - for (q = p; q < map + len; q++) { - if (!(isalnum(*q) || *q == '_')) - goto found; - } - continue; - - found: - if (!memcmp(q - 7, "_MODULE", 7)) - q -= 7; - if (q - p < 0) - continue; - use_config(p, q - p); + q = p; + while (*q && (isalnum(*q) || *q == '_')) + q++; + if (memcmp(q - 7, "_MODULE", 7) == 0) + r = q - 7; + else + r = q; + if (r > p) + use_config(p, r - p); + p = q; } } @@ -291,7 +270,7 @@ static void do_config_file(const char *filename) { struct stat st; int fd; - void *map; + char *map; fd = open(filename, O_RDONLY); if (fd < 0) { @@ -308,18 +287,23 @@ static void do_config_file(const char *filename) close(fd); return; } - map = mmap(NULL, st.st_size, PROT_READ, 
MAP_PRIVATE, fd, 0); - if ((long) map == -1) { - perror("fixdep: mmap"); + map = malloc(st.st_size + 1); + if (!map) { + perror("fixdep: malloc"); close(fd); return; } + if (read(fd, map, st.st_size) != st.st_size) { + perror("fixdep: read"); + close(fd); + return; + } + map[st.st_size] = '\0'; + close(fd); - parse_config_file(map, st.st_size); - - munmap(map, st.st_size); + parse_config_file(map); - close(fd); + free(map); } /* @@ -446,22 +430,8 @@ static void print_deps(void) close(fd); } -static void traps(void) -{ - static char test[] __attribute__((aligned(sizeof(int)))) = "CONF"; - int *p = (int *)test; - - if (*p != INT_CONF) { - fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n", - *p); - exit(2); - } -} - int main(int argc, char *argv[]) { - traps(); - if (argc == 5 && !strcmp(argv[1], "-e")) { insert_extra_deps = 1; argv++; diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh index 17fa901418ae..0055b07b03b6 100755 --- a/scripts/gen_initramfs_list.sh +++ b/scripts/gen_initramfs_list.sh @@ -97,7 +97,10 @@ print_mtime() { } list_parse() { - [ ! -L "$1" ] && echo "$1 \\" || : + if [ -L "$1" ]; then + return + fi + echo "$1" | sed 's/:/\\:/g; s/$/ \\/' } # for each file print a line in following format diff --git a/scripts/genksyms/lex.l b/scripts/genksyms/lex.l index e583565f2011..5235aa507ba5 100644 --- a/scripts/genksyms/lex.l +++ b/scripts/genksyms/lex.l @@ -289,6 +289,23 @@ repeat: } break; + case ST_TYPEOF_1: + if (token == IDENT) + { + if (is_reserved_word(yytext, yyleng) + || find_symbol(yytext, SYM_TYPEDEF, 1)) + { + yyless(0); + unput('('); + lexstate = ST_NORMAL; + token = TYPEOF_KEYW; + break; + } + _APP("(", 1); + } + lexstate = ST_TYPEOF; + /* FALLTHRU */ + case ST_TYPEOF: switch (token) { @@ -313,24 +330,6 @@ repeat: } break; - case ST_TYPEOF_1: - if (token == IDENT) - { - if (is_reserved_word(yytext, yyleng) - || find_symbol(yytext, SYM_TYPEDEF, 1)) - { - yyless(0); - unput('('); - lexstate = ST_NORMAL; - token = TYPEOF_KEYW; - break; - } - _APP("(", 1); - } - APP; - lexstate = ST_TYPEOF; - goto repeat; - case ST_BRACKET: APP; switch (token) diff --git a/scripts/genksyms/lex.lex.c_shipped b/scripts/genksyms/lex.lex.c_shipped index f82740a69b85..985c5541aae4 100644 --- a/scripts/genksyms/lex.lex.c_shipped +++ b/scripts/genksyms/lex.lex.c_shipped @@ -2098,6 +2098,23 @@ repeat: } break; + case ST_TYPEOF_1: + if (token == IDENT) + { + if (is_reserved_word(yytext, yyleng) + || find_symbol(yytext, SYM_TYPEDEF, 1)) + { + yyless(0); + unput('('); + lexstate = ST_NORMAL; + token = TYPEOF_KEYW; + break; + } + _APP("(", 1); + } + lexstate = ST_TYPEOF; + /* FALLTHRU */ + case ST_TYPEOF: switch (token) { @@ -2122,24 +2139,6 @@ repeat: } break; - case ST_TYPEOF_1: - if (token == IDENT) - { - if (is_reserved_word(yytext, yyleng) - || find_symbol(yytext, SYM_TYPEDEF, 1)) - { - yyless(0); - unput('('); - lexstate = ST_NORMAL; - token = TYPEOF_KEYW; - break; - } - _APP("(", 1); - } - APP; - lexstate = ST_TYPEOF; - goto repeat; - case ST_BRACKET: APP; switch (token) diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 4f727eb5ec43..f742c65108b9 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -37,12 +37,40 @@ info() fi } +# Thin archive build here makes a final archive with +# symbol table and indexes from vmlinux objects, which can be +# used as input to linker. 
+# +# Traditional incremental style of link does not require this step +# +# built-in.o output file +# +archive_builtin() +{ + if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then + info AR built-in.o + rm -f built-in.o; + ${AR} rcsT${KBUILD_ARFLAGS} built-in.o \ + ${KBUILD_VMLINUX_INIT} \ + ${KBUILD_VMLINUX_MAIN} + fi +} + # Link of vmlinux.o used for section mismatch analysis # ${1} output file modpost_link() { - ${LD} ${LDFLAGS} -r -o ${1} ${KBUILD_VMLINUX_INIT} \ - --start-group ${KBUILD_VMLINUX_MAIN} --end-group + local objects + + if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then + objects="--whole-archive built-in.o" + else + objects="${KBUILD_VMLINUX_INIT} \ + --start-group \ + ${KBUILD_VMLINUX_MAIN} \ + --end-group" + fi + ${LD} ${LDFLAGS} -r -o ${1} ${objects} } # Link of vmlinux @@ -51,18 +79,36 @@ modpost_link() vmlinux_link() { local lds="${objtree}/${KBUILD_LDS}" + local objects if [ "${SRCARCH}" != "um" ]; then - ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \ - -T ${lds} ${KBUILD_VMLINUX_INIT} \ - --start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1} + if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then + objects="--whole-archive built-in.o ${1}" + else + objects="${KBUILD_VMLINUX_INIT} \ + --start-group \ + ${KBUILD_VMLINUX_MAIN} \ + --end-group \ + ${1}" + fi + + ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \ + -T ${lds} ${objects} else - ${CC} ${CFLAGS_vmlinux} -o ${2} \ - -Wl,-T,${lds} ${KBUILD_VMLINUX_INIT} \ - -Wl,--start-group \ - ${KBUILD_VMLINUX_MAIN} \ - -Wl,--end-group \ - -lutil -lrt -lpthread ${1} + if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then + objects="-Wl,--whole-archive built-in.o ${1}" + else + objects="${KBUILD_VMLINUX_INIT} \ + -Wl,--start-group \ + ${KBUILD_VMLINUX_MAIN} \ + -Wl,--end-group \ + ${1}" + fi + + ${CC} ${CFLAGS_vmlinux} -o ${2} \ + -Wl,-T,${lds} \ + ${objects} \ + -lutil -lrt -lpthread rm -f linux fi } @@ -119,6 +165,7 @@ cleanup() rm -f .tmp_kallsyms* rm -f .tmp_version rm -f .tmp_vmlinux* + rm -f built-in.o rm -f System.map rm -f vmlinux rm -f vmlinux.o @@ -162,6 +209,8 @@ case "${KCONFIG_CONFIG}" in . "./${KCONFIG_CONFIG}" esac +archive_builtin + #link vmlinux.o info LD vmlinux.o modpost_link vmlinux.o |
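A small shell sketch of the thin-archive flow the script follows when CONFIG_THIN_ARCHIVES is set (the object paths are placeholders; the real commands use ${AR}, ${LD} and the kbuild object lists): the archive records only member pathnames plus a symbol index, and --whole-archive forces every member into the link, replacing the old --start-group/--end-group incremental link.

	# build a thin archive from already compiled built-in objects
	ar rcsT built-in.o init/built-in.o usr/built-in.o drivers/built-in.o

	# list the member paths; no object data is copied into the archive
	ar t built-in.o

	# feed the whole archive to the linker, as modpost_link()/vmlinux_link() do
	ld -r -o vmlinux.o --whole-archive built-in.o

Because the archive is thin, it stays small and cheap to regenerate, while the linker reads the referenced objects directly from their build directories.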