author     Andy Lutomirski <luto@amacapital.net>      2014-03-20 15:01:21 -0700
committer  H. Peter Anvin <hpa@linux.intel.com>       2014-03-20 15:19:14 -0700
commit     b67e612cef1e5964efc6fa99fb7ad3d31c4db01a
tree       c037de472a7e20c143dc3ca05e3f9327dc917d09 /arch/x86/vdso/vdso32-setup.c
parent     4e40112c4ff6a577dd06d92b2a54cdf06265bf74
x86: Load the 32-bit vdso in place, just like the 64-bit vdsos
This replaces a decent amount of incomprehensible and buggy code
with much more straightforward code. It also brings the 32-bit vdso
more in line with the 64-bit vdsos, so maybe someday they can share
even more code.

This wastes a small amount of kernel .data and .text space, but it
avoids a couple of allocations on startup, so it should be more or
less a wash memory-wise.
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Stefani Seibold <stefani@seibold.net>
Link: http://lkml.kernel.org/r/b8093933fad09ce181edb08a61dcd5d2592e9814.1395352498.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/vdso/vdso32-setup.c')
-rw-r--r--  arch/x86/vdso/vdso32-setup.c | 50
1 file changed, 29 insertions(+), 21 deletions(-)
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 5b4aaefb6b42..b45528ee8e19 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -29,6 +29,7 @@
 #include <asm/fixmap.h>
 #include <asm/hpet.h>
 #include <asm/vvar.h>
+#include "vdso_image.h"
 
 #ifdef CONFIG_COMPAT_VDSO
 #define VDSO_DEFAULT	0
@@ -41,6 +42,12 @@
 #define arch_setup_additional_pages syscall32_setup_pages
 #endif
 
+DECLARE_VDSO_IMAGE(vdso32_int80);
+#ifdef CONFIG_COMPAT
+DECLARE_VDSO_IMAGE(vdso32_syscall);
+#endif
+DECLARE_VDSO_IMAGE(vdso32_sysenter);
+
 /*
  * Should the kernel map a VDSO page into processes and pass its
  * address down to glibc upon exec()?
@@ -71,7 +78,7 @@ EXPORT_SYMBOL_GPL(vdso_enabled);
 #endif
 
 static struct page **vdso32_pages;
-static unsigned int vdso32_size;
+static unsigned vdso32_size;
 
 #ifdef CONFIG_X86_64
 
@@ -117,31 +124,32 @@ void enable_sep_cpu(void)
 
 int __init sysenter_setup(void)
 {
-	void *vdso_pages;
-	const void *vdso;
-	size_t vdso_len;
-	unsigned int i;
+	char *vdso32_start, *vdso32_end;
+	int npages, i;
 
+#ifdef CONFIG_COMPAT
 	if (vdso32_syscall()) {
-		vdso = &vdso32_syscall_start;
-		vdso_len = &vdso32_syscall_end - &vdso32_syscall_start;
-	} else if (vdso32_sysenter()){
-		vdso = &vdso32_sysenter_start;
-		vdso_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
+		vdso32_start = vdso32_syscall_start;
+		vdso32_end = vdso32_syscall_end;
+		vdso32_pages = vdso32_syscall_pages;
+	} else
+#endif
+	if (vdso32_sysenter()) {
+		vdso32_start = vdso32_sysenter_start;
+		vdso32_end = vdso32_sysenter_end;
+		vdso32_pages = vdso32_sysenter_pages;
 	} else {
-		vdso = &vdso32_int80_start;
-		vdso_len = &vdso32_int80_end - &vdso32_int80_start;
+		vdso32_start = vdso32_int80_start;
+		vdso32_end = vdso32_int80_end;
+		vdso32_pages = vdso32_int80_pages;
 	}
 
-	vdso32_size = (vdso_len + PAGE_SIZE - 1) / PAGE_SIZE;
-	vdso32_pages = kmalloc(sizeof(*vdso32_pages) * vdso32_size, GFP_ATOMIC);
-	vdso_pages = kmalloc(VDSO_OFFSET(vdso32_size), GFP_ATOMIC);
-
-	for(i = 0; i != vdso32_size; ++i)
-		vdso32_pages[i] = virt_to_page(vdso_pages + VDSO_OFFSET(i));
+	npages = ((vdso32_end - vdso32_start) + PAGE_SIZE - 1) / PAGE_SIZE;
+	vdso32_size = npages << PAGE_SHIFT;
+	for (i = 0; i < npages; i++)
+		vdso32_pages[i] = virt_to_page(vdso32_start + i*PAGE_SIZE);
 
-	memcpy(vdso_pages, vdso, vdso_len);
-	patch_vdso32(vdso_pages, vdso_len);
+	patch_vdso32(vdso32_start, vdso32_size);
 
 	return 0;
 }
@@ -177,7 +185,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 */
 	ret = install_special_mapping(mm,
 			addr,
-			VDSO_OFFSET(vdso32_size),
+			vdso32_size,
 			VM_READ|VM_EXEC|
 			VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 			vdso32_pages);
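The DECLARE_VDSO_IMAGE() users above come from the new "vdso_image.h" header, which lies outside this file's diff and so is not shown here. A minimal sketch of what such a macro could expand to, assuming only the naming convention visible in the diff (the <image>_start, <image>_end, and <image>_pages symbols), might be:

/*
 * Hedged sketch only -- not necessarily the real arch/x86/vdso/vdso_image.h.
 * Each image provides <image>_start/<image>_end boundary symbols plus a
 * page array that sysenter_setup() fills via virt_to_page().
 */
struct page;	/* normally available via <linux/mm_types.h> */

#define DECLARE_VDSO_IMAGE(image)				\
	extern char image##_start[], image##_end[];		\
	extern struct page *image##_pages[]

With a definition along those lines, DECLARE_VDSO_IMAGE(vdso32_int80); yields exactly the extern declarations that the assignments in sysenter_setup() expect. Whatever its precise form, the effect of the change is visible above: the selected image is no longer memcpy()'d into kmalloc()'d pages; virt_to_page() is applied directly to the image linked into the kernel, which presumes the vdso blobs are laid out page-aligned in the kernel image.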