author     Nagarathnam Muthusamy <nagarathnam.muthusamy@oracle.com>   2017-09-21 11:05:31 -0400
committer  David S. Miller <davem@davemloft.net>                      2017-11-15 14:21:03 +0900
commit     9a08862a5d2e266ecea1865547463da2745fc687 (patch)
tree       c651ef6b7c2d7dc75e0ec73fcc6d2e2f15f72a9e /arch/sparc/vdso
parent     23198ddffb6cddb5d5824230af4dd4b46e4046a4 (diff)
vDSO for sparc

This patch is based on work done by Nick Alcock on the 64-bit vDSO for
sparc in Oracle Linux. I have extended it to include support for a
32-bit vDSO for sparc on a 64-bit kernel. The sparc vDSO is based on
the x86 implementation. This patch provides vDSO support for both
64-bit and 32-bit programs on a 64-bit kernel; the vDSO is disabled on
32-bit sparc kernels.

*) vclock_gettime.c contains all the vDSO functions. Since the data
   page is mapped before the vDSO code page, the pointer to the data
   page is obtained by subtracting an offset from an address in the
   vDSO code page. The return address stored in %i7 is used for this
   purpose.

*) During compilation, both 32-bit and 64-bit vDSO images are compiled
   and converted into raw bytes by the vdso2c program, ready for
   mapping into a process. 32-bit images are compiled only if
   CONFIG_COMPAT is enabled. vdso2c generates two files,
   vdso-image-64.c and vdso-image-32.c, which contain the respective
   vDSO images as C structures.

*) During vDSO initialization, the required number of vDSO pages is
   allocated and the raw bytes are copied into them.

*) On every exec, these pages are mapped into the process through
   arch_setup_additional_pages, and the location of the mapping is
   passed to the process through the aux vector entry AT_SYSINFO_EHDR,
   which is used by glibc (see the userspace sketch after the tags
   below).

*) A new update_vsyscall routine is added for sparc to keep the data
   page in the vDSO updated.

*) As the vDSO cannot contain dynamically relocatable references, a
   new version of cpu_relax is added for use by the vDSO.

This change also requires a corresponding change (a "putback") to
glibc to make use of the vDSO. For testing, programs that want to try
the vDSO can be compiled against the generated vdso(64/32).so in the
source tree.

Testing:
========

[root@localhost ~]# cat vdso_test.c
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main()
{
	struct timespec tv_start, tv_end;
	struct timeval tv_tmp;
	int i;
	int count = 1 * 1000 * 10000;
	long long diff;

	clock_gettime(0, &tv_start);
	for (i = 0; i < count; i++)
		gettimeofday(&tv_tmp, NULL);
	clock_gettime(0, &tv_end);

	diff = (long long)(tv_end.tv_sec - tv_start.tv_sec) * (1*1000*1000*1000);
	diff += (tv_end.tv_nsec - tv_start.tv_nsec);

	printf("Start sec: %ld\n", (long)tv_start.tv_sec);
	printf("End sec : %ld\n", (long)tv_end.tv_sec);
	printf("%d cycles in %lld ns = %f ns/cycle\n",
	       count, diff, (double)diff / (double)count);
	return 0;
}

[root@localhost ~]# cc vdso_test.c -o t32_without_fix -m32 -lrt
[root@localhost ~]# ./t32_without_fix
Start sec: 1502396130
End sec : 1502396140
10000000 cycles in 9565148528 ns = 956.514853 ns/cycle

[root@localhost ~]# cc vdso_test.c -o t32_with_fix -m32 ./vdso32.so.dbg
[root@localhost ~]# ./t32_with_fix
Start sec: 1502396168
End sec : 1502396169
10000000 cycles in 798141262 ns = 79.814126 ns/cycle

[root@localhost ~]# cc vdso_test.c -o t64_without_fix -m64 -lrt
[root@localhost ~]# ./t64_without_fix
Start sec: 1502396208
End sec : 1502396218
10000000 cycles in 9846091800 ns = 984.609180 ns/cycle

[root@localhost ~]# cc vdso_test.c -o t64_with_fix -m64 ./vdso64.so.dbg
[root@localhost ~]# ./t64_with_fix
Start sec: 1502396257
End sec : 1502396257
10000000 cycles in 380984048 ns = 38.098405 ns/cycle

V1 to V2 Changes:
=================
Added hot-patching code to switch the read-%stick instruction to a
read-%tick instruction depending on the hardware.

V2 to V3 Changes:
=================
Merged the latest changes from sparc-next and moved the initialization
of clocksource_tick.archdata.vclock_mode to time_init_early. Disabled
the queued spinlock and rwlock configurations when simulating a 32-bit
config to compile the 32-bit vDSO.

V3 to V4 Changes:
=================
Hardcoded the page size as 8192 in the linker script for both 64-bit
and 32-bit binaries. Removed unused variables in vdso2c.h. Added the
-mv8plus flag to the Makefile to prevent the generation of relocation
entries for __lshrdi3 in the 32-bit vDSO binary.

Signed-off-by: Nick Alcock <nick.alcock@oracle.com>
Signed-off-by: Nagarathnam Muthusamy <nagarathnam.muthusamy@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
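
For reference, the AT_SYSINFO_EHDR aux vector entry mentioned above can
be read back from userspace with getauxval(); a minimal sketch, not
part of this patch (getauxval() requires glibc 2.16 or later):

#include <stdio.h>
#include <elf.h>        /* AT_SYSINFO_EHDR */
#include <sys/auxv.h>   /* getauxval() */

int main(void)
{
	/* Address of the vDSO ELF header, published by
	 * arch_setup_additional_pages() in vma.c below. */
	unsigned long ehdr = getauxval(AT_SYSINFO_EHDR);

	if (ehdr)
		printf("vDSO mapped at 0x%lx\n", ehdr);
	else
		printf("no vDSO mapping (e.g. booted with vdso=0)\n");
	return 0;
}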
Diffstat (limited to 'arch/sparc/vdso')
-rw-r--r--   arch/sparc/vdso/.gitignore              |   3
-rw-r--r--   arch/sparc/vdso/Makefile                | 149
-rw-r--r--   arch/sparc/vdso/vclock_gettime.c        | 264
-rw-r--r--   arch/sparc/vdso/vdso-layout.lds.S       | 104
-rw-r--r--   arch/sparc/vdso/vdso-note.S             |  12
-rw-r--r--   arch/sparc/vdso/vdso.lds.S              |  25
-rw-r--r--   arch/sparc/vdso/vdso2c.c                | 234
-rw-r--r--   arch/sparc/vdso/vdso2c.h                | 143
-rw-r--r--   arch/sparc/vdso/vdso32/.gitignore       |   1
-rw-r--r--   arch/sparc/vdso/vdso32/vclock_gettime.c |  26
-rw-r--r--   arch/sparc/vdso/vdso32/vdso-note.S      |  12
-rw-r--r--   arch/sparc/vdso/vdso32/vdso32.lds.S     |  24
-rw-r--r--   arch/sparc/vdso/vma.c                   | 268
13 files changed, 1265 insertions, 0 deletions
diff --git a/arch/sparc/vdso/.gitignore b/arch/sparc/vdso/.gitignore
new file mode 100644
index 000000000000..ef925b998222
--- /dev/null
+++ b/arch/sparc/vdso/.gitignore
@@ -0,0 +1,3 @@
+vdso.lds
+vdso-image-*.c
+vdso2c
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
new file mode 100644
index 000000000000..a6615d8864f7
--- /dev/null
+++ b/arch/sparc/vdso/Makefile
@@ -0,0 +1,149 @@
+#
+# Building vDSO images for sparc.
+#
+
+KBUILD_CFLAGS += $(DISABLE_LTO)
+
+VDSO64-$(CONFIG_SPARC64) := y
+VDSOCOMPAT-$(CONFIG_COMPAT) := y
+
+# files to link into the vdso
+vobjs-y := vdso-note.o vclock_gettime.o
+
+# files to link into kernel
+obj-y += vma.o
+
+# vDSO images to build
+vdso_img-$(VDSO64-y) += 64
+vdso_img-$(VDSOCOMPAT-y) += 32
+
+vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
+
+$(obj)/vdso.o: $(obj)/vdso.so
+
+targets += vdso.lds $(vobjs-y)
+
+# Build the vDSO image C files and link them in.
+vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
+vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
+vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
+obj-y += $(vdso_img_objs)
+targets += $(vdso_img_cfiles)
+targets += $(vdso_img_sodbg)
+.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \
+ $(vdso_img-y:%=$(obj)/vdso%.so)
+
+export CPPFLAGS_vdso.lds += -P -C
+
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+ -Wl,--no-undefined \
+ -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
+ $(DISABLE_LTO)
+
+$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+ $(call if_changed,vdso)
+
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+hostprogs-y += vdso2c
+
+quiet_cmd_vdso2c = VDSO2C $@
+define cmd_vdso2c
+ $(obj)/vdso2c $< $(<:%.dbg=%) $@
+endef
+
+$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
+ $(call if_changed,vdso2c)
+
+#
+# Don't omit frame pointers for ease of userspace debugging, but do
+# optimize sibling calls.
+#
+CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables \
+ -m64 -ffixed-g2 -ffixed-g3 -fcall-used-g4 -fcall-used-g5 -ffixed-g6 \
+ -ffixed-g7 $(filter -g%,$(KBUILD_CFLAGS)) \
+ $(call cc-option, -fno-stack-protector) -fno-omit-frame-pointer \
+ -foptimize-sibling-calls -DBUILD_VDSO
+
+$(vobjs): KBUILD_CFLAGS += $(CFL)
+
+#
+# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+#
+CFLAGS_REMOVE_vdso-note.o = -pg
+CFLAGS_REMOVE_vclock_gettime.o = -pg
+
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg
+ $(call if_changed,objcopy)
+
+CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf32_sparc,-soname=linux-gate.so.1
+
+#This makes sure the $(obj) subdirectory exists even though vdso32/
+#is not a kbuild sub-make subdirectory
+override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
+
+targets += vdso32/vdso32.lds
+targets += vdso32/vdso-note.o
+targets += vdso32/vclock_gettime.o
+
+KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
+$(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
+$(obj)/vdso32.so.dbg: asflags-$(CONFIG_SPARC64) += -m32
+
+KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -mno-app-regs -ffixed-g7
+KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += -mv8plus
+$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
+$(obj)/vdso32.so.dbg: FORCE \
+ $(obj)/vdso32/vdso32.lds \
+ $(obj)/vdso32/vclock_gettime.o \
+ $(obj)/vdso32/vdso-note.o
+ $(call if_changed,vdso)
+
+#
+# The DSO images are built using a special linker script.
+#
+quiet_cmd_vdso = VDSO $@
+ cmd_vdso = $(CC) -nostdlib -o $@ \
+ $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
+
+VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+ $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic
+GCOV_PROFILE := n
+
+#
+# Install the unstripped copies of vdso*.so. If our toolchain supports
+# build-id, install .build-id links as well.
+#
+quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
+define cmd_vdso_install
+ cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+ if readelf -n $< |grep -q 'Build ID'; then \
+ buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+ first=`echo $$buildid | cut -b-2`; \
+ last=`echo $$buildid | cut -b3-`; \
+ mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+ ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+ fi
+endef
+
+vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
+
+$(MODLIB)/vdso: FORCE
+ @mkdir -p $(MODLIB)/vdso
+
+$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
+ $(call cmd,vdso_install)
+
+PHONY += vdso_install $(vdso_img_insttargets)
+vdso_install: $(vdso_img_insttargets) FORCE
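
For reference, the cmd_vdso2c rule above expands to an invocation of
the form

  $ vdso2c vdso64.so.dbg vdso64.so vdso-image-64.c

matching the usage string in vdso2c.c below: the unstripped (raw)
image first, the objcopy-stripped image second, and the generated C
file last.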
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
new file mode 100644
index 000000000000..3feb3d960ca5
--- /dev/null
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2006 Andi Kleen, SUSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
+ * Fast user context implementation of clock_gettime, gettimeofday, and time.
+ *
+ * The code should have no internal unresolved relocations.
+ * Check with readelf after changing.
+ * Also alternative() doesn't work.
+ */
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+/* Disable profiling for userspace code: */
+#ifndef DISABLE_BRANCH_PROFILING
+#define DISABLE_BRANCH_PROFILING
+#endif
+
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/string.h>
+#include <asm/io.h>
+#include <asm/unistd.h>
+#include <asm/timex.h>
+#include <asm/clocksource.h>
+#include <asm/vvar.h>
+
+#undef TICK_PRIV_BIT
+#ifdef CONFIG_SPARC64
+#define TICK_PRIV_BIT (1UL << 63)
+#else
+#define TICK_PRIV_BIT (1ULL << 63)
+#endif
+
+#define SYSCALL_STRING \
+ "ta 0x6d;" \
+ "sub %%g0, %%o0, %%o0;" \
+
+#define SYSCALL_CLOBBERS \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \
+ "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", \
+ "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", \
+ "cc", "memory"
+
+/*
+ * Compute the vvar page's address in the process address space, and return it
+ * as a pointer to the vvar_data.
+ */
+static notrace noinline struct vvar_data *
+get_vvar_data(void)
+{
+ unsigned long ret;
+
+ /*
+ * vdso data page is the first vDSO page so grab the return address
+ * and move up a page to get to the data page.
+ */
+ ret = (unsigned long)__builtin_return_address(0);
+ ret &= ~(8192 - 1);
+ ret -= 8192;
+
+ return (struct vvar_data *) ret;
+}
+
+static notrace long
+vdso_fallback_gettime(long clock, struct timespec *ts)
+{
+ register long num __asm__("g1") = __NR_clock_gettime;
+ register long o0 __asm__("o0") = clock;
+ register long o1 __asm__("o1") = (long) ts;
+
+ __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
+ "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
+ return o0;
+}
+
+static notrace __always_inline long
+vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ register long num __asm__("g1") = __NR_gettimeofday;
+ register long o0 __asm__("o0") = (long) tv;
+ register long o1 __asm__("o1") = (long) tz;
+
+ __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
+ "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
+ return o0;
+}
+
+#ifdef CONFIG_SPARC64
+static notrace noinline u64 vread_tick(void)
+{
+ u64 ret;
+
+ __asm__ __volatile__("rd %%asr24, %0 \n"
+ ".section .vread_tick_patch, \"ax\" \n"
+ "rd %%tick, %0 \n"
+ ".previous \n"
+ : "=&r" (ret));
+ return ret & ~TICK_PRIV_BIT;
+}
+#else
+static notrace noinline u64
+vread_tick(void)
+{
+ unsigned int lo, hi;
+
+ __asm__ __volatile__("rd %%asr24, %%g1\n\t"
+ "srlx %%g1, 32, %1\n\t"
+ "srl %%g1, 0, %0\n"
+ ".section .vread_tick_patch, \"ax\" \n"
+ "rd %%tick, %%g1\n"
+ ".previous \n"
+ : "=&r" (lo), "=&r" (hi)
+ :
+ : "g1");
+ return lo | ((u64)hi << 32);
+}
+#endif
+
+static notrace inline u64
+vgetsns(struct vvar_data *vvar)
+{
+ u64 v;
+ u64 cycles;
+
+ cycles = vread_tick();
+ v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
+ return v * vvar->clock.mult;
+}
+
+static notrace noinline int
+do_realtime(struct vvar_data *vvar, struct timespec *ts)
+{
+ unsigned long seq;
+ u64 ns;
+
+ ts->tv_nsec = 0;
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->wall_time_sec;
+ ns = vvar->wall_time_snsec;
+ ns += vgetsns(vvar);
+ ns >>= vvar->clock.shift;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+
+ timespec_add_ns(ts, ns);
+
+ return 0;
+}
+
+static notrace noinline int
+do_monotonic(struct vvar_data *vvar, struct timespec *ts)
+{
+ unsigned long seq;
+ u64 ns;
+
+ ts->tv_nsec = 0;
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->monotonic_time_sec;
+ ns = vvar->monotonic_time_snsec;
+ ns += vgetsns(vvar);
+ ns >>= vvar->clock.shift;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+
+ timespec_add_ns(ts, ns);
+
+ return 0;
+}
+
+static notrace noinline int
+do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
+{
+ unsigned long seq;
+
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->wall_time_coarse_sec;
+ ts->tv_nsec = vvar->wall_time_coarse_nsec;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+ return 0;
+}
+
+static notrace noinline int
+do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts)
+{
+ unsigned long seq;
+
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->monotonic_time_coarse_sec;
+ ts->tv_nsec = vvar->monotonic_time_coarse_nsec;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+
+ return 0;
+}
+
+notrace int
+__vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+{
+ struct vvar_data *vvd = get_vvar_data();
+
+ switch (clock) {
+ case CLOCK_REALTIME:
+ if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+ break;
+ return do_realtime(vvd, ts);
+ case CLOCK_MONOTONIC:
+ if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+ break;
+ return do_monotonic(vvd, ts);
+ case CLOCK_REALTIME_COARSE:
+ return do_realtime_coarse(vvd, ts);
+ case CLOCK_MONOTONIC_COARSE:
+ return do_monotonic_coarse(vvd, ts);
+ }
+ /*
+ * Unknown clock ID ? Fall back to the syscall.
+ */
+ return vdso_fallback_gettime(clock, ts);
+}
+int
+clock_gettime(clockid_t, struct timespec *)
+ __attribute__((weak, alias("__vdso_clock_gettime")));
+
+notrace int
+__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ struct vvar_data *vvd = get_vvar_data();
+
+ if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
+ if (likely(tv != NULL)) {
+ union tstv_t {
+ struct timespec ts;
+ struct timeval tv;
+ } *tstv = (union tstv_t *) tv;
+ do_realtime(vvd, &tstv->ts);
+ /*
+ * Assign before dividing to ensure that the division is
+ * done in the type of tv_usec, not tv_nsec.
+ *
+ * There cannot be > 1 billion usec in a second:
+ * do_realtime() has already distributed such overflow
+ * into tv_sec. So we can assign it to an int safely.
+ */
+ tstv->tv.tv_usec = tstv->ts.tv_nsec;
+ tstv->tv.tv_usec /= 1000;
+ }
+ if (unlikely(tz != NULL)) {
+ /* Avoid memcpy. Some old compilers fail to inline it */
+ tz->tz_minuteswest = vvd->tz_minuteswest;
+ tz->tz_dsttime = vvd->tz_dsttime;
+ }
+ return 0;
+ }
+ return vdso_fallback_gettimeofday(tv, tz);
+}
+int
+gettimeofday(struct timeval *, struct timezone *)
+ __attribute__((weak, alias("__vdso_gettimeofday")));
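
The header comment of this file asks for a relocation check after any
change; one way to do that, against the unstripped image:

  $ readelf -r arch/sparc/vdso/vdso64.so.dbg

should report no relocation entries. vdso2c enforces the same
invariant at build time by failing when DT_RELSZ or DT_RELASZ is
non-zero (see vdso2c.h below).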
diff --git a/arch/sparc/vdso/vdso-layout.lds.S b/arch/sparc/vdso/vdso-layout.lds.S
new file mode 100644
index 000000000000..f2c83abaca12
--- /dev/null
+++ b/arch/sparc/vdso/vdso-layout.lds.S
@@ -0,0 +1,104 @@
+/*
+ * Linker script for vDSO. This is an ELF shared object prelinked to
+ * its virtual address, and with only one read-only segment.
+ * This script controls its layout.
+ */
+
+#if defined(BUILD_VDSO64)
+# define SHDR_SIZE 64
+#elif defined(BUILD_VDSO32)
+# define SHDR_SIZE 40
+#else
+# error unknown VDSO target
+#endif
+
+#define NUM_FAKE_SHDRS 7
+
+SECTIONS
+{
+ /*
+ * User/kernel shared data is before the vDSO. This may be a little
+ * uglier than putting it after the vDSO, but it avoids issues with
+ * non-allocatable things that dangle past the end of the PT_LOAD
+ * segment. Page size is 8192 for both 64-bit and 32-bit vdso binaries
+ */
+
+ vvar_start = . -8192;
+ vvar_data = vvar_start;
+
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : {
+ *(.rodata*)
+ *(.data*)
+ *(.sdata*)
+ *(.got.plt) *(.got)
+ *(.gnu.linkonce.d.*)
+ *(.bss*)
+ *(.dynbss*)
+ *(.gnu.linkonce.b.*)
+
+ /*
+ * Ideally this would live in a C file: kept in here for
+ * compatibility with x86-64.
+ */
+ VDSO_FAKE_SECTION_TABLE_START = .;
+ . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
+ VDSO_FAKE_SECTION_TABLE_END = .;
+ } :text
+
+ .fake_shstrtab : { *(.fake_shstrtab) } :text
+
+
+ .note : { *(.note.*) } :text :note
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+
+ /*
+ * Text is well-separated from actual data: there's plenty of
+ * stuff that isn't used at runtime in between.
+ */
+
+ .text : { *(.text*) } :text =0x90909090,
+
+ .vread_tick_patch : {
+ vread_tick_patch_start = .;
+ *(.vread_tick_patch)
+ vread_tick_patch_end = .;
+ }
+
+ /DISCARD/ : {
+ *(.discard)
+ *(.discard.*)
+ *(__bug_table)
+ }
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME 0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
diff --git a/arch/sparc/vdso/vdso-note.S b/arch/sparc/vdso/vdso-note.S
new file mode 100644
index 000000000000..79a071e4357e
--- /dev/null
+++ b/arch/sparc/vdso/vdso-note.S
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/sparc/vdso/vdso.lds.S b/arch/sparc/vdso/vdso.lds.S
new file mode 100644
index 000000000000..f3caa29a331c
--- /dev/null
+++ b/arch/sparc/vdso/vdso.lds.S
@@ -0,0 +1,25 @@
+/*
+ * Linker script for 64-bit vDSO.
+ * We #include the file to define the layout details.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.
+ */
+
+#define BUILD_VDSO64
+
+#include "vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+ LINUX_2.6 {
+ global:
+ clock_gettime;
+ __vdso_clock_gettime;
+ gettimeofday;
+ __vdso_gettimeofday;
+ local: *;
+ };
+}
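
The effect of the version script can be verified on the built image,
e.g.:

  $ readelf --dyn-syms arch/sparc/vdso/vdso64.so.dbg

which should show clock_gettime, __vdso_clock_gettime, gettimeofday
and __vdso_gettimeofday as the only exported functions, each tagged
with the LINUX_2.6 version.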
diff --git a/arch/sparc/vdso/vdso2c.c b/arch/sparc/vdso/vdso2c.c
new file mode 100644
index 000000000000..9f5b1cd6d51d
--- /dev/null
+++ b/arch/sparc/vdso/vdso2c.c
@@ -0,0 +1,234 @@
+/*
+ * vdso2c - A vdso image preparation tool
+ * Copyright (c) 2014 Andy Lutomirski and others
+ * Licensed under the GPL v2
+ *
+ * vdso2c requires stripped and unstripped input. It would be trivial
+ * to fully strip the input in here, but, for reasons described below,
+ * we need to write a section table. Doing this is more or less
+ * equivalent to dropping all non-allocatable sections, but it's
+ * easier to let objcopy handle that instead of doing it ourselves.
+ * If we ever need to do something fancier than what objcopy provides,
+ * it would be straightforward to add here.
+ *
+ * We keep a section table for a few reasons:
+ *
+ * Binutils has issues debugging the vDSO: it reads the section table to
+ * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
+ * would break build-id if we removed the section table. Binutils
+ * also requires that shstrndx != 0. See:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
+ *
+ * elfutils might not look for PT_NOTE if there is a section table at
+ * all. I don't know whether this matters for any practical purpose.
+ *
+ * For simplicity, rather than hacking up a partial section table, we
+ * just write a mostly complete one. We omit non-dynamic symbols,
+ * though, since they're rather large.
+ *
+ * Once binutils gets fixed, we might be able to drop this for all but
+ * the 64-bit vdso, since build-id only works in kernel RPMs, and
+ * systems that update to new enough kernel RPMs will likely update
+ * binutils in sync. build-id has never worked for home-built kernel
+ * RPMs without manual symlinking, and I suspect that no one ever does
+ * that.
+ */
+
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <err.h>
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <tools/be_byteshift.h>
+
+#include <linux/elf.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+const char *outfilename;
+
+/* Symbols that we need in vdso2c. */
+enum {
+ sym_vvar_start,
+ sym_VDSO_FAKE_SECTION_TABLE_START,
+ sym_VDSO_FAKE_SECTION_TABLE_END,
+ sym_vread_tick,
+ sym_vread_tick_patch_start,
+ sym_vread_tick_patch_end
+};
+
+struct vdso_sym {
+ const char *name;
+ int export;
+};
+
+struct vdso_sym required_syms[] = {
+ [sym_vvar_start] = {"vvar_start", 1},
+ [sym_VDSO_FAKE_SECTION_TABLE_START] = {
+ "VDSO_FAKE_SECTION_TABLE_START", 0
+ },
+ [sym_VDSO_FAKE_SECTION_TABLE_END] = {
+ "VDSO_FAKE_SECTION_TABLE_END", 0
+ },
+ [sym_vread_tick] = {"vread_tick", 1},
+ [sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1},
+ [sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1}
+};
+
+__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
+static void fail(const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ fprintf(stderr, "Error: ");
+ vfprintf(stderr, format, ap);
+ if (outfilename)
+ unlink(outfilename);
+ exit(1);
+ va_end(ap);
+}
+
+/*
+ * Evil macros for big-endian reads and writes
+ */
+#define GBE(x, bits, ifnot) \
+ __builtin_choose_expr( \
+ (sizeof(*(x)) == bits/8), \
+ (__typeof__(*(x)))get_unaligned_be##bits(x), ifnot)
+
+#define LAST_GBE(x) \
+ __builtin_choose_expr(sizeof(*(x)) == 1, *(x), (void)(0))
+
+#define GET_BE(x) \
+ GBE(x, 64, GBE(x, 32, GBE(x, 16, LAST_GBE(x))))
+
+#define PBE(x, val, bits, ifnot) \
+ __builtin_choose_expr( \
+ (sizeof(*(x)) == bits/8), \
+ put_unaligned_be##bits((val), (x)), ifnot)
+
+#define LAST_PBE(x, val) \
+ __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), (void)(0))
+
+#define PUT_BE(x, val) \
+ PBE(x, val, 64, PBE(x, val, 32, PBE(x, val, 16, LAST_PBE(x, val))))
+
+#define NSYMS ARRAY_SIZE(required_syms)
+
+#define BITSFUNC3(name, bits, suffix) name##bits##suffix
+#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix)
+#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, )
+
+#define INT_BITS BITSFUNC2(int, ELF_BITS, _t)
+
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
+#define ELF_BITS 64
+#include "vdso2c.h"
+#undef ELF_BITS
+
+#define ELF_BITS 32
+#include "vdso2c.h"
+#undef ELF_BITS
+
+static void go(void *raw_addr, size_t raw_len,
+ void *stripped_addr, size_t stripped_len,
+ FILE *outfile, const char *name)
+{
+ Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr;
+
+ if (hdr->e_ident[EI_CLASS] == ELFCLASS64) {
+ go64(raw_addr, raw_len, stripped_addr, stripped_len,
+ outfile, name);
+ } else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) {
+ go32(raw_addr, raw_len, stripped_addr, stripped_len,
+ outfile, name);
+ } else {
+ fail("unknown ELF class\n");
+ }
+}
+
+static void map_input(const char *name, void **addr, size_t *len, int prot)
+{
+ off_t tmp_len;
+
+ int fd = open(name, O_RDONLY);
+
+ if (fd == -1)
+ err(1, "%s", name);
+
+ tmp_len = lseek(fd, 0, SEEK_END);
+ if (tmp_len == (off_t)-1)
+ err(1, "lseek");
+ *len = (size_t)tmp_len;
+
+ *addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0);
+ if (*addr == MAP_FAILED)
+ err(1, "mmap");
+
+ close(fd);
+}
+
+int main(int argc, char **argv)
+{
+ size_t raw_len, stripped_len;
+ void *raw_addr, *stripped_addr;
+ FILE *outfile;
+ char *name, *tmp;
+ int namelen;
+
+ if (argc != 4) {
+ printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n");
+ return 1;
+ }
+
+ /*
+ * Figure out the struct name. If we're writing to a .so file,
+ * generate raw output instead.
+ */
+ name = strdup(argv[3]);
+ namelen = strlen(name);
+ if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) {
+ name = NULL;
+ } else {
+ tmp = strrchr(name, '/');
+ if (tmp)
+ name = tmp + 1;
+ tmp = strchr(name, '.');
+ if (tmp)
+ *tmp = '\0';
+ for (tmp = name; *tmp; tmp++)
+ if (*tmp == '-')
+ *tmp = '_';
+ }
+
+ map_input(argv[1], &raw_addr, &raw_len, PROT_READ);
+ map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ);
+
+ outfilename = argv[3];
+ outfile = fopen(outfilename, "w");
+ if (!outfile)
+ err(1, "%s", outfilename);
+
+ go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name);
+
+ munmap(raw_addr, raw_len);
+ munmap(stripped_addr, stripped_len);
+ fclose(outfile);
+
+ return 0;
+}
diff --git a/arch/sparc/vdso/vdso2c.h b/arch/sparc/vdso/vdso2c.h
new file mode 100644
index 000000000000..808decb0f7be
--- /dev/null
+++ b/arch/sparc/vdso/vdso2c.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * This file is included up to twice from vdso2c.c. It generates code for
+ * 32-bit and 64-bit vDSOs. We will eventually need both for 64-bit builds,
+ * since 32-bit vDSOs will then be built for 32-bit userspace.
+ */
+
+static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+ void *stripped_addr, size_t stripped_len,
+ FILE *outfile, const char *name)
+{
+ int found_load = 0;
+ unsigned long load_size = -1; /* Work around bogus warning */
+ unsigned long mapping_size;
+ int i;
+ unsigned long j;
+
+ ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr;
+ ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
+ ELF(Dyn) *dyn = 0, *dyn_end = 0;
+ INT_BITS syms[NSYMS] = {};
+
+ ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff));
+
+ /* Walk the segment table. */
+ for (i = 0; i < GET_BE(&hdr->e_phnum); i++) {
+ if (GET_BE(&pt[i].p_type) == PT_LOAD) {
+ if (found_load)
+ fail("multiple PT_LOAD segs\n");
+
+ if (GET_BE(&pt[i].p_offset) != 0 ||
+ GET_BE(&pt[i].p_vaddr) != 0)
+ fail("PT_LOAD in wrong place\n");
+
+ if (GET_BE(&pt[i].p_memsz) != GET_BE(&pt[i].p_filesz))
+ fail("cannot handle memsz != filesz\n");
+
+ load_size = GET_BE(&pt[i].p_memsz);
+ found_load = 1;
+ } else if (GET_BE(&pt[i].p_type) == PT_DYNAMIC) {
+ dyn = raw_addr + GET_BE(&pt[i].p_offset);
+ dyn_end = raw_addr + GET_BE(&pt[i].p_offset) +
+ GET_BE(&pt[i].p_memsz);
+ }
+ }
+ if (!found_load)
+ fail("no PT_LOAD seg\n");
+
+ if (stripped_len < load_size)
+ fail("stripped input is too short\n");
+
+ /* Walk the dynamic table */
+ for (i = 0; dyn + i < dyn_end &&
+ GET_BE(&dyn[i].d_tag) != DT_NULL; i++) {
+ typeof(dyn[i].d_tag) tag = GET_BE(&dyn[i].d_tag);
+ typeof(dyn[i].d_un.d_val) val = GET_BE(&dyn[i].d_un.d_val);
+
+ if ((tag == DT_RELSZ || tag == DT_RELASZ) && (val != 0))
+ fail("vdso image contains dynamic relocations\n");
+ }
+
+ /* Walk the section table */
+ for (i = 0; i < GET_BE(&hdr->e_shnum); i++) {
+ ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) +
+ GET_BE(&hdr->e_shentsize) * i;
+ if (GET_BE(&sh->sh_type) == SHT_SYMTAB)
+ symtab_hdr = sh;
+ }
+
+ if (!symtab_hdr)
+ fail("no symbol table\n");
+
+ strtab_hdr = raw_addr + GET_BE(&hdr->e_shoff) +
+ GET_BE(&hdr->e_shentsize) * GET_BE(&symtab_hdr->sh_link);
+
+ /* Walk the symbol table */
+ for (i = 0;
+ i < GET_BE(&symtab_hdr->sh_size) / GET_BE(&symtab_hdr->sh_entsize);
+ i++) {
+ int k;
+
+ ELF(Sym) *sym = raw_addr + GET_BE(&symtab_hdr->sh_offset) +
+ GET_BE(&symtab_hdr->sh_entsize) * i;
+ const char *name = raw_addr + GET_BE(&strtab_hdr->sh_offset) +
+ GET_BE(&sym->st_name);
+
+ for (k = 0; k < NSYMS; k++) {
+ if (!strcmp(name, required_syms[k].name)) {
+ if (syms[k]) {
+ fail("duplicate symbol %s\n",
+ required_syms[k].name);
+ }
+
+ /*
+ * Careful: we use negative addresses, but
+ * st_value is unsigned, so we rely
+ * on syms[k] being a signed type of the
+ * correct width.
+ */
+ syms[k] = GET_BE(&sym->st_value);
+ }
+ }
+ }
+
+ /* Validate mapping addresses. */
+ if (syms[sym_vvar_start] % 8192)
+ fail("vvar_begin must be a multiple of 8192\n");
+
+ if (!name) {
+ fwrite(stripped_addr, stripped_len, 1, outfile);
+ return;
+ }
+
+ mapping_size = (stripped_len + 8191) / 8192 * 8192;
+
+ fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
+ fprintf(outfile, "#include <linux/cache.h>\n");
+ fprintf(outfile, "#include <asm/vdso.h>\n");
+ fprintf(outfile, "\n");
+ fprintf(outfile,
+ "static unsigned char raw_data[%lu] __ro_after_init __aligned(8192)= {",
+ mapping_size);
+ for (j = 0; j < stripped_len; j++) {
+ if (j % 10 == 0)
+ fprintf(outfile, "\n\t");
+ fprintf(outfile, "0x%02X, ",
+ (int)((unsigned char *)stripped_addr)[j]);
+ }
+ fprintf(outfile, "\n};\n\n");
+
+ fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name);
+ fprintf(outfile, "\t.data = raw_data,\n");
+ fprintf(outfile, "\t.size = %lu,\n", mapping_size);
+ for (i = 0; i < NSYMS; i++) {
+ if (required_syms[i].export && syms[i])
+ fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
+ required_syms[i].name, (int64_t)syms[i]);
+ }
+ fprintf(outfile, "};\n");
+}
diff --git a/arch/sparc/vdso/vdso32/.gitignore b/arch/sparc/vdso/vdso32/.gitignore
new file mode 100644
index 000000000000..e45fba9d0ced
--- /dev/null
+++ b/arch/sparc/vdso/vdso32/.gitignore
@@ -0,0 +1 @@
+vdso32.lds
diff --git a/arch/sparc/vdso/vdso32/vclock_gettime.c b/arch/sparc/vdso/vdso32/vclock_gettime.c
new file mode 100644
index 000000000000..026abb3b826c
--- /dev/null
+++ b/arch/sparc/vdso/vdso32/vclock_gettime.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#define BUILD_VDSO32
+
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#undef CONFIG_OPTIMIZE_INLINING
+#endif
+
+#ifdef CONFIG_SPARC64
+
+/*
+ * in case of a 32 bit VDSO for a 64 bit kernel fake a 32 bit kernel
+ * configuration
+ */
+#undef CONFIG_64BIT
+#undef CONFIG_SPARC64
+#define BUILD_VDSO32_64
+#define CONFIG_32BIT
+#undef CONFIG_QUEUED_RWLOCKS
+#undef CONFIG_QUEUED_SPINLOCKS
+
+#endif
+
+#include "../vclock_gettime.c"
diff --git a/arch/sparc/vdso/vdso32/vdso-note.S b/arch/sparc/vdso/vdso32/vdso-note.S
new file mode 100644
index 000000000000..e234983cf0d8
--- /dev/null
+++ b/arch/sparc/vdso/vdso32/vdso-note.S
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO
+ * text. Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/sparc/vdso/vdso32/vdso32.lds.S b/arch/sparc/vdso/vdso32/vdso32.lds.S
new file mode 100644
index 000000000000..53575ee154c4
--- /dev/null
+++ b/arch/sparc/vdso/vdso32/vdso32.lds.S
@@ -0,0 +1,24 @@
+/*
+ * Linker script for sparc32 vDSO
+ * We #include the file to define the layout details.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.
+ */
+
+#define BUILD_VDSO32
+#include "../vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+ LINUX_2.6 {
+ global:
+ clock_gettime;
+ __vdso_clock_gettime;
+ gettimeofday;
+ __vdso_gettimeofday;
+ local: *;
+ };
+}
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
new file mode 100644
index 000000000000..0a6f50098e23
--- /dev/null
+++ b/arch/sparc/vdso/vma.c
@@ -0,0 +1,268 @@
+/*
+ * Set up the VMAs to tell the VM about the vDSO.
+ * Copyright 2007 Andi Kleen, SUSE Labs.
+ * Subject to the GPL, v.2
+ */
+
+/*
+ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/random.h>
+#include <linux/elf.h>
+#include <asm/vdso.h>
+#include <asm/vvar.h>
+#include <asm/page.h>
+
+unsigned int __read_mostly vdso_enabled = 1;
+
+static struct vm_special_mapping vvar_mapping = {
+ .name = "[vvar]"
+};
+
+#ifdef CONFIG_SPARC64
+static struct vm_special_mapping vdso_mapping64 = {
+ .name = "[vdso]"
+};
+#endif
+
+#ifdef CONFIG_COMPAT
+static struct vm_special_mapping vdso_mapping32 = {
+ .name = "[vdso]"
+};
+#endif
+
+struct vvar_data *vvar_data;
+
+#define SAVE_INSTR_SIZE 4
+
+/*
+ * Allocate pages for the vdso and vvar, and copy in the vdso text from the
+ * kernel image.
+ */
+int __init init_vdso_image(const struct vdso_image *image,
+ struct vm_special_mapping *vdso_mapping)
+{
+ int i;
+ struct page *dp, **dpp = NULL;
+ int dnpages = 0;
+ struct page *cp, **cpp = NULL;
+ int cnpages = (image->size) / PAGE_SIZE;
+
+ /*
+ * First, the vdso text. This is initialized data, an integral number of
+ * pages long.
+ */
+ if (WARN_ON(image->size % PAGE_SIZE != 0))
+ goto oom;
+
+ cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
+ vdso_mapping->pages = cpp;
+
+ if (!cpp)
+ goto oom;
+
+ if (vdso_fix_stick) {
+ /*
+ * If the system uses %tick instead of %stick, patch the VDSO
+ * with instruction reading %tick instead of %stick.
+ */
+ unsigned int j, k = SAVE_INSTR_SIZE;
+ unsigned char *data = image->data;
+
+ for (j = image->sym_vread_tick_patch_start;
+ j < image->sym_vread_tick_patch_end; j++) {
+
+ data[image->sym_vread_tick + k] = data[j];
+ k++;
+ }
+ }
+
+ for (i = 0; i < cnpages; i++) {
+ cp = alloc_page(GFP_KERNEL);
+ if (!cp)
+ goto oom;
+ cpp[i] = cp;
+ copy_page(page_address(cp), image->data + i * PAGE_SIZE);
+ }
+
+ /*
+ * Now the vvar page. This is uninitialized data.
+ */
+
+ if (vvar_data == NULL) {
+ dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
+ if (WARN_ON(dnpages != 1))
+ goto oom;
+ dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
+ vvar_mapping.pages = dpp;
+
+ if (!dpp)
+ goto oom;
+
+ dp = alloc_page(GFP_KERNEL);
+ if (!dp)
+ goto oom;
+
+ dpp[0] = dp;
+ vvar_data = page_address(dp);
+ memset(vvar_data, 0, PAGE_SIZE);
+
+ vvar_data->seq = 0;
+ }
+
+ return 0;
+ oom:
+ if (cpp != NULL) {
+ for (i = 0; i < cnpages; i++) {
+ if (cpp[i] != NULL)
+ __free_page(cpp[i]);
+ }
+ kfree(cpp);
+ vdso_mapping->pages = NULL;
+ }
+
+ if (dpp != NULL) {
+ for (i = 0; i < dnpages; i++) {
+ if (dpp[i] != NULL)
+ __free_page(dpp[i]);
+ }
+ kfree(dpp);
+ vvar_mapping.pages = NULL;
+ }
+
+ pr_warn("Cannot allocate vdso\n");
+ vdso_enabled = 0;
+ return -ENOMEM;
+}
+
+static int __init init_vdso(void)
+{
+ int err = 0;
+#ifdef CONFIG_SPARC64
+ err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
+ if (err)
+ return err;
+#endif
+
+#ifdef CONFIG_COMPAT
+ err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
+#endif
+ return err;
+
+}
+subsys_initcall(init_vdso);
+
+struct linux_binprm;
+
+/* Shuffle the vdso up a bit, randomly. */
+static unsigned long vdso_addr(unsigned long start, unsigned int len)
+{
+ unsigned int offset;
+
+ /* This loses some more bits than a modulo, but is cheaper */
+ offset = get_random_int() & (PTRS_PER_PTE - 1);
+ return start + (offset << PAGE_SHIFT);
+}
+
+static int map_vdso(const struct vdso_image *image,
+ struct vm_special_mapping *vdso_mapping)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long text_start, addr = 0;
+ int ret = 0;
+
+ down_write(&mm->mmap_sem);
+
+ /*
+ * First, get an unmapped region: then randomize it, and make sure that
+ * region is free.
+ */
+ if (current->flags & PF_RANDOMIZE) {
+ addr = get_unmapped_area(NULL, 0,
+ image->size - image->sym_vvar_start,
+ 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+ addr = vdso_addr(addr, image->size - image->sym_vvar_start);
+ }
+ addr = get_unmapped_area(NULL, addr,
+ image->size - image->sym_vvar_start, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ text_start = addr - image->sym_vvar_start;
+ current->mm->context.vdso = (void __user *)text_start;
+
+ /*
+ * MAYWRITE to allow gdb to COW and set breakpoints
+ */
+ vma = _install_special_mapping(mm,
+ text_start,
+ image->size,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ vdso_mapping);
+
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto up_fail;
+ }
+
+ vma = _install_special_mapping(mm,
+ addr,
+ -image->sym_vvar_start,
+ VM_READ|VM_MAYREAD,
+ &vvar_mapping);
+
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ do_munmap(mm, text_start, image->size, NULL);
+ }
+
+up_fail:
+ if (ret)
+ current->mm->context.vdso = NULL;
+
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+
+ if (!vdso_enabled)
+ return 0;
+
+#if defined CONFIG_COMPAT
+ if (!(is_32bit_task()))
+ return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
+ else
+ return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
+#else
+ return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
+#endif
+
+}
+
+static __init int vdso_setup(char *s)
+{
+ int err;
+ unsigned long val;
+
+ err = kstrtoul(s, 10, &val);
+ vdso_enabled = val;
+ return err;
+}
+__setup("vdso=", vdso_setup);
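
For reference, the vdso_setup() handler above makes the mapping
switchable from the kernel command line: booting with

  vdso=0

sets vdso_enabled to 0, so arch_setup_additional_pages() maps nothing
and library calls fall back to ordinary system calls.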