Diffstat (limited to 'arch/x86/entry/vdso/vma.c')
-rw-r--r-- | arch/x86/entry/vdso/vma.c | 165
1 file changed, 46 insertions(+), 119 deletions(-)
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 6d83ceb7f1ba..adb299d3b6a1 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -14,29 +14,20 @@
 #include <linux/elf.h>
 #include <linux/cpu.h>
 #include <linux/ptrace.h>
-#include <linux/time_namespace.h>
+#include <linux/vdso_datastore.h>
 
 #include <asm/pvclock.h>
 #include <asm/vgtod.h>
 #include <asm/proto.h>
 #include <asm/vdso.h>
-#include <asm/vvar.h>
 #include <asm/tlb.h>
 #include <asm/page.h>
 #include <asm/desc.h>
 #include <asm/cpufeature.h>
+#include <asm/vdso/vsyscall.h>
 #include <clocksource/hyperv_timer.h>
 
-#undef _ASM_X86_VVAR_H
-#define EMIT_VVAR(name, offset)	\
-	const size_t name ## _offset = offset;
-#include <asm/vvar.h>
-
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
-	return (struct vdso_data *)(vvar_page + _vdso_data_offset);
-}
-#undef EMIT_VVAR
+static_assert(VDSO_NR_PAGES + VDSO_NR_VCLOCK_PAGES == __VDSO_PAGES);
 
 unsigned int vclocks_used __read_mostly;
 
@@ -56,7 +47,6 @@ int __init init_vdso_image(const struct vdso_image *image)
 	return 0;
 }
 
-static const struct vm_special_mapping vvar_mapping;
 struct linux_binprm;
 
 static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
@@ -100,106 +90,32 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
 	return 0;
 }
 
-#ifdef CONFIG_TIME_NS
-/*
- * The vvar page layout depends on whether a task belongs to the root or
- * non-root time namespace. Whenever a task changes its namespace, the VVAR
- * page tables are cleared and then they will re-faulted with a
- * corresponding layout.
- * See also the comment near timens_setup_vdso_data() for details.
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
-	struct mm_struct *mm = task->mm;
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, mm, 0);
-
-	mmap_read_lock(mm);
-	for_each_vma(vmi, vma) {
-		if (vma_is_special_mapping(vma, &vvar_mapping))
-			zap_vma_pages(vma);
-	}
-	mmap_read_unlock(mm);
-
-	return 0;
-}
-#endif
-
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
-		      struct vm_area_struct *vma, struct vm_fault *vmf)
+static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm,
+				    struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
-	unsigned long pfn;
-	long sym_offset;
-
-	if (!image)
-		return VM_FAULT_SIGBUS;
-
-	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
-		image->sym_vvar_start;
-
-	/*
-	 * Sanity check: a symbol offset of zero means that the page
-	 * does not exist for this vdso image, not that the page is at
-	 * offset zero relative to the text mapping.  This should be
-	 * impossible here, because sym_offset should only be zero for
-	 * the page past the end of the vvar mapping.
-	 */
-	if (sym_offset == 0)
-		return VM_FAULT_SIGBUS;
-
-	if (sym_offset == image->sym_vvar_page) {
-		struct page *timens_page = find_timens_vvar_page(vma);
-
-		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
-
-		/*
-		 * If a task belongs to a time namespace then a namespace
-		 * specific VVAR is mapped with the sym_vvar_page offset and
-		 * the real VVAR page is mapped with the sym_timens_page
-		 * offset.
-		 * See also the comment near timens_setup_vdso_data().
-		 */
-		if (timens_page) {
-			unsigned long addr;
-			vm_fault_t err;
-
-			/*
-			 * Optimization: inside time namespace pre-fault
-			 * VVAR page too. As on timens page there are only
-			 * offsets for clocks on VVAR, it'll be faulted
-			 * shortly by VDSO code.
-			 */
-			addr = vmf->address + (image->sym_timens_page - sym_offset);
-			err = vmf_insert_pfn(vma, addr, pfn);
-			if (unlikely(err & VM_FAULT_ERROR))
-				return err;
-
-			pfn = page_to_pfn(timens_page);
-		}
-
-		return vmf_insert_pfn(vma, vmf->address, pfn);
-	} else if (sym_offset == image->sym_pvclock_page) {
+	switch (vmf->pgoff) {
+#ifdef CONFIG_PARAVIRT_CLOCK
+	case VDSO_PAGE_PVCLOCK_OFFSET:
+	{
 		struct pvclock_vsyscall_time_info *pvti =
 			pvclock_get_pvti_cpu0_va();
-		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
+
+		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK))
 			return vmf_insert_pfn_prot(vma, vmf->address,
 					__pa(pvti) >> PAGE_SHIFT,
 					pgprot_decrypted(vma->vm_page_prot));
-		}
-	} else if (sym_offset == image->sym_hvclock_page) {
-		pfn = hv_get_tsc_pfn();
-
+		break;
+	}
+#endif /* CONFIG_PARAVIRT_CLOCK */
+#ifdef CONFIG_HYPERV_TIMER
+	case VDSO_PAGE_HVCLOCK_OFFSET:
+	{
+		unsigned long pfn = hv_get_tsc_pfn();
 		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
 			return vmf_insert_pfn(vma, vmf->address, pfn);
-	} else if (sym_offset == image->sym_timens_page) {
-		struct page *timens_page = find_timens_vvar_page(vma);
-
-		if (!timens_page)
-			return VM_FAULT_SIGBUS;
-
-		pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
-		return vmf_insert_pfn(vma, vmf->address, pfn);
+		break;
+	}
+#endif /* CONFIG_HYPERV_TIMER */
 	}
 
 	return VM_FAULT_SIGBUS;
@@ -210,9 +126,9 @@ static const struct vm_special_mapping vdso_mapping = {
 	.fault = vdso_fault,
 	.mremap = vdso_mremap,
 };
-static const struct vm_special_mapping vvar_mapping = {
-	.name = "[vvar]",
-	.fault = vvar_fault,
+static const struct vm_special_mapping vvar_vclock_mapping = {
+	.name = "[vvar_vclock]",
+	.fault = vvar_vclock_fault,
 };
 
 /*
@@ -231,13 +147,13 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 		return -EINTR;
 
 	addr = get_unmapped_area(NULL, addr,
-				 image->size - image->sym_vvar_start, 0, 0);
+				 image->size + __VDSO_PAGES * PAGE_SIZE, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
 		ret = addr;
 		goto up_fail;
 	}
 
-	text_start = addr - image->sym_vvar_start;
+	text_start = addr + __VDSO_PAGES * PAGE_SIZE;
 
 	/*
 	 * MAYWRITE to allow gdb to COW and set breakpoints
@@ -246,7 +162,8 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 				       text_start,
 				       image->size,
 				       VM_READ|VM_EXEC|
-				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+				       VM_SEALED_SYSMAP,
 				       &vdso_mapping);
 
 	if (IS_ERR(vma)) {
@@ -254,21 +171,30 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 		goto up_fail;
 	}
 
+	vma = vdso_install_vvar_mapping(mm, addr);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		do_munmap(mm, text_start, image->size, NULL);
+		goto up_fail;
+	}
+
 	vma = _install_special_mapping(mm,
-				       addr,
-				       -image->sym_vvar_start,
+				       VDSO_VCLOCK_PAGES_START(addr),
+				       VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
 				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
-				       VM_PFNMAP,
-				       &vvar_mapping);
+				       VM_PFNMAP|VM_SEALED_SYSMAP,
+				       &vvar_vclock_mapping);
 
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		do_munmap(mm, text_start, image->size, NULL);
-	} else {
-		current->mm->context.vdso = (void __user *)text_start;
-		current->mm->context.vdso_image = image;
+		do_munmap(mm, addr, image->size, NULL);
+		goto up_fail;
 	}
 
+	current->mm->context.vdso = (void __user *)text_start;
+	current->mm->context.vdso_image = image;
+
 up_fail:
 	mmap_write_unlock(mm);
 	return ret;
@@ -290,7 +216,8 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	 */
 	for_each_vma(vmi, vma) {
 		if (vma_is_special_mapping(vma, &vdso_mapping) ||
-		    vma_is_special_mapping(vma, &vvar_mapping)) {
+		    vma_is_special_mapping(vma, &vdso_vvar_mapping) ||
+		    vma_is_special_mapping(vma, &vvar_vclock_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
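
The patch replaces the image-relative vvar layout with the generic vdso_datastore one: the data pages are installed by vdso_install_vvar_mapping(), and only the PFN-backed clock pages keep an arch-specific fault handler, keyed on vmf->pgoff instead of a reconstructed symbol offset. Below is a minimal sketch (not part of the patch) of the address-space layout set up by map_vdso(), under the assumption that VDSO_VCLOCK_PAGES_START(addr) expands to addr + VDSO_NR_PAGES * PAGE_SIZE, which the static_assert in the first hunk makes equivalent to addr + (__VDSO_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE:

	/*
	 * Layout sketch, assuming the expansion of
	 * VDSO_VCLOCK_PAGES_START() given above:
	 *
	 *   addr                                generic vvar data pages,
	 *                                       installed by
	 *                                       vdso_install_vvar_mapping()
	 *   addr + VDSO_NR_PAGES * PAGE_SIZE    "[vvar_vclock]" mapping:
	 *                                       VDSO_NR_VCLOCK_PAGES PFN
	 *                                       pages (pvclock, hvclock),
	 *                                       faulted in lazily by
	 *                                       vvar_vclock_fault()
	 *   addr + __VDSO_PAGES * PAGE_SIZE     vDSO text (text_start)
	 */

Because each vclock page now sits at a fixed offset inside its own VMA, vvar_vclock_fault() can switch directly on vmf->pgoff (VDSO_PAGE_PVCLOCK_OFFSET, VDSO_PAGE_HVCLOCK_OFFSET), and a fault on a page whose clocksource was never enabled falls through to VM_FAULT_SIGBUS. On any mapping failure, map_vdso() now unwinds the already-installed mappings with do_munmap() before jumping to up_fail.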