author     Ingo Molnar <mingo@kernel.org>  2018-10-29 07:12:34 +0100
committer  Ingo Molnar <mingo@kernel.org>  2018-10-29 07:12:34 +0100
commit     97ec37c57dd411d0815455bca07166411c0da1df (patch)
tree       d64850c43778d15c137772957788093a8b7d032c /arch
parent     ace6485a03266cc3c198ce8e927a1ce0ce139699 (diff)
parent     b59dfdaef173677b0b7e10f375226c0a1114fd20 (diff)
Merge branch 'linus' into x86/urgent, to refresh the tree
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
728 files changed, 17654 insertions, 20671 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 5b4f88363453..620b0a711ee4 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -31,6 +31,8 @@ config ALPHA select ODD_RT_SIGACTION select OLD_SIGSUSPEND select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67 + select HAVE_MEMBLOCK + select NO_BOOTMEM help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index d6e29a1de4cc..9ff37aa1165f 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -6,6 +6,7 @@ #define NR_SYSCALLS 523 +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME @@ -13,6 +14,7 @@ #define __ARCH_WANT_SYS_GETPGRP #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/alpha/include/uapi/asm/siginfo.h b/arch/alpha/include/uapi/asm/siginfo.h index db3f0138536f..6e1a2af2f962 100644 --- a/arch/alpha/include/uapi/asm/siginfo.h +++ b/arch/alpha/include/uapi/asm/siginfo.h @@ -2,7 +2,6 @@ #ifndef _ALPHA_SIGINFO_H #define _ALPHA_SIGINFO_H -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #define __ARCH_SI_TRAPNO #include <asm-generic/siginfo.h> diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c index aec757250e07..f70986683fc6 100644 --- a/arch/alpha/kernel/core_irongate.c +++ b/arch/alpha/kernel/core_irongate.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/initrd.h> #include <linux/bootmem.h> +#include <linux/memblock.h> #include <asm/ptrace.h> #include <asm/cacheflush.h> @@ -241,8 +242,7 @@ albacore_init_arch(void) size / 1024); } #endif - reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop - - pci_mem, BOOTMEM_DEFAULT); + memblock_reserve(pci_mem, memtop - pci_mem); printk("irongate_init_arch: temporarily reserving " "region %08lx-%08lx for PCI\n", pci_mem, memtop - 1); } diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index c64806a2daf5..2e09248f8324 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -473,7 +473,7 @@ entSys: bne $3, strace beq $4, 1f ldq $27, 0($5) -1: jsr $26, ($27), alpha_ni_syscall +1: jsr $26, ($27), sys_ni_syscall ldgp $gp, 0($26) blt $0, $syscall_error /* the call failed */ stq $0, 0($sp) @@ -587,7 +587,7 @@ strace: /* get the system call pointer.. */ lda $1, NR_SYSCALLS($31) lda $2, sys_call_table - lda $27, alpha_ni_syscall + lda $27, sys_ni_syscall cmpult $0, $1, $1 s8addq $0, $2, $2 beq $1, 1f @@ -791,7 +791,7 @@ ret_from_kernel_thread: /* * Special system calls. Most of these are special in that they either - * have to play switch_stack games or in some way use the pt_regs struct. + * have to play switch_stack games. 
*/ .macro fork_like name @@ -812,46 +812,41 @@ fork_like fork fork_like vfork fork_like clone +.macro sigreturn_like name .align 4 - .globl sys_sigreturn - .ent sys_sigreturn -sys_sigreturn: + .globl sys_\name + .ent sys_\name +sys_\name: .prologue 0 lda $9, ret_from_straced cmpult $26, $9, $9 lda $sp, -SWITCH_STACK_SIZE($sp) - jsr $26, do_sigreturn + jsr $26, do_\name bne $9, 1f jsr $26, syscall_trace_leave 1: br $1, undo_switch_stack br ret_from_sys_call -.end sys_sigreturn +.end sys_\name +.endm - .align 4 - .globl sys_rt_sigreturn - .ent sys_rt_sigreturn -sys_rt_sigreturn: - .prologue 0 - lda $9, ret_from_straced - cmpult $26, $9, $9 - lda $sp, -SWITCH_STACK_SIZE($sp) - jsr $26, do_rt_sigreturn - bne $9, 1f - jsr $26, syscall_trace_leave -1: br $1, undo_switch_stack - br ret_from_sys_call -.end sys_rt_sigreturn +sigreturn_like sigreturn +sigreturn_like rt_sigreturn .align 4 - .globl alpha_ni_syscall - .ent alpha_ni_syscall -alpha_ni_syscall: + .globl alpha_syscall_zero + .ent alpha_syscall_zero +alpha_syscall_zero: .prologue 0 - /* Special because it also implements overflow handling via - syscall number 0. And if you recall, zero is a special - trigger for "not an error". Store large non-zero there. */ + /* Special because it needs to do something opposite to + force_successful_syscall_return(). We use the saved + syscall number for that, zero meaning "not an error". + That works nicely, but for real syscall 0 we need to + make sure that this logics doesn't get confused. + Store a non-zero there - -ENOSYS we need in register + for our return value will do just fine. + */ lda $0, -ENOSYS unop stq $0, 0($sp) ret -.end alpha_ni_syscall +.end alpha_syscall_zero diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 5576f7646fb6..4f0d94471bc9 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -30,6 +30,7 @@ #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/bootmem.h> +#include <linux/memblock.h> #include <linux/pci.h> #include <linux/seq_file.h> #include <linux/root_dev.h> @@ -312,9 +313,7 @@ setup_memory(void *kernel_end) { struct memclust_struct * cluster; struct memdesc_struct * memdesc; - unsigned long start_kernel_pfn, end_kernel_pfn; - unsigned long bootmap_size, bootmap_pages, bootmap_start; - unsigned long start, end; + unsigned long kernel_size; unsigned long i; /* Find free clusters, and init and free the bootmem accordingly. */ @@ -322,6 +321,8 @@ setup_memory(void *kernel_end) (hwrpb->mddt_offset + (unsigned long) hwrpb); for_each_mem_cluster(memdesc, cluster, i) { + unsigned long end; + printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n", i, cluster->usage, cluster->start_pfn, cluster->start_pfn + cluster->numpages); @@ -335,6 +336,9 @@ setup_memory(void *kernel_end) end = cluster->start_pfn + cluster->numpages; if (end > max_low_pfn) max_low_pfn = end; + + memblock_add(PFN_PHYS(cluster->start_pfn), + cluster->numpages << PAGE_SHIFT); } /* @@ -363,87 +367,9 @@ setup_memory(void *kernel_end) max_low_pfn = mem_size_limit; } - /* Find the bounds of kernel memory. */ - start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS); - end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end)); - bootmap_start = -1; - - try_again: - if (max_low_pfn <= end_kernel_pfn) - panic("not enough memory to boot"); - - /* We need to know how many physically contiguous pages - we'll need for the bootmap. */ - bootmap_pages = bootmem_bootmap_pages(max_low_pfn); - - /* Now find a good region where to allocate the bootmap. 
*/ - for_each_mem_cluster(memdesc, cluster, i) { - if (cluster->usage & 3) - continue; - - start = cluster->start_pfn; - end = start + cluster->numpages; - if (start >= max_low_pfn) - continue; - if (end > max_low_pfn) - end = max_low_pfn; - if (start < start_kernel_pfn) { - if (end > end_kernel_pfn - && end - end_kernel_pfn >= bootmap_pages) { - bootmap_start = end_kernel_pfn; - break; - } else if (end > start_kernel_pfn) - end = start_kernel_pfn; - } else if (start < end_kernel_pfn) - start = end_kernel_pfn; - if (end - start >= bootmap_pages) { - bootmap_start = start; - break; - } - } - - if (bootmap_start == ~0UL) { - max_low_pfn >>= 1; - goto try_again; - } - - /* Allocate the bootmap and mark the whole MM as reserved. */ - bootmap_size = init_bootmem(bootmap_start, max_low_pfn); - - /* Mark the free regions. */ - for_each_mem_cluster(memdesc, cluster, i) { - if (cluster->usage & 3) - continue; - - start = cluster->start_pfn; - end = cluster->start_pfn + cluster->numpages; - if (start >= max_low_pfn) - continue; - if (end > max_low_pfn) - end = max_low_pfn; - if (start < start_kernel_pfn) { - if (end > end_kernel_pfn) { - free_bootmem(PFN_PHYS(start), - (PFN_PHYS(start_kernel_pfn) - - PFN_PHYS(start))); - printk("freeing pages %ld:%ld\n", - start, start_kernel_pfn); - start = end_kernel_pfn; - } else if (end > start_kernel_pfn) - end = start_kernel_pfn; - } else if (start < end_kernel_pfn) - start = end_kernel_pfn; - if (start >= end) - continue; - - free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start)); - printk("freeing pages %ld:%ld\n", start, end); - } - - /* Reserve the bootmap memory. */ - reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size, - BOOTMEM_DEFAULT); - printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size)); + /* Reserve the kernel memory. 
*/ + kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS; + memblock_reserve(KERNEL_START_PHYS, kernel_size); #ifdef CONFIG_BLK_DEV_INITRD initrd_start = INITRD_START; @@ -459,8 +385,8 @@ setup_memory(void *kernel_end) initrd_end, phys_to_virt(PFN_PHYS(max_low_pfn))); } else { - reserve_bootmem(virt_to_phys((void *)initrd_start), - INITRD_SIZE, BOOTMEM_DEFAULT); + memblock_reserve(virt_to_phys((void *)initrd_start), + INITRD_SIZE); } } #endif /* CONFIG_BLK_DEV_INITRD */ diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 1374e591511f..5b2e8ecb7ce3 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -11,93 +11,93 @@ .align 3 .globl sys_call_table sys_call_table: - .quad alpha_ni_syscall /* 0 */ + .quad alpha_syscall_zero /* 0 */ .quad sys_exit .quad alpha_fork .quad sys_read .quad sys_write - .quad alpha_ni_syscall /* 5 */ + .quad sys_ni_syscall /* 5 */ .quad sys_close .quad sys_osf_wait4 - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_link .quad sys_unlink /* 10 */ - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_chdir .quad sys_fchdir .quad sys_mknod .quad sys_chmod /* 15 */ .quad sys_chown .quad sys_osf_brk - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_lseek .quad sys_getxpid /* 20 */ .quad sys_osf_mount .quad sys_umount .quad sys_setuid .quad sys_getxuid - .quad alpha_ni_syscall /* 25 */ + .quad sys_ni_syscall /* 25 */ .quad sys_ptrace - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 30 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 30 */ + .quad sys_ni_syscall + .quad sys_ni_syscall .quad sys_access - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 35 */ + .quad sys_ni_syscall + .quad sys_ni_syscall /* 35 */ .quad sys_sync .quad sys_kill - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_setpgid - .quad alpha_ni_syscall /* 40 */ + .quad sys_ni_syscall /* 40 */ .quad sys_dup .quad sys_alpha_pipe .quad sys_osf_set_program_attributes - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_open /* 45 */ - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_getxgid .quad sys_osf_sigprocmask - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 50 */ + .quad sys_ni_syscall + .quad sys_ni_syscall /* 50 */ .quad sys_acct .quad sys_sigpending - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_ioctl - .quad alpha_ni_syscall /* 55 */ - .quad alpha_ni_syscall + .quad sys_ni_syscall /* 55 */ + .quad sys_ni_syscall .quad sys_symlink .quad sys_readlink .quad sys_execve .quad sys_umask /* 60 */ .quad sys_chroot - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_getpgrp .quad sys_getpagesize - .quad alpha_ni_syscall /* 65 */ + .quad sys_ni_syscall /* 65 */ .quad alpha_vfork .quad sys_newstat .quad sys_newlstat - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 70 */ + .quad sys_ni_syscall + .quad sys_ni_syscall /* 70 */ .quad sys_osf_mmap - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_munmap .quad sys_mprotect .quad sys_madvise /* 75 */ .quad sys_vhangup - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall .quad sys_getgroups /* map BSD's setpgrp to sys_setpgid for binary compatibility: */ .quad sys_setgroups /* 80 */ - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_setpgid .quad sys_osf_setitimer - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 85 */ + .quad 
sys_ni_syscall + .quad sys_ni_syscall /* 85 */ .quad sys_osf_getitimer .quad sys_gethostname .quad sys_sethostname @@ -119,19 +119,19 @@ sys_call_table: .quad sys_bind .quad sys_setsockopt /* 105 */ .quad sys_listen - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 110 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 110 */ .quad sys_sigsuspend .quad sys_osf_sigstack .quad sys_recvmsg .quad sys_sendmsg - .quad alpha_ni_syscall /* 115 */ + .quad sys_ni_syscall /* 115 */ .quad sys_osf_gettimeofday .quad sys_osf_getrusage .quad sys_getsockopt - .quad alpha_ni_syscall + .quad sys_ni_syscall #ifdef CONFIG_OSF4_COMPAT .quad sys_osf_readv /* 120 */ .quad sys_osf_writev @@ -156,66 +156,66 @@ sys_call_table: .quad sys_mkdir .quad sys_rmdir .quad sys_osf_utimes - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 140 */ + .quad sys_ni_syscall + .quad sys_ni_syscall /* 140 */ .quad sys_getpeername - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall .quad sys_getrlimit .quad sys_setrlimit /* 145 */ - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_setsid .quad sys_quotactl - .quad alpha_ni_syscall + .quad sys_ni_syscall .quad sys_getsockname /* 150 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 155 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 155 */ .quad sys_osf_sigaction - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall .quad sys_osf_getdirentries .quad sys_osf_statfs /* 160 */ .quad sys_osf_fstatfs - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall .quad sys_osf_getdomainname /* 165 */ .quad sys_setdomainname - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 170 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 175 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 180 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 185 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 190 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 195 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 170 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 175 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 180 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 185 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 190 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad 
sys_ni_syscall /* 195 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* The OSF swapon has two extra arguments, but we ignore them. */ .quad sys_swapon .quad sys_msgctl /* 200 */ @@ -231,93 +231,93 @@ sys_call_table: .quad sys_shmctl /* 210 */ .quad sys_shmdt .quad sys_shmget - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 215 */ - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 215 */ + .quad sys_ni_syscall .quad sys_msync - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 220 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 220 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall .quad sys_osf_stat .quad sys_osf_lstat /* 225 */ .quad sys_osf_fstat .quad sys_osf_statfs64 .quad sys_osf_fstatfs64 - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 230 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 230 */ + .quad sys_ni_syscall + .quad sys_ni_syscall .quad sys_getpgid .quad sys_getsid .quad sys_sigaltstack /* 235 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 240 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 240 */ .quad sys_osf_sysinfo - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall .quad sys_osf_proplist_syscall - .quad alpha_ni_syscall /* 245 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 250 */ + .quad sys_ni_syscall /* 245 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 250 */ .quad sys_osf_usleep_thread - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall .quad sys_sysfs - .quad alpha_ni_syscall /* 255 */ + .quad sys_ni_syscall /* 255 */ .quad sys_osf_getsysinfo .quad sys_osf_setsysinfo - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 260 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 265 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 270 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 275 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 280 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 285 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 290 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall /* 295 */ - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall - .quad alpha_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 260 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 
265 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 270 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 275 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 280 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 285 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 290 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* 295 */ + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall + .quad sys_ni_syscall /* linux-specific system calls start at 300 */ .quad sys_bdflush /* 300 */ .quad sys_sethae diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index a9e86475f169..26cd925d19b1 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bootmem.h> +#include <linux/memblock.h> #include <linux/swap.h> #include <linux/initrd.h> #include <linux/pfn.h> @@ -59,12 +60,10 @@ setup_memory_node(int nid, void *kernel_end) struct memclust_struct * cluster; struct memdesc_struct * memdesc; unsigned long start_kernel_pfn, end_kernel_pfn; - unsigned long bootmap_size, bootmap_pages, bootmap_start; unsigned long start, end; unsigned long node_pfn_start, node_pfn_end; unsigned long node_min_pfn, node_max_pfn; int i; - unsigned long node_datasz = PFN_UP(sizeof(pg_data_t)); int show_init = 0; /* Find the bounds of current node */ @@ -134,24 +133,14 @@ setup_memory_node(int nid, void *kernel_end) /* Cute trick to make sure our local node data is on local memory */ node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT)); #endif - /* Quasi-mark the pg_data_t as in-use */ - node_min_pfn += node_datasz; - if (node_min_pfn >= node_max_pfn) { - printk(" not enough mem to reserve NODE_DATA"); - return; - } - NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; - printk(" Detected node memory: start %8lu, end %8lu\n", node_min_pfn, node_max_pfn); DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid)); - DBGDCONT(" DISCONTIG: NODE_DATA(%d)->bdata is at 0x%p\n", nid, NODE_DATA(nid)->bdata); /* Find the bounds of kernel memory. */ start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS); end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end)); - bootmap_start = -1; if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn)) panic("kernel loaded out of ram"); @@ -161,89 +150,11 @@ setup_memory_node(int nid, void *kernel_end) has much larger alignment than 8Mb, so it's safe. */ node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1); - /* We need to know how many physically contiguous pages - we'll need for the bootmap. */ - bootmap_pages = bootmem_bootmap_pages(node_max_pfn-node_min_pfn); - - /* Now find a good region where to allocate the bootmap. 
*/ - for_each_mem_cluster(memdesc, cluster, i) { - if (cluster->usage & 3) - continue; - - start = cluster->start_pfn; - end = start + cluster->numpages; - - if (start >= node_max_pfn || end <= node_min_pfn) - continue; - - if (end > node_max_pfn) - end = node_max_pfn; - if (start < node_min_pfn) - start = node_min_pfn; - - if (start < start_kernel_pfn) { - if (end > end_kernel_pfn - && end - end_kernel_pfn >= bootmap_pages) { - bootmap_start = end_kernel_pfn; - break; - } else if (end > start_kernel_pfn) - end = start_kernel_pfn; - } else if (start < end_kernel_pfn) - start = end_kernel_pfn; - if (end - start >= bootmap_pages) { - bootmap_start = start; - break; - } - } - - if (bootmap_start == -1) - panic("couldn't find a contiguous place for the bootmap"); - - /* Allocate the bootmap and mark the whole MM as reserved. */ - bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start, - node_min_pfn, node_max_pfn); - DBGDCONT(" bootmap_start %lu, bootmap_size %lu, bootmap_pages %lu\n", - bootmap_start, bootmap_size, bootmap_pages); + memblock_add(PFN_PHYS(node_min_pfn), + (node_max_pfn - node_min_pfn) << PAGE_SHIFT); - /* Mark the free regions. */ - for_each_mem_cluster(memdesc, cluster, i) { - if (cluster->usage & 3) - continue; - - start = cluster->start_pfn; - end = cluster->start_pfn + cluster->numpages; - - if (start >= node_max_pfn || end <= node_min_pfn) - continue; - - if (end > node_max_pfn) - end = node_max_pfn; - if (start < node_min_pfn) - start = node_min_pfn; - - if (start < start_kernel_pfn) { - if (end > end_kernel_pfn) { - free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), - (PFN_PHYS(start_kernel_pfn) - - PFN_PHYS(start))); - printk(" freeing pages %ld:%ld\n", - start, start_kernel_pfn); - start = end_kernel_pfn; - } else if (end > start_kernel_pfn) - end = start_kernel_pfn; - } else if (start < end_kernel_pfn) - start = end_kernel_pfn; - if (start >= end) - continue; - - free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start)); - printk(" freeing pages %ld:%ld\n", start, end); - } - - /* Reserve the bootmap memory. 
*/ - reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start), - bootmap_size, BOOTMEM_DEFAULT); - printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size)); + NODE_DATA(nid)->node_start_pfn = node_min_pfn; + NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn; node_set_online(nid); } @@ -251,6 +162,7 @@ setup_memory_node(int nid, void *kernel_end) void __init setup_memory(void *kernel_end) { + unsigned long kernel_size; int nid; show_mem_layout(); @@ -262,6 +174,9 @@ setup_memory(void *kernel_end) for (nid = 0; nid < MAX_NUMNODES; nid++) setup_memory_node(nid, kernel_end); + kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS; + memblock_reserve(KERNEL_START_PHYS, kernel_size); + #ifdef CONFIG_BLK_DEV_INITRD initrd_start = INITRD_START; if (initrd_start) { @@ -279,9 +194,8 @@ setup_memory(void *kernel_end) phys_to_virt(PFN_PHYS(max_low_pfn))); } else { nid = kvaddr_to_nid(initrd_start); - reserve_bootmem_node(NODE_DATA(nid), - virt_to_phys((void *)initrd_start), - INITRD_SIZE, BOOTMEM_DEFAULT); + memblock_reserve(virt_to_phys((void *)initrd_start), + INITRD_SIZE); } } #endif /* CONFIG_BLK_DEV_INITRD */ @@ -303,9 +217,8 @@ void __init paging_init(void) dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; for_each_online_node(nid) { - bootmem_data_t *bdata = &bootmem_node_data[nid]; - unsigned long start_pfn = bdata->node_min_pfn; - unsigned long end_pfn = bdata->node_low_pfn; + unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn; + unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_present_pages; if (dma_local_pfn >= end_pfn - start_pfn) zones_size[ZONE_DMA] = end_pfn - start_pfn; diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 644815c0516e..c64c505d966c 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -102,11 +102,5 @@ boot_targets += uImage uImage.bin uImage.gz $(boot_targets): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ -%.dtb %.dtb.S %.dtb.o: scripts - $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ - -dtbs: scripts - $(Q)$(MAKE) $(build)=$(boot)/dts - archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 517178b1daef..3b3543fd151c 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h @@ -17,6 +17,7 @@ #define _UAPI_ASM_ARC_UNISTD_H #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c index b123558bf0bb..a7fcbc0d3943 100644 --- a/arch/arc/kernel/traps.c +++ b/arch/arc/kernel/traps.c @@ -42,21 +42,22 @@ void die(const char *str, struct pt_regs *regs, unsigned long address) * -for kernel, chk if due to copy_(to|from)_user, otherwise die() */ static noinline int -unhandled_exception(const char *str, struct pt_regs *regs, siginfo_t *info) +unhandled_exception(const char *str, struct pt_regs *regs, + int signo, int si_code, void __user *addr) { if (user_mode(regs)) { struct task_struct *tsk = current; - tsk->thread.fault_address = (__force unsigned int)info->si_addr; + tsk->thread.fault_address = (__force unsigned int)addr; - force_sig_info(info->si_signo, info, tsk); + force_sig_fault(signo, si_code, addr, tsk); } else { /* If not due to copy_(to|from)_user, we are doomed */ if (fixup_exception(regs)) return 0; - die(str, regs, (unsigned long)info->si_addr); + die(str, regs, (unsigned long)addr); } return 1; @@ -64,16 +65,9 
@@ unhandled_exception(const char *str, struct pt_regs *regs, siginfo_t *info) #define DO_ERROR_INFO(signr, str, name, sicode) \ int name(unsigned long address, struct pt_regs *regs) \ -{ \ - siginfo_t info; \ - \ - clear_siginfo(&info); \ - info.si_signo = signr; \ - info.si_errno = 0; \ - info.si_code = sicode; \ - info.si_addr = (void __user *)address; \ - \ - return unhandled_exception(str, regs, &info);\ +{ \ + return unhandled_exception(str, regs, signr, sicode, \ + (void __user *)address); \ } /* diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index f35ed578e007..8fb16bdabdcf 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -71,7 +71,6 @@ SECTIONS INIT_SETUP(L1_CACHE_BYTES) INIT_CALLS CON_INITCALL - SECURITY_INITCALL } .init.arch.info : { diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index db6913094be3..c9da6102eb4f 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -66,14 +66,12 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) struct vm_area_struct *vma = NULL; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; - siginfo_t info; + int si_code; int ret; vm_fault_t fault; int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; - clear_siginfo(&info); - /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. @@ -91,7 +89,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) return; } - info.si_code = SEGV_MAPERR; + si_code = SEGV_MAPERR; /* * If we're in an interrupt or have no user @@ -119,7 +117,7 @@ retry: * we can handle it.. */ good_area: - info.si_code = SEGV_ACCERR; + si_code = SEGV_ACCERR; /* Handle protection violation, execute on heap or stack */ @@ -199,11 +197,7 @@ bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { tsk->thread.fault_address = address; - info.si_signo = SIGSEGV; - info.si_errno = 0; - /* info.si_code has been set above */ - info.si_addr = (void __user *)address; - force_sig_info(SIGSEGV, &info, tsk); + force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk); return; } @@ -238,9 +232,5 @@ do_sigbus: goto no_context; tsk->thread.fault_address = address; - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *)address; - force_sig_info(SIGBUS, &info, tsk); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk); } diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index f6fcb8a79889..a810fa8ba404 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -45,35 +45,42 @@ config DEBUG_WX If in doubt, say "Y". -# RMK wants arm kernels compiled with frame pointers or stack unwinding. -# If you know what you are doing and are willing to live without stack -# traces, you can get a slightly smaller kernel by setting this option to -# n, but then RMK will have to kill you ;). -config FRAME_POINTER - bool - depends on !THUMB2_KERNEL - default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER +choice + prompt "Choose kernel unwinder" + default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER + default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER help - If you say N here, the resulting kernel will be slightly smaller and - faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled, - when a problem occurs with the kernel, the information that is - reported is severely limited. 
+ This determines which method will be used for unwinding kernel stack + traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack, + livepatch, lockdep, and more. + +config UNWINDER_FRAME_POINTER + bool "Frame pointer unwinder" + depends on !THUMB2_KERNEL && !CC_IS_CLANG + select ARCH_WANT_FRAME_POINTERS + select FRAME_POINTER + help + This option enables the frame pointer unwinder for unwinding + kernel stack traces. -config ARM_UNWIND - bool "Enable stack unwinding support (EXPERIMENTAL)" +config UNWINDER_ARM + bool "ARM EABI stack unwinder" depends on AEABI - default y + select ARM_UNWIND help This option enables stack unwinding support in the kernel using the information automatically generated by the compiler. The resulting kernel image is slightly bigger but the performance is not affected. Currently, this feature - only works with EABI compilers. If unsure say Y. + only works with EABI compilers. -config OLD_MCOUNT +endchoice + +config ARM_UNWIND + bool + +config FRAME_POINTER bool - depends on FUNCTION_TRACER && FRAME_POINTER - default y config DEBUG_USER bool "Verbose user fault messages" diff --git a/arch/arm/Makefile b/arch/arm/Makefile index d1516f85f25d..05a91d8b89f3 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -74,7 +74,7 @@ endif arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4 -arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3 +arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3m # Evaluate arch cc-option calls now arch-y := $(arch-y) @@ -264,13 +264,9 @@ platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y))) ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y) ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y) -ifeq ($(KBUILD_SRC),) -KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs)) -else KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs)) endif endif -endif export TEXT_OFFSET GZFLAGS MMUEXT @@ -307,12 +303,7 @@ else KBUILD_IMAGE := $(boot)/zImage endif -# Build the DT binary blobs if we have OF configured -ifeq ($(CONFIG_USE_OF),y) -KBUILD_DTBS := dtbs -endif - -all: $(notdir $(KBUILD_IMAGE)) $(KBUILD_DTBS) +all: $(notdir $(KBUILD_IMAGE)) archheaders: @@ -339,17 +330,6 @@ $(BOOT_TARGETS): vmlinux $(INSTALL_TARGETS): $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ -%.dtb: | scripts - $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@ - -PHONY += dtbs dtbs_install - -dtbs: prepare scripts - $(Q)$(MAKE) $(build)=$(boot)/dts - -dtbs_install: - $(Q)$(MAKE) $(dtbinst)=$(boot)/dts - PHONY += vdso_install vdso_install: ifeq ($(CONFIG_VDSO),y) @@ -371,8 +351,6 @@ define archhelp echo ' uImage - U-Boot wrapped zImage' echo ' bootpImage - Combined zImage and initial RAM disk' echo ' (supply initrd image via make variable INITRD=<path>)' - echo '* dtbs - Build device tree blobs for enabled boards' - echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)' echo ' install - Install uncompressed kernel' echo ' zinstall - Install compressed kernel' echo ' uinstall - Install U-Boot wrapped compressed kernel' diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 517e0e18f0b8..6c7ccb428c07 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -114,6 +114,35 @@ #endif .endm + /* + * Debug kernel copy by printing the memory addresses involved + */ + .macro dbgkc, begin, end, 
cbegin, cend +#ifdef DEBUG + kputc #'\n' + kputc #'C' + kputc #':' + kputc #'0' + kputc #'x' + kphex \begin, 8 /* Start of compressed kernel */ + kputc #'-' + kputc #'0' + kputc #'x' + kphex \end, 8 /* End of compressed kernel */ + kputc #'-' + kputc #'>' + kputc #'0' + kputc #'x' + kphex \cbegin, 8 /* Start of kernel copy */ + kputc #'-' + kputc #'0' + kputc #'x' + kphex \cend, 8 /* End of kernel copy */ + kputc #'\n' + kputc #'\r' +#endif + .endm + .section ".start", #alloc, #execinstr /* * sort out different calling conventions @@ -450,6 +479,20 @@ dtb_check_done: add r6, r9, r5 add r9, r9, r10 +#ifdef DEBUG + sub r10, r6, r5 + sub r10, r9, r10 + /* + * We are about to copy the kernel to a new memory area. + * The boundaries of the new memory area can be found in + * r10 and r9, whilst r5 and r6 contain the boundaries + * of the memory we are going to copy. + * Calling dbgkc will help with the printing of this + * information. + */ + dbgkc r5, r6, r10, r9 +#endif + 1: ldmdb r6!, {r0 - r3, r10 - r12, lr} cmp r6, r5 stmdb r9!, {r0 - r3, r10 - r12, lr} diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h index 07437816e098..b36c0289a308 100644 --- a/arch/arm/boot/compressed/libfdt_env.h +++ b/arch/arm/boot/compressed/libfdt_env.h @@ -6,6 +6,8 @@ #include <linux/string.h> #include <asm/byteorder.h> +#define INT_MAX ((int)(~0U>>1)) + typedef __be16 fdt16_t; typedef __be32 fdt32_t; typedef __be64 fdt64_t; diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index 7234e8330a57..efbdeaaa8dcd 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi @@ -146,8 +146,9 @@ fsl,max-link-speed = <2>; power-domains = <&pgc_pcie_phy>; resets = <&src IMX7_RESET_PCIEPHY>, - <&src IMX7_RESET_PCIE_CTRL_APPS_EN>; - reset-names = "pciephy", "apps"; + <&src IMX7_RESET_PCIE_CTRL_APPS_EN>, + <&src IMX7_RESET_PCIE_CTRL_APPS_TURNOFF>; + reset-names = "pciephy", "apps", "turnoff"; status = "disabled"; }; }; diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 925d1364727a..ef0c7feea6e2 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -99,6 +99,7 @@ config CRYPTO_GHASH_ARM_CE depends on KERNEL_MODE_NEON select CRYPTO_HASH select CRYPTO_CRYPTD + select CRYPTO_GF128MUL help Use an implementation of GHASH (used by the GCM AEAD chaining mode) that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64) @@ -121,10 +122,4 @@ config CRYPTO_CHACHA20_NEON select CRYPTO_BLKCIPHER select CRYPTO_CHACHA20 -config CRYPTO_SPECK_NEON - tristate "NEON accelerated Speck cipher algorithms" - depends on KERNEL_MODE_NEON - select CRYPTO_BLKCIPHER - select CRYPTO_SPECK - endif diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 8de542c48ade..bd5bceef0605 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o -obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o @@ -54,7 +53,6 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o -speck-neon-y := speck-neon-core.o speck-neon-glue.o ifdef 
REGENERATE_ARM_CRYPTO quiet_cmd_perl = PERL $@ diff --git a/arch/arm/crypto/chacha20-neon-core.S b/arch/arm/crypto/chacha20-neon-core.S index 451a849ad518..50e7b9896818 100644 --- a/arch/arm/crypto/chacha20-neon-core.S +++ b/arch/arm/crypto/chacha20-neon-core.S @@ -18,6 +18,34 @@ * (at your option) any later version. */ + /* + * NEON doesn't have a rotate instruction. The alternatives are, more or less: + * + * (a) vshl.u32 + vsri.u32 (needs temporary register) + * (b) vshl.u32 + vshr.u32 + vorr (needs temporary register) + * (c) vrev32.16 (16-bit rotations only) + * (d) vtbl.8 + vtbl.8 (multiple of 8 bits rotations only, + * needs index vector) + * + * ChaCha20 has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit + * rotations, the only choices are (a) and (b). We use (a) since it takes + * two-thirds the cycles of (b) on both Cortex-A7 and Cortex-A53. + * + * For the 16-bit rotation, we use vrev32.16 since it's consistently fastest + * and doesn't need a temporary register. + * + * For the 8-bit rotation, we use vtbl.8 + vtbl.8. On Cortex-A7, this sequence + * is twice as fast as (a), even when doing (a) on multiple registers + * simultaneously to eliminate the stall between vshl and vsri. Also, it + * parallelizes better when temporary registers are scarce. + * + * A disadvantage is that on Cortex-A53, the vtbl sequence is the same speed as + * (a), so the need to load the rotation table actually makes the vtbl method + * slightly slower overall on that CPU (~1.3% slower ChaCha20). Still, it + * seems to be a good compromise to get a more significant speed boost on some + * CPUs, e.g. ~4.8% faster ChaCha20 on Cortex-A7. + */ + #include <linux/linkage.h> .text @@ -46,7 +74,9 @@ ENTRY(chacha20_block_xor_neon) vmov q10, q2 vmov q11, q3 + adr ip, .Lrol8_table mov r3, #10 + vld1.8 {d10}, [ip, :64] .Ldoubleround: // x0 += x1, x3 = rotl32(x3 ^ x0, 16) @@ -62,9 +92,9 @@ ENTRY(chacha20_block_xor_neon) // x0 += x1, x3 = rotl32(x3 ^ x0, 8) vadd.i32 q0, q0, q1 - veor q4, q3, q0 - vshl.u32 q3, q4, #8 - vsri.u32 q3, q4, #24 + veor q3, q3, q0 + vtbl.8 d6, {d6}, d10 + vtbl.8 d7, {d7}, d10 // x2 += x3, x1 = rotl32(x1 ^ x2, 7) vadd.i32 q2, q2, q3 @@ -92,9 +122,9 @@ ENTRY(chacha20_block_xor_neon) // x0 += x1, x3 = rotl32(x3 ^ x0, 8) vadd.i32 q0, q0, q1 - veor q4, q3, q0 - vshl.u32 q3, q4, #8 - vsri.u32 q3, q4, #24 + veor q3, q3, q0 + vtbl.8 d6, {d6}, d10 + vtbl.8 d7, {d7}, d10 // x2 += x3, x1 = rotl32(x1 ^ x2, 7) vadd.i32 q2, q2, q3 @@ -139,13 +169,17 @@ ENTRY(chacha20_block_xor_neon) bx lr ENDPROC(chacha20_block_xor_neon) + .align 4 +.Lctrinc: .word 0, 1, 2, 3 +.Lrol8_table: .byte 3, 0, 1, 2, 7, 4, 5, 6 + .align 5 ENTRY(chacha20_4block_xor_neon) - push {r4-r6, lr} - mov ip, sp // preserve the stack pointer - sub r3, sp, #0x20 // allocate a 32 byte buffer - bic r3, r3, #0x1f // aligned to 32 bytes - mov sp, r3 + push {r4-r5} + mov r4, sp // preserve the stack pointer + sub ip, sp, #0x20 // allocate a 32 byte buffer + bic ip, ip, #0x1f // aligned to 32 bytes + mov sp, ip // r0: Input state matrix, s // r1: 4 data blocks output, o @@ -155,25 +189,24 @@ ENTRY(chacha20_4block_xor_neon) // This function encrypts four consecutive ChaCha20 blocks by loading // the state matrix in NEON registers four times. The algorithm performs // each operation on the corresponding word of each state matrix, hence - // requires no word shuffling. For final XORing step we transpose the - // matrix by interleaving 32- and then 64-bit words, which allows us to - // do XOR in NEON registers. + // requires no word shuffling. 
The words are re-interleaved before the + // final addition of the original state and the XORing step. // - // x0..15[0-3] = s0..3[0..3] - add r3, r0, #0x20 + // x0..15[0-3] = s0..15[0-3] + add ip, r0, #0x20 vld1.32 {q0-q1}, [r0] - vld1.32 {q2-q3}, [r3] + vld1.32 {q2-q3}, [ip] - adr r3, CTRINC + adr r5, .Lctrinc vdup.32 q15, d7[1] vdup.32 q14, d7[0] - vld1.32 {q11}, [r3, :128] + vld1.32 {q4}, [r5, :128] vdup.32 q13, d6[1] vdup.32 q12, d6[0] - vadd.i32 q12, q12, q11 // x12 += counter values 0-3 vdup.32 q11, d5[1] vdup.32 q10, d5[0] + vadd.u32 q12, q12, q4 // x12 += counter values 0-3 vdup.32 q9, d4[1] vdup.32 q8, d4[0] vdup.32 q7, d3[1] @@ -185,9 +218,13 @@ ENTRY(chacha20_4block_xor_neon) vdup.32 q1, d0[1] vdup.32 q0, d0[0] + adr ip, .Lrol8_table mov r3, #10 + b 1f .Ldoubleround4: + vld1.32 {q8-q9}, [sp, :256] +1: // x0 += x4, x12 = rotl32(x12 ^ x0, 16) // x1 += x5, x13 = rotl32(x13 ^ x1, 16) // x2 += x6, x14 = rotl32(x14 ^ x2, 16) @@ -236,24 +273,25 @@ ENTRY(chacha20_4block_xor_neon) // x1 += x5, x13 = rotl32(x13 ^ x1, 8) // x2 += x6, x14 = rotl32(x14 ^ x2, 8) // x3 += x7, x15 = rotl32(x15 ^ x3, 8) + vld1.8 {d16}, [ip, :64] vadd.i32 q0, q0, q4 vadd.i32 q1, q1, q5 vadd.i32 q2, q2, q6 vadd.i32 q3, q3, q7 - veor q8, q12, q0 - veor q9, q13, q1 - vshl.u32 q12, q8, #8 - vshl.u32 q13, q9, #8 - vsri.u32 q12, q8, #24 - vsri.u32 q13, q9, #24 + veor q12, q12, q0 + veor q13, q13, q1 + veor q14, q14, q2 + veor q15, q15, q3 - veor q8, q14, q2 - veor q9, q15, q3 - vshl.u32 q14, q8, #8 - vshl.u32 q15, q9, #8 - vsri.u32 q14, q8, #24 - vsri.u32 q15, q9, #24 + vtbl.8 d24, {d24}, d16 + vtbl.8 d25, {d25}, d16 + vtbl.8 d26, {d26}, d16 + vtbl.8 d27, {d27}, d16 + vtbl.8 d28, {d28}, d16 + vtbl.8 d29, {d29}, d16 + vtbl.8 d30, {d30}, d16 + vtbl.8 d31, {d31}, d16 vld1.32 {q8-q9}, [sp, :256] @@ -332,24 +370,25 @@ ENTRY(chacha20_4block_xor_neon) // x1 += x6, x12 = rotl32(x12 ^ x1, 8) // x2 += x7, x13 = rotl32(x13 ^ x2, 8) // x3 += x4, x14 = rotl32(x14 ^ x3, 8) + vld1.8 {d16}, [ip, :64] vadd.i32 q0, q0, q5 vadd.i32 q1, q1, q6 vadd.i32 q2, q2, q7 vadd.i32 q3, q3, q4 - veor q8, q15, q0 - veor q9, q12, q1 - vshl.u32 q15, q8, #8 - vshl.u32 q12, q9, #8 - vsri.u32 q15, q8, #24 - vsri.u32 q12, q9, #24 + veor q15, q15, q0 + veor q12, q12, q1 + veor q13, q13, q2 + veor q14, q14, q3 - veor q8, q13, q2 - veor q9, q14, q3 - vshl.u32 q13, q8, #8 - vshl.u32 q14, q9, #8 - vsri.u32 q13, q8, #24 - vsri.u32 q14, q9, #24 + vtbl.8 d30, {d30}, d16 + vtbl.8 d31, {d31}, d16 + vtbl.8 d24, {d24}, d16 + vtbl.8 d25, {d25}, d16 + vtbl.8 d26, {d26}, d16 + vtbl.8 d27, {d27}, d16 + vtbl.8 d28, {d28}, d16 + vtbl.8 d29, {d29}, d16 vld1.32 {q8-q9}, [sp, :256] @@ -379,104 +418,76 @@ ENTRY(chacha20_4block_xor_neon) vsri.u32 q6, q9, #25 subs r3, r3, #1 - beq 0f - - vld1.32 {q8-q9}, [sp, :256] - b .Ldoubleround4 - - // x0[0-3] += s0[0] - // x1[0-3] += s0[1] - // x2[0-3] += s0[2] - // x3[0-3] += s0[3] -0: ldmia r0!, {r3-r6} - vdup.32 q8, r3 - vdup.32 q9, r4 - vadd.i32 q0, q0, q8 - vadd.i32 q1, q1, q9 - vdup.32 q8, r5 - vdup.32 q9, r6 - vadd.i32 q2, q2, q8 - vadd.i32 q3, q3, q9 - - // x4[0-3] += s1[0] - // x5[0-3] += s1[1] - // x6[0-3] += s1[2] - // x7[0-3] += s1[3] - ldmia r0!, {r3-r6} - vdup.32 q8, r3 - vdup.32 q9, r4 - vadd.i32 q4, q4, q8 - vadd.i32 q5, q5, q9 - vdup.32 q8, r5 - vdup.32 q9, r6 - vadd.i32 q6, q6, q8 - vadd.i32 q7, q7, q9 - - // interleave 32-bit words in state n, n+1 - vzip.32 q0, q1 - vzip.32 q2, q3 - vzip.32 q4, q5 - vzip.32 q6, q7 - - // interleave 64-bit words in state n, n+2 + bne .Ldoubleround4 + + // x0..7[0-3] are in q0-q7, 
x10..15[0-3] are in q10-q15. + // x8..9[0-3] are on the stack. + + // Re-interleave the words in the first two rows of each block (x0..7). + // Also add the counter values 0-3 to x12[0-3]. + vld1.32 {q8}, [r5, :128] // load counter values 0-3 + vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1) + vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3) + vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5) + vzip.32 q6, q7 // => (6 7 6 7) (6 7 6 7) + vadd.u32 q12, q8 // x12 += counter values 0-3 vswp d1, d4 vswp d3, d6 + vld1.32 {q8-q9}, [r0]! // load s0..7 vswp d9, d12 vswp d11, d14 - // xor with corresponding input, write to output + // Swap q1 and q4 so that we'll free up consecutive registers (q0-q1) + // after XORing the first 32 bytes. + vswp q1, q4 + + // First two rows of each block are (q0 q1) (q2 q6) (q4 q5) (q3 q7) + + // x0..3[0-3] += s0..3[0-3] (add orig state to 1st row of each block) + vadd.u32 q0, q0, q8 + vadd.u32 q2, q2, q8 + vadd.u32 q4, q4, q8 + vadd.u32 q3, q3, q8 + + // x4..7[0-3] += s4..7[0-3] (add orig state to 2nd row of each block) + vadd.u32 q1, q1, q9 + vadd.u32 q6, q6, q9 + vadd.u32 q5, q5, q9 + vadd.u32 q7, q7, q9 + + // XOR first 32 bytes using keystream from first two rows of first block vld1.8 {q8-q9}, [r2]! veor q8, q8, q0 - veor q9, q9, q4 + veor q9, q9, q1 vst1.8 {q8-q9}, [r1]! + // Re-interleave the words in the last two rows of each block (x8..15). vld1.32 {q8-q9}, [sp, :256] - - // x8[0-3] += s2[0] - // x9[0-3] += s2[1] - // x10[0-3] += s2[2] - // x11[0-3] += s2[3] - ldmia r0!, {r3-r6} - vdup.32 q0, r3 - vdup.32 q4, r4 - vadd.i32 q8, q8, q0 - vadd.i32 q9, q9, q4 - vdup.32 q0, r5 - vdup.32 q4, r6 - vadd.i32 q10, q10, q0 - vadd.i32 q11, q11, q4 - - // x12[0-3] += s3[0] - // x13[0-3] += s3[1] - // x14[0-3] += s3[2] - // x15[0-3] += s3[3] - ldmia r0!, {r3-r6} - vdup.32 q0, r3 - vdup.32 q4, r4 - adr r3, CTRINC - vadd.i32 q12, q12, q0 - vld1.32 {q0}, [r3, :128] - vadd.i32 q13, q13, q4 - vadd.i32 q12, q12, q0 // x12 += counter values 0-3 - - vdup.32 q0, r5 - vdup.32 q4, r6 - vadd.i32 q14, q14, q0 - vadd.i32 q15, q15, q4 - - // interleave 32-bit words in state n, n+1 - vzip.32 q8, q9 - vzip.32 q10, q11 - vzip.32 q12, q13 - vzip.32 q14, q15 - - // interleave 64-bit words in state n, n+2 - vswp d17, d20 - vswp d19, d22 + vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13) + vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15) + vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9) + vzip.32 q10, q11 // => (10 11 10 11) (10 11 10 11) + vld1.32 {q0-q1}, [r0] // load s8..15 vswp d25, d28 vswp d27, d30 + vswp d17, d20 + vswp d19, d22 + + // Last two rows of each block are (q8 q12) (q10 q14) (q9 q13) (q11 q15) + + // x8..11[0-3] += s8..11[0-3] (add orig state to 3rd row of each block) + vadd.u32 q8, q8, q0 + vadd.u32 q10, q10, q0 + vadd.u32 q9, q9, q0 + vadd.u32 q11, q11, q0 + + // x12..15[0-3] += s12..15[0-3] (add orig state to 4th row of each block) + vadd.u32 q12, q12, q1 + vadd.u32 q14, q14, q1 + vadd.u32 q13, q13, q1 + vadd.u32 q15, q15, q1 - vmov q4, q1 + // XOR the rest of the data with the keystream vld1.8 {q0-q1}, [r2]! veor q0, q0, q8 @@ -509,13 +520,11 @@ ENTRY(chacha20_4block_xor_neon) vst1.8 {q0-q1}, [r1]! 
vld1.8 {q0-q1}, [r2] + mov sp, r4 // restore original stack pointer veor q0, q0, q11 veor q1, q1, q15 vst1.8 {q0-q1}, [r1] - mov sp, ip - pop {r4-r6, pc} + pop {r4-r5} + bx lr ENDPROC(chacha20_4block_xor_neon) - - .align 4 -CTRINC: .word 0, 1, 2, 3 diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c index 96e62ec105d0..cd9e93b46c2d 100644 --- a/arch/arm/crypto/crc32-ce-glue.c +++ b/arch/arm/crypto/crc32-ce-glue.c @@ -236,7 +236,7 @@ static void __exit crc32_pmull_mod_exit(void) ARRAY_SIZE(crc32_pmull_algs)); } -static const struct cpu_feature crc32_cpu_feature[] = { +static const struct cpu_feature __maybe_unused crc32_cpu_feature[] = { { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { } }; MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature); diff --git a/arch/arm/crypto/ghash-ce-core.S b/arch/arm/crypto/ghash-ce-core.S index 2f78c10b1881..406009afa9cf 100644 --- a/arch/arm/crypto/ghash-ce-core.S +++ b/arch/arm/crypto/ghash-ce-core.S @@ -63,6 +63,33 @@ k48 .req d31 SHASH2_p64 .req d31 + HH .req q10 + HH3 .req q11 + HH4 .req q12 + HH34 .req q13 + + HH_L .req d20 + HH_H .req d21 + HH3_L .req d22 + HH3_H .req d23 + HH4_L .req d24 + HH4_H .req d25 + HH34_L .req d26 + HH34_H .req d27 + SHASH2_H .req d29 + + XL2 .req q5 + XM2 .req q6 + XH2 .req q7 + T3 .req q8 + + XL2_L .req d10 + XL2_H .req d11 + XM2_L .req d12 + XM2_H .req d13 + T3_L .req d16 + T3_H .req d17 + .text .fpu crypto-neon-fp-armv8 @@ -175,12 +202,77 @@ beq 0f vld1.64 {T1}, [ip] teq r0, #0 - b 1f + b 3f + +0: .ifc \pn, p64 + tst r0, #3 // skip until #blocks is a + bne 2f // round multiple of 4 + + vld1.8 {XL2-XM2}, [r2]! +1: vld1.8 {T3-T2}, [r2]! + vrev64.8 XL2, XL2 + vrev64.8 XM2, XM2 + + subs r0, r0, #4 + + vext.8 T1, XL2, XL2, #8 + veor XL2_H, XL2_H, XL_L + veor XL, XL, T1 + + vrev64.8 T3, T3 + vrev64.8 T1, T2 + + vmull.p64 XH, HH4_H, XL_H // a1 * b1 + veor XL2_H, XL2_H, XL_H + vmull.p64 XL, HH4_L, XL_L // a0 * b0 + vmull.p64 XM, HH34_H, XL2_H // (a1 + a0)(b1 + b0) + + vmull.p64 XH2, HH3_H, XM2_L // a1 * b1 + veor XM2_L, XM2_L, XM2_H + vmull.p64 XL2, HH3_L, XM2_H // a0 * b0 + vmull.p64 XM2, HH34_L, XM2_L // (a1 + a0)(b1 + b0) + + veor XH, XH, XH2 + veor XL, XL, XL2 + veor XM, XM, XM2 + + vmull.p64 XH2, HH_H, T3_L // a1 * b1 + veor T3_L, T3_L, T3_H + vmull.p64 XL2, HH_L, T3_H // a0 * b0 + vmull.p64 XM2, SHASH2_H, T3_L // (a1 + a0)(b1 + b0) + + veor XH, XH, XH2 + veor XL, XL, XL2 + veor XM, XM, XM2 + + vmull.p64 XH2, SHASH_H, T1_L // a1 * b1 + veor T1_L, T1_L, T1_H + vmull.p64 XL2, SHASH_L, T1_H // a0 * b0 + vmull.p64 XM2, SHASH2_p64, T1_L // (a1 + a0)(b1 + b0) + + veor XH, XH, XH2 + veor XL, XL, XL2 + veor XM, XM, XM2 -0: vld1.64 {T1}, [r2]! + beq 4f + + vld1.8 {XL2-XM2}, [r2]! + + veor T1, XL, XH + veor XM, XM, T1 + + __pmull_reduce_p64 + + veor T1, T1, XH + veor XL, XL, T1 + + b 1b + .endif + +2: vld1.64 {T1}, [r2]! subs r0, r0, #1 -1: /* multiply XL by SHASH in GF(2^128) */ +3: /* multiply XL by SHASH in GF(2^128) */ #ifndef CONFIG_CPU_BIG_ENDIAN vrev64.8 T1, T1 #endif @@ -193,7 +285,7 @@ __pmull_\pn XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l @ a0 * b0 __pmull_\pn XM, T1_L, SHASH2_\pn @ (a1+a0)(b1+b0) - veor T1, XL, XH +4: veor T1, XL, XH veor XM, XM, T1 __pmull_reduce_\pn @@ -212,8 +304,14 @@ * struct ghash_key const *k, const char *head) */ ENTRY(pmull_ghash_update_p64) - vld1.64 {SHASH}, [r3] + vld1.64 {SHASH}, [r3]! + vld1.64 {HH}, [r3]! 
+ vld1.64 {HH3-HH4}, [r3] + veor SHASH2_p64, SHASH_L, SHASH_H + veor SHASH2_H, HH_L, HH_H + veor HH34_L, HH3_L, HH3_H + veor HH34_H, HH4_L, HH4_H vmov.i8 MASK, #0xe1 vshl.u64 MASK, MASK, #57 diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index 8930fc4e7c22..b7d30b6cf49c 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -1,7 +1,7 @@ /* * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions. * - * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org> + * Copyright (C) 2015 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -28,8 +28,10 @@ MODULE_ALIAS_CRYPTO("ghash"); #define GHASH_DIGEST_SIZE 16 struct ghash_key { - u64 a; - u64 b; + u64 h[2]; + u64 h2[2]; + u64 h3[2]; + u64 h4[2]; }; struct ghash_desc_ctx { @@ -117,26 +119,40 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) return 0; } +static void ghash_reflect(u64 h[], const be128 *k) +{ + u64 carry = be64_to_cpu(k->a) >> 63; + + h[0] = (be64_to_cpu(k->b) << 1) | carry; + h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63); + + if (carry) + h[1] ^= 0xc200000000000000UL; +} + static int ghash_setkey(struct crypto_shash *tfm, const u8 *inkey, unsigned int keylen) { struct ghash_key *key = crypto_shash_ctx(tfm); - u64 a, b; + be128 h, k; if (keylen != GHASH_BLOCK_SIZE) { crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } - /* perform multiplication by 'x' in GF(2^128) */ - b = get_unaligned_be64(inkey); - a = get_unaligned_be64(inkey + 8); + memcpy(&k, inkey, GHASH_BLOCK_SIZE); + ghash_reflect(key->h, &k); + + h = k; + gf128mul_lle(&h, &k); + ghash_reflect(key->h2, &h); - key->a = (a << 1) | (b >> 63); - key->b = (b << 1) | (a >> 63); + gf128mul_lle(&h, &k); + ghash_reflect(key->h3, &h); - if (b >> 63) - key->b ^= 0xc200000000000000UL; + gf128mul_lle(&h, &k); + ghash_reflect(key->h4, &h); return 0; } diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S deleted file mode 100644 index 57caa742016e..000000000000 --- a/arch/arm/crypto/speck-neon-core.S +++ /dev/null @@ -1,434 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * - * Copyright (c) 2018 Google, Inc - * - * Author: Eric Biggers <ebiggers@google.com> - */ - -#include <linux/linkage.h> - - .text - .fpu neon - - // arguments - ROUND_KEYS .req r0 // const {u64,u32} *round_keys - NROUNDS .req r1 // int nrounds - DST .req r2 // void *dst - SRC .req r3 // const void *src - NBYTES .req r4 // unsigned int nbytes - TWEAK .req r5 // void *tweak - - // registers which hold the data being encrypted/decrypted - X0 .req q0 - X0_L .req d0 - X0_H .req d1 - Y0 .req q1 - Y0_H .req d3 - X1 .req q2 - X1_L .req d4 - X1_H .req d5 - Y1 .req q3 - Y1_H .req d7 - X2 .req q4 - X2_L .req d8 - X2_H .req d9 - Y2 .req q5 - Y2_H .req d11 - X3 .req q6 - X3_L .req d12 - X3_H .req d13 - Y3 .req q7 - Y3_H .req d15 - - // the round key, duplicated in all lanes - ROUND_KEY .req q8 - ROUND_KEY_L .req d16 - ROUND_KEY_H .req d17 - - // index vector for vtbl-based 8-bit rotates - ROTATE_TABLE .req d18 - - // multiplication table for updating XTS tweaks - GF128MUL_TABLE .req d19 - GF64MUL_TABLE .req d19 - - // current XTS tweak value(s) - TWEAKV .req q10 - TWEAKV_L .req d20 - TWEAKV_H .req d21 - - TMP0 .req q12 - TMP0_L .req d24 - TMP0_H .req d25 - TMP1 
.req q13 - TMP2 .req q14 - TMP3 .req q15 - - .align 4 -.Lror64_8_table: - .byte 1, 2, 3, 4, 5, 6, 7, 0 -.Lror32_8_table: - .byte 1, 2, 3, 0, 5, 6, 7, 4 -.Lrol64_8_table: - .byte 7, 0, 1, 2, 3, 4, 5, 6 -.Lrol32_8_table: - .byte 3, 0, 1, 2, 7, 4, 5, 6 -.Lgf128mul_table: - .byte 0, 0x87 - .fill 14 -.Lgf64mul_table: - .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b - .fill 12 - -/* - * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time - * - * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for - * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes - * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. - * - * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because - * the vtbl approach is faster on some processors and the same speed on others. - */ -.macro _speck_round_128bytes n - - // x = ror(x, 8) - vtbl.8 X0_L, {X0_L}, ROTATE_TABLE - vtbl.8 X0_H, {X0_H}, ROTATE_TABLE - vtbl.8 X1_L, {X1_L}, ROTATE_TABLE - vtbl.8 X1_H, {X1_H}, ROTATE_TABLE - vtbl.8 X2_L, {X2_L}, ROTATE_TABLE - vtbl.8 X2_H, {X2_H}, ROTATE_TABLE - vtbl.8 X3_L, {X3_L}, ROTATE_TABLE - vtbl.8 X3_H, {X3_H}, ROTATE_TABLE - - // x += y - vadd.u\n X0, Y0 - vadd.u\n X1, Y1 - vadd.u\n X2, Y2 - vadd.u\n X3, Y3 - - // x ^= k - veor X0, ROUND_KEY - veor X1, ROUND_KEY - veor X2, ROUND_KEY - veor X3, ROUND_KEY - - // y = rol(y, 3) - vshl.u\n TMP0, Y0, #3 - vshl.u\n TMP1, Y1, #3 - vshl.u\n TMP2, Y2, #3 - vshl.u\n TMP3, Y3, #3 - vsri.u\n TMP0, Y0, #(\n - 3) - vsri.u\n TMP1, Y1, #(\n - 3) - vsri.u\n TMP2, Y2, #(\n - 3) - vsri.u\n TMP3, Y3, #(\n - 3) - - // y ^= x - veor Y0, TMP0, X0 - veor Y1, TMP1, X1 - veor Y2, TMP2, X2 - veor Y3, TMP3, X3 -.endm - -/* - * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time - * - * This is the inverse of _speck_round_128bytes(). - */ -.macro _speck_unround_128bytes n - - // y ^= x - veor TMP0, Y0, X0 - veor TMP1, Y1, X1 - veor TMP2, Y2, X2 - veor TMP3, Y3, X3 - - // y = ror(y, 3) - vshr.u\n Y0, TMP0, #3 - vshr.u\n Y1, TMP1, #3 - vshr.u\n Y2, TMP2, #3 - vshr.u\n Y3, TMP3, #3 - vsli.u\n Y0, TMP0, #(\n - 3) - vsli.u\n Y1, TMP1, #(\n - 3) - vsli.u\n Y2, TMP2, #(\n - 3) - vsli.u\n Y3, TMP3, #(\n - 3) - - // x ^= k - veor X0, ROUND_KEY - veor X1, ROUND_KEY - veor X2, ROUND_KEY - veor X3, ROUND_KEY - - // x -= y - vsub.u\n X0, Y0 - vsub.u\n X1, Y1 - vsub.u\n X2, Y2 - vsub.u\n X3, Y3 - - // x = rol(x, 8); - vtbl.8 X0_L, {X0_L}, ROTATE_TABLE - vtbl.8 X0_H, {X0_H}, ROTATE_TABLE - vtbl.8 X1_L, {X1_L}, ROTATE_TABLE - vtbl.8 X1_H, {X1_H}, ROTATE_TABLE - vtbl.8 X2_L, {X2_L}, ROTATE_TABLE - vtbl.8 X2_H, {X2_H}, ROTATE_TABLE - vtbl.8 X3_L, {X3_L}, ROTATE_TABLE - vtbl.8 X3_H, {X3_H}, ROTATE_TABLE -.endm - -.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp - - // Load the next source block - vld1.8 {\dst_reg}, [SRC]! - - // Save the current tweak in the tweak buffer - vst1.8 {TWEAKV}, [\tweak_buf:128]! - - // XOR the next source block with the current tweak - veor \dst_reg, TWEAKV - - /* - * Calculate the next tweak by multiplying the current one by x, - * modulo p(x) = x^128 + x^7 + x^2 + x + 1. - */ - vshr.u64 \tmp, TWEAKV, #63 - vshl.u64 TWEAKV, #1 - veor TWEAKV_H, \tmp\()_L - vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H - veor TWEAKV_L, \tmp\()_H -.endm - -.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp - - // Load the next two source blocks - vld1.8 {\dst_reg}, [SRC]! - - // Save the current two tweaks in the tweak buffer - vst1.8 {TWEAKV}, [\tweak_buf:128]! 
- - // XOR the next two source blocks with the current two tweaks - veor \dst_reg, TWEAKV - - /* - * Calculate the next two tweaks by multiplying the current ones by x^2, - * modulo p(x) = x^64 + x^4 + x^3 + x + 1. - */ - vshr.u64 \tmp, TWEAKV, #62 - vshl.u64 TWEAKV, #2 - vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L - vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H - veor TWEAKV, \tmp -.endm - -/* - * _speck_xts_crypt() - Speck-XTS encryption/decryption - * - * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer - * using Speck-XTS, specifically the variant with a block size of '2n' and round - * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and - * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a - * nonzero multiple of 128. - */ -.macro _speck_xts_crypt n, decrypting - push {r4-r7} - mov r7, sp - - /* - * The first four parameters were passed in registers r0-r3. Load the - * additional parameters, which were passed on the stack. - */ - ldr NBYTES, [sp, #16] - ldr TWEAK, [sp, #20] - - /* - * If decrypting, modify the ROUND_KEYS parameter to point to the last - * round key rather than the first, since for decryption the round keys - * are used in reverse order. - */ -.if \decrypting -.if \n == 64 - add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3 - sub ROUND_KEYS, #8 -.else - add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2 - sub ROUND_KEYS, #4 -.endif -.endif - - // Load the index vector for vtbl-based 8-bit rotates -.if \decrypting - ldr r12, =.Lrol\n\()_8_table -.else - ldr r12, =.Lror\n\()_8_table -.endif - vld1.8 {ROTATE_TABLE}, [r12:64] - - // One-time XTS preparation - - /* - * Allocate stack space to store 128 bytes worth of tweaks. For - * performance, this space is aligned to a 16-byte boundary so that we - * can use the load/store instructions that declare 16-byte alignment. - * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'. - */ - sub r12, sp, #128 - bic r12, #0xf - mov sp, r12 - -.if \n == 64 - // Load first tweak - vld1.8 {TWEAKV}, [TWEAK] - - // Load GF(2^128) multiplication table - ldr r12, =.Lgf128mul_table - vld1.8 {GF128MUL_TABLE}, [r12:64] -.else - // Load first tweak - vld1.8 {TWEAKV_L}, [TWEAK] - - // Load GF(2^64) multiplication table - ldr r12, =.Lgf64mul_table - vld1.8 {GF64MUL_TABLE}, [r12:64] - - // Calculate second tweak, packing it together with the first - vshr.u64 TMP0_L, TWEAKV_L, #63 - vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L - vshl.u64 TWEAKV_H, TWEAKV_L, #1 - veor TWEAKV_H, TMP0_L -.endif - -.Lnext_128bytes_\@: - - /* - * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak - * values, and save the tweaks on the stack for later. Then - * de-interleave the 'x' and 'y' elements of each block, i.e. make it so - * that the X[0-3] registers contain only the second halves of blocks, - * and the Y[0-3] registers contain only the first halves of blocks. - * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
- */ - mov r12, sp -.if \n == 64 - _xts128_precrypt_one X0, r12, TMP0 - _xts128_precrypt_one Y0, r12, TMP0 - _xts128_precrypt_one X1, r12, TMP0 - _xts128_precrypt_one Y1, r12, TMP0 - _xts128_precrypt_one X2, r12, TMP0 - _xts128_precrypt_one Y2, r12, TMP0 - _xts128_precrypt_one X3, r12, TMP0 - _xts128_precrypt_one Y3, r12, TMP0 - vswp X0_L, Y0_H - vswp X1_L, Y1_H - vswp X2_L, Y2_H - vswp X3_L, Y3_H -.else - _xts64_precrypt_two X0, r12, TMP0 - _xts64_precrypt_two Y0, r12, TMP0 - _xts64_precrypt_two X1, r12, TMP0 - _xts64_precrypt_two Y1, r12, TMP0 - _xts64_precrypt_two X2, r12, TMP0 - _xts64_precrypt_two Y2, r12, TMP0 - _xts64_precrypt_two X3, r12, TMP0 - _xts64_precrypt_two Y3, r12, TMP0 - vuzp.32 Y0, X0 - vuzp.32 Y1, X1 - vuzp.32 Y2, X2 - vuzp.32 Y3, X3 -.endif - - // Do the cipher rounds - - mov r12, ROUND_KEYS - mov r6, NROUNDS - -.Lnext_round_\@: -.if \decrypting -.if \n == 64 - vld1.64 ROUND_KEY_L, [r12] - sub r12, #8 - vmov ROUND_KEY_H, ROUND_KEY_L -.else - vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12] - sub r12, #4 -.endif - _speck_unround_128bytes \n -.else -.if \n == 64 - vld1.64 ROUND_KEY_L, [r12]! - vmov ROUND_KEY_H, ROUND_KEY_L -.else - vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]! -.endif - _speck_round_128bytes \n -.endif - subs r6, r6, #1 - bne .Lnext_round_\@ - - // Re-interleave the 'x' and 'y' elements of each block -.if \n == 64 - vswp X0_L, Y0_H - vswp X1_L, Y1_H - vswp X2_L, Y2_H - vswp X3_L, Y3_H -.else - vzip.32 Y0, X0 - vzip.32 Y1, X1 - vzip.32 Y2, X2 - vzip.32 Y3, X3 -.endif - - // XOR the encrypted/decrypted blocks with the tweaks we saved earlier - mov r12, sp - vld1.8 {TMP0, TMP1}, [r12:128]! - vld1.8 {TMP2, TMP3}, [r12:128]! - veor X0, TMP0 - veor Y0, TMP1 - veor X1, TMP2 - veor Y1, TMP3 - vld1.8 {TMP0, TMP1}, [r12:128]! - vld1.8 {TMP2, TMP3}, [r12:128]! - veor X2, TMP0 - veor Y2, TMP1 - veor X3, TMP2 - veor Y3, TMP3 - - // Store the ciphertext in the destination buffer - vst1.8 {X0, Y0}, [DST]! - vst1.8 {X1, Y1}, [DST]! - vst1.8 {X2, Y2}, [DST]! - vst1.8 {X3, Y3}, [DST]! - - // Continue if there are more 128-byte chunks remaining, else return - subs NBYTES, #128 - bne .Lnext_128bytes_\@ - - // Store the next tweak -.if \n == 64 - vst1.8 {TWEAKV}, [TWEAK] -.else - vst1.8 {TWEAKV_L}, [TWEAK] -.endif - - mov sp, r7 - pop {r4-r7} - bx lr -.endm - -ENTRY(speck128_xts_encrypt_neon) - _speck_xts_crypt n=64, decrypting=0 -ENDPROC(speck128_xts_encrypt_neon) - -ENTRY(speck128_xts_decrypt_neon) - _speck_xts_crypt n=64, decrypting=1 -ENDPROC(speck128_xts_decrypt_neon) - -ENTRY(speck64_xts_encrypt_neon) - _speck_xts_crypt n=32, decrypting=0 -ENDPROC(speck64_xts_encrypt_neon) - -ENTRY(speck64_xts_decrypt_neon) - _speck_xts_crypt n=32, decrypting=1 -ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c deleted file mode 100644 index f012c3ea998f..000000000000 --- a/arch/arm/crypto/speck-neon-glue.c +++ /dev/null @@ -1,288 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * - * Copyright (c) 2018 Google, Inc - * - * Note: the NIST recommendation for XTS only specifies a 128-bit block size, - * but a 64-bit version (needed for Speck64) is fairly straightforward; the math - * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial - * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004: - * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes - * OCB and PMAC"), represented as 0x1B. 
- */ - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> -#include <crypto/algapi.h> -#include <crypto/gf128mul.h> -#include <crypto/internal/skcipher.h> -#include <crypto/speck.h> -#include <crypto/xts.h> -#include <linux/kernel.h> -#include <linux/module.h> - -/* The assembly functions only handle multiples of 128 bytes */ -#define SPECK_NEON_CHUNK_SIZE 128 - -/* Speck128 */ - -struct speck128_xts_tfm_ctx { - struct speck128_tfm_ctx main_key; - struct speck128_tfm_ctx tweak_key; -}; - -asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck128_xts_crypt(struct skcipher_request *req, - speck128_crypt_one_t crypt_one, - speck128_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - le128 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - le128_xor((le128 *)dst, (const le128 *)src, &tweak); - (*crypt_one)(&ctx->main_key, dst, dst); - le128_xor((le128 *)dst, (const le128 *)dst, &tweak); - gf128mul_x_ble(&tweak, &tweak); - - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck128_xts_encrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_encrypt, - speck128_xts_encrypt_neon); -} - -static int speck128_xts_decrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_decrypt, - speck128_xts_decrypt_neon); -} - -static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck128_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -/* Speck64 */ - -struct speck64_xts_tfm_ctx { - struct speck64_tfm_ctx main_key; - struct speck64_tfm_ctx tweak_key; -}; - -asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void 
(*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, - speck64_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - __le64 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - *(__le64 *)dst = *(__le64 *)src ^ tweak; - (*crypt_one)(&ctx->main_key, dst, dst); - *(__le64 *)dst ^= tweak; - tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ - ((tweak & cpu_to_le64(1ULL << 63)) ? - 0x1B : 0)); - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck64_xts_encrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_encrypt, - speck64_xts_encrypt_neon); -} - -static int speck64_xts_decrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_decrypt, - speck64_xts_decrypt_neon); -} - -static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck64_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -static struct skcipher_alg speck_algs[] = { - { - .base.cra_name = "xts(speck128)", - .base.cra_driver_name = "xts-speck128-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK128_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK128_128_KEY_SIZE, - .max_keysize = 2 * SPECK128_256_KEY_SIZE, - .ivsize = SPECK128_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck128_xts_setkey, - .encrypt = speck128_xts_encrypt, - .decrypt = speck128_xts_decrypt, - }, { - .base.cra_name = "xts(speck64)", - .base.cra_driver_name = "xts-speck64-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK64_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK64_96_KEY_SIZE, - .max_keysize = 2 * SPECK64_128_KEY_SIZE, - .ivsize = SPECK64_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck64_xts_setkey, - .encrypt = speck64_xts_encrypt, - .decrypt = speck64_xts_decrypt, - } -}; - -static int __init speck_neon_module_init(void) -{ - if (!(elf_hwcap & HWCAP_NEON)) - return -ENODEV; - return 
crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -static void __exit speck_neon_module_exit(void) -{ - crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -module_init(speck_neon_module_init); -module_exit(speck_neon_module_exit); - -MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); -MODULE_ALIAS_CRYPTO("xts(speck128)"); -MODULE_ALIAS_CRYPTO("xts-speck128-neon"); -MODULE_ALIAS_CRYPTO("xts(speck64)"); -MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index b17ee03d280b..88286dd483ff 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -467,6 +467,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) #endif .endm + .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req +#ifdef CONFIG_CPU_SPECTRE + sub \tmp, \limit, #1 + subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr + addhs \tmp, \tmp, #1 @ if (tmp >= 0) { + subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) } + movlo \addr, #0 @ if (tmp < 0) addr = NULL + csdb +#endif + .endm + .macro uaccess_disable, tmp, isb=1 #ifdef CONFIG_CPU_SW_DOMAIN_PAN /* diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 237aa52d8733..36c951dd23b8 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -62,8 +62,8 @@ do { \ struct pt_regs; void die(const char *msg, struct pt_regs *regs, int err); -struct siginfo; -void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, +void arm_notify_die(const char *str, struct pt_regs *regs, + int signo, int si_code, void __user *addr, unsigned long err, unsigned long trap); #ifdef CONFIG_ARM_LPAE diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index 9e842ff41768..18b0197f2384 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -16,9 +16,6 @@ extern void __gnu_mcount_nc(void); #ifdef CONFIG_DYNAMIC_FTRACE struct dyn_arch_ftrace { -#ifdef CONFIG_OLD_MCOUNT - bool old_mcount; -#endif }; static inline unsigned long ftrace_call_adjust(unsigned long addr) diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h index d4014fbe5ea3..0d9f3918fa7e 100644 --- a/arch/arm/include/asm/hugetlb-3level.h +++ b/arch/arm/include/asm/hugetlb-3level.h @@ -29,6 +29,7 @@ * ptes. * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes). 
*/ +#define __HAVE_ARCH_HUGE_PTEP_GET static inline pte_t huge_ptep_get(pte_t *ptep) { pte_t retval = *ptep; @@ -37,35 +38,4 @@ static inline pte_t huge_ptep_get(pte_t *ptep) return retval; } -static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - set_pte_at(mm, addr, ptep, pte); -} - -static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep) -{ - ptep_clear_flush(vma, addr, ptep); -} - -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - ptep_set_wrprotect(mm, addr, ptep); -} - -static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - return ptep_get_and_clear(mm, addr, ptep); -} - -static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t pte, int dirty) -{ - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); -} - #endif /* _ASM_ARM_HUGETLB_3LEVEL_H */ diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h index 7d26f6c4f0f5..b67256c22b08 100644 --- a/arch/arm/include/asm/hugetlb.h +++ b/arch/arm/include/asm/hugetlb.h @@ -23,18 +23,8 @@ #define _ASM_ARM_HUGETLB_H #include <asm/page.h> -#include <asm-generic/hugetlb.h> - #include <asm/hugetlb-3level.h> - -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, - unsigned long ceiling) -{ - free_pgd_range(tlb, addr, end, floor, ceiling); -} - +#include <asm-generic/hugetlb.h> static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) @@ -42,27 +32,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, return 0; } -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - struct hstate *h = hstate_file(file); - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (addr & ~huge_page_mask(h)) - return -EINVAL; - return 0; -} - -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} - -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - static inline void arch_clear_hugepage_flags(struct page *page) { clear_bit(PG_dcache_clean, &page->flags); diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 2d43dca29c72..b95f8d0d9f17 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -133,8 +133,7 @@ * space. 
*/ #define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL)) + #define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30)) /* Virtualization Translation Control Register (VTCR) bits */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 3ad482d2f1eb..5ca5d9af0c26 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -273,7 +273,7 @@ static inline void __cpu_init_stage2(void) kvm_call_hyp(__init_stage2_translation); } -static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) +static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) { return 0; } @@ -354,4 +354,15 @@ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {} struct kvm *kvm_arch_alloc_vm(void); void kvm_arch_free_vm(struct kvm *kvm); +static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) +{ + /* + * On 32bit ARM, VMs get a static 40bit IPA stage2 setup, + * so any non-zero value used as type is illegal. + */ + if (type) + return -EINVAL; + return 0; +} + #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 847f01fa429d..1098ffc3d54b 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -35,16 +35,12 @@ addr; \ }) -/* - * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels. - */ -#define KVM_MMU_CACHE_MIN_PAGES 2 - #ifndef __ASSEMBLY__ #include <linux/highmem.h> #include <asm/cacheflush.h> #include <asm/cputype.h> +#include <asm/kvm_arm.h> #include <asm/kvm_hyp.h> #include <asm/pgalloc.h> #include <asm/stage2_pgtable.h> @@ -52,6 +48,13 @@ /* Ensure compatibility with arm64 */ #define VA_BITS 32 +#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT +#define kvm_phys_size(kvm) (1ULL << kvm_phys_shift(kvm)) +#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - 1ULL) +#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK + +#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t)) + int create_hyp_mappings(void *from, void *to, pgprot_t prot); int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, void __iomem **kaddr, @@ -355,6 +358,8 @@ static inline int hyp_map_aux_data(void) #define kvm_phys_to_vttbr(addr) (addr) +static inline void kvm_set_ipa_limit(void) {} + static inline bool kvm_cpu_has_cnp(void) { return false; diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index 460d616bb2d6..f6a7ea805232 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h @@ -19,43 +19,53 @@ #ifndef __ARM_S2_PGTABLE_H_ #define __ARM_S2_PGTABLE_H_ -#define stage2_pgd_none(pgd) pgd_none(pgd) -#define stage2_pgd_clear(pgd) pgd_clear(pgd) -#define stage2_pgd_present(pgd) pgd_present(pgd) -#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud) -#define stage2_pud_offset(pgd, address) pud_offset(pgd, address) -#define stage2_pud_free(pud) pud_free(NULL, pud) - -#define stage2_pud_none(pud) pud_none(pud) -#define stage2_pud_clear(pud) pud_clear(pud) -#define stage2_pud_present(pud) pud_present(pud) -#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd) -#define stage2_pmd_offset(pud, address) pmd_offset(pud, address) -#define stage2_pmd_free(pmd) pmd_free(NULL, pmd) - -#define stage2_pud_huge(pud) pud_huge(pud) +/* + * kvm_mmu_cache_min_pages() is the number of pages required + * to install a stage-2 translation. 
We pre-allocate the entry + * level table at VM creation. Since we have a 3 level page-table, + * we need only two pages to add a new mapping. + */ +#define kvm_mmu_cache_min_pages(kvm) 2 + +#define stage2_pgd_none(kvm, pgd) pgd_none(pgd) +#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd) +#define stage2_pgd_present(kvm, pgd) pgd_present(pgd) +#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud) +#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address) +#define stage2_pud_free(kvm, pud) pud_free(NULL, pud) + +#define stage2_pud_none(kvm, pud) pud_none(pud) +#define stage2_pud_clear(kvm, pud) pud_clear(pud) +#define stage2_pud_present(kvm, pud) pud_present(pud) +#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd) +#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address) +#define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd) + +#define stage2_pud_huge(kvm, pud) pud_huge(pud) /* Open coded p*d_addr_end that can deal with 64bit addresses */ -static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK; return (boundary - 1 < end - 1) ? boundary : end; } -#define stage2_pud_addr_end(addr, end) (end) +#define stage2_pud_addr_end(kvm, addr, end) (end) -static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK; return (boundary - 1 < end - 1) ? boundary : end; } -#define stage2_pgd_index(addr) pgd_index(addr) +#define stage2_pgd_index(kvm, addr) pgd_index(addr) -#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep) -#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp) -#define stage2_pud_table_empty(pudp) false +#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) +#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) +#define stage2_pud_table_empty(kvm, pudp) false #endif /* __ARM_S2_PGTABLE_H_ */ diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 9b37b6ab27fe..8f55dc520a3e 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *); struct user_vfp; struct user_vfp_exc; -extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *, - struct user_vfp_exc __user *); +extern int vfp_preserve_user_clear_hwstate(struct user_vfp *, + struct user_vfp_exc *); extern int vfp_restore_user_hwstate(struct user_vfp *, struct user_vfp_exc *); #endif diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 5451e1f05a19..c136eef8f690 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -69,6 +69,14 @@ extern int __put_user_bad(void); static inline void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; + + /* + * Prevent a mispredicted conditional call to set_fs from forwarding + * the wrong address limit to access_ok under speculation. + */ + dsb(nsh); + isb(); + modify_domain(DOMAIN_KERNEL, fs ? 
DOMAIN_CLIENT : DOMAIN_MANAGER); } @@ -92,6 +100,32 @@ static inline void set_fs(mm_segment_t fs) __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) /* + * Sanitise a uaccess pointer such that it becomes NULL if addr+size + * is above the current addr_limit. + */ +#define uaccess_mask_range_ptr(ptr, size) \ + ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size)) +static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr, + size_t size) +{ + void __user *safe_ptr = (void __user *)ptr; + unsigned long tmp; + + asm volatile( + " sub %1, %3, #1\n" + " subs %1, %1, %0\n" + " addhs %1, %1, #1\n" + " subhss %1, %1, %2\n" + " movlo %0, #0\n" + : "+r" (safe_ptr), "=&r" (tmp) + : "r" (size), "r" (current_thread_info()->addr_limit) + : "cc"); + + csdb(); + return safe_ptr; +} + +/* * Single-value transfer routines. They automatically use the right * size if we just have the right pointer type. Note that the functions * which read from user space (*get_*) need to take care not to leak @@ -362,6 +396,14 @@ do { \ __pu_err; \ }) +#ifdef CONFIG_CPU_SPECTRE +/* + * When mitigating Spectre variant 1.1, all accessors need to include + * verification of the address space. + */ +#define __put_user(x, ptr) put_user(x, ptr) + +#else #define __put_user(x, ptr) \ ({ \ long __pu_err = 0; \ @@ -369,12 +411,6 @@ do { \ __pu_err; \ }) -#define __put_user_error(x, ptr, err) \ -({ \ - __put_user_switch((x), (ptr), (err), __put_user_nocheck); \ - (void) 0; \ -}) - #define __put_user_nocheck(x, __pu_ptr, __err, __size) \ do { \ unsigned long __pu_addr = (unsigned long)__pu_ptr; \ @@ -454,6 +490,7 @@ do { \ : "r" (x), "i" (-EFAULT) \ : "cc") +#endif /* !CONFIG_CPU_SPECTRE */ #ifdef CONFIG_MMU extern unsigned long __must_check diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 076090d2dbf5..88ef2ce1f69a 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -16,23 +16,23 @@ #include <uapi/asm/unistd.h> #include <asm/unistd-nr.h> +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_OLD_MMAP #define __ARCH_WANT_SYS_OLD_SELECT +#define __ARCH_WANT_SYS_UTIME #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) #define __ARCH_WANT_SYS_TIME #define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_ALARM -#define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_OLD_GETRLIMIT #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_SOCKETCALL diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 783fbb4de5f9..8fa2dc21d332 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -167,9 +167,6 @@ EXPORT_SYMBOL(_find_next_bit_be); #endif #ifdef CONFIG_FUNCTION_TRACER -#ifdef CONFIG_OLD_MCOUNT -EXPORT_SYMBOL(mcount); -#endif EXPORT_SYMBOL(__gnu_mcount_nc); #endif diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index ecaa68dd1af5..13bcd3b867cb 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -87,14 +87,11 @@ void __init arm_dt_init_cpu_maps(void) if (!cpus) return; - for_each_child_of_node(cpus, cpu) { + for_each_of_cpu_node(cpu) { const __be32 *cell; int prop_bytes; u32 hwid; - if (of_node_cmp(cpu->type, "cpu")) - continue; - pr_debug(" * %pOF...\n", cpu); /* * A device tree 
containing CPU nodes with missing "reg" diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 746565a876dc..0465d65d23de 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -296,16 +296,15 @@ __sys_trace: cmp scno, #-1 @ skip the syscall? bne 2b add sp, sp, #S_OFF @ restore stack - b ret_slow_syscall -__sys_trace_return: - str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 +__sys_trace_return_nosave: + enable_irq_notrace mov r0, sp bl syscall_trace_exit b ret_slow_syscall -__sys_trace_return_nosave: - enable_irq_notrace +__sys_trace_return: + str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 mov r0, sp bl syscall_trace_exit b ret_slow_syscall diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S index efcd9f25a14b..0be69e551a64 100644 --- a/arch/arm/kernel/entry-ftrace.S +++ b/arch/arm/kernel/entry-ftrace.S @@ -15,23 +15,8 @@ * start of every function. In mcount, apart from the function's address (in * lr), we need to get hold of the function's caller's address. * - * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this: - * - * bl mcount - * - * These versions have the limitation that in order for the mcount routine to - * be able to determine the function's caller's address, an APCS-style frame - * pointer (which is set up with something like the code below) is required. - * - * mov ip, sp - * push {fp, ip, lr, pc} - * sub fp, ip, #4 - * - * With EABI, these frame pointers are not available unless -mapcs-frame is - * specified, and if building as Thumb-2, not even then. - * - * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount, - * with call sites like: + * Newer GCCs (4.4+) solve this problem by using a version of mcount with call + * sites like: * * push {lr} * bl __gnu_mcount_nc @@ -46,17 +31,10 @@ * allows it to be clobbered in subroutines and doesn't use it to hold * parameters.) * - * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0" - * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see - * arch/arm/kernel/ftrace.c). + * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}" + * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c). */ -#ifndef CONFIG_OLD_MCOUNT -#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) -#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0. 
-#endif -#endif - .macro mcount_adjust_addr rd, rn bic \rd, \rn, #1 @ clear the Thumb bit if present sub \rd, \rd, #MCOUNT_INSN_SIZE @@ -209,51 +187,6 @@ ftrace_graph_call\suffix: mcount_exit .endm -#ifdef CONFIG_OLD_MCOUNT -/* - * mcount - */ - -.macro mcount_enter - stmdb sp!, {r0-r3, lr} -.endm - -.macro mcount_get_lr reg - ldr \reg, [fp, #-4] -.endm - -.macro mcount_exit - ldr lr, [fp, #-4] - ldmia sp!, {r0-r3, pc} -.endm - -ENTRY(mcount) -#ifdef CONFIG_DYNAMIC_FTRACE - stmdb sp!, {lr} - ldr lr, [fp, #-4] - ldmia sp!, {pc} -#else - __mcount _old -#endif -ENDPROC(mcount) - -#ifdef CONFIG_DYNAMIC_FTRACE -ENTRY(ftrace_caller_old) - __ftrace_caller _old -ENDPROC(ftrace_caller_old) -#endif - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER -ENTRY(ftrace_graph_caller_old) - __ftrace_graph_caller -ENDPROC(ftrace_graph_caller_old) -#endif - -.purgem mcount_enter -.purgem mcount_get_lr -.purgem mcount_exit -#endif - /* * __gnu_mcount_nc */ diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 5617932a83df..0142fcfcc3d3 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -47,30 +47,6 @@ void arch_ftrace_update_code(int command) stop_machine(__ftrace_modify_code, &command, NULL); } -#ifdef CONFIG_OLD_MCOUNT -#define OLD_MCOUNT_ADDR ((unsigned long) mcount) -#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) - -#define OLD_NOP 0xe1a00000 /* mov r0, r0 */ - -static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) -{ - return rec->arch.old_mcount ? OLD_NOP : NOP; -} - -static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) -{ - if (!rec->arch.old_mcount) - return addr; - - if (addr == MCOUNT_ADDR) - addr = OLD_MCOUNT_ADDR; - else if (addr == FTRACE_ADDR) - addr = OLD_FTRACE_ADDR; - - return addr; -} -#else static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) { return NOP; @@ -80,7 +56,6 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) { return addr; } -#endif int ftrace_arch_code_modify_prepare(void) { @@ -150,15 +125,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) } #endif -#ifdef CONFIG_OLD_MCOUNT - if (!ret) { - pc = (unsigned long)&ftrace_call_old; - new = ftrace_call_replace(pc, (unsigned long)func); - - ret = ftrace_modify_code(pc, 0, new, false); - } -#endif - return ret; } @@ -203,16 +169,6 @@ int ftrace_make_nop(struct module *mod, new = ftrace_nop_replace(rec); ret = ftrace_modify_code(ip, old, new, true); -#ifdef CONFIG_OLD_MCOUNT - if (ret == -EINVAL && addr == MCOUNT_ADDR) { - rec->arch.old_mcount = true; - - old = ftrace_call_replace(ip, adjust_address(rec, addr)); - new = ftrace_nop_replace(rec); - ret = ftrace_modify_code(ip, old, new, true); - } -#endif - return ret; } @@ -290,13 +246,6 @@ static int ftrace_modify_graph_caller(bool enable) #endif -#ifdef CONFIG_OLD_MCOUNT - if (!ret) - ret = __ftrace_modify_caller(&ftrace_graph_call_old, - ftrace_graph_caller_old, - enable); -#endif - return ret; } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 36718a424358..6fa5b6387556 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -203,15 +203,8 @@ void ptrace_disable(struct task_struct *child) */ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) { - siginfo_t info; - - clear_siginfo(&info); - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_BRKPT; - info.si_addr = (void __user *)instruction_pointer(regs); - - force_sig_info(SIGTRAP, &info, tsk); + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void 
__user *)instruction_pointer(regs), tsk); } static int break_trap(struct pt_regs *regs, unsigned int instr) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index b8f766cf3a90..b908382b69ff 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) kframe->magic = IWMMXT_MAGIC; kframe->size = IWMMXT_STORAGE_SIZE; iwmmxt_task_copy(current_thread_info(), &kframe->storage); - - err = __copy_to_user(frame, kframe, sizeof(*frame)); } else { /* * For bug-compatibility with older kernels, some space @@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) * Set the magic and size appropriately so that properly * written userspace can skip it reliably: */ - __put_user_error(DUMMY_MAGIC, &frame->magic, err); - __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err); + *kframe = (struct iwmmxt_sigframe) { + .magic = DUMMY_MAGIC, + .size = IWMMXT_STORAGE_SIZE, + }; } + err = __copy_to_user(frame, kframe, sizeof(*kframe)); + return err; } @@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp) static int preserve_vfp_context(struct vfp_sigframe __user *frame) { - const unsigned long magic = VFP_MAGIC; - const unsigned long size = VFP_STORAGE_SIZE; + struct vfp_sigframe kframe; int err = 0; - __put_user_error(magic, &frame->magic, err); - __put_user_error(size, &frame->size, err); + memset(&kframe, 0, sizeof(kframe)); + kframe.magic = VFP_MAGIC; + kframe.size = VFP_STORAGE_SIZE; + err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc); if (err) - return -EFAULT; + return err; - return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc); + return __copy_to_user(frame, &kframe, sizeof(kframe)); } static int restore_vfp_context(char __user **auxp) @@ -288,30 +291,35 @@ static int setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { struct aux_sigframe __user *aux; + struct sigcontext context; int err = 0; - __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); - __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); - __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err); - __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err); - __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err); - __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err); - __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err); - __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err); - __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err); - __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err); - __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err); - __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err); - __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err); - __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err); - __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err); - __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err); - __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err); - - __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err); - __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err); - __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err); - __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); + context = (struct sigcontext) { + .arm_r0 
= regs->ARM_r0, + .arm_r1 = regs->ARM_r1, + .arm_r2 = regs->ARM_r2, + .arm_r3 = regs->ARM_r3, + .arm_r4 = regs->ARM_r4, + .arm_r5 = regs->ARM_r5, + .arm_r6 = regs->ARM_r6, + .arm_r7 = regs->ARM_r7, + .arm_r8 = regs->ARM_r8, + .arm_r9 = regs->ARM_r9, + .arm_r10 = regs->ARM_r10, + .arm_fp = regs->ARM_fp, + .arm_ip = regs->ARM_ip, + .arm_sp = regs->ARM_sp, + .arm_lr = regs->ARM_lr, + .arm_pc = regs->ARM_pc, + .arm_cpsr = regs->ARM_cpsr, + + .trap_no = current->thread.trap_no, + .error_code = current->thread.error_code, + .fault_address = current->thread.address, + .oldmask = set->sig[0], + }; + + err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context)); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); @@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) if (err == 0) err |= preserve_vfp_context(&aux->vfp); #endif - __put_user_error(0, &aux->end_magic, err); + err |= __put_user(0, &aux->end_magic); return err; } @@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) /* * Set uc.uc_flags to a value which sc.trap_no would never have. */ - __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); + err = __put_user(0x5ac3c35a, &frame->uc.uc_flags); err |= setup_sigframe(frame, regs, set); if (err == 0) @@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) err |= copy_siginfo_to_user(&frame->info, &ksig->info); - __put_user_error(0, &frame->sig.uc.uc_flags, err); - __put_user_error(NULL, &frame->sig.uc.uc_link, err); + err |= __put_user(0, &frame->sig.uc.uc_flags); + err |= __put_user(NULL, &frame->sig.uc.uc_link); err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp); err |= setup_sigframe(&frame->sig, regs, set); diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 80517f293eb9..a188d5e8ab7f 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -98,22 +98,20 @@ static int proc_status_show(struct seq_file *m, void *v) */ static void set_segfault(struct pt_regs *regs, unsigned long addr) { - siginfo_t info; + int si_code; - clear_siginfo(&info); down_read(¤t->mm->mmap_sem); if (find_vma(current->mm, addr) == NULL) - info.si_code = SEGV_MAPERR; + si_code = SEGV_MAPERR; else - info.si_code = SEGV_ACCERR; + si_code = SEGV_ACCERR; up_read(¤t->mm->mmap_sem); - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_addr = (void *) instruction_pointer(regs); - pr_debug("SWP{B} emulation: access caused memory abort!\n"); - arm_notify_die("Illegal memory access", regs, &info, 0, 0); + arm_notify_die("Illegal memory access", regs, + SIGSEGV, si_code, + (void __user *)instruction_pointer(regs), + 0, 0); abtcounter++; } diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index f0dd4b6ebb63..40da0872170f 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, int maxevents, int timeout) { struct epoll_event *kbuf; + struct oabi_epoll_event e; mm_segment_t fs; long ret, err, i; @@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, set_fs(fs); err = 0; for (i = 0; i < ret; i++) { - __put_user_error(kbuf[i].events, &events->events, err); - __put_user_error(kbuf[i].data, &events->data, err); + e.events = kbuf[i].events; + e.data = kbuf[i].data; + err = __copy_to_user(events, &e, sizeof(e)); + if (err) + break; events++; } kfree(kbuf); diff --git 
a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 24ac3cab411d..60e375ce1ab2 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -94,12 +94,6 @@ static void __init parse_dt_topology(void) __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), GFP_NOWAIT); - cn = of_find_node_by_path("/cpus"); - if (!cn) { - pr_err("No CPU information found in DT\n"); - return; - } - for_each_possible_cpu(cpu) { const u32 *rate; int len; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index badf02ca3693..2d668cff8ef4 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -365,13 +365,14 @@ void die(const char *str, struct pt_regs *regs, int err) } void arm_notify_die(const char *str, struct pt_regs *regs, - struct siginfo *info, unsigned long err, unsigned long trap) + int signo, int si_code, void __user *addr, + unsigned long err, unsigned long trap) { if (user_mode(regs)) { current->thread.error_code = err; current->thread.trap_no = trap; - force_sig_info(info->si_signo, info, current); + force_sig_fault(signo, si_code, addr, current); } else { die(str, regs, err); } @@ -438,10 +439,8 @@ int call_undef_hook(struct pt_regs *regs, unsigned int instr) asmlinkage void do_undefinstr(struct pt_regs *regs) { unsigned int instr; - siginfo_t info; void __user *pc; - clear_siginfo(&info); pc = (void __user *)instruction_pointer(regs); if (processor_mode(regs) == SVC_MODE) { @@ -485,13 +484,8 @@ die_sig: dump_instr(KERN_INFO, regs); } #endif - - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = pc; - - arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6); + arm_notify_die("Oops - undefined instruction", regs, + SIGILL, ILL_ILLOPC, pc, 0, 6); } NOKPROBE_SYMBOL(do_undefinstr) @@ -539,9 +533,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason) static int bad_syscall(int n, struct pt_regs *regs) { - siginfo_t info; - - clear_siginfo(&info); if ((current->personality & PER_MASK) != PER_LINUX) { send_sig(SIGSEGV, current, 1); return regs->ARM_r0; @@ -555,13 +546,10 @@ static int bad_syscall(int n, struct pt_regs *regs) } #endif - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLTRP; - info.si_addr = (void __user *)instruction_pointer(regs) - - (thumb_mode(regs) ? 2 : 4); - - arm_notify_die("Oops - bad syscall", regs, &info, n, 0); + arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP, + (void __user *)instruction_pointer(regs) - + (thumb_mode(regs) ? 2 : 4), + n, 0); return regs->ARM_r0; } @@ -607,20 +595,13 @@ do_cache_op(unsigned long start, unsigned long end, int flags) #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) asmlinkage int arm_syscall(int no, struct pt_regs *regs) { - siginfo_t info; - - clear_siginfo(&info); if ((no >> 16) != (__ARM_NR_BASE>> 16)) return bad_syscall(no, regs); switch (no & 0xffff) { case 0: /* branch through 0 */ - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = SEGV_MAPERR; - info.si_addr = NULL; - - arm_notify_die("branch through zero", regs, &info, 0, 0); + arm_notify_die("branch through zero", regs, + SIGSEGV, SEGV_MAPERR, NULL, 0, 0); return 0; case NR(breakpoint): /* SWI BREAK_POINT */ @@ -688,13 +669,10 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) } } #endif - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLTRP; - info.si_addr = (void __user *)instruction_pointer(regs) - - (thumb_mode(regs) ? 
2 : 4); - - arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0); + arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP, + (void __user *)instruction_pointer(regs) - + (thumb_mode(regs) ? 2 : 4), + no, 0); return 0; } @@ -744,9 +722,6 @@ asmlinkage void baddataabort(int code, unsigned long instr, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); - siginfo_t info; - - clear_siginfo(&info); #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_BADABORT) { @@ -757,12 +732,8 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs) } #endif - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = (void __user *)addr; - - arm_notify_die("unknown data abort code", regs, &info, instr, 0); + arm_notify_die("unknown data abort code", regs, + SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0); } void __readwrite_bug(const char *fn) diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 3593d5c1acd2..8c74037ade22 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -96,7 +96,6 @@ SECTIONS INIT_SETUP(16) INIT_CALLS CON_INITCALL - SECURITY_INITCALL INIT_RAM_FS } diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index a826df3d3814..6709a8d33963 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user) #ifdef CONFIG_CPU_SPECTRE get_thread_info r3 ldr r3, [r3, #TI_ADDR_LIMIT] - adds ip, r1, r2 @ ip=addr+size - sub r3, r3, #1 @ addr_limit - 1 - cmpcc ip, r3 @ if (addr+size > addr_limit - 1) - movcs r1, #0 @ addr = NULL - csdb + uaccess_mask_range_ptr r1, r2, r3, ip #endif #include "copy_template.S" diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index caf5019d8161..970abe521197 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -94,6 +94,11 @@ ENTRY(__copy_to_user_std) WEAK(arm_copy_to_user) +#ifdef CONFIG_CPU_SPECTRE + get_thread_info r3 + ldr r3, [r3, #TI_ADDR_LIMIT] + uaccess_mask_range_ptr r0, r2, r3, ip +#endif #include "copy_template.S" @@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std) rsb r0, r0, r2 copy_abort_end .popsection - diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 9b4ed1728616..73dc7360cbdd 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) n = __copy_to_user_std(to, from, n); uaccess_restore(ua_flags); } else { - n = __copy_to_user_memcpy(to, from, n); + n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n), + from, n); } return n; } diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S index a7c6ae13c945..bfe1c4d06901 100644 --- a/arch/arm/mach-at91/pm_suspend.S +++ b/arch/arm/mach-at91/pm_suspend.S @@ -149,6 +149,14 @@ exit_suspend: ENDPROC(at91_pm_suspend_in_sram) ENTRY(at91_backup_mode) + /* Switch the master clock source to slow clock. 
*/ + ldr pmc, .pmc_base + ldr tmp1, [pmc, #AT91_PMC_MCKR] + bic tmp1, tmp1, #AT91_PMC_CSS + str tmp1, [pmc, #AT91_PMC_MCKR] + + wait_mckrdy + /*BUMEN*/ ldr r0, .sfr mov tmp1, #0x1 diff --git a/arch/arm/mach-mmp/devices.c b/arch/arm/mach-mmp/devices.c index 671c7a09ab3d..0fca63c80e1a 100644 --- a/arch/arm/mach-mmp/devices.c +++ b/arch/arm/mach-mmp/devices.c @@ -277,21 +277,12 @@ struct platform_device pxa168_device_u2o = { #if IS_ENABLED(CONFIG_USB_EHCI_MV_U2O) struct resource pxa168_u2oehci_resources[] = { - /* regbase */ [0] = { - .start = PXA168_U2O_REGBASE + U2x_CAPREGS_OFFSET, + .start = PXA168_U2O_REGBASE, .end = PXA168_U2O_REGBASE + USB_REG_RANGE, .flags = IORESOURCE_MEM, - .name = "capregs", }, - /* phybase */ [1] = { - .start = PXA168_U2O_PHYBASE, - .end = PXA168_U2O_PHYBASE + USB_PHY_RANGE, - .flags = IORESOURCE_MEM, - .name = "phyregs", - }, - [2] = { .start = IRQ_PXA168_USB1, .end = IRQ_PXA168_USB1, .flags = IORESOURCE_IRQ, diff --git a/arch/arm/mach-shmobile/pm-rcar-gen2.c b/arch/arm/mach-shmobile/pm-rcar-gen2.c index 345af3ebcc3a..7efe95bd584f 100644 --- a/arch/arm/mach-shmobile/pm-rcar-gen2.c +++ b/arch/arm/mach-shmobile/pm-rcar-gen2.c @@ -50,7 +50,7 @@ void __init rcar_gen2_pm_init(void) void __iomem *p; u32 bar; static int once; - struct device_node *np, *cpus; + struct device_node *np; bool has_a7 = false; bool has_a15 = false; struct resource res; @@ -59,11 +59,7 @@ void __init rcar_gen2_pm_init(void) if (once++) return; - cpus = of_find_node_by_path("/cpus"); - if (!cpus) - return; - - for_each_child_of_node(cpus, np) { + for_each_of_cpu_node(np) { if (of_device_is_compatible(np, "arm,cortex-a15")) has_a15 = true; else if (of_device_is_compatible(np, "arm,cortex-a7")) diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c index e348bcfe389d..94fdeef11b81 100644 --- a/arch/arm/mach-shmobile/pm-rmobile.c +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -202,7 +202,7 @@ static void __init get_special_pds(void) const struct of_device_id *id; /* PM domains containing CPUs */ - for_each_node_by_type(np, "cpu") + for_each_of_cpu_node(np) add_special_pd(np, PD_CPU); /* PM domain containing console */ diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c index 828e8aea037e..e48b0939693f 100644 --- a/arch/arm/mach-shmobile/timer.c +++ b/arch/arm/mach-shmobile/timer.c @@ -22,22 +22,16 @@ void __init shmobile_init_delay(void) { - struct device_node *np, *cpus; + struct device_node *np; u32 max_freq = 0; - cpus = of_find_node_by_path("/cpus"); - if (!cpus) - return; - - for_each_child_of_node(cpus, np) { + for_each_of_cpu_node(np) { u32 freq; if (!of_property_read_u32(np, "clock-frequency", &freq)) max_freq = max(max_freq, freq); } - of_node_put(cpus); - if (!max_freq) return; diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index bd2c739d8083..b54f8f8def36 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -948,15 +948,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) goto fixup; if (ai_usermode & UM_SIGNAL) { - siginfo_t si; - - clear_siginfo(&si); - si.si_signo = SIGBUS; - si.si_errno = 0; - si.si_code = BUS_ADRALN; - si.si_addr = (void __user *)addr; - - force_sig_info(si.si_signo, &si, current); + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr, current); } else { /* * We're about to disable the alignment trap and return to diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 3232afb6fdc0..f4ea4c62c613 100644 --- a/arch/arm/mm/fault.c +++ 
b/arch/arm/mm/fault.c @@ -161,13 +161,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, unsigned int fsr, unsigned int sig, int code, struct pt_regs *regs) { - struct siginfo si; - if (addr > TASK_SIZE) harden_branch_predictor(); - clear_siginfo(&si); - #ifdef CONFIG_DEBUG_USER if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) || ((user_debug & UDBG_BUS) && (sig == SIGBUS))) { @@ -181,11 +177,7 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, tsk->thread.address = addr; tsk->thread.error_code = fsr; tsk->thread.trap_no = 14; - si.si_signo = sig; - si.si_errno = 0; - si.si_code = code; - si.si_addr = (void __user *)addr; - force_sig_info(sig, &si, tsk); + force_sig_fault(sig, code, (void __user *)addr, tsk); } void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs) @@ -554,7 +546,6 @@ asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { const struct fsr_info *inf = fsr_info + fsr_fs(fsr); - struct siginfo info; if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) return; @@ -563,12 +554,8 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) inf->name, fsr, addr); show_pte(current->mm, addr); - clear_siginfo(&info); - info.si_signo = inf->sig; - info.si_errno = 0; - info.si_code = inf->code; - info.si_addr = (void __user *)addr; - arm_notify_die("", regs, &info, fsr, 0); + arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr, + fsr, 0); } void __init @@ -588,7 +575,6 @@ asmlinkage void do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) { const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); - struct siginfo info; if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) return; @@ -596,12 +582,8 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", inf->name, ifsr, addr); - clear_siginfo(&info); - info.si_signo = inf->sig; - info.si_errno = 0; - info.si_code = inf->code; - info.si_addr = (void __user *)addr; - arm_notify_die("", regs, &info, ifsr, 0); + arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr, + ifsr, 0); } /* diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index dc7e6b50ef67..aff6e6eadc70 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -216,13 +216,6 @@ static struct notifier_block vfp_notifier_block = { */ static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) { - siginfo_t info; - - clear_siginfo(&info); - info.si_signo = SIGFPE; - info.si_code = sicode; - info.si_addr = (void __user *)(instruction_pointer(regs) - 4); - /* * This is the same as NWFPE, because it's not clear what * this is used for @@ -230,7 +223,9 @@ static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) current->thread.error_code = 0; current->thread.trap_no = 6; - send_sig_info(SIGFPE, &info, current); + send_sig_fault(SIGFPE, sicode, + (void __user *)(instruction_pointer(regs) - 4), + current); } static void vfp_panic(char *reason, u32 inst) @@ -553,12 +548,11 @@ void vfp_flush_hwstate(struct thread_info *thread) * Save the current VFP state into the provided structures and prepare * for entry into a new function (signal handler). 
*/ -int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, - struct user_vfp_exc __user *ufp_exc) +int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp, + struct user_vfp_exc *ufp_exc) { struct thread_info *thread = current_thread_info(); struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; - int err = 0; /* Ensure that the saved hwstate is up-to-date. */ vfp_sync_hwstate(thread); @@ -567,22 +561,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, * Copy the floating point registers. There can be unused * registers see asm/hwcap.h for details. */ - err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs, - sizeof(hwstate->fpregs)); + memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs)); + /* * Copy the status and control register. */ - __put_user_error(hwstate->fpscr, &ufp->fpscr, err); + ufp->fpscr = hwstate->fpscr; /* * Copy the exception registers. */ - __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err); - __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); - __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); - - if (err) - return -EFAULT; + ufp_exc->fpexc = hwstate->fpexc; + ufp_exc->fpinst = hwstate->fpinst; + ufp_exc->fpinst2 = ufp_exc->fpinst2; /* Ensure that VFP is disabled. */ vfp_flush_hwstate(thread); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c03cd0d765d3..964f682a2b7b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -11,6 +11,8 @@ config ARM64 select ARCH_CLOCKSOURCE_DATA select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_DMA_COHERENT_TO_PFN + select ARCH_HAS_DMA_MMAP_PGPROT select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FAST_MULTIPLIER @@ -24,6 +26,8 @@ config ARM64 select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_NMI_SAFE_CMPXCHG diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 393d2b524284..5a89a957641b 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -128,6 +128,7 @@ config ARCH_MVEBU select MVEBU_ICU select MVEBU_ODMI select MVEBU_PIC + select MVEBU_SEI select OF_GPIO select PINCTRL select PINCTRL_ARMADA_37XX diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 106039d25e2f..b4e994cd3a42 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -113,9 +113,8 @@ core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a # Default target when executing plain make boot := arch/arm64/boot KBUILD_IMAGE := $(boot)/Image.gz -KBUILD_DTBS := dtbs -all: Image.gz $(KBUILD_DTBS) +all: Image.gz Image: vmlinux @@ -127,17 +126,6 @@ Image.%: Image zinstall install: $(Q)$(MAKE) $(build)=$(boot) $@ -%.dtb: scripts - $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ - -PHONY += dtbs dtbs_install - -dtbs: prepare scripts - $(Q)$(MAKE) $(build)=$(boot)/dts - -dtbs_install: - $(Q)$(MAKE) $(dtbinst)=$(boot)/dts - PHONY += vdso_install vdso_install: $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@ @@ -145,7 +133,6 @@ vdso_install: # We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) - $(Q)$(MAKE) $(clean)=$(boot)/dts # We need to generate vdso-offsets.h before compiling certain files in kernel/. 
# In order to do that, we should use the archprepare target, but we can't since @@ -160,8 +147,6 @@ vdso_prepare: prepare0 define archhelp echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' - echo '* dtbs - Build device tree blobs for enabled boards' - echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)' echo ' install - Install uncompressed kernel' echo ' zinstall - Install compressed kernel' echo ' Install using (your) ~/bin/installkernel or' diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index d033da401c26..fb3d2ee77c56 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -137,6 +137,8 @@ reset-names = "stmmaceth", "stmmaceth-ocp"; clocks = <&clkmgr STRATIX10_EMAC0_CLK>; clock-names = "stmmaceth"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; status = "disabled"; }; @@ -150,6 +152,8 @@ reset-names = "stmmaceth", "stmmaceth-ocp"; clocks = <&clkmgr STRATIX10_EMAC1_CLK>; clock-names = "stmmaceth"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; status = "disabled"; }; @@ -163,6 +167,8 @@ reset-names = "stmmaceth", "stmmaceth-ocp"; clocks = <&clkmgr STRATIX10_EMAC2_CLK>; clock-names = "stmmaceth"; + tx-fifo-depth = <16384>; + rx-fifo-depth = <16384>; status = "disabled"; }; @@ -467,16 +473,51 @@ status = "disabled"; }; + sdr: sdr@f8011100 { + compatible = "altr,sdr-ctl", "syscon"; + reg = <0xf8011100 0xc0>; + }; + eccmgr { - compatible = "altr,socfpga-s10-ecc-manager"; + compatible = "altr,socfpga-a10-ecc-manager"; + altr,sysmgr-syscon = <&sysmgr>; + #address-cells = <1>; + #size-cells = <1>; interrupts = <0 15 4>, <0 95 4>; interrupt-controller; #interrupt-cells = <2>; + ranges; sdramedac { compatible = "altr,sdram-edac-s10"; + altr,sdr-syscon = <&sdr>; interrupts = <16 4>, <48 4>; }; + + usb0-ecc@ff8c4000 { + compatible = "altr,socfpga-usb-ecc"; + reg = <0xff8c4000 0x100>; + altr,ecc-parent = <&usb0>; + interrupts = <2 4>, + <34 4>; + }; + + emac0-rx-ecc@ff8c0000 { + compatible = "altr,socfpga-eth-mac-ecc"; + reg = <0xff8c0000 0x100>; + altr,ecc-parent = <&gmac0>; + interrupts = <4 4>, + <36 4>; + }; + + emac0-tx-ecc@ff8c0400 { + compatible = "altr,socfpga-eth-mac-ecc"; + reg = <0xff8c0400 0x100>; + altr,ecc-parent = <&gmac0>; + interrupts = <5 4>, + <37 4>; + }; + }; qspi: spi@ff8d2000 { diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index 6edc4fa9fd42..7c661753bfaf 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -76,7 +76,7 @@ phy-mode = "rgmii"; phy-handle = <&phy0>; - max-frame-size = <3800>; + max-frame-size = <9000>; mdio0 { #address-cells = <1>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi index 8cb78dd99672..90c0faf8579f 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi @@ -148,6 +148,7 @@ #address-cells = <2>; #size-cells = <2>; ranges; + dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>; clockgen: clocking@1300000 { compatible = "fsl,ls2080a-clockgen"; @@ -321,6 +322,8 @@ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ msi-parent = <&its>; + iommu-map = <0 &smmu 0 0>; /* This is 
fixed-up by u-boot */ + dma-coherent; #address-cells = <3>; #size-cells = <1>; @@ -424,6 +427,9 @@ compatible = "arm,mmu-500"; reg = <0 0x5000000 0 0x800000>; #global-interrupts = <12>; + #iommu-cells = <1>; + stream-match-mask = <0x7C00>; + dma-coherent; interrupts = <0 13 4>, /* global secure fault */ <0 14 4>, /* combined secure interrupt */ <0 15 4>, /* global non-secure fault */ @@ -466,7 +472,6 @@ <0 204 4>, <0 205 4>, <0 206 4>, <0 207 4>, <0 208 4>, <0 209 4>; - mmu-masters = <&fsl_mc 0x300 0>; }; dspi: dspi@2100000 { diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index db8d364f8476..3d165b4cdd2a 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -698,6 +698,7 @@ CONFIG_MEMTEST=y CONFIG_SECURITY=y CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y CONFIG_CRYPTO_SHA2_ARM64_CE=y @@ -706,7 +707,6 @@ CONFIG_CRYPTO_SHA3_ARM64=m CONFIG_CRYPTO_SM3_ARM64_CE=m CONFIG_CRYPTO_GHASH_ARM64_CE=y CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m -CONFIG_CRYPTO_CRC32_ARM64_CE=m CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y CONFIG_CRYPTO_CHACHA20_NEON=m diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index e3fdb0fd6f70..a5606823ed4d 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -66,11 +66,6 @@ config CRYPTO_CRCT10DIF_ARM64_CE depends on KERNEL_MODE_NEON && CRC_T10DIF select CRYPTO_HASH -config CRYPTO_CRC32_ARM64_CE - tristate "CRC32 and CRC32C digest algorithms using ARMv8 extensions" - depends on CRC32 - select CRYPTO_HASH - config CRYPTO_AES_ARM64 tristate "AES core cipher using scalar instructions" select CRYPTO_AES @@ -119,10 +114,4 @@ config CRYPTO_AES_ARM64_BS select CRYPTO_AES_ARM64 select CRYPTO_SIMD -config CRYPTO_SPECK_NEON - tristate "NEON accelerated Speck cipher algorithms" - depends on KERNEL_MODE_NEON - select CRYPTO_BLKCIPHER - select CRYPTO_SPECK - endif diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index bcafd016618e..f476fede09ba 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -32,9 +32,6 @@ ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o -obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o -crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o - obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o @@ -56,9 +53,6 @@ sha512-arm64-y := sha512-glue.o sha512-core.o obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o -obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o -speck-neon-y := speck-neon-core.o speck-neon-glue.o - obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S index 623e74ed1c67..143070510809 100644 --- a/arch/arm64/crypto/aes-ce.S +++ b/arch/arm64/crypto/aes-ce.S @@ -17,6 +17,11 @@ .arch armv8-a+crypto + xtsmask .req v16 + + .macro xts_reload_mask, tmp + .endm + /* preload all round keys */ .macro load_round_keys, rounds, rk cmp \rounds, #12 diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index adcb83eb683c..1e676625ef33 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -15,6 +15,7 @@ #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> 
#include <crypto/internal/skcipher.h> +#include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/cpufeature.h> #include <crypto/xts.h> @@ -31,6 +32,8 @@ #define aes_ecb_decrypt ce_aes_ecb_decrypt #define aes_cbc_encrypt ce_aes_cbc_encrypt #define aes_cbc_decrypt ce_aes_cbc_decrypt +#define aes_cbc_cts_encrypt ce_aes_cbc_cts_encrypt +#define aes_cbc_cts_decrypt ce_aes_cbc_cts_decrypt #define aes_ctr_encrypt ce_aes_ctr_encrypt #define aes_xts_encrypt ce_aes_xts_encrypt #define aes_xts_decrypt ce_aes_xts_decrypt @@ -45,6 +48,8 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions"); #define aes_ecb_decrypt neon_aes_ecb_decrypt #define aes_cbc_encrypt neon_aes_cbc_encrypt #define aes_cbc_decrypt neon_aes_cbc_decrypt +#define aes_cbc_cts_encrypt neon_aes_cbc_cts_encrypt +#define aes_cbc_cts_decrypt neon_aes_cbc_cts_decrypt #define aes_ctr_encrypt neon_aes_ctr_encrypt #define aes_xts_encrypt neon_aes_xts_encrypt #define aes_xts_decrypt neon_aes_xts_decrypt @@ -63,30 +68,41 @@ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); /* defined in aes-modes.S */ -asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], +asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks); -asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], +asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks); -asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], +asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks, u8 iv[]); -asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], +asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks, u8 iv[]); -asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], +asmlinkage void aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[], + int rounds, int bytes, u8 const iv[]); +asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[], + int rounds, int bytes, u8 const iv[]); + +asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds, int blocks, u8 ctr[]); -asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], - int rounds, int blocks, u8 const rk2[], u8 iv[], +asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[], + int rounds, int blocks, u32 const rk2[], u8 iv[], int first); -asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], - int rounds, int blocks, u8 const rk2[], u8 iv[], +asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[], + int rounds, int blocks, u32 const rk2[], u8 iv[], int first); asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds, int blocks, u8 dg[], int enc_before, int enc_after); +struct cts_cbc_req_ctx { + struct scatterlist sg_src[2]; + struct scatterlist sg_dst[2]; + struct skcipher_request subreq; +}; + struct crypto_aes_xts_ctx { struct crypto_aes_ctx key1; struct crypto_aes_ctx __aligned(8) key2; @@ -142,7 +158,7 @@ static int ecb_encrypt(struct skcipher_request *req) while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_enc, rounds, blocks); + ctx->key_enc, rounds, blocks); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } @@ -162,7 +178,7 @@ static int ecb_decrypt(struct 
skcipher_request *req) while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_dec, rounds, blocks); + ctx->key_dec, rounds, blocks); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } @@ -182,7 +198,7 @@ static int cbc_encrypt(struct skcipher_request *req) while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_enc, rounds, blocks, walk.iv); + ctx->key_enc, rounds, blocks, walk.iv); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } @@ -202,13 +218,149 @@ static int cbc_decrypt(struct skcipher_request *req) while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_dec, rounds, blocks, walk.iv); + ctx->key_dec, rounds, blocks, walk.iv); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } return err; } +static int cts_cbc_init_tfm(struct crypto_skcipher *tfm) +{ + crypto_skcipher_set_reqsize(tfm, sizeof(struct cts_cbc_req_ctx)); + return 0; +} + +static int cts_cbc_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req); + int err, rounds = 6 + ctx->key_length / 4; + int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; + struct scatterlist *src = req->src, *dst = req->dst; + struct skcipher_walk walk; + + skcipher_request_set_tfm(&rctx->subreq, tfm); + + if (req->cryptlen <= AES_BLOCK_SIZE) { + if (req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + cbc_blocks = 1; + } + + if (cbc_blocks > 0) { + unsigned int blocks; + + skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst, + cbc_blocks * AES_BLOCK_SIZE, + req->iv); + + err = skcipher_walk_virt(&walk, &rctx->subreq, false); + + while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { + kernel_neon_begin(); + aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, + ctx->key_enc, rounds, blocks, walk.iv); + kernel_neon_end(); + err = skcipher_walk_done(&walk, + walk.nbytes % AES_BLOCK_SIZE); + } + if (err) + return err; + + if (req->cryptlen == AES_BLOCK_SIZE) + return 0; + + dst = src = scatterwalk_ffwd(rctx->sg_src, req->src, + rctx->subreq.cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(rctx->sg_dst, req->dst, + rctx->subreq.cryptlen); + } + + /* handle ciphertext stealing */ + skcipher_request_set_crypt(&rctx->subreq, src, dst, + req->cryptlen - cbc_blocks * AES_BLOCK_SIZE, + req->iv); + + err = skcipher_walk_virt(&walk, &rctx->subreq, false); + if (err) + return err; + + kernel_neon_begin(); + aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, + ctx->key_enc, rounds, walk.nbytes, walk.iv); + kernel_neon_end(); + + return skcipher_walk_done(&walk, 0); +} + +static int cts_cbc_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req); + int err, rounds = 6 + ctx->key_length / 4; + int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2; + struct scatterlist *src = req->src, *dst = req->dst; + struct skcipher_walk walk; + + skcipher_request_set_tfm(&rctx->subreq, tfm); + + if (req->cryptlen <= AES_BLOCK_SIZE) { + if 
(req->cryptlen < AES_BLOCK_SIZE) + return -EINVAL; + cbc_blocks = 1; + } + + if (cbc_blocks > 0) { + unsigned int blocks; + + skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst, + cbc_blocks * AES_BLOCK_SIZE, + req->iv); + + err = skcipher_walk_virt(&walk, &rctx->subreq, false); + + while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { + kernel_neon_begin(); + aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, + ctx->key_dec, rounds, blocks, walk.iv); + kernel_neon_end(); + err = skcipher_walk_done(&walk, + walk.nbytes % AES_BLOCK_SIZE); + } + if (err) + return err; + + if (req->cryptlen == AES_BLOCK_SIZE) + return 0; + + dst = src = scatterwalk_ffwd(rctx->sg_src, req->src, + rctx->subreq.cryptlen); + if (req->dst != req->src) + dst = scatterwalk_ffwd(rctx->sg_dst, req->dst, + rctx->subreq.cryptlen); + } + + /* handle ciphertext stealing */ + skcipher_request_set_crypt(&rctx->subreq, src, dst, + req->cryptlen - cbc_blocks * AES_BLOCK_SIZE, + req->iv); + + err = skcipher_walk_virt(&walk, &rctx->subreq, false); + if (err) + return err; + + kernel_neon_begin(); + aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, + ctx->key_dec, rounds, walk.nbytes, walk.iv); + kernel_neon_end(); + + return skcipher_walk_done(&walk, 0); +} + static int ctr_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -222,7 +374,7 @@ static int ctr_encrypt(struct skcipher_request *req) while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { kernel_neon_begin(); aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key_enc, rounds, blocks, walk.iv); + ctx->key_enc, rounds, blocks, walk.iv); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } @@ -238,7 +390,7 @@ static int ctr_encrypt(struct skcipher_request *req) blocks = -1; kernel_neon_begin(); - aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds, + aes_ctr_encrypt(tail, NULL, ctx->key_enc, rounds, blocks, walk.iv); kernel_neon_end(); crypto_xor_cpy(tdst, tsrc, tail, nbytes); @@ -272,8 +424,8 @@ static int xts_encrypt(struct skcipher_request *req) for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { kernel_neon_begin(); aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key1.key_enc, rounds, blocks, - (u8 *)ctx->key2.key_enc, walk.iv, first); + ctx->key1.key_enc, rounds, blocks, + ctx->key2.key_enc, walk.iv, first); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } @@ -294,8 +446,8 @@ static int xts_decrypt(struct skcipher_request *req) for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { kernel_neon_begin(); aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, - (u8 *)ctx->key1.key_dec, rounds, blocks, - (u8 *)ctx->key2.key_enc, walk.iv, first); + ctx->key1.key_dec, rounds, blocks, + ctx->key2.key_enc, walk.iv, first); kernel_neon_end(); err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); } @@ -336,6 +488,24 @@ static struct skcipher_alg aes_algs[] = { { .decrypt = cbc_decrypt, }, { .base = { + .cra_name = "__cts(cbc(aes))", + .cra_driver_name = "__cts-cbc-aes-" MODE, + .cra_priority = PRIO, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypto_aes_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .walksize = 2 * AES_BLOCK_SIZE, + .setkey = skcipher_aes_setkey, + .encrypt = cts_cbc_encrypt, + .decrypt = 
cts_cbc_decrypt, + .init = cts_cbc_init_tfm, +}, { + .base = { .cra_name = "__ctr(aes)", .cra_driver_name = "__ctr-aes-" MODE, .cra_priority = PRIO, @@ -412,7 +582,6 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key, { struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm); be128 *consts = (be128 *)ctx->consts; - u8 *rk = (u8 *)ctx->key.key_enc; int rounds = 6 + key_len / 4; int err; @@ -422,7 +591,8 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key, /* encrypt the zero vector */ kernel_neon_begin(); - aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1); + aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, ctx->key.key_enc, + rounds, 1); kernel_neon_end(); cmac_gf128_mul_by_x(consts, consts); @@ -441,7 +611,6 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key, }; struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm); - u8 *rk = (u8 *)ctx->key.key_enc; int rounds = 6 + key_len / 4; u8 key[AES_BLOCK_SIZE]; int err; @@ -451,8 +620,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key, return err; kernel_neon_begin(); - aes_ecb_encrypt(key, ks[0], rk, rounds, 1); - aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2); + aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1); + aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2); kernel_neon_end(); return cbcmac_setkey(tfm, key, sizeof(key)); diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index 483a7130cf0e..67700045a0e0 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -14,12 +14,12 @@ .align 4 aes_encrypt_block4x: - encrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7 + encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 ret ENDPROC(aes_encrypt_block4x) aes_decrypt_block4x: - decrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7 + decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 ret ENDPROC(aes_decrypt_block4x) @@ -31,71 +31,57 @@ ENDPROC(aes_decrypt_block4x) */ AES_ENTRY(aes_ecb_encrypt) - frame_push 5 + stp x29, x30, [sp, #-16]! + mov x29, sp - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - -.Lecbencrestart: - enc_prepare w22, x21, x5 + enc_prepare w3, x2, x5 .LecbencloopNx: - subs w23, w23, #4 + subs w4, w4, #4 bmi .Lecbenc1x - ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ + ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ bl aes_encrypt_block4x - st1 {v0.16b-v3.16b}, [x19], #64 - cond_yield_neon .Lecbencrestart + st1 {v0.16b-v3.16b}, [x0], #64 b .LecbencloopNx .Lecbenc1x: - adds w23, w23, #4 + adds w4, w4, #4 beq .Lecbencout .Lecbencloop: - ld1 {v0.16b}, [x20], #16 /* get next pt block */ - encrypt_block v0, w22, x21, x5, w6 - st1 {v0.16b}, [x19], #16 - subs w23, w23, #1 + ld1 {v0.16b}, [x1], #16 /* get next pt block */ + encrypt_block v0, w3, x2, x5, w6 + st1 {v0.16b}, [x0], #16 + subs w4, w4, #1 bne .Lecbencloop .Lecbencout: - frame_pop + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_ecb_encrypt) AES_ENTRY(aes_ecb_decrypt) - frame_push 5 + stp x29, x30, [sp, #-16]! 
+ mov x29, sp - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - -.Lecbdecrestart: - dec_prepare w22, x21, x5 + dec_prepare w3, x2, x5 .LecbdecloopNx: - subs w23, w23, #4 + subs w4, w4, #4 bmi .Lecbdec1x - ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ + ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ bl aes_decrypt_block4x - st1 {v0.16b-v3.16b}, [x19], #64 - cond_yield_neon .Lecbdecrestart + st1 {v0.16b-v3.16b}, [x0], #64 b .LecbdecloopNx .Lecbdec1x: - adds w23, w23, #4 + adds w4, w4, #4 beq .Lecbdecout .Lecbdecloop: - ld1 {v0.16b}, [x20], #16 /* get next ct block */ - decrypt_block v0, w22, x21, x5, w6 - st1 {v0.16b}, [x19], #16 - subs w23, w23, #1 + ld1 {v0.16b}, [x1], #16 /* get next ct block */ + decrypt_block v0, w3, x2, x5, w6 + st1 {v0.16b}, [x0], #16 + subs w4, w4, #1 bne .Lecbdecloop .Lecbdecout: - frame_pop + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_ecb_decrypt) @@ -108,162 +94,211 @@ AES_ENDPROC(aes_ecb_decrypt) */ AES_ENTRY(aes_cbc_encrypt) - frame_push 6 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - mov x24, x5 - -.Lcbcencrestart: - ld1 {v4.16b}, [x24] /* get iv */ - enc_prepare w22, x21, x6 + ld1 {v4.16b}, [x5] /* get iv */ + enc_prepare w3, x2, x6 .Lcbcencloop4x: - subs w23, w23, #4 + subs w4, w4, #4 bmi .Lcbcenc1x - ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ + ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */ - encrypt_block v0, w22, x21, x6, w7 + encrypt_block v0, w3, x2, x6, w7 eor v1.16b, v1.16b, v0.16b - encrypt_block v1, w22, x21, x6, w7 + encrypt_block v1, w3, x2, x6, w7 eor v2.16b, v2.16b, v1.16b - encrypt_block v2, w22, x21, x6, w7 + encrypt_block v2, w3, x2, x6, w7 eor v3.16b, v3.16b, v2.16b - encrypt_block v3, w22, x21, x6, w7 - st1 {v0.16b-v3.16b}, [x19], #64 + encrypt_block v3, w3, x2, x6, w7 + st1 {v0.16b-v3.16b}, [x0], #64 mov v4.16b, v3.16b - st1 {v4.16b}, [x24] /* return iv */ - cond_yield_neon .Lcbcencrestart b .Lcbcencloop4x .Lcbcenc1x: - adds w23, w23, #4 + adds w4, w4, #4 beq .Lcbcencout .Lcbcencloop: - ld1 {v0.16b}, [x20], #16 /* get next pt block */ + ld1 {v0.16b}, [x1], #16 /* get next pt block */ eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */ - encrypt_block v4, w22, x21, x6, w7 - st1 {v4.16b}, [x19], #16 - subs w23, w23, #1 + encrypt_block v4, w3, x2, x6, w7 + st1 {v4.16b}, [x0], #16 + subs w4, w4, #1 bne .Lcbcencloop .Lcbcencout: - st1 {v4.16b}, [x24] /* return iv */ - frame_pop + st1 {v4.16b}, [x5] /* return iv */ ret AES_ENDPROC(aes_cbc_encrypt) AES_ENTRY(aes_cbc_decrypt) - frame_push 6 - - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - mov x24, x5 + stp x29, x30, [sp, #-16]! 
+ mov x29, sp -.Lcbcdecrestart: - ld1 {v7.16b}, [x24] /* get iv */ - dec_prepare w22, x21, x6 + ld1 {v7.16b}, [x5] /* get iv */ + dec_prepare w3, x2, x6 .LcbcdecloopNx: - subs w23, w23, #4 + subs w4, w4, #4 bmi .Lcbcdec1x - ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ + ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ mov v4.16b, v0.16b mov v5.16b, v1.16b mov v6.16b, v2.16b bl aes_decrypt_block4x - sub x20, x20, #16 + sub x1, x1, #16 eor v0.16b, v0.16b, v7.16b eor v1.16b, v1.16b, v4.16b - ld1 {v7.16b}, [x20], #16 /* reload 1 ct block */ + ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */ eor v2.16b, v2.16b, v5.16b eor v3.16b, v3.16b, v6.16b - st1 {v0.16b-v3.16b}, [x19], #64 - st1 {v7.16b}, [x24] /* return iv */ - cond_yield_neon .Lcbcdecrestart + st1 {v0.16b-v3.16b}, [x0], #64 b .LcbcdecloopNx .Lcbcdec1x: - adds w23, w23, #4 + adds w4, w4, #4 beq .Lcbcdecout .Lcbcdecloop: - ld1 {v1.16b}, [x20], #16 /* get next ct block */ + ld1 {v1.16b}, [x1], #16 /* get next ct block */ mov v0.16b, v1.16b /* ...and copy to v0 */ - decrypt_block v0, w22, x21, x6, w7 + decrypt_block v0, w3, x2, x6, w7 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ mov v7.16b, v1.16b /* ct is next iv */ - st1 {v0.16b}, [x19], #16 - subs w23, w23, #1 + st1 {v0.16b}, [x0], #16 + subs w4, w4, #1 bne .Lcbcdecloop .Lcbcdecout: - st1 {v7.16b}, [x24] /* return iv */ - frame_pop + st1 {v7.16b}, [x5] /* return iv */ + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_cbc_decrypt) /* + * aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[], + * int rounds, int bytes, u8 const iv[]) + * aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[], + * int rounds, int bytes, u8 const iv[]) + */ + +AES_ENTRY(aes_cbc_cts_encrypt) + adr_l x8, .Lcts_permute_table + sub x4, x4, #16 + add x9, x8, #32 + add x8, x8, x4 + sub x9, x9, x4 + ld1 {v3.16b}, [x8] + ld1 {v4.16b}, [x9] + + ld1 {v0.16b}, [x1], x4 /* overlapping loads */ + ld1 {v1.16b}, [x1] + + ld1 {v5.16b}, [x5] /* get iv */ + enc_prepare w3, x2, x6 + + eor v0.16b, v0.16b, v5.16b /* xor with iv */ + tbl v1.16b, {v1.16b}, v4.16b + encrypt_block v0, w3, x2, x6, w7 + + eor v1.16b, v1.16b, v0.16b + tbl v0.16b, {v0.16b}, v3.16b + encrypt_block v1, w3, x2, x6, w7 + + add x4, x0, x4 + st1 {v0.16b}, [x4] /* overlapping stores */ + st1 {v1.16b}, [x0] + ret +AES_ENDPROC(aes_cbc_cts_encrypt) + +AES_ENTRY(aes_cbc_cts_decrypt) + adr_l x8, .Lcts_permute_table + sub x4, x4, #16 + add x9, x8, #32 + add x8, x8, x4 + sub x9, x9, x4 + ld1 {v3.16b}, [x8] + ld1 {v4.16b}, [x9] + + ld1 {v0.16b}, [x1], x4 /* overlapping loads */ + ld1 {v1.16b}, [x1] + + ld1 {v5.16b}, [x5] /* get iv */ + dec_prepare w3, x2, x6 + + tbl v2.16b, {v1.16b}, v4.16b + decrypt_block v0, w3, x2, x6, w7 + eor v2.16b, v2.16b, v0.16b + + tbx v0.16b, {v1.16b}, v4.16b + tbl v2.16b, {v2.16b}, v3.16b + decrypt_block v0, w3, x2, x6, w7 + eor v0.16b, v0.16b, v5.16b /* xor with iv */ + + add x4, x0, x4 + st1 {v2.16b}, [x4] /* overlapping stores */ + st1 {v0.16b}, [x0] + ret +AES_ENDPROC(aes_cbc_cts_decrypt) + + .section ".rodata", "a" + .align 6 +.Lcts_permute_table: + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 + .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + .previous + + + /* * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, * int blocks, u8 ctr[]) */ AES_ENTRY(aes_ctr_encrypt) - frame_push 6 
+ stp x29, x30, [sp, #-16]! + mov x29, sp - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - mov x24, x5 - -.Lctrrestart: - enc_prepare w22, x21, x6 - ld1 {v4.16b}, [x24] + enc_prepare w3, x2, x6 + ld1 {v4.16b}, [x5] umov x6, v4.d[1] /* keep swabbed ctr in reg */ rev x6, x6 + cmn w6, w4 /* 32 bit overflow? */ + bcs .Lctrloop .LctrloopNx: - subs w23, w23, #4 + subs w4, w4, #4 bmi .Lctr1x - cmn w6, #4 /* 32 bit overflow? */ - bcs .Lctr1x - ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */ - dup v7.4s, w6 + add w7, w6, #1 mov v0.16b, v4.16b - add v7.4s, v7.4s, v8.4s + add w8, w6, #2 mov v1.16b, v4.16b - rev32 v8.16b, v7.16b + add w9, w6, #3 mov v2.16b, v4.16b + rev w7, w7 mov v3.16b, v4.16b - mov v1.s[3], v8.s[0] - mov v2.s[3], v8.s[1] - mov v3.s[3], v8.s[2] - ld1 {v5.16b-v7.16b}, [x20], #48 /* get 3 input blocks */ + rev w8, w8 + mov v1.s[3], w7 + rev w9, w9 + mov v2.s[3], w8 + mov v3.s[3], w9 + ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */ bl aes_encrypt_block4x eor v0.16b, v5.16b, v0.16b - ld1 {v5.16b}, [x20], #16 /* get 1 input block */ + ld1 {v5.16b}, [x1], #16 /* get 1 input block */ eor v1.16b, v6.16b, v1.16b eor v2.16b, v7.16b, v2.16b eor v3.16b, v5.16b, v3.16b - st1 {v0.16b-v3.16b}, [x19], #64 + st1 {v0.16b-v3.16b}, [x0], #64 add x6, x6, #4 rev x7, x6 ins v4.d[1], x7 - cbz w23, .Lctrout - st1 {v4.16b}, [x24] /* return next CTR value */ - cond_yield_neon .Lctrrestart + cbz w4, .Lctrout b .LctrloopNx .Lctr1x: - adds w23, w23, #4 + adds w4, w4, #4 beq .Lctrout .Lctrloop: mov v0.16b, v4.16b - encrypt_block v0, w22, x21, x8, w7 + encrypt_block v0, w3, x2, x8, w7 adds x6, x6, #1 /* increment BE ctr */ rev x7, x6 @@ -271,22 +306,22 @@ AES_ENTRY(aes_ctr_encrypt) bcs .Lctrcarry /* overflow? */ .Lctrcarrydone: - subs w23, w23, #1 + subs w4, w4, #1 bmi .Lctrtailblock /* blocks <0 means tail block */ - ld1 {v3.16b}, [x20], #16 + ld1 {v3.16b}, [x1], #16 eor v3.16b, v0.16b, v3.16b - st1 {v3.16b}, [x19], #16 + st1 {v3.16b}, [x0], #16 bne .Lctrloop .Lctrout: - st1 {v4.16b}, [x24] /* return next CTR value */ -.Lctrret: - frame_pop + st1 {v4.16b}, [x5] /* return next CTR value */ + ldp x29, x30, [sp], #16 ret .Lctrtailblock: - st1 {v0.16b}, [x19] - b .Lctrret + st1 {v0.16b}, [x0] + ldp x29, x30, [sp], #16 + ret .Lctrcarry: umov x7, v4.d[0] /* load upper word of ctr */ @@ -296,7 +331,6 @@ AES_ENTRY(aes_ctr_encrypt) ins v4.d[0], x7 b .Lctrcarrydone AES_ENDPROC(aes_ctr_encrypt) - .ltorg /* @@ -306,150 +340,132 @@ AES_ENDPROC(aes_ctr_encrypt) * int blocks, u8 const rk2[], u8 iv[], int first) */ - .macro next_tweak, out, in, const, tmp + .macro next_tweak, out, in, tmp sshr \tmp\().2d, \in\().2d, #63 - and \tmp\().16b, \tmp\().16b, \const\().16b + and \tmp\().16b, \tmp\().16b, xtsmask.16b add \out\().2d, \in\().2d, \in\().2d ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 eor \out\().16b, \out\().16b, \tmp\().16b .endm -.Lxts_mul_x: -CPU_LE( .quad 1, 0x87 ) -CPU_BE( .quad 0x87, 1 ) + .macro xts_load_mask, tmp + movi xtsmask.2s, #0x1 + movi \tmp\().2s, #0x87 + uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s + .endm AES_ENTRY(aes_xts_encrypt) - frame_push 6 + stp x29, x30, [sp, #-16]! 
+ mov x29, sp - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - mov x24, x6 - - ld1 {v4.16b}, [x24] + ld1 {v4.16b}, [x6] + xts_load_mask v8 cbz w7, .Lxtsencnotfirst enc_prepare w3, x5, x8 encrypt_block v4, w3, x5, x8, w7 /* first tweak */ enc_switch_key w3, x2, x8 - ldr q7, .Lxts_mul_x b .LxtsencNx -.Lxtsencrestart: - ld1 {v4.16b}, [x24] .Lxtsencnotfirst: - enc_prepare w22, x21, x8 + enc_prepare w3, x2, x8 .LxtsencloopNx: - ldr q7, .Lxts_mul_x - next_tweak v4, v4, v7, v8 + next_tweak v4, v4, v8 .LxtsencNx: - subs w23, w23, #4 + subs w4, w4, #4 bmi .Lxtsenc1x - ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */ - next_tweak v5, v4, v7, v8 + ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ + next_tweak v5, v4, v8 eor v0.16b, v0.16b, v4.16b - next_tweak v6, v5, v7, v8 + next_tweak v6, v5, v8 eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b - next_tweak v7, v6, v7, v8 + next_tweak v7, v6, v8 eor v3.16b, v3.16b, v7.16b bl aes_encrypt_block4x eor v3.16b, v3.16b, v7.16b eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b - st1 {v0.16b-v3.16b}, [x19], #64 + st1 {v0.16b-v3.16b}, [x0], #64 mov v4.16b, v7.16b - cbz w23, .Lxtsencout - st1 {v4.16b}, [x24] - cond_yield_neon .Lxtsencrestart + cbz w4, .Lxtsencout + xts_reload_mask v8 b .LxtsencloopNx .Lxtsenc1x: - adds w23, w23, #4 + adds w4, w4, #4 beq .Lxtsencout .Lxtsencloop: - ld1 {v1.16b}, [x20], #16 + ld1 {v1.16b}, [x1], #16 eor v0.16b, v1.16b, v4.16b - encrypt_block v0, w22, x21, x8, w7 + encrypt_block v0, w3, x2, x8, w7 eor v0.16b, v0.16b, v4.16b - st1 {v0.16b}, [x19], #16 - subs w23, w23, #1 + st1 {v0.16b}, [x0], #16 + subs w4, w4, #1 beq .Lxtsencout - next_tweak v4, v4, v7, v8 + next_tweak v4, v4, v8 b .Lxtsencloop .Lxtsencout: - st1 {v4.16b}, [x24] - frame_pop + st1 {v4.16b}, [x6] + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_xts_encrypt) AES_ENTRY(aes_xts_decrypt) - frame_push 6 + stp x29, x30, [sp, #-16]! 
+ mov x29, sp - mov x19, x0 - mov x20, x1 - mov x21, x2 - mov x22, x3 - mov x23, x4 - mov x24, x6 - - ld1 {v4.16b}, [x24] + ld1 {v4.16b}, [x6] + xts_load_mask v8 cbz w7, .Lxtsdecnotfirst enc_prepare w3, x5, x8 encrypt_block v4, w3, x5, x8, w7 /* first tweak */ dec_prepare w3, x2, x8 - ldr q7, .Lxts_mul_x b .LxtsdecNx -.Lxtsdecrestart: - ld1 {v4.16b}, [x24] .Lxtsdecnotfirst: - dec_prepare w22, x21, x8 + dec_prepare w3, x2, x8 .LxtsdecloopNx: - ldr q7, .Lxts_mul_x - next_tweak v4, v4, v7, v8 + next_tweak v4, v4, v8 .LxtsdecNx: - subs w23, w23, #4 + subs w4, w4, #4 bmi .Lxtsdec1x - ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */ - next_tweak v5, v4, v7, v8 + ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ + next_tweak v5, v4, v8 eor v0.16b, v0.16b, v4.16b - next_tweak v6, v5, v7, v8 + next_tweak v6, v5, v8 eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b - next_tweak v7, v6, v7, v8 + next_tweak v7, v6, v8 eor v3.16b, v3.16b, v7.16b bl aes_decrypt_block4x eor v3.16b, v3.16b, v7.16b eor v0.16b, v0.16b, v4.16b eor v1.16b, v1.16b, v5.16b eor v2.16b, v2.16b, v6.16b - st1 {v0.16b-v3.16b}, [x19], #64 + st1 {v0.16b-v3.16b}, [x0], #64 mov v4.16b, v7.16b - cbz w23, .Lxtsdecout - st1 {v4.16b}, [x24] - cond_yield_neon .Lxtsdecrestart + cbz w4, .Lxtsdecout + xts_reload_mask v8 b .LxtsdecloopNx .Lxtsdec1x: - adds w23, w23, #4 + adds w4, w4, #4 beq .Lxtsdecout .Lxtsdecloop: - ld1 {v1.16b}, [x20], #16 + ld1 {v1.16b}, [x1], #16 eor v0.16b, v1.16b, v4.16b - decrypt_block v0, w22, x21, x8, w7 + decrypt_block v0, w3, x2, x8, w7 eor v0.16b, v0.16b, v4.16b - st1 {v0.16b}, [x19], #16 - subs w23, w23, #1 + st1 {v0.16b}, [x0], #16 + subs w4, w4, #1 beq .Lxtsdecout - next_tweak v4, v4, v7, v8 + next_tweak v4, v4, v8 b .Lxtsdecloop .Lxtsdecout: - st1 {v4.16b}, [x24] - frame_pop + st1 {v4.16b}, [x6] + ldp x29, x30, [sp], #16 ret AES_ENDPROC(aes_xts_decrypt) diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S index 1c7b45b7268e..29100f692e8a 100644 --- a/arch/arm64/crypto/aes-neon.S +++ b/arch/arm64/crypto/aes-neon.S @@ -14,6 +14,12 @@ #define AES_ENTRY(func) ENTRY(neon_ ## func) #define AES_ENDPROC(func) ENDPROC(neon_ ## func) + xtsmask .req v7 + + .macro xts_reload_mask, tmp + xts_load_mask \tmp + .endm + /* multiply by polynomial 'x' in GF(2^8) */ .macro mul_by_x, out, in, temp, const sshr \temp, \in, #7 diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S deleted file mode 100644 index 8061bf0f9c66..000000000000 --- a/arch/arm64/crypto/crc32-ce-core.S +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions - * - * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/* - * Copyright 2012 Xyratex Technology Limited - * - * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32 - * calculation. - * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE) - * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found - * at: - * http://www.intel.com/products/processor/manuals/ - * Intel(R) 64 and IA-32 Architectures Software Developer's Manual - * Volume 2B: Instruction Set Reference, N-Z - * - * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com> - * Alexander Boyko <Alexander_Boyko@xyratex.com> - */ - -#include <linux/linkage.h> -#include <asm/assembler.h> - - .section ".rodata", "a" - .align 6 - .cpu generic+crypto+crc - -.Lcrc32_constants: - /* - * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4 - * #define CONSTANT_R1 0x154442bd4LL - * - * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596 - * #define CONSTANT_R2 0x1c6e41596LL - */ - .octa 0x00000001c6e415960000000154442bd4 - - /* - * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0 - * #define CONSTANT_R3 0x1751997d0LL - * - * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e - * #define CONSTANT_R4 0x0ccaa009eLL - */ - .octa 0x00000000ccaa009e00000001751997d0 - - /* - * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124 - * #define CONSTANT_R5 0x163cd6124LL - */ - .quad 0x0000000163cd6124 - .quad 0x00000000FFFFFFFF - - /* - * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL - * - * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` - * = 0x1F7011641LL - * #define CONSTANT_RU 0x1F7011641LL - */ - .octa 0x00000001F701164100000001DB710641 - -.Lcrc32c_constants: - .octa 0x000000009e4addf800000000740eef02 - .octa 0x000000014cd00bd600000000f20c0dfe - .quad 0x00000000dd45aab8 - .quad 0x00000000FFFFFFFF - .octa 0x00000000dea713f10000000105ec76f0 - - vCONSTANT .req v0 - dCONSTANT .req d0 - qCONSTANT .req q0 - - BUF .req x19 - LEN .req x20 - CRC .req x21 - CONST .req x22 - - vzr .req v9 - - /** - * Calculate crc32 - * BUF - buffer - * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63 - * CRC - initial crc32 - * return %eax crc32 - * uint crc32_pmull_le(unsigned char const *buffer, - * size_t len, uint crc32) - */ - .text -ENTRY(crc32_pmull_le) - adr_l x3, .Lcrc32_constants - b 0f - -ENTRY(crc32c_pmull_le) - adr_l x3, .Lcrc32c_constants - -0: frame_push 4, 64 - - mov BUF, x0 - mov LEN, x1 - mov CRC, x2 - mov CONST, x3 - - bic LEN, LEN, #15 - ld1 {v1.16b-v4.16b}, [BUF], #0x40 - movi vzr.16b, #0 - fmov dCONSTANT, CRC - eor v1.16b, v1.16b, vCONSTANT.16b - sub LEN, LEN, #0x40 - cmp LEN, #0x40 - b.lt less_64 - - ldr qCONSTANT, [CONST] - -loop_64: /* 64 bytes Full cache line folding */ - sub LEN, LEN, #0x40 - - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull2 v6.1q, v2.2d, vCONSTANT.2d - pmull2 v7.1q, v3.2d, vCONSTANT.2d - pmull2 v8.1q, v4.2d, vCONSTANT.2d - - pmull v1.1q, v1.1d, vCONSTANT.1d - pmull v2.1q, v2.1d, vCONSTANT.1d - pmull v3.1q, v3.1d, vCONSTANT.1d - pmull v4.1q, v4.1d, vCONSTANT.1d - - eor v1.16b, v1.16b, v5.16b - ld1 {v5.16b}, [BUF], #0x10 - eor v2.16b, v2.16b, v6.16b - ld1 {v6.16b}, [BUF], #0x10 - eor v3.16b, v3.16b, v7.16b - ld1 {v7.16b}, [BUF], #0x10 - eor v4.16b, v4.16b, v8.16b - ld1 {v8.16b}, [BUF], #0x10 - - eor v1.16b, v1.16b, v5.16b - eor v2.16b, v2.16b, v6.16b - eor v3.16b, 
v3.16b, v7.16b - eor v4.16b, v4.16b, v8.16b - - cmp LEN, #0x40 - b.lt less_64 - - if_will_cond_yield_neon - stp q1, q2, [sp, #.Lframe_local_offset] - stp q3, q4, [sp, #.Lframe_local_offset + 32] - do_cond_yield_neon - ldp q1, q2, [sp, #.Lframe_local_offset] - ldp q3, q4, [sp, #.Lframe_local_offset + 32] - ldr qCONSTANT, [CONST] - movi vzr.16b, #0 - endif_yield_neon - b loop_64 - -less_64: /* Folding cache line into 128bit */ - ldr qCONSTANT, [CONST, #16] - - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v5.16b - eor v1.16b, v1.16b, v2.16b - - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v5.16b - eor v1.16b, v1.16b, v3.16b - - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v5.16b - eor v1.16b, v1.16b, v4.16b - - cbz LEN, fold_64 - -loop_16: /* Folding rest buffer into 128bit */ - subs LEN, LEN, #0x10 - - ld1 {v2.16b}, [BUF], #0x10 - pmull2 v5.1q, v1.2d, vCONSTANT.2d - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v5.16b - eor v1.16b, v1.16b, v2.16b - - b.ne loop_16 - -fold_64: - /* perform the last 64 bit fold, also adds 32 zeroes - * to the input stream */ - ext v2.16b, v1.16b, v1.16b, #8 - pmull2 v2.1q, v2.2d, vCONSTANT.2d - ext v1.16b, v1.16b, vzr.16b, #8 - eor v1.16b, v1.16b, v2.16b - - /* final 32-bit fold */ - ldr dCONSTANT, [CONST, #32] - ldr d3, [CONST, #40] - - ext v2.16b, v1.16b, vzr.16b, #4 - and v1.16b, v1.16b, v3.16b - pmull v1.1q, v1.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v2.16b - - /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */ - ldr qCONSTANT, [CONST, #48] - - and v2.16b, v1.16b, v3.16b - ext v2.16b, vzr.16b, v2.16b, #8 - pmull2 v2.1q, v2.2d, vCONSTANT.2d - and v2.16b, v2.16b, v3.16b - pmull v2.1q, v2.1d, vCONSTANT.1d - eor v1.16b, v1.16b, v2.16b - mov w0, v1.s[1] - - frame_pop - ret -ENDPROC(crc32_pmull_le) -ENDPROC(crc32c_pmull_le) - - .macro __crc32, c -0: subs x2, x2, #16 - b.mi 8f - ldp x3, x4, [x1], #16 -CPU_BE( rev x3, x3 ) -CPU_BE( rev x4, x4 ) - crc32\c\()x w0, w0, x3 - crc32\c\()x w0, w0, x4 - b.ne 0b - ret - -8: tbz x2, #3, 4f - ldr x3, [x1], #8 -CPU_BE( rev x3, x3 ) - crc32\c\()x w0, w0, x3 -4: tbz x2, #2, 2f - ldr w3, [x1], #4 -CPU_BE( rev w3, w3 ) - crc32\c\()w w0, w0, w3 -2: tbz x2, #1, 1f - ldrh w3, [x1], #2 -CPU_BE( rev16 w3, w3 ) - crc32\c\()h w0, w0, w3 -1: tbz x2, #0, 0f - ldrb w3, [x1] - crc32\c\()b w0, w0, w3 -0: ret - .endm - - .align 5 -ENTRY(crc32_armv8_le) - __crc32 -ENDPROC(crc32_armv8_le) - - .align 5 -ENTRY(crc32c_armv8_le) - __crc32 c -ENDPROC(crc32c_armv8_le) diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c deleted file mode 100644 index 34b4e3d46aab..000000000000 --- a/arch/arm64/crypto/crc32-ce-glue.c +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions - * - * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/cpufeature.h> -#include <linux/crc32.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/string.h> - -#include <crypto/internal/hash.h> - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> -#include <asm/unaligned.h> - -#define PMULL_MIN_LEN 64L /* minimum size of buffer - * for crc32_pmull_le_16 */ -#define SCALE_F 16L /* size of NEON register */ - -asmlinkage u32 crc32_pmull_le(const u8 buf[], u64 len, u32 init_crc); -asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], size_t len); - -asmlinkage u32 crc32c_pmull_le(const u8 buf[], u64 len, u32 init_crc); -asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], size_t len); - -static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], size_t len); -static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], size_t len); - -static int crc32_pmull_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = 0; - return 0; -} - -static int crc32c_pmull_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = ~0; - return 0; -} - -static int crc32_pmull_setkey(struct crypto_shash *hash, const u8 *key, - unsigned int keylen) -{ - u32 *mctx = crypto_shash_ctx(hash); - - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - *mctx = le32_to_cpup((__le32 *)key); - return 0; -} - -static int crc32_pmull_init(struct shash_desc *desc) -{ - u32 *mctx = crypto_shash_ctx(desc->tfm); - u32 *crc = shash_desc_ctx(desc); - - *crc = *mctx; - return 0; -} - -static int crc32_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u32 *crc = shash_desc_ctx(desc); - - *crc = crc32_armv8_le(*crc, data, length); - return 0; -} - -static int crc32c_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u32 *crc = shash_desc_ctx(desc); - - *crc = crc32c_armv8_le(*crc, data, length); - return 0; -} - -static int crc32_pmull_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u32 *crc = shash_desc_ctx(desc); - unsigned int l; - - if ((u64)data % SCALE_F) { - l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F)); - - *crc = fallback_crc32(*crc, data, l); - - data += l; - length -= l; - } - - if (length >= PMULL_MIN_LEN && may_use_simd()) { - l = round_down(length, SCALE_F); - - kernel_neon_begin(); - *crc = crc32_pmull_le(data, l, *crc); - kernel_neon_end(); - - data += l; - length -= l; - } - - if (length > 0) - *crc = fallback_crc32(*crc, data, length); - - return 0; -} - -static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data, - unsigned int length) -{ - u32 *crc = shash_desc_ctx(desc); - unsigned int l; - - if ((u64)data % SCALE_F) { - l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F)); - - *crc = fallback_crc32c(*crc, data, l); - - data += l; - length -= l; - } - - if (length >= PMULL_MIN_LEN && may_use_simd()) { - l = round_down(length, SCALE_F); - - kernel_neon_begin(); - *crc = crc32c_pmull_le(data, l, *crc); - kernel_neon_end(); - - data += l; - length -= l; - } - - if (length > 0) { - *crc = fallback_crc32c(*crc, data, length); - } - - return 0; -} - -static int crc32_pmull_final(struct shash_desc *desc, u8 *out) -{ - u32 *crc = shash_desc_ctx(desc); - - put_unaligned_le32(*crc, out); - return 0; -} - -static int crc32c_pmull_final(struct shash_desc *desc, u8 *out) -{ - u32 *crc = shash_desc_ctx(desc); - - put_unaligned_le32(~*crc, out); - return 0; -} - -static struct 
shash_alg crc32_pmull_algs[] = { { - .setkey = crc32_pmull_setkey, - .init = crc32_pmull_init, - .update = crc32_update, - .final = crc32_pmull_final, - .descsize = sizeof(u32), - .digestsize = sizeof(u32), - - .base.cra_ctxsize = sizeof(u32), - .base.cra_init = crc32_pmull_cra_init, - .base.cra_name = "crc32", - .base.cra_driver_name = "crc32-arm64-ce", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = 1, - .base.cra_module = THIS_MODULE, -}, { - .setkey = crc32_pmull_setkey, - .init = crc32_pmull_init, - .update = crc32c_update, - .final = crc32c_pmull_final, - .descsize = sizeof(u32), - .digestsize = sizeof(u32), - - .base.cra_ctxsize = sizeof(u32), - .base.cra_init = crc32c_pmull_cra_init, - .base.cra_name = "crc32c", - .base.cra_driver_name = "crc32c-arm64-ce", - .base.cra_priority = 200, - .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .base.cra_blocksize = 1, - .base.cra_module = THIS_MODULE, -} }; - -static int __init crc32_pmull_mod_init(void) -{ - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) { - crc32_pmull_algs[0].update = crc32_pmull_update; - crc32_pmull_algs[1].update = crc32c_pmull_update; - - if (elf_hwcap & HWCAP_CRC32) { - fallback_crc32 = crc32_armv8_le; - fallback_crc32c = crc32c_armv8_le; - } else { - fallback_crc32 = crc32_le; - fallback_crc32c = __crc32c_le; - } - } else if (!(elf_hwcap & HWCAP_CRC32)) { - return -ENODEV; - } - return crypto_register_shashes(crc32_pmull_algs, - ARRAY_SIZE(crc32_pmull_algs)); -} - -static void __exit crc32_pmull_mod_exit(void) -{ - crypto_unregister_shashes(crc32_pmull_algs, - ARRAY_SIZE(crc32_pmull_algs)); -} - -static const struct cpu_feature crc32_cpu_feature[] = { - { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { } -}; -MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature); - -module_init(crc32_pmull_mod_init); -module_exit(crc32_pmull_mod_exit); - -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S index 663ea71cdb38..9e82e8e8ed05 100644 --- a/arch/arm64/crypto/crct10dif-ce-core.S +++ b/arch/arm64/crypto/crct10dif-ce-core.S @@ -80,7 +80,186 @@ vzr .req v13 -ENTRY(crc_t10dif_pmull) + ad .req v14 + bd .req v10 + + k00_16 .req v15 + k32_48 .req v16 + + t3 .req v17 + t4 .req v18 + t5 .req v19 + t6 .req v20 + t7 .req v21 + t8 .req v22 + t9 .req v23 + + perm1 .req v24 + perm2 .req v25 + perm3 .req v26 + perm4 .req v27 + + bd1 .req v28 + bd2 .req v29 + bd3 .req v30 + bd4 .req v31 + + .macro __pmull_init_p64 + .endm + + .macro __pmull_pre_p64, bd + .endm + + .macro __pmull_init_p8 + // k00_16 := 0x0000000000000000_000000000000ffff + // k32_48 := 0x00000000ffffffff_0000ffffffffffff + movi k32_48.2d, #0xffffffff + mov k32_48.h[2], k32_48.h[0] + ushr k00_16.2d, k32_48.2d, #32 + + // prepare the permutation vectors + mov_q x5, 0x080f0e0d0c0b0a09 + movi perm4.8b, #8 + dup perm1.2d, x5 + eor perm1.16b, perm1.16b, perm4.16b + ushr perm2.2d, perm1.2d, #8 + ushr perm3.2d, perm1.2d, #16 + ushr perm4.2d, perm1.2d, #24 + sli perm2.2d, perm1.2d, #56 + sli perm3.2d, perm1.2d, #48 + sli perm4.2d, perm1.2d, #40 + .endm + + .macro __pmull_pre_p8, bd + tbl bd1.16b, {\bd\().16b}, perm1.16b + tbl bd2.16b, {\bd\().16b}, perm2.16b + tbl bd3.16b, {\bd\().16b}, perm3.16b + tbl bd4.16b, {\bd\().16b}, perm4.16b + .endm + +__pmull_p8_core: +.L__pmull_p8_core: + ext t4.8b, ad.8b, ad.8b, #1 // A1 + ext t5.8b, ad.8b, ad.8b, #2 // A2 + ext t6.8b, ad.8b, ad.8b, #3 // A3 + + pmull 
t4.8h, t4.8b, bd.8b // F = A1*B + pmull t8.8h, ad.8b, bd1.8b // E = A*B1 + pmull t5.8h, t5.8b, bd.8b // H = A2*B + pmull t7.8h, ad.8b, bd2.8b // G = A*B2 + pmull t6.8h, t6.8b, bd.8b // J = A3*B + pmull t9.8h, ad.8b, bd3.8b // I = A*B3 + pmull t3.8h, ad.8b, bd4.8b // K = A*B4 + b 0f + +.L__pmull_p8_core2: + tbl t4.16b, {ad.16b}, perm1.16b // A1 + tbl t5.16b, {ad.16b}, perm2.16b // A2 + tbl t6.16b, {ad.16b}, perm3.16b // A3 + + pmull2 t4.8h, t4.16b, bd.16b // F = A1*B + pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1 + pmull2 t5.8h, t5.16b, bd.16b // H = A2*B + pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2 + pmull2 t6.8h, t6.16b, bd.16b // J = A3*B + pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3 + pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4 + +0: eor t4.16b, t4.16b, t8.16b // L = E + F + eor t5.16b, t5.16b, t7.16b // M = G + H + eor t6.16b, t6.16b, t9.16b // N = I + J + + uzp1 t8.2d, t4.2d, t5.2d + uzp2 t4.2d, t4.2d, t5.2d + uzp1 t7.2d, t6.2d, t3.2d + uzp2 t6.2d, t6.2d, t3.2d + + // t4 = (L) (P0 + P1) << 8 + // t5 = (M) (P2 + P3) << 16 + eor t8.16b, t8.16b, t4.16b + and t4.16b, t4.16b, k32_48.16b + + // t6 = (N) (P4 + P5) << 24 + // t7 = (K) (P6 + P7) << 32 + eor t7.16b, t7.16b, t6.16b + and t6.16b, t6.16b, k00_16.16b + + eor t8.16b, t8.16b, t4.16b + eor t7.16b, t7.16b, t6.16b + + zip2 t5.2d, t8.2d, t4.2d + zip1 t4.2d, t8.2d, t4.2d + zip2 t3.2d, t7.2d, t6.2d + zip1 t6.2d, t7.2d, t6.2d + + ext t4.16b, t4.16b, t4.16b, #15 + ext t5.16b, t5.16b, t5.16b, #14 + ext t6.16b, t6.16b, t6.16b, #13 + ext t3.16b, t3.16b, t3.16b, #12 + + eor t4.16b, t4.16b, t5.16b + eor t6.16b, t6.16b, t3.16b + ret +ENDPROC(__pmull_p8_core) + + .macro __pmull_p8, rq, ad, bd, i + .ifnc \bd, v10 + .err + .endif + mov ad.16b, \ad\().16b + .ifb \i + pmull \rq\().8h, \ad\().8b, bd.8b // D = A*B + .else + pmull2 \rq\().8h, \ad\().16b, bd.16b // D = A*B + .endif + + bl .L__pmull_p8_core\i + + eor \rq\().16b, \rq\().16b, t4.16b + eor \rq\().16b, \rq\().16b, t6.16b + .endm + + .macro fold64, p, reg1, reg2 + ldp q11, q12, [arg2], #0x20 + + __pmull_\p v8, \reg1, v10, 2 + __pmull_\p \reg1, \reg1, v10 + +CPU_LE( rev64 v11.16b, v11.16b ) +CPU_LE( rev64 v12.16b, v12.16b ) + + __pmull_\p v9, \reg2, v10, 2 + __pmull_\p \reg2, \reg2, v10 + +CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 ) +CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) + + eor \reg1\().16b, \reg1\().16b, v8.16b + eor \reg2\().16b, \reg2\().16b, v9.16b + eor \reg1\().16b, \reg1\().16b, v11.16b + eor \reg2\().16b, \reg2\().16b, v12.16b + .endm + + .macro fold16, p, reg, rk + __pmull_\p v8, \reg, v10 + __pmull_\p \reg, \reg, v10, 2 + .ifnb \rk + ldr_l q10, \rk, x8 + __pmull_pre_\p v10 + .endif + eor v7.16b, v7.16b, v8.16b + eor v7.16b, v7.16b, \reg\().16b + .endm + + .macro __pmull_p64, rd, rn, rm, n + .ifb \n + pmull \rd\().1q, \rn\().1d, \rm\().1d + .else + pmull2 \rd\().1q, \rn\().2d, \rm\().2d + .endif + .endm + + .macro crc_t10dif_pmull, p frame_push 3, 128 mov arg1_low32, w0 @@ -89,6 +268,8 @@ ENTRY(crc_t10dif_pmull) movi vzr.16b, #0 // init zero register + __pmull_init_\p + // adjust the 16-bit initial_crc value, scale it to 32 bits lsl arg1_low32, arg1_low32, #16 @@ -96,7 +277,7 @@ ENTRY(crc_t10dif_pmull) cmp arg3, #256 // for sizes less than 128, we can't fold 64B at a time... 
- b.lt _less_than_128 + b.lt .L_less_than_128_\@ // load the initial crc value // crc value does not need to be byte-reflected, but it needs @@ -137,6 +318,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4 // type of pmull instruction // will determine which constant to use + __pmull_pre_\p v10 // // we subtract 256 instead of 128 to save one instruction from the loop @@ -147,41 +329,19 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) // buffer. The _fold_64_B_loop will fold 64B at a time // until we have 64+y Bytes of buffer - // fold 64B at a time. This section of the code folds 4 vector // registers in parallel -_fold_64_B_loop: +.L_fold_64_B_loop_\@: - .macro fold64, reg1, reg2 - ldp q11, q12, [arg2], #0x20 - - pmull2 v8.1q, \reg1\().2d, v10.2d - pmull \reg1\().1q, \reg1\().1d, v10.1d - -CPU_LE( rev64 v11.16b, v11.16b ) -CPU_LE( rev64 v12.16b, v12.16b ) - - pmull2 v9.1q, \reg2\().2d, v10.2d - pmull \reg2\().1q, \reg2\().1d, v10.1d - -CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 ) -CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) - - eor \reg1\().16b, \reg1\().16b, v8.16b - eor \reg2\().16b, \reg2\().16b, v9.16b - eor \reg1\().16b, \reg1\().16b, v11.16b - eor \reg2\().16b, \reg2\().16b, v12.16b - .endm - - fold64 v0, v1 - fold64 v2, v3 - fold64 v4, v5 - fold64 v6, v7 + fold64 \p, v0, v1 + fold64 \p, v2, v3 + fold64 \p, v4, v5 + fold64 \p, v6, v7 subs arg3, arg3, #128 // check if there is another 64B in the buffer to be able to fold - b.lt _fold_64_B_end + b.lt .L_fold_64_B_end_\@ if_will_cond_yield_neon stp q0, q1, [sp, #.Lframe_local_offset] @@ -195,11 +355,13 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) ldp q6, q7, [sp, #.Lframe_local_offset + 96] ldr_l q10, rk3, x8 movi vzr.16b, #0 // init zero register + __pmull_init_\p + __pmull_pre_\p v10 endif_yield_neon - b _fold_64_B_loop + b .L_fold_64_B_loop_\@ -_fold_64_B_end: +.L_fold_64_B_end_\@: // at this point, the buffer pointer is pointing at the last y Bytes // of the buffer the 64B of folded data is in 4 of the vector // registers: v0, v1, v2, v3 @@ -208,38 +370,29 @@ _fold_64_B_end: // constants ldr_l q10, rk9, x8 + __pmull_pre_\p v10 - .macro fold16, reg, rk - pmull v8.1q, \reg\().1d, v10.1d - pmull2 \reg\().1q, \reg\().2d, v10.2d - .ifnb \rk - ldr_l q10, \rk, x8 - .endif - eor v7.16b, v7.16b, v8.16b - eor v7.16b, v7.16b, \reg\().16b - .endm - - fold16 v0, rk11 - fold16 v1, rk13 - fold16 v2, rk15 - fold16 v3, rk17 - fold16 v4, rk19 - fold16 v5, rk1 - fold16 v6 + fold16 \p, v0, rk11 + fold16 \p, v1, rk13 + fold16 \p, v2, rk15 + fold16 \p, v3, rk17 + fold16 \p, v4, rk19 + fold16 \p, v5, rk1 + fold16 \p, v6 // instead of 64, we add 48 to the loop counter to save 1 instruction // from the loop instead of a cmp instruction, we use the negative // flag with the jl instruction adds arg3, arg3, #(128-16) - b.lt _final_reduction_for_128 + b.lt .L_final_reduction_for_128_\@ // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 // and the rest is in memory. 
We can fold 16 bytes at a time if y>=16 // continue folding 16B at a time -_16B_reduction_loop: - pmull v8.1q, v7.1d, v10.1d - pmull2 v7.1q, v7.2d, v10.2d +.L_16B_reduction_loop_\@: + __pmull_\p v8, v7, v10 + __pmull_\p v7, v7, v10, 2 eor v7.16b, v7.16b, v8.16b ldr q0, [arg2], #16 @@ -251,22 +404,22 @@ CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) // instead of a cmp instruction, we utilize the flags with the // jge instruction equivalent of: cmp arg3, 16-16 // check if there is any more 16B in the buffer to be able to fold - b.ge _16B_reduction_loop + b.ge .L_16B_reduction_loop_\@ // now we have 16+z bytes left to reduce, where 0<= z < 16. // first, we reduce the data in the xmm7 register -_final_reduction_for_128: +.L_final_reduction_for_128_\@: // check if any more data to fold. If not, compute the CRC of // the final 128 bits adds arg3, arg3, #16 - b.eq _128_done + b.eq .L_128_done_\@ // here we are getting data that is less than 16 bytes. // since we know that there was data before the pointer, we can // offset the input pointer before the actual point, to receive // exactly 16 bytes. after that the registers need to be adjusted. -_get_last_two_regs: +.L_get_last_two_regs_\@: add arg2, arg2, arg3 ldr q1, [arg2, #-16] CPU_LE( rev64 v1.16b, v1.16b ) @@ -291,47 +444,48 @@ CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) bsl v0.16b, v2.16b, v1.16b // fold 16 Bytes - pmull v8.1q, v7.1d, v10.1d - pmull2 v7.1q, v7.2d, v10.2d + __pmull_\p v8, v7, v10 + __pmull_\p v7, v7, v10, 2 eor v7.16b, v7.16b, v8.16b eor v7.16b, v7.16b, v0.16b -_128_done: +.L_128_done_\@: // compute crc of a 128-bit value ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10 + __pmull_pre_\p v10 // 64b fold ext v0.16b, vzr.16b, v7.16b, #8 mov v7.d[0], v7.d[1] - pmull v7.1q, v7.1d, v10.1d + __pmull_\p v7, v7, v10 eor v7.16b, v7.16b, v0.16b // 32b fold ext v0.16b, v7.16b, vzr.16b, #4 mov v7.s[3], vzr.s[0] - pmull2 v0.1q, v0.2d, v10.2d + __pmull_\p v0, v0, v10, 2 eor v7.16b, v7.16b, v0.16b // barrett reduction -_barrett: ldr_l q10, rk7, x8 + __pmull_pre_\p v10 mov v0.d[0], v7.d[1] - pmull v0.1q, v0.1d, v10.1d + __pmull_\p v0, v0, v10 ext v0.16b, vzr.16b, v0.16b, #12 - pmull2 v0.1q, v0.2d, v10.2d + __pmull_\p v0, v0, v10, 2 ext v0.16b, vzr.16b, v0.16b, #12 eor v7.16b, v7.16b, v0.16b mov w0, v7.s[1] -_cleanup: +.L_cleanup_\@: // scale the result back to 16 bits lsr x0, x0, #16 frame_pop ret -_less_than_128: - cbz arg3, _cleanup +.L_less_than_128_\@: + cbz arg3, .L_cleanup_\@ movi v0.16b, #0 mov v0.s[3], arg1_low32 // get the initial crc value @@ -342,20 +496,21 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) eor v7.16b, v7.16b, v0.16b // xor the initial crc value cmp arg3, #16 - b.eq _128_done // exactly 16 left - b.lt _less_than_16_left + b.eq .L_128_done_\@ // exactly 16 left + b.lt .L_less_than_16_left_\@ ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10 + __pmull_pre_\p v10 // update the counter. 
subtract 32 instead of 16 to save one // instruction from the loop subs arg3, arg3, #32 - b.ge _16B_reduction_loop + b.ge .L_16B_reduction_loop_\@ add arg3, arg3, #16 - b _get_last_two_regs + b .L_get_last_two_regs_\@ -_less_than_16_left: +.L_less_than_16_left_\@: // shl r9, 4 adr_l x0, tbl_shf_table + 16 sub x0, x0, arg3 @@ -363,8 +518,17 @@ _less_than_16_left: movi v9.16b, #0x80 eor v0.16b, v0.16b, v9.16b tbl v7.16b, {v7.16b}, v0.16b - b _128_done -ENDPROC(crc_t10dif_pmull) + b .L_128_done_\@ + .endm + +ENTRY(crc_t10dif_pmull_p8) + crc_t10dif_pmull p8 +ENDPROC(crc_t10dif_pmull_p8) + + .align 5 +ENTRY(crc_t10dif_pmull_p64) + crc_t10dif_pmull p64 +ENDPROC(crc_t10dif_pmull_p64) // precomputed constants // these constants are precomputed from the poly: diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c index 96f0cae4a022..b461d62023f2 100644 --- a/arch/arm64/crypto/crct10dif-ce-glue.c +++ b/arch/arm64/crypto/crct10dif-ce-glue.c @@ -22,7 +22,10 @@ #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U -asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u64 len); +asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 buf[], u64 len); +asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 buf[], u64 len); + +static u16 (*crc_t10dif_pmull)(u16 init_crc, const u8 buf[], u64 len); static int crct10dif_init(struct shash_desc *desc) { @@ -85,6 +88,11 @@ static struct shash_alg crc_t10dif_alg = { static int __init crc_t10dif_mod_init(void) { + if (elf_hwcap & HWCAP_PMULL) + crc_t10dif_pmull = crc_t10dif_pmull_p64; + else + crc_t10dif_pmull = crc_t10dif_pmull_p8; + return crypto_register_shash(&crc_t10dif_alg); } @@ -93,8 +101,10 @@ static void __exit crc_t10dif_mod_exit(void) crypto_unregister_shash(&crc_t10dif_alg); } -module_cpu_feature_match(PMULL, crc_t10dif_mod_init); +module_cpu_feature_match(ASIMD, crc_t10dif_mod_init); module_exit(crc_t10dif_mod_exit); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_CRYPTO("crct10dif"); +MODULE_ALIAS_CRYPTO("crct10dif-arm64-ce"); diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S deleted file mode 100644 index b14463438b09..000000000000 --- a/arch/arm64/crypto/speck-neon-core.S +++ /dev/null @@ -1,352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * - * Copyright (c) 2018 Google, Inc - * - * Author: Eric Biggers <ebiggers@google.com> - */ - -#include <linux/linkage.h> - - .text - - // arguments - ROUND_KEYS .req x0 // const {u64,u32} *round_keys - NROUNDS .req w1 // int nrounds - NROUNDS_X .req x1 - DST .req x2 // void *dst - SRC .req x3 // const void *src - NBYTES .req w4 // unsigned int nbytes - TWEAK .req x5 // void *tweak - - // registers which hold the data being encrypted/decrypted - // (underscores avoid a naming collision with ARM64 registers x0-x3) - X_0 .req v0 - Y_0 .req v1 - X_1 .req v2 - Y_1 .req v3 - X_2 .req v4 - Y_2 .req v5 - X_3 .req v6 - Y_3 .req v7 - - // the round key, duplicated in all lanes - ROUND_KEY .req v8 - - // index vector for tbl-based 8-bit rotates - ROTATE_TABLE .req v9 - ROTATE_TABLE_Q .req q9 - - // temporary registers - TMP0 .req v10 - TMP1 .req v11 - TMP2 .req v12 - TMP3 .req v13 - - // multiplication table for updating XTS tweaks - GFMUL_TABLE .req v14 - GFMUL_TABLE_Q .req q14 - - // next XTS tweak value(s) - TWEAKV_NEXT .req v15 - - // XTS tweaks for the blocks currently being encrypted/decrypted - TWEAKV0 .req 
v16 - TWEAKV1 .req v17 - TWEAKV2 .req v18 - TWEAKV3 .req v19 - TWEAKV4 .req v20 - TWEAKV5 .req v21 - TWEAKV6 .req v22 - TWEAKV7 .req v23 - - .align 4 -.Lror64_8_table: - .octa 0x080f0e0d0c0b0a090007060504030201 -.Lror32_8_table: - .octa 0x0c0f0e0d080b0a090407060500030201 -.Lrol64_8_table: - .octa 0x0e0d0c0b0a09080f0605040302010007 -.Lrol32_8_table: - .octa 0x0e0d0c0f0a09080b0605040702010003 -.Lgf128mul_table: - .octa 0x00000000000000870000000000000001 -.Lgf64mul_table: - .octa 0x0000000000000000000000002d361b00 - -/* - * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time - * - * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for - * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes - * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. - * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64. - */ -.macro _speck_round_128bytes n, lanes - - // x = ror(x, 8) - tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b - tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b - tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b - tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b - - // x += y - add X_0.\lanes, X_0.\lanes, Y_0.\lanes - add X_1.\lanes, X_1.\lanes, Y_1.\lanes - add X_2.\lanes, X_2.\lanes, Y_2.\lanes - add X_3.\lanes, X_3.\lanes, Y_3.\lanes - - // x ^= k - eor X_0.16b, X_0.16b, ROUND_KEY.16b - eor X_1.16b, X_1.16b, ROUND_KEY.16b - eor X_2.16b, X_2.16b, ROUND_KEY.16b - eor X_3.16b, X_3.16b, ROUND_KEY.16b - - // y = rol(y, 3) - shl TMP0.\lanes, Y_0.\lanes, #3 - shl TMP1.\lanes, Y_1.\lanes, #3 - shl TMP2.\lanes, Y_2.\lanes, #3 - shl TMP3.\lanes, Y_3.\lanes, #3 - sri TMP0.\lanes, Y_0.\lanes, #(\n - 3) - sri TMP1.\lanes, Y_1.\lanes, #(\n - 3) - sri TMP2.\lanes, Y_2.\lanes, #(\n - 3) - sri TMP3.\lanes, Y_3.\lanes, #(\n - 3) - - // y ^= x - eor Y_0.16b, TMP0.16b, X_0.16b - eor Y_1.16b, TMP1.16b, X_1.16b - eor Y_2.16b, TMP2.16b, X_2.16b - eor Y_3.16b, TMP3.16b, X_3.16b -.endm - -/* - * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time - * - * This is the inverse of _speck_round_128bytes(). - */ -.macro _speck_unround_128bytes n, lanes - - // y ^= x - eor TMP0.16b, Y_0.16b, X_0.16b - eor TMP1.16b, Y_1.16b, X_1.16b - eor TMP2.16b, Y_2.16b, X_2.16b - eor TMP3.16b, Y_3.16b, X_3.16b - - // y = ror(y, 3) - ushr Y_0.\lanes, TMP0.\lanes, #3 - ushr Y_1.\lanes, TMP1.\lanes, #3 - ushr Y_2.\lanes, TMP2.\lanes, #3 - ushr Y_3.\lanes, TMP3.\lanes, #3 - sli Y_0.\lanes, TMP0.\lanes, #(\n - 3) - sli Y_1.\lanes, TMP1.\lanes, #(\n - 3) - sli Y_2.\lanes, TMP2.\lanes, #(\n - 3) - sli Y_3.\lanes, TMP3.\lanes, #(\n - 3) - - // x ^= k - eor X_0.16b, X_0.16b, ROUND_KEY.16b - eor X_1.16b, X_1.16b, ROUND_KEY.16b - eor X_2.16b, X_2.16b, ROUND_KEY.16b - eor X_3.16b, X_3.16b, ROUND_KEY.16b - - // x -= y - sub X_0.\lanes, X_0.\lanes, Y_0.\lanes - sub X_1.\lanes, X_1.\lanes, Y_1.\lanes - sub X_2.\lanes, X_2.\lanes, Y_2.\lanes - sub X_3.\lanes, X_3.\lanes, Y_3.\lanes - - // x = rol(x, 8) - tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b - tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b - tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b - tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b -.endm - -.macro _next_xts_tweak next, cur, tmp, n -.if \n == 64 - /* - * Calculate the next tweak by multiplying the current one by x, - * modulo p(x) = x^128 + x^7 + x^2 + x + 1. 
- */ - sshr \tmp\().2d, \cur\().2d, #63 - and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b - shl \next\().2d, \cur\().2d, #1 - ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 - eor \next\().16b, \next\().16b, \tmp\().16b -.else - /* - * Calculate the next two tweaks by multiplying the current ones by x^2, - * modulo p(x) = x^64 + x^4 + x^3 + x + 1. - */ - ushr \tmp\().2d, \cur\().2d, #62 - shl \next\().2d, \cur\().2d, #2 - tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b - eor \next\().16b, \next\().16b, \tmp\().16b -.endif -.endm - -/* - * _speck_xts_crypt() - Speck-XTS encryption/decryption - * - * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer - * using Speck-XTS, specifically the variant with a block size of '2n' and round - * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and - * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a - * nonzero multiple of 128. - */ -.macro _speck_xts_crypt n, lanes, decrypting - - /* - * If decrypting, modify the ROUND_KEYS parameter to point to the last - * round key rather than the first, since for decryption the round keys - * are used in reverse order. - */ -.if \decrypting - mov NROUNDS, NROUNDS /* zero the high 32 bits */ -.if \n == 64 - add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3 - sub ROUND_KEYS, ROUND_KEYS, #8 -.else - add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2 - sub ROUND_KEYS, ROUND_KEYS, #4 -.endif -.endif - - // Load the index vector for tbl-based 8-bit rotates -.if \decrypting - ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table -.else - ldr ROTATE_TABLE_Q, .Lror\n\()_8_table -.endif - - // One-time XTS preparation -.if \n == 64 - // Load first tweak - ld1 {TWEAKV0.16b}, [TWEAK] - - // Load GF(2^128) multiplication table - ldr GFMUL_TABLE_Q, .Lgf128mul_table -.else - // Load first tweak - ld1 {TWEAKV0.8b}, [TWEAK] - - // Load GF(2^64) multiplication table - ldr GFMUL_TABLE_Q, .Lgf64mul_table - - // Calculate second tweak, packing it together with the first - ushr TMP0.2d, TWEAKV0.2d, #63 - shl TMP1.2d, TWEAKV0.2d, #1 - tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b - eor TMP0.8b, TMP0.8b, TMP1.8b - mov TWEAKV0.d[1], TMP0.d[0] -.endif - -.Lnext_128bytes_\@: - - // Calculate XTS tweaks for next 128 bytes - _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n - _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n - _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n - _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n - _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n - _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n - _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n - _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n - - // Load the next source blocks into {X,Y}[0-3] - ld1 {X_0.16b-Y_1.16b}, [SRC], #64 - ld1 {X_2.16b-Y_3.16b}, [SRC], #64 - - // XOR the source blocks with their XTS tweaks - eor TMP0.16b, X_0.16b, TWEAKV0.16b - eor Y_0.16b, Y_0.16b, TWEAKV1.16b - eor TMP1.16b, X_1.16b, TWEAKV2.16b - eor Y_1.16b, Y_1.16b, TWEAKV3.16b - eor TMP2.16b, X_2.16b, TWEAKV4.16b - eor Y_2.16b, Y_2.16b, TWEAKV5.16b - eor TMP3.16b, X_3.16b, TWEAKV6.16b - eor Y_3.16b, Y_3.16b, TWEAKV7.16b - - /* - * De-interleave the 'x' and 'y' elements of each block, i.e. make it so - * that the X[0-3] registers contain only the second halves of blocks, - * and the Y[0-3] registers contain only the first halves of blocks. - * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
- */ - uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes - uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes - uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes - uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes - uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes - uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes - uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes - uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes - - // Do the cipher rounds - mov x6, ROUND_KEYS - mov w7, NROUNDS -.Lnext_round_\@: -.if \decrypting - ld1r {ROUND_KEY.\lanes}, [x6] - sub x6, x6, #( \n / 8 ) - _speck_unround_128bytes \n, \lanes -.else - ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 ) - _speck_round_128bytes \n, \lanes -.endif - subs w7, w7, #1 - bne .Lnext_round_\@ - - // Re-interleave the 'x' and 'y' elements of each block - zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes - zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes - zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes - zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes - zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes - zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes - zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes - zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes - - // XOR the encrypted/decrypted blocks with the tweaks calculated earlier - eor X_0.16b, TMP0.16b, TWEAKV0.16b - eor Y_0.16b, Y_0.16b, TWEAKV1.16b - eor X_1.16b, TMP1.16b, TWEAKV2.16b - eor Y_1.16b, Y_1.16b, TWEAKV3.16b - eor X_2.16b, TMP2.16b, TWEAKV4.16b - eor Y_2.16b, Y_2.16b, TWEAKV5.16b - eor X_3.16b, TMP3.16b, TWEAKV6.16b - eor Y_3.16b, Y_3.16b, TWEAKV7.16b - mov TWEAKV0.16b, TWEAKV_NEXT.16b - - // Store the ciphertext in the destination buffer - st1 {X_0.16b-Y_1.16b}, [DST], #64 - st1 {X_2.16b-Y_3.16b}, [DST], #64 - - // Continue if there are more 128-byte chunks remaining - subs NBYTES, NBYTES, #128 - bne .Lnext_128bytes_\@ - - // Store the next tweak and return -.if \n == 64 - st1 {TWEAKV_NEXT.16b}, [TWEAK] -.else - st1 {TWEAKV_NEXT.8b}, [TWEAK] -.endif - ret -.endm - -ENTRY(speck128_xts_encrypt_neon) - _speck_xts_crypt n=64, lanes=2d, decrypting=0 -ENDPROC(speck128_xts_encrypt_neon) - -ENTRY(speck128_xts_decrypt_neon) - _speck_xts_crypt n=64, lanes=2d, decrypting=1 -ENDPROC(speck128_xts_decrypt_neon) - -ENTRY(speck64_xts_encrypt_neon) - _speck_xts_crypt n=32, lanes=4s, decrypting=0 -ENDPROC(speck64_xts_encrypt_neon) - -ENTRY(speck64_xts_decrypt_neon) - _speck_xts_crypt n=32, lanes=4s, decrypting=1 -ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c deleted file mode 100644 index 6e233aeb4ff4..000000000000 --- a/arch/arm64/crypto/speck-neon-glue.c +++ /dev/null @@ -1,282 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS - * (64-bit version; based on the 32-bit version) - * - * Copyright (c) 2018 Google, Inc - */ - -#include <asm/hwcap.h> -#include <asm/neon.h> -#include <asm/simd.h> -#include <crypto/algapi.h> -#include <crypto/gf128mul.h> -#include <crypto/internal/skcipher.h> -#include <crypto/speck.h> -#include <crypto/xts.h> -#include <linux/kernel.h> -#include <linux/module.h> - -/* The assembly functions only handle multiples of 128 bytes */ -#define SPECK_NEON_CHUNK_SIZE 128 - -/* Speck128 */ - -struct speck128_xts_tfm_ctx { - struct speck128_tfm_ctx main_key; - struct speck128_tfm_ctx tweak_key; -}; - -asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, 
void *tweak); - -typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck128_xts_crypt(struct skcipher_request *req, - speck128_crypt_one_t crypt_one, - speck128_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - le128 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - le128_xor((le128 *)dst, (const le128 *)src, &tweak); - (*crypt_one)(&ctx->main_key, dst, dst); - le128_xor((le128 *)dst, (const le128 *)dst, &tweak); - gf128mul_x_ble(&tweak, &tweak); - - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck128_xts_encrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_encrypt, - speck128_xts_encrypt_neon); -} - -static int speck128_xts_decrypt(struct skcipher_request *req) -{ - return __speck128_xts_crypt(req, crypto_speck128_decrypt, - speck128_xts_decrypt_neon); -} - -static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck128_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -/* Speck64 */ - -struct speck64_xts_tfm_ctx { - struct speck64_tfm_ctx main_key; - struct speck64_tfm_ctx tweak_key; -}; - -asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, - void *dst, const void *src, - unsigned int nbytes, void *tweak); - -typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, - u8 *, const u8 *); -typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, - const void *, unsigned int, void *); - -static __always_inline int -__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one, - speck64_xts_crypt_many_t crypt_many) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - __le64 tweak; - int err; - - err = skcipher_walk_virt(&walk, req, true); - - crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - u8 *dst = walk.dst.virt.addr; - const u8 *src = walk.src.virt.addr; - - if (nbytes >= 
SPECK_NEON_CHUNK_SIZE && may_use_simd()) { - unsigned int count; - - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); - kernel_neon_begin(); - (*crypt_many)(ctx->main_key.round_keys, - ctx->main_key.nrounds, - dst, src, count, &tweak); - kernel_neon_end(); - dst += count; - src += count; - nbytes -= count; - } - - /* Handle any remainder with generic code */ - while (nbytes >= sizeof(tweak)) { - *(__le64 *)dst = *(__le64 *)src ^ tweak; - (*crypt_one)(&ctx->main_key, dst, dst); - *(__le64 *)dst ^= tweak; - tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ - ((tweak & cpu_to_le64(1ULL << 63)) ? - 0x1B : 0)); - dst += sizeof(tweak); - src += sizeof(tweak); - nbytes -= sizeof(tweak); - } - err = skcipher_walk_done(&walk, nbytes); - } - - return err; -} - -static int speck64_xts_encrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_encrypt, - speck64_xts_encrypt_neon); -} - -static int speck64_xts_decrypt(struct skcipher_request *req) -{ - return __speck64_xts_crypt(req, crypto_speck64_decrypt, - speck64_xts_decrypt_neon); -} - -static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keylen) -{ - struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); - int err; - - err = xts_verify_key(tfm, key, keylen); - if (err) - return err; - - keylen /= 2; - - err = crypto_speck64_setkey(&ctx->main_key, key, keylen); - if (err) - return err; - - return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); -} - -static struct skcipher_alg speck_algs[] = { - { - .base.cra_name = "xts(speck128)", - .base.cra_driver_name = "xts-speck128-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK128_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK128_128_KEY_SIZE, - .max_keysize = 2 * SPECK128_256_KEY_SIZE, - .ivsize = SPECK128_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck128_xts_setkey, - .encrypt = speck128_xts_encrypt, - .decrypt = speck128_xts_decrypt, - }, { - .base.cra_name = "xts(speck64)", - .base.cra_driver_name = "xts-speck64-neon", - .base.cra_priority = 300, - .base.cra_blocksize = SPECK64_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), - .base.cra_alignmask = 7, - .base.cra_module = THIS_MODULE, - .min_keysize = 2 * SPECK64_96_KEY_SIZE, - .max_keysize = 2 * SPECK64_128_KEY_SIZE, - .ivsize = SPECK64_BLOCK_SIZE, - .walksize = SPECK_NEON_CHUNK_SIZE, - .setkey = speck64_xts_setkey, - .encrypt = speck64_xts_encrypt, - .decrypt = speck64_xts_decrypt, - } -}; - -static int __init speck_neon_module_init(void) -{ - if (!(elf_hwcap & HWCAP_ASIMD)) - return -ENODEV; - return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -static void __exit speck_neon_module_exit(void) -{ - crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs)); -} - -module_init(speck_neon_module_init); -module_exit(speck_neon_module_exit); - -MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); -MODULE_ALIAS_CRYPTO("xts(speck128)"); -MODULE_ALIAS_CRYPTO("xts-speck128-neon"); -MODULE_ALIAS_CRYPTO("xts(speck64)"); -MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index cee28a05ee98..93ce86d5dae1 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -25,6 +25,8 @@ #include <linux/sched.h> 
#include <linux/sched/task_stack.h> +#include <asm-generic/compat.h> + #define COMPAT_USER_HZ 100 #ifdef __AARCH64EB__ #define COMPAT_UTS_MACHINE "armv8b\0\0" @@ -32,10 +34,6 @@ #define COMPAT_UTS_MACHINE "armv8l\0\0" #endif -typedef u32 compat_size_t; -typedef s32 compat_ssize_t; -typedef s32 compat_clock_t; -typedef s32 compat_pid_t; typedef u16 __compat_uid_t; typedef u16 __compat_gid_t; typedef u16 __compat_uid16_t; @@ -43,27 +41,13 @@ typedef u16 __compat_gid16_t; typedef u32 __compat_uid32_t; typedef u32 __compat_gid32_t; typedef u16 compat_mode_t; -typedef u32 compat_ino_t; typedef u32 compat_dev_t; -typedef s32 compat_off_t; -typedef s64 compat_loff_t; typedef s32 compat_nlink_t; typedef u16 compat_ipc_pid_t; -typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s32 compat_key_t; -typedef s32 compat_timer_t; - -typedef s16 compat_short_t; -typedef s32 compat_int_t; -typedef s32 compat_long_t; typedef s64 compat_s64; -typedef u16 compat_ushort_t; -typedef u32 compat_uint_t; -typedef u32 compat_ulong_t; typedef u64 compat_u64; -typedef u32 compat_uptr_t; struct compat_stat { #ifdef __AARCH64EB__ @@ -86,11 +70,11 @@ struct compat_stat { compat_off_t st_size; compat_off_t st_blksize; compat_off_t st_blocks; - compat_time_t st_atime; + old_time32_t st_atime; compat_ulong_t st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; compat_ulong_t st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; compat_ulong_t st_ctime_nsec; compat_ulong_t __unused4[2]; }; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 6db48d90ad63..7e2ec64aa414 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -537,6 +537,27 @@ static inline void arm64_set_ssbd_mitigation(bool state) {} #endif extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); + +static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) +{ + switch (parange) { + case 0: return 32; + case 1: return 36; + case 2: return 40; + case 3: return 42; + case 4: return 44; + case 5: return 48; + case 6: return 52; + /* + * A future PE could use a value unknown to the kernel. + * However, by the "D10.1.4 Principles of the ID scheme + * for fields in ID registers", ARM DDI 0487C.a, any new + * value is guaranteed to be higher than what we know already. + * As a safe limit, we return the limit supported by the kernel. + */ + default: return CONFIG_ARM64_PA_BITS; + } +} #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h index 5a5fa47a6b18..3dd3d664c5c5 100644 --- a/arch/arm64/include/asm/device.h +++ b/arch/arm64/include/asm/device.h @@ -23,7 +23,6 @@ struct dev_archdata { #ifdef CONFIG_XEN const struct dma_map_ops *dev_dma_ops; #endif - bool dma_coherent; }; struct pdev_archdata { diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index b7847eb8a7bb..c41f3fb1446c 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -44,10 +44,13 @@ void arch_teardown_dma_ops(struct device *dev); #define arch_teardown_dma_ops arch_teardown_dma_ops #endif -/* do not use this function in a driver */ +/* + * Do not use this function in a driver, it is only provided for + * arch/arm/mm/xen.c, which is used by arm64 as well. 
+ */ static inline bool is_device_dma_coherent(struct device *dev) { - return dev->archdata.dma_coherent; + return dev->dma_coherent; } #endif /* __KERNEL__ */ diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index e73f68569624..fb6609875455 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -20,48 +20,18 @@ #include <asm/page.h> +#define __HAVE_ARCH_HUGE_PTEP_GET static inline pte_t huge_ptep_get(pte_t *ptep) { return READ_ONCE(*ptep); } - - -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, - unsigned long ceiling) -{ - free_pgd_range(tlb, addr, end, floor, ceiling); -} - static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { return 0; } -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - struct hstate *h = hstate_file(file); - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (addr & ~huge_page_mask(h)) - return -EINVAL; - return 0; -} - -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} - -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - static inline void arch_clear_hugepage_flags(struct page *page) { clear_bit(PG_dcache_clean, &page->flags); @@ -70,20 +40,25 @@ static inline void arch_clear_hugepage_flags(struct page *page) extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, struct page *page, int writable); #define arch_make_huge_pte arch_make_huge_pte +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty); +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT extern void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH extern void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +#define __HAVE_ARCH_HUGE_PTE_CLEAR extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz); -#define huge_pte_clear huge_pte_clear extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned long sz); #define set_huge_swap_pte_at set_huge_swap_pte_at diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index b476bc46f0ab..6f602af5263c 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -107,6 +107,7 @@ #define VTCR_EL2_RES1 (1 << 31) #define VTCR_EL2_HD (1 << 22) #define VTCR_EL2_HA (1 << 21) +#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT #define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK #define VTCR_EL2_TG0_MASK TCR_TG0_MASK #define VTCR_EL2_TG0_4K TCR_TG0_4K @@ -120,63 +121,150 @@ #define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA #define VTCR_EL2_SL0_SHIFT 6 #define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT) -#define VTCR_EL2_SL0_LVL1 (1 << VTCR_EL2_SL0_SHIFT) #define VTCR_EL2_T0SZ_MASK 0x3f -#define VTCR_EL2_T0SZ_40B 24 #define VTCR_EL2_VS_SHIFT 19 #define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT) #define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT) +#define 
VTCR_EL2_T0SZ(x) TCR_T0SZ(x) + /* * We configure the Stage-2 page tables to always restrict the IPA space to be * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are * not known to exist and will break with this configuration. * - * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time - * (see hyp-init.S). + * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2(). * * Note that when using 4K pages, we concatenate two first level page tables * together. With 16K pages, we concatenate 16 first level page tables. * - * The magic numbers used for VTTBR_X in this patch can be found in Tables - * D4-23 and D4-25 in ARM DDI 0487A.b. */ -#define VTCR_EL2_T0SZ_IPA VTCR_EL2_T0SZ_40B #define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \ VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1) -#ifdef CONFIG_ARM64_64K_PAGES /* - * Stage2 translation configuration: - * 64kB pages (TG0 = 1) - * 2 level page tables (SL = 1) + * VTCR_EL2:SL0 indicates the entry level for Stage2 translation. + * Interestingly, it depends on the page size. + * See D.10.2.121, VTCR_EL2, in ARM DDI 0487C.a + * + * ----------------------------------------- + * | Entry level | 4K | 16K/64K | + * ------------------------------------------ + * | Level: 0 | 2 | - | + * ------------------------------------------ + * | Level: 1 | 1 | 2 | + * ------------------------------------------ + * | Level: 2 | 0 | 1 | + * ------------------------------------------ + * | Level: 3 | - | 0 | + * ------------------------------------------ + * + * The table roughly translates to : + * + * SL0(PAGE_SIZE, Entry_level) = TGRAN_SL0_BASE - Entry_Level + * + * Where TGRAN_SL0_BASE is a magic number depending on the page size: + * TGRAN_SL0_BASE(4K) = 2 + * TGRAN_SL0_BASE(16K) = 3 + * TGRAN_SL0_BASE(64K) = 3 + * provided we take care of ruling out the unsupported cases and + * Entry_Level = 4 - Number_of_levels. + * */ -#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SL0_LVL1) -#define VTTBR_X_TGRAN_MAGIC 38 +#ifdef CONFIG_ARM64_64K_PAGES + +#define VTCR_EL2_TGRAN VTCR_EL2_TG0_64K +#define VTCR_EL2_TGRAN_SL0_BASE 3UL + #elif defined(CONFIG_ARM64_16K_PAGES) -/* - * Stage2 translation configuration: - * 16kB pages (TG0 = 2) - * 2 level page tables (SL = 1) - */ -#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_16K | VTCR_EL2_SL0_LVL1) -#define VTTBR_X_TGRAN_MAGIC 42 + +#define VTCR_EL2_TGRAN VTCR_EL2_TG0_16K +#define VTCR_EL2_TGRAN_SL0_BASE 3UL + #else /* 4K */ -/* - * Stage2 translation configuration: - * 4kB pages (TG0 = 0) - * 3 level page tables (SL = 1) - */ -#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SL0_LVL1) -#define VTTBR_X_TGRAN_MAGIC 37 + +#define VTCR_EL2_TGRAN VTCR_EL2_TG0_4K +#define VTCR_EL2_TGRAN_SL0_BASE 2UL + #endif -#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS) -#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA) +#define VTCR_EL2_LVLS_TO_SL0(levels) \ + ((VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))) << VTCR_EL2_SL0_SHIFT) +#define VTCR_EL2_SL0_TO_LVLS(sl0) \ + ((sl0) + 4 - VTCR_EL2_TGRAN_SL0_BASE) +#define VTCR_EL2_LVLS(vtcr) \ + VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT) + +#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN) +#define VTCR_EL2_IPA(vtcr) (64 - ((vtcr) & VTCR_EL2_T0SZ_MASK)) + +/* + * ARM VMSAv8-64 defines an algorithm for finding the translation table + * descriptors in section D4.2.8 in ARM DDI 0487C.a. 
+ * + * The algorithm defines the expectations on the translation table + * addresses for each level, based on PAGE_SIZE, entry level + * and the translation table size (T0SZ). The variable "x" in the + * algorithm determines the alignment of a table base address at a given + * level and thus determines the alignment of VTTBR:BADDR for stage2 + * page table entry level. + * Since the number of bits resolved at the entry level could vary + * depending on the T0SZ, the value of "x" is defined based on a + * Magic constant for a given PAGE_SIZE and Entry Level. The + * intermediate levels must be always aligned to the PAGE_SIZE (i.e, + * x = PAGE_SHIFT). + * + * The value of "x" for entry level is calculated as : + * x = Magic_N - T0SZ + * + * where Magic_N is an integer depending on the page size and the entry + * level of the page table as below: + * + * -------------------------------------------- + * | Entry level | 4K 16K 64K | + * -------------------------------------------- + * | Level: 0 (4 levels) | 28 | - | - | + * -------------------------------------------- + * | Level: 1 (3 levels) | 37 | 31 | 25 | + * -------------------------------------------- + * | Level: 2 (2 levels) | 46 | 42 | 38 | + * -------------------------------------------- + * | Level: 3 (1 level) | - | 53 | 51 | + * -------------------------------------------- + * + * We have a magic formula for the Magic_N below: + * + * Magic_N(PAGE_SIZE, Level) = 64 - ((PAGE_SHIFT - 3) * Number_of_levels) + * + * where Number_of_levels = (4 - Level). We are only interested in the + * value for Entry_Level for the stage2 page table. + * + * So, given that T0SZ = (64 - IPA_SHIFT), we can compute 'x' as follows: + * + * x = (64 - ((PAGE_SHIFT - 3) * Number_of_levels)) - (64 - IPA_SHIFT) + * = IPA_SHIFT - ((PAGE_SHIFT - 3) * Number of levels) + * + * Here is one way to explain the Magic Formula: + * + * x = log2(Size_of_Entry_Level_Table) + * + * Since, we can resolve (PAGE_SHIFT - 3) bits at each level, and another + * PAGE_SHIFT bits in the PTE, we have : + * + * Bits_Entry_level = IPA_SHIFT - ((PAGE_SHIFT - 3) * (n - 1) + PAGE_SHIFT) + * = IPA_SHIFT - (PAGE_SHIFT - 3) * n - 3 + * where n = number of levels, and since each pointer is 8bytes, we have: + * + * x = Bits_Entry_Level + 3 + * = IPA_SHIFT - (PAGE_SHIFT - 3) * n + * + * The only constraint here is that, we have to find the number of page table + * levels for a given IPA size (which we do, see stage2_pt_levels()) + */ +#define ARM64_VTTBR_X(ipa, levels) ((ipa) - ((levels) * (PAGE_SHIFT - 3))) #define VTTBR_CNP_BIT (UL(1)) -#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X) #define VTTBR_VMID_SHIFT (UL(48)) #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT) @@ -224,6 +312,13 @@ /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ #define HPFAR_MASK (~UL(0xf)) +/* + * We have + * PAR [PA_Shift - 1 : 12] = PA [PA_Shift - 1 : 12] + * HPFAR [PA_Shift - 9 : 4] = FIPA [PA_Shift - 1 : 12] + */ +#define PAR_TO_HPFAR(par) \ + (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8) #define kvm_arm_exception_type \ {0, "IRQ" }, \ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 102b5a5c47b6..aea01a09eb94 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -30,6 +30,7 @@ #define ARM_EXCEPTION_IRQ 0 #define ARM_EXCEPTION_EL1_SERROR 1 #define ARM_EXCEPTION_TRAP 2 +#define ARM_EXCEPTION_IL 3 /* The hyp-stub will return this for any kvm_call_hyp() call */ 
#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR @@ -72,8 +73,6 @@ extern void __vgic_v3_init_lrs(void); extern u32 __kvm_get_mdcr_el2(void); -extern u32 __init_stage2_translation(void); - /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ #define __hyp_this_cpu_ptr(sym) \ ({ \ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2842bf149029..52fbc823ff8c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -53,7 +53,7 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); -int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext); +int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); struct kvm_arch { @@ -61,11 +61,13 @@ struct kvm_arch { u64 vmid_gen; u32 vmid; - /* 1-level 2nd stage table, protected by kvm->mmu_lock */ + /* stage2 entry level table */ pgd_t *pgd; /* VTTBR value associated with above pgd and vmid */ u64 vttbr; + /* VTCR_EL2 value for this VM */ + u64 vtcr; /* The last vcpu id that ran on each physical CPU */ int __percpu *last_vcpu_ran; @@ -451,13 +453,7 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); -static inline void __cpu_init_stage2(void) -{ - u32 parange = kvm_call_hyp(__init_stage2_translation); - - WARN_ONCE(parange < 40, - "PARange is %d bits, unsupported configuration!", parange); -} +static inline void __cpu_init_stage2(void) {} /* Guest/host FPSIMD coordination helpers */ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); @@ -520,8 +516,12 @@ static inline int kvm_arm_have_ssbd(void) void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu); void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu); +void kvm_set_ipa_limit(void); + #define __KVM_HAVE_ARCH_VM_ALLOC struct kvm *kvm_arch_alloc_vm(void); void kvm_arch_free_vm(struct kvm *kvm); +int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 384c34397619..23aca66767f9 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -155,5 +155,15 @@ void deactivate_traps_vhe_put(void); u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); void __noreturn __hyp_do_panic(unsigned long, ...); +/* + * Must be called from hyp code running at EL2 with an updated VTTBR + * and interrupts disabled. + */ +static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) +{ + write_sysreg(kvm->arch.vtcr, vtcr_el2); + write_sysreg(kvm->arch.vttbr, vttbr_el2); +} + #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 64337afbf124..658657367f2f 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -141,8 +141,16 @@ static inline unsigned long __kern_hyp_va(unsigned long v) * We currently only support a 40bit IPA. 
*/ #define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) + +#define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr) +#define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm)) +#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL)) + +static inline bool kvm_page_empty(void *ptr) +{ + struct page *ptr_page = virt_to_page(ptr); + return page_count(ptr_page) == 1; +} #include <asm/stage2_pgtable.h> @@ -238,12 +246,6 @@ static inline bool kvm_s2pmd_exec(pmd_t *pmdp) return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN); } -static inline bool kvm_page_empty(void *ptr) -{ - struct page *ptr_page = virt_to_page(ptr); - return page_count(ptr_page) == 1; -} - #define hyp_pte_table_empty(ptep) kvm_page_empty(ptep) #ifdef __PAGETABLE_PMD_FOLDED @@ -517,6 +519,30 @@ static inline int hyp_map_aux_data(void) #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr) +/* + * Get the magic number 'x' for VTTBR:BADDR of this KVM instance. + * With v8.2 LVA extensions, 'x' should be a minimum of 6 with + * 52bit IPS. + */ +static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels) +{ + int x = ARM64_VTTBR_X(ipa_shift, levels); + + return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x; +} + +static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels) +{ + unsigned int x = arm64_vttbr_x(ipa_shift, levels); + + return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x); +} + +static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm) +{ + return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)); +} + static inline bool kvm_cpu_has_cnp(void) { return system_supports_cnp(); diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 6bc43889d11e..fce22c4b2f73 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -25,6 +25,9 @@ #define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL2 (2 << 2) +/* Additional SPSR bits not exposed in the UABI */ +#define PSR_IL_BIT (1 << 20) + /* AArch32-specific ptrace requests */ #define COMPAT_PTRACE_GETREGS 12 #define COMPAT_PTRACE_SETREGS 13 diff --git a/arch/arm64/include/asm/stage2_pgtable-nopmd.h b/arch/arm64/include/asm/stage2_pgtable-nopmd.h deleted file mode 100644 index 2656a0fd05a6..000000000000 --- a/arch/arm64/include/asm/stage2_pgtable-nopmd.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2016 - ARM Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#ifndef __ARM64_S2_PGTABLE_NOPMD_H_ -#define __ARM64_S2_PGTABLE_NOPMD_H_ - -#include <asm/stage2_pgtable-nopud.h> - -#define __S2_PGTABLE_PMD_FOLDED - -#define S2_PMD_SHIFT S2_PUD_SHIFT -#define S2_PTRS_PER_PMD 1 -#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT) -#define S2_PMD_MASK (~(S2_PMD_SIZE-1)) - -#define stage2_pud_none(pud) (0) -#define stage2_pud_present(pud) (1) -#define stage2_pud_clear(pud) do { } while (0) -#define stage2_pud_populate(pud, pmd) do { } while (0) -#define stage2_pmd_offset(pud, address) ((pmd_t *)(pud)) - -#define stage2_pmd_free(pmd) do { } while (0) - -#define stage2_pmd_addr_end(addr, end) (end) - -#define stage2_pud_huge(pud) (0) -#define stage2_pmd_table_empty(pmdp) (0) - -#endif diff --git a/arch/arm64/include/asm/stage2_pgtable-nopud.h b/arch/arm64/include/asm/stage2_pgtable-nopud.h deleted file mode 100644 index 5ee87b54ebf3..000000000000 --- a/arch/arm64/include/asm/stage2_pgtable-nopud.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (C) 2016 - ARM Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#ifndef __ARM64_S2_PGTABLE_NOPUD_H_ -#define __ARM64_S2_PGTABLE_NOPUD_H_ - -#define __S2_PGTABLE_PUD_FOLDED - -#define S2_PUD_SHIFT S2_PGDIR_SHIFT -#define S2_PTRS_PER_PUD 1 -#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT) -#define S2_PUD_MASK (~(S2_PUD_SIZE-1)) - -#define stage2_pgd_none(pgd) (0) -#define stage2_pgd_present(pgd) (1) -#define stage2_pgd_clear(pgd) do { } while (0) -#define stage2_pgd_populate(pgd, pud) do { } while (0) - -#define stage2_pud_offset(pgd, address) ((pud_t *)(pgd)) - -#define stage2_pud_free(x) do { } while (0) - -#define stage2_pud_addr_end(addr, end) (end) -#define stage2_pud_table_empty(pmdp) (0) - -#endif diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index 8b68099348e5..d352f6df8d2c 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -19,9 +19,17 @@ #ifndef __ARM64_S2_PGTABLE_H_ #define __ARM64_S2_PGTABLE_H_ +#include <linux/hugetlb.h> #include <asm/pgtable.h> /* + * PGDIR_SHIFT determines the size a top-level page table entry can map + * and depends on the number of levels in the page table. Compute the + * PGDIR_SHIFT for a given number of levels. + */ +#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls)) + +/* * The hardware supports concatenation of up to 16 tables at stage2 entry level * and we use the feature whenever possible. * @@ -29,112 +37,208 @@ * On arm64, the smallest PAGE_SIZE supported is 4k, which means * (PAGE_SHIFT - 3) > 4 holds for all page sizes. * This implies, the total number of page table levels at stage2 expected - * by the hardware is actually the number of levels required for (KVM_PHYS_SHIFT - 4) + * by the hardware is actually the number of levels required for (IPA_SHIFT - 4) * in normal translations(e.g, stage1), since we cannot have another level in - * the range (KVM_PHYS_SHIFT, KVM_PHYS_SHIFT - 4). 
+ * the range (IPA_SHIFT, IPA_SHIFT - 4). */ -#define STAGE2_PGTABLE_LEVELS ARM64_HW_PGTABLE_LEVELS(KVM_PHYS_SHIFT - 4) +#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4) +#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr) -/* - * With all the supported VA_BITs and 40bit guest IPA, the following condition - * is always true: - * - * STAGE2_PGTABLE_LEVELS <= CONFIG_PGTABLE_LEVELS - * - * We base our stage-2 page table walker helpers on this assumption and - * fall back to using the host version of the helper wherever possible. - * i.e, if a particular level is not folded (e.g, PUD) at stage2, we fall back - * to using the host version, since it is guaranteed it is not folded at host. - * - * If the condition breaks in the future, we can rearrange the host level - * definitions and reuse them for stage2. Till then... - */ -#if STAGE2_PGTABLE_LEVELS > CONFIG_PGTABLE_LEVELS -#error "Unsupported combination of guest IPA and host VA_BITS." -#endif - -/* S2_PGDIR_SHIFT is the size mapped by top-level stage2 entry */ -#define S2_PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS) -#define S2_PGDIR_SIZE (_AC(1, UL) << S2_PGDIR_SHIFT) -#define S2_PGDIR_MASK (~(S2_PGDIR_SIZE - 1)) +/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */ +#define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm)) +#define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm)) +#define stage2_pgdir_mask(kvm) ~(stage2_pgdir_size(kvm) - 1) /* * The number of PTRS across all concatenated stage2 tables given by the * number of bits resolved at the initial level. + * If we force more levels than necessary, we may have (stage2_pgdir_shift > IPA), + * in which case, stage2_pgd_ptrs will have one entry. */ -#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT)) +#define pgd_ptrs_shift(ipa, pgdir_shift) \ + ((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0) +#define __s2_pgd_ptrs(ipa, lvls) \ + (1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls)))) +#define __s2_pgd_size(ipa, lvls) (__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t)) + +#define stage2_pgd_ptrs(kvm) __s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)) +#define stage2_pgd_size(kvm) __s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm)) /* - * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation - * levels in addition to the PGD. + * kvm_mmmu_cache_min_pages() is the number of pages required to install + * a stage-2 translation. We pre-allocate the entry level page table at + * the VM creation. 
*/ -#define KVM_MMU_CACHE_MIN_PAGES (STAGE2_PGTABLE_LEVELS - 1) +#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1) - -#if STAGE2_PGTABLE_LEVELS > 3 +/* Stage2 PUD definitions when the level is present */ +static inline bool kvm_stage2_has_pud(struct kvm *kvm) +{ + return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3); +} #define S2_PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1) -#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT) +#define S2_PUD_SIZE (1UL << S2_PUD_SHIFT) #define S2_PUD_MASK (~(S2_PUD_SIZE - 1)) -#define stage2_pgd_none(pgd) pgd_none(pgd) -#define stage2_pgd_clear(pgd) pgd_clear(pgd) -#define stage2_pgd_present(pgd) pgd_present(pgd) -#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud) -#define stage2_pud_offset(pgd, address) pud_offset(pgd, address) -#define stage2_pud_free(pud) pud_free(NULL, pud) +static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd) +{ + if (kvm_stage2_has_pud(kvm)) + return pgd_none(pgd); + else + return 0; +} -#define stage2_pud_table_empty(pudp) kvm_page_empty(pudp) +static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp) +{ + if (kvm_stage2_has_pud(kvm)) + pgd_clear(pgdp); +} -static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end) +static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd) { - phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK; + if (kvm_stage2_has_pud(kvm)) + return pgd_present(pgd); + else + return 1; +} - return (boundary - 1 < end - 1) ? boundary : end; +static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud) +{ + if (kvm_stage2_has_pud(kvm)) + pgd_populate(NULL, pgd, pud); +} + +static inline pud_t *stage2_pud_offset(struct kvm *kvm, + pgd_t *pgd, unsigned long address) +{ + if (kvm_stage2_has_pud(kvm)) + return pud_offset(pgd, address); + else + return (pud_t *)pgd; } -#endif /* STAGE2_PGTABLE_LEVELS > 3 */ +static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud) +{ + if (kvm_stage2_has_pud(kvm)) + pud_free(NULL, pud); +} +static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp) +{ + if (kvm_stage2_has_pud(kvm)) + return kvm_page_empty(pudp); + else + return false; +} -#if STAGE2_PGTABLE_LEVELS > 2 +static inline phys_addr_t +stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) +{ + if (kvm_stage2_has_pud(kvm)) { + phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK; + + return (boundary - 1 < end - 1) ? 
boundary : end; + } else { + return end; + } +} + +/* Stage2 PMD definitions when the level is present */ +static inline bool kvm_stage2_has_pmd(struct kvm *kvm) +{ + return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2); +} #define S2_PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2) -#define S2_PMD_SIZE (_AC(1, UL) << S2_PMD_SHIFT) +#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT) #define S2_PMD_MASK (~(S2_PMD_SIZE - 1)) -#define stage2_pud_none(pud) pud_none(pud) -#define stage2_pud_clear(pud) pud_clear(pud) -#define stage2_pud_present(pud) pud_present(pud) -#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd) -#define stage2_pmd_offset(pud, address) pmd_offset(pud, address) -#define stage2_pmd_free(pmd) pmd_free(NULL, pmd) +static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud) +{ + if (kvm_stage2_has_pmd(kvm)) + return pud_none(pud); + else + return 0; +} + +static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud) +{ + if (kvm_stage2_has_pmd(kvm)) + pud_clear(pud); +} -#define stage2_pud_huge(pud) pud_huge(pud) -#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp) +static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud) +{ + if (kvm_stage2_has_pmd(kvm)) + return pud_present(pud); + else + return 1; +} -static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd) { - phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK; + if (kvm_stage2_has_pmd(kvm)) + pud_populate(NULL, pud, pmd); +} - return (boundary - 1 < end - 1) ? boundary : end; +static inline pmd_t *stage2_pmd_offset(struct kvm *kvm, + pud_t *pud, unsigned long address) +{ + if (kvm_stage2_has_pmd(kvm)) + return pmd_offset(pud, address); + else + return (pmd_t *)pud; } -#endif /* STAGE2_PGTABLE_LEVELS > 2 */ +static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd) +{ + if (kvm_stage2_has_pmd(kvm)) + pmd_free(NULL, pmd); +} + +static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud) +{ + if (kvm_stage2_has_pmd(kvm)) + return pud_huge(pud); + else + return 0; +} + +static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp) +{ + if (kvm_stage2_has_pmd(kvm)) + return kvm_page_empty(pmdp); + else + return 0; +} -#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep) +static inline phys_addr_t +stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) +{ + if (kvm_stage2_has_pmd(kvm)) { + phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK; -#if STAGE2_PGTABLE_LEVELS == 2 -#include <asm/stage2_pgtable-nopmd.h> -#elif STAGE2_PGTABLE_LEVELS == 3 -#include <asm/stage2_pgtable-nopud.h> -#endif + return (boundary - 1 < end - 1) ? boundary : end; + } else { + return end; + } +} +static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep) +{ + return kvm_page_empty(ptep); +} -#define stage2_pgd_index(addr) (((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) +static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr) +{ + return (((addr) >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1)); +} -static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end) +static inline phys_addr_t +stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) { - phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK; + phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm); return (boundary - 1 < end - 1) ? 
boundary : end; } diff --git a/arch/arm64/include/asm/stat.h b/arch/arm64/include/asm/stat.h index eab738019707..397c6ccd04e7 100644 --- a/arch/arm64/include/asm/stat.h +++ b/arch/arm64/include/asm/stat.h @@ -20,7 +20,7 @@ #ifdef CONFIG_COMPAT -#include <linux/compat_time.h> +#include <linux/time.h> #include <asm/compat.h> /* diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index dd95d33a5bd5..03a6c256b7ec 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -16,6 +16,7 @@ #ifndef __ASM_STRING_H #define __ASM_STRING_H +#ifndef CONFIG_KASAN #define __HAVE_ARCH_STRRCHR extern char *strrchr(const char *, int c); @@ -34,6 +35,13 @@ extern __kernel_size_t strlen(const char *); #define __HAVE_ARCH_STRNLEN extern __kernel_size_t strnlen(const char *, __kernel_size_t); +#define __HAVE_ARCH_MEMCMP +extern int memcmp(const void *, const void *, size_t); + +#define __HAVE_ARCH_MEMCHR +extern void *memchr(const void *, int, __kernel_size_t); +#endif + #define __HAVE_ARCH_MEMCPY extern void *memcpy(void *, const void *, __kernel_size_t); extern void *__memcpy(void *, const void *, __kernel_size_t); @@ -42,16 +50,10 @@ extern void *__memcpy(void *, const void *, __kernel_size_t); extern void *memmove(void *, const void *, __kernel_size_t); extern void *__memmove(void *, const void *, __kernel_size_t); -#define __HAVE_ARCH_MEMCHR -extern void *memchr(const void *, int, __kernel_size_t); - #define __HAVE_ARCH_MEMSET extern void *memset(void *, int, __kernel_size_t); extern void *__memset(void *, int, __kernel_size_t); -#define __HAVE_ARCH_MEMCMP -extern int memcmp(const void *, const void *, size_t); - #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE #define __HAVE_ARCH_MEMCPY_FLUSHCACHE void memcpy_flushcache(void *dst, const void *src, size_t cnt); diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h index 28893a0b141d..0e2a0ecaf484 100644 --- a/arch/arm64/include/asm/system_misc.h +++ b/arch/arm64/include/asm/system_misc.h @@ -33,7 +33,8 @@ void die(const char *msg, struct pt_regs *regs, int err); struct siginfo; void arm64_notify_die(const char *str, struct pt_regs *regs, - struct siginfo *info, int err); + int signo, int sicode, void __user *addr, + int err); void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index c320f3bf6c57..f9c1aa6167d2 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -37,8 +37,9 @@ void register_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook); void force_signal_inject(int signal, int code, unsigned long address); void arm64_notify_segfault(unsigned long addr); -void arm64_force_sig_info(struct siginfo *info, const char *str, - struct task_struct *tsk); +void arm64_force_sig_fault(int signo, int code, void __user *addr, const char *str); +void arm64_force_sig_mceerr(int code, void __user *addr, short lsb, const char *str); +void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr, const char *str); /* * Move regs->pc to next instruction and do necessary setup before it diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index e0d0f5b856e7..b13ca091f833 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -18,11 +18,11 @@ #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_GETPGRP -#define 
__ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_COMPAT_SYS_SENDFILE +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index 198afbf0688f..6c5adf458690 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild @@ -19,3 +19,4 @@ generic-y += swab.h generic-y += termbits.h generic-y += termios.h generic-y += types.h +generic-y += siginfo.h diff --git a/arch/arm64/include/uapi/asm/siginfo.h b/arch/arm64/include/uapi/asm/siginfo.h deleted file mode 100644 index 574d12f86039..000000000000 --- a/arch/arm64/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#ifndef __ASM_SIGINFO_H -#define __ASM_SIGINFO_H - -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h index 5072cbd15c82..dae1584cf017 100644 --- a/arch/arm64/include/uapi/asm/unistd.h +++ b/arch/arm64/include/uapi/asm/unistd.h @@ -16,5 +16,6 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_NEW_STAT #include <asm-generic/unistd.h> diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index d894a20b70b2..72f63a59b008 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -44,20 +44,23 @@ EXPORT_SYMBOL(__arch_copy_in_user); EXPORT_SYMBOL(memstart_addr); /* string / mem functions */ +#ifndef CONFIG_KASAN EXPORT_SYMBOL(strchr); EXPORT_SYMBOL(strrchr); EXPORT_SYMBOL(strcmp); EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strnlen); +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(memchr); +#endif + EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__memset); EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memmove); -EXPORT_SYMBOL(memchr); -EXPORT_SYMBOL(memcmp); /* atomic bitops */ EXPORT_SYMBOL(set_bit); diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 06ca574495af..d7bb6aefae0a 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -210,13 +210,6 @@ NOKPROBE_SYMBOL(call_step_hook); static void send_user_sigtrap(int si_code) { struct pt_regs *regs = current_pt_regs(); - siginfo_t info; - - clear_siginfo(&info); - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void __user *)instruction_pointer(regs); if (WARN_ON(!user_mode(regs))) return; @@ -224,7 +217,9 @@ static void send_user_sigtrap(int si_code) if (interrupts_enabled(regs)) local_irq_enable(); - arm64_force_sig_info(&info, "User debug trap", current); + arm64_force_sig_fault(SIGTRAP, si_code, + (void __user 
*)instruction_pointer(regs), + "User debug trap"); } static int single_step_handler(unsigned long addr, unsigned int esr, diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 58c53bc96928..5ebe73b69961 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -842,7 +842,6 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) */ asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) { - siginfo_t info; unsigned int si_code = FPE_FLTUNK; if (esr & ESR_ELx_FP_EXC_TFV) { @@ -858,12 +857,9 @@ asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) si_code = FPE_FLTRES; } - clear_siginfo(&info); - info.si_signo = SIGFPE; - info.si_code = si_code; - info.si_addr = (void __user *)instruction_pointer(regs); - - send_sig_info(SIGFPE, &info, current); + send_sig_fault(SIGFPE, si_code, + (void __user *)instruction_pointer(regs), + current); } void fpsimd_thread_switch(struct task_struct *next) diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c index 0e2ea1c78542..bb85e2f4603f 100644 --- a/arch/arm64/kernel/pci.c +++ b/arch/arm64/kernel/pci.c @@ -165,16 +165,15 @@ static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci) /* Interface called from ACPI code to setup PCI host controller */ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) { - int node = acpi_get_node(root->device->handle); struct acpi_pci_generic_root_info *ri; struct pci_bus *bus, *child; struct acpi_pci_root_ops *root_ops; - ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node); + ri = kzalloc(sizeof(*ri), GFP_KERNEL); if (!ri) return NULL; - root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node); + root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); if (!root_ops) { kfree(ri); return NULL; diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 6219486fa25f..1710a2d01669 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -182,13 +182,7 @@ static void ptrace_hbptriggered(struct perf_event *bp, struct pt_regs *regs) { struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); - siginfo_t info; - - clear_siginfo(&info); - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_HWBKPT; - info.si_addr = (void __user *)(bkpt->trigger); + const char *desc = "Hardware breakpoint trap (ptrace)"; #ifdef CONFIG_COMPAT if (is_compat_task()) { @@ -208,10 +202,14 @@ static void ptrace_hbptriggered(struct perf_event *bp, break; } } - force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger); + arm64_force_sig_ptrace_errno_trap(si_errno, + (void __user *)bkpt->trigger, + desc); } #endif - arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current); + arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, + (void __user *)(bkpt->trigger), + desc); } /* diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 25fcd22a4bb2..96b8f2f51ab2 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -602,7 +602,7 @@ static void __init of_parse_and_init_cpus(void) { struct device_node *dn; - for_each_node_by_type(dn, "cpu") { + for_each_of_cpu_node(dn) { u64 hwid = of_get_cpu_mpidr(dn); if (hwid == INVALID_HWID) diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index a6109825eeb9..32653d156747 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -68,8 +68,8 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags) */ long compat_arm_syscall(struct pt_regs *regs) { - siginfo_t 
info; unsigned int no = regs->regs[7]; + void __user *addr; switch (no) { /* @@ -112,13 +112,10 @@ long compat_arm_syscall(struct pt_regs *regs) break; } - clear_siginfo(&info); - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLTRP; - info.si_addr = (void __user *)instruction_pointer(regs) - - (compat_thumb_mode(regs) ? 2 : 4); + addr = (void __user *)instruction_pointer(regs) - + (compat_thumb_mode(regs) ? 2 : 4); - arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no); + arm64_notify_die("Oops - bad compat syscall(2)", regs, + SIGILL, ILL_ILLTRP, addr, no); return 0; } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 4066da7f1e5e..5f4d9acb32f5 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -224,24 +224,19 @@ void die(const char *str, struct pt_regs *regs, int err) do_exit(SIGSEGV); } -static bool show_unhandled_signals_ratelimited(void) +static void arm64_show_signal(int signo, const char *str) { static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); - return show_unhandled_signals && __ratelimit(&rs); -} - -void arm64_force_sig_info(struct siginfo *info, const char *str, - struct task_struct *tsk) -{ + struct task_struct *tsk = current; unsigned int esr = tsk->thread.fault_code; struct pt_regs *regs = task_pt_regs(tsk); - if (!unhandled_signal(tsk, info->si_signo)) - goto send_sig; - - if (!show_unhandled_signals_ratelimited()) - goto send_sig; + /* Leave if the signal won't be shown */ + if (!show_unhandled_signals || + !unhandled_signal(tsk, signo) || + !__ratelimit(&rs)) + return; pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk)); if (esr) @@ -251,19 +246,39 @@ void arm64_force_sig_info(struct siginfo *info, const char *str, print_vma_addr(KERN_CONT " in ", regs->pc); pr_cont("\n"); __show_regs(regs); +} + +void arm64_force_sig_fault(int signo, int code, void __user *addr, + const char *str) +{ + arm64_show_signal(signo, str); + force_sig_fault(signo, code, addr, current); +} -send_sig: - force_sig_info(info->si_signo, info, tsk); +void arm64_force_sig_mceerr(int code, void __user *addr, short lsb, + const char *str) +{ + arm64_show_signal(SIGBUS, str); + force_sig_mceerr(code, addr, lsb, current); +} + +void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr, + const char *str) +{ + arm64_show_signal(SIGTRAP, str); + force_sig_ptrace_errno_trap(errno, addr); } void arm64_notify_die(const char *str, struct pt_regs *regs, - struct siginfo *info, int err) + int signo, int sicode, void __user *addr, + int err) { if (user_mode(regs)) { WARN_ON(regs != current_pt_regs()); current->thread.fault_address = 0; current->thread.fault_code = err; - arm64_force_sig_info(info, str, current); + + arm64_force_sig_fault(signo, sicode, addr, str); } else { die(str, regs, err); } @@ -350,15 +365,12 @@ exit: void force_signal_inject(int signal, int code, unsigned long address) { - siginfo_t info; const char *desc; struct pt_regs *regs = current_pt_regs(); if (WARN_ON(!user_mode(regs))) return; - clear_siginfo(&info); - switch (signal) { case SIGILL: desc = "undefined instruction"; @@ -377,12 +389,7 @@ void force_signal_inject(int signal, int code, unsigned long address) signal = SIGKILL; } - info.si_signo = signal; - info.si_errno = 0; - info.si_code = code; - info.si_addr = (void __user *)address; - - arm64_notify_die(desc, regs, &info, 0); + arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0); } /* @@ -799,19 +806,13 @@ asmlinkage void 
bad_mode(struct pt_regs *regs, int reason, unsigned int esr) */ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) { - siginfo_t info; void __user *pc = (void __user *)instruction_pointer(regs); - clear_siginfo(&info); - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = pc; - current->thread.fault_address = 0; current->thread.fault_code = esr; - arm64_force_sig_info(&info, "Bad EL0 synchronous exception", current); + arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc, + "Bad EL0 synchronous exception"); } #ifdef CONFIG_VMAP_STACK diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index ab29c06a7d4b..03b00007553d 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -183,7 +183,6 @@ SECTIONS INIT_SETUP(16) INIT_CALLS CON_INITCALL - SECURITY_INITCALL INIT_RAM_FS *(.init.rodata.* .init.bss) /* from the EFI stub */ } diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index a6c9fbaeaefc..dd436a50fce7 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -391,15 +391,15 @@ int __attribute_const__ kvm_target_cpu(void) return KVM_ARM_TARGET_CORTEX_A53; case ARM_CPU_PART_CORTEX_A57: return KVM_ARM_TARGET_CORTEX_A57; - }; + } break; case ARM_CPU_IMP_APM: switch (part_number) { case APM_CPU_PART_POTENZA: return KVM_ARM_TARGET_XGENE_POTENZA; - }; + } break; - }; + } /* Return a default generic target */ return KVM_ARM_TARGET_GENERIC_V8; diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index e5e741bfffe1..35a81bebd02b 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -284,6 +284,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, */ run->exit_reason = KVM_EXIT_FAIL_ENTRY; return 0; + case ARM_EXCEPTION_IL: + /* + * We attempted an illegal exception return. Guest state must + * have been corrupted somehow. Give up. + */ + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + return -EINVAL; default: kvm_pr_unimpl("Unsupported exception type: %d", exception_index); diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 2fabc2dc1966..82d1904328ad 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -19,7 +19,6 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o -obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o # KVM code is run at a different exception code with a different map, so # compiler instrumentation that inserts callbacks or checks into the code may diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 24b4fbafe3e4..b1f14f736962 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -162,6 +162,20 @@ el1_error: mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit +el2_sync: + /* Check for illegal exception return, otherwise panic */ + mrs x0, spsr_el2 + + /* if this was something else, then panic! 
*/ + tst x0, #PSR_IL_BIT + b.eq __hyp_panic + + /* Let's attempt a recovery from the illegal exception return */ + get_vcpu_ptr x1, x0 + mov x0, #ARM_EXCEPTION_IL + b __guest_exit + + el2_error: ldp x0, x1, [sp], #16 @@ -240,7 +254,7 @@ ENTRY(__kvm_hyp_vector) invalid_vect el2t_fiq_invalid // FIQ EL2t invalid_vect el2t_error_invalid // Error EL2t - invalid_vect el2h_sync_invalid // Synchronous EL2h + valid_vect el2_sync // Synchronous EL2h invalid_vect el2h_irq_invalid // IRQ EL2h invalid_vect el2h_fiq_invalid // FIQ EL2h valid_vect el2_error // Error EL2h diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c deleted file mode 100644 index 603e1ee83e89..000000000000 --- a/arch/arm64/kvm/hyp/s2-setup.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (C) 2016 - ARM Ltd - * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/types.h> -#include <asm/kvm_arm.h> -#include <asm/kvm_asm.h> -#include <asm/kvm_hyp.h> - -u32 __hyp_text __init_stage2_translation(void) -{ - u64 val = VTCR_EL2_FLAGS; - u64 parange; - u64 tmp; - - /* - * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS - * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while - * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2... - */ - parange = read_sysreg(id_aa64mmfr0_el1) & 7; - if (parange > ID_AA64MMFR0_PARANGE_MAX) - parange = ID_AA64MMFR0_PARANGE_MAX; - val |= parange << 16; - - /* Compute the actual PARange... */ - switch (parange) { - case 0: - parange = 32; - break; - case 1: - parange = 36; - break; - case 2: - parange = 40; - break; - case 3: - parange = 42; - break; - case 4: - parange = 44; - break; - case 5: - default: - parange = 48; - break; - } - - /* - * ... and clamp it to 40 bits, unless we have some braindead - * HW that implements less than that. In all cases, we'll - * return that value for the rest of the kernel to decide what - * to do. - */ - val |= 64 - (parange > 40 ? 40 : parange); - - /* - * Check the availability of Hardware Access Flag / Dirty Bit - * Management in ID_AA64MMFR1_EL1 and enable the feature in VTCR_EL2. - */ - tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_HADBS_SHIFT) & 0xf; - if (tmp) - val |= VTCR_EL2_HA; - - /* - * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS - * bit in VTCR_EL2. - */ - tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf; - val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ? 
- VTCR_EL2_VS_16BIT : - VTCR_EL2_VS_8BIT; - - write_sysreg(val, vtcr_el2); - - return parange; -} diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index ca46153d7915..7cc175c88a37 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -198,7 +198,7 @@ void deactivate_traps_vhe_put(void) static void __hyp_text __activate_vm(struct kvm *kvm) { - write_sysreg(kvm->arch.vttbr, vttbr_el2); + __load_guest_stage2(kvm); } static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) @@ -263,7 +263,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) return false; /* Translation failed, back to guest */ /* Convert PAR to HPFAR format */ - *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4; + *hpfar = PAR_TO_HPFAR(tmp); return true; } diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 76d016b446b2..68d6f7c3b237 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -152,8 +152,25 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) static void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt) { + u64 pstate = ctxt->gp_regs.regs.pstate; + u64 mode = pstate & PSR_AA32_MODE_MASK; + + /* + * Safety check to ensure we're setting the CPU up to enter the guest + * in a less privileged mode. + * + * If we are attempting a return to EL2 or higher in AArch64 state, + * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that + * we'll take an illegal exception state exception immediately after + * the ERET to the guest. Attempts to return to AArch32 Hyp will + * result in an illegal exception return because EL2's execution state + * is determined by SCR_EL3.RW. + */ + if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t) + pstate = PSR_MODE_EL2h | PSR_IL_BIT; + write_sysreg_el2(ctxt->gp_regs.regs.pc, elr); - write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr); + write_sysreg_el2(pstate, spsr); if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2); diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 131c7772703c..4dbd9c69a96d 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -30,7 +30,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so * let's flip TGE before executing the TLB operation. 
*/ - write_sysreg(kvm->arch.vttbr, vttbr_el2); + __load_guest_stage2(kvm); val = read_sysreg(hcr_el2); val &= ~HCR_TGE; write_sysreg(val, hcr_el2); @@ -39,7 +39,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm) { - write_sysreg(kvm->arch.vttbr, vttbr_el2); + __load_guest_stage2(kvm); isb(); } diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index e37c78bbe1ca..b72a3dd56204 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -26,6 +26,7 @@ #include <kvm/arm_arch_timer.h> +#include <asm/cpufeature.h> #include <asm/cputype.h> #include <asm/ptrace.h> #include <asm/kvm_arm.h> @@ -33,6 +34,9 @@ #include <asm/kvm_coproc.h> #include <asm/kvm_mmu.h> +/* Maximum phys_shift supported for any VM on this host */ +static u32 kvm_ipa_limit; + /* * ARMv8 Reset Values */ @@ -55,12 +59,12 @@ static bool cpu_has_32bit_el1(void) } /** - * kvm_arch_dev_ioctl_check_extension + * kvm_arch_vm_ioctl_check_extension * * We currently assume that the number of HW registers is uniform * across all CPUs (see cpuinfo_sanity_check). */ -int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) +int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; @@ -82,9 +86,11 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext) break; case KVM_CAP_SET_GUEST_DEBUG: case KVM_CAP_VCPU_ATTRIBUTES: - case KVM_CAP_VCPU_EVENTS: r = 1; break; + case KVM_CAP_ARM_VM_IPA_SIZE: + r = kvm_ipa_limit; + break; default: r = 0; } @@ -133,3 +139,99 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset timer */ return kvm_timer_vcpu_reset(vcpu); } + +void kvm_set_ipa_limit(void) +{ + unsigned int ipa_max, pa_max, va_max, parange; + + parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7; + pa_max = id_aa64mmfr0_parange_to_phys_shift(parange); + + /* Clamp the IPA limit to the PA size supported by the kernel */ + ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max; + /* + * Since our stage2 table is dependent on the stage1 page table code, + * we must always honor the following condition: + * + * Number of levels in Stage1 >= Number of levels in Stage2. + * + * So clamp the ipa limit further down to limit the number of levels. + * Since we can concatenate upto 16 tables at entry level, we could + * go upto 4bits above the maximum VA addressible with the current + * number of levels. + */ + va_max = PGDIR_SHIFT + PAGE_SHIFT - 3; + va_max += 4; + + if (va_max < ipa_max) + ipa_max = va_max; + + /* + * If the final limit is lower than the real physical address + * limit of the CPUs, report the reason. + */ + if (ipa_max < pa_max) + pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n", + (va_max < pa_max) ? "Virtual" : "Physical"); + + WARN(ipa_max < KVM_PHYS_SHIFT, + "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max); + kvm_ipa_limit = ipa_max; + kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit); +} + +/* + * Configure the VTCR_EL2 for this VM. The VTCR value is common + * across all the physical CPUs on the system. We use system wide + * sanitised values to fill in different fields, except for Hardware + * Management of Access Flags. HA Flag is set unconditionally on + * all CPUs, as it is safe to run with or without the feature and + * the bit is RES0 on CPUs that don't support it. 
+ */ +int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) +{ + u64 vtcr = VTCR_EL2_FLAGS; + u32 parange, phys_shift; + u8 lvls; + + if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) + return -EINVAL; + + phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); + if (phys_shift) { + if (phys_shift > kvm_ipa_limit || + phys_shift < 32) + return -EINVAL; + } else { + phys_shift = KVM_PHYS_SHIFT; + } + + parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7; + if (parange > ID_AA64MMFR0_PARANGE_MAX) + parange = ID_AA64MMFR0_PARANGE_MAX; + vtcr |= parange << VTCR_EL2_PS_SHIFT; + + vtcr |= VTCR_EL2_T0SZ(phys_shift); + /* + * Use a minimum 2 level page table to prevent splitting + * host PMD huge pages at stage2. + */ + lvls = stage2_pgtable_levels(phys_shift); + if (lvls < 2) + lvls = 2; + vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls); + + /* + * Enable the Hardware Access Flag management, unconditionally + * on all CPUs. The features is RES0 on CPUs without the support + * and must be ignored by the CPUs. + */ + vtcr |= VTCR_EL2_HA; + + /* Set the vmid bits */ + vtcr |= (kvm_get_vmid_bits() == 16) ? + VTCR_EL2_VS_16BIT : + VTCR_EL2_VS_8BIT; + kvm->arch.vtcr = vtcr; + return 0; +} diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S index 4444c1d25f4b..0f164a4baf52 100644 --- a/arch/arm64/lib/memchr.S +++ b/arch/arm64/lib/memchr.S @@ -30,7 +30,7 @@ * Returns: * x0 - address of first occurrence of 'c' or 0 */ -ENTRY(memchr) +WEAK(memchr) and w1, w1, #0xff 1: subs x2, x2, #1 b.mi 2f diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S index 2a4e239bd17a..fb295f52e9f8 100644 --- a/arch/arm64/lib/memcmp.S +++ b/arch/arm64/lib/memcmp.S @@ -58,7 +58,7 @@ pos .req x11 limit_wd .req x12 mask .req x13 -ENTRY(memcmp) +WEAK(memcmp) cbz limit, .Lret0 eor tmp1, src1, src2 tst tmp1, #7 diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S index dae0cf5591f9..7c83091d1bcd 100644 --- a/arch/arm64/lib/strchr.S +++ b/arch/arm64/lib/strchr.S @@ -29,7 +29,7 @@ * Returns: * x0 - address of first occurrence of 'c' or 0 */ -ENTRY(strchr) +WEAK(strchr) and w1, w1, #0xff 1: ldrb w2, [x0], #1 cmp w2, w1 diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S index 471fe61760ef..7d5d15398bfb 100644 --- a/arch/arm64/lib/strcmp.S +++ b/arch/arm64/lib/strcmp.S @@ -60,7 +60,7 @@ tmp3 .req x9 zeroones .req x10 pos .req x11 -ENTRY(strcmp) +WEAK(strcmp) eor tmp1, src1, src2 mov zeroones, #REP8_01 tst tmp1, #7 diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S index 55ccc8e24c08..8e0b14205dcb 100644 --- a/arch/arm64/lib/strlen.S +++ b/arch/arm64/lib/strlen.S @@ -56,7 +56,7 @@ pos .req x12 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 -ENTRY(strlen) +WEAK(strlen) mov zeroones, #REP8_01 bic src, srcin, #15 ands tmp1, srcin, #15 diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S index e267044761c6..66bd145935d9 100644 --- a/arch/arm64/lib/strncmp.S +++ b/arch/arm64/lib/strncmp.S @@ -64,7 +64,7 @@ limit_wd .req x13 mask .req x14 endloop .req x15 -ENTRY(strncmp) +WEAK(strncmp) cbz limit, .Lret0 eor tmp1, src1, src2 mov zeroones, #REP8_01 diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S index eae38da6e0bb..355be04441fe 100644 --- a/arch/arm64/lib/strnlen.S +++ b/arch/arm64/lib/strnlen.S @@ -59,7 +59,7 @@ limit_wd .req x14 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define REP8_80 0x8080808080808080 -ENTRY(strnlen) +WEAK(strnlen) cbz limit, .Lhit_limit mov zeroones, #REP8_01 bic src, srcin, #15 diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S 
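The arch/arm64/kvm/reset.c hunk above makes the stage-2 input address size a per-VM property: kvm_set_ipa_limit() computes the host ceiling once at init, KVM_CAP_ARM_VM_IPA_SIZE reports it, and kvm_arm_setup_stage2() folds the PA shift requested in the low bits of the KVM_CREATE_VM machine type into the cached VTCR_EL2 value. A minimal userspace sketch of that flow, not part of the patch, using the uapi names from the hunk plus the standard /dev/kvm ioctls (the helper create_vm_with_ipa() is invented; error handling omitted):

/* Sketch: ask for a 40-bit guest physical address space if the host allows it. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_vm_with_ipa(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	/* Maximum IPA shift in bits, or 0 on kernels without the capability. */
	int ipa_max = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
	unsigned long type = 0;	/* 0 keeps the legacy 40-bit default */

	if (ipa_max >= 40)
		type = KVM_VM_TYPE_ARM_IPA_SIZE(40);

	return ioctl(kvm, KVM_CREATE_VM, type);	/* fd of the new VM */
}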
index f8e2784d5752..ea84924d5990 100644 --- a/arch/arm64/lib/strrchr.S +++ b/arch/arm64/lib/strrchr.S @@ -29,7 +29,7 @@ * Returns: * x0 - address of last occurrence of 'c' or 0 */ -ENTRY(strrchr) +WEAK(strrchr) mov x3, #0 and w1, w1, #0xff 1: ldrb w2, [x0], #1 diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 072c51fb07d7..d190612b8f33 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/genalloc.h> #include <linux/dma-direct.h> +#include <linux/dma-noncoherent.h> #include <linux/dma-contiguous.h> #include <linux/vmalloc.h> #include <linux/swiotlb.h> @@ -32,16 +33,6 @@ #include <asm/cacheflush.h> -static int swiotlb __ro_after_init; - -static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, - bool coherent) -{ - if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE)) - return pgprot_writecombine(prot); - return prot; -} - static struct gen_pool *atomic_pool __ro_after_init; #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K @@ -91,18 +82,16 @@ static int __free_from_pool(void *start, size_t size) return 1; } -static void *__dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - unsigned long attrs) +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flags, unsigned long attrs) { struct page *page; void *ptr, *coherent_ptr; - bool coherent = is_device_dma_coherent(dev); - pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false); + pgprot_t prot = pgprot_writecombine(PAGE_KERNEL); size = PAGE_ALIGN(size); - if (!coherent && !gfpflags_allow_blocking(flags)) { + if (!gfpflags_allow_blocking(flags)) { struct page *page = NULL; void *addr = __alloc_from_pool(size, &page, flags); @@ -112,14 +101,10 @@ static void *__dma_alloc(struct device *dev, size_t size, return addr; } - ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs); + ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs); if (!ptr) goto no_mem; - /* no need for non-cacheable mapping if coherent */ - if (coherent) - return ptr; - /* remove any dirty cache lines on the kernel alias */ __dma_flush_area(ptr, size); @@ -133,130 +118,57 @@ static void *__dma_alloc(struct device *dev, size_t size, return coherent_ptr; no_map: - swiotlb_free(dev, size, ptr, *dma_handle, attrs); + dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs); no_mem: return NULL; } -static void __dma_free(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs) +void arch_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) { - void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); + if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) { + void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle)); - size = PAGE_ALIGN(size); - - if (!is_device_dma_coherent(dev)) { - if (__free_from_pool(vaddr, size)) - return; vunmap(vaddr); + dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs); } - swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs); } -static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr) { - dma_addr_t dev_addr; - - dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs); - if (!is_device_dma_coherent(dev) && - (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - 
__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); - - return dev_addr; -} - - -static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - if (!is_device_dma_coherent(dev) && - (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); - swiotlb_unmap_page(dev, dev_addr, size, dir, attrs); + return __phys_to_pfn(dma_to_phys(dev, dma_addr)); } -static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - unsigned long attrs) +pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, + unsigned long attrs) { - struct scatterlist *sg; - int i, ret; - - ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs); - if (!is_device_dma_coherent(dev) && - (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - for_each_sg(sgl, sg, ret, i) - __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), - sg->length, dir); - - return ret; -} - -static void __swiotlb_unmap_sg_attrs(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir, - unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - if (!is_device_dma_coherent(dev) && - (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - for_each_sg(sgl, sg, nelems, i) - __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), - sg->length, dir); - swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs); + if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE)) + return pgprot_writecombine(prot); + return prot; } -static void __swiotlb_sync_single_for_cpu(struct device *dev, - dma_addr_t dev_addr, size_t size, - enum dma_data_direction dir) +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - if (!is_device_dma_coherent(dev)) - __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); - swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir); + __dma_map_area(phys_to_virt(paddr), size, dir); } -static void __swiotlb_sync_single_for_device(struct device *dev, - dma_addr_t dev_addr, size_t size, - enum dma_data_direction dir) +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - swiotlb_sync_single_for_device(dev, dev_addr, size, dir); - if (!is_device_dma_coherent(dev)) - __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); + __dma_unmap_area(phys_to_virt(paddr), size, dir); } -static void __swiotlb_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir) +static int __swiotlb_get_sgtable_page(struct sg_table *sgt, + struct page *page, size_t size) { - struct scatterlist *sg; - int i; - - if (!is_device_dma_coherent(dev)) - for_each_sg(sgl, sg, nelems, i) - __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), - sg->length, dir); - swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir); -} + int ret = sg_alloc_table(sgt, 1, GFP_KERNEL); -static void __swiotlb_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nelems, - enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; + if (!ret) + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); - swiotlb_sync_sg_for_device(dev, sgl, nelems, dir); - if (!is_device_dma_coherent(dev)) - for_each_sg(sgl, sg, nelems, i) - __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), - sg->length, dir); + return ret; } static int 
__swiotlb_mmap_pfn(struct vm_area_struct *vma, @@ -277,74 +189,6 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma, return ret; } -static int __swiotlb_mmap(struct device *dev, - struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - int ret; - unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT; - - vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, - is_device_dma_coherent(dev)); - - if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) - return ret; - - return __swiotlb_mmap_pfn(vma, pfn, size); -} - -static int __swiotlb_get_sgtable_page(struct sg_table *sgt, - struct page *page, size_t size) -{ - int ret = sg_alloc_table(sgt, 1, GFP_KERNEL); - - if (!ret) - sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); - - return ret; -} - -static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t handle, size_t size, - unsigned long attrs) -{ - struct page *page = phys_to_page(dma_to_phys(dev, handle)); - - return __swiotlb_get_sgtable_page(sgt, page, size); -} - -static int __swiotlb_dma_supported(struct device *hwdev, u64 mask) -{ - if (swiotlb) - return swiotlb_dma_supported(hwdev, mask); - return 1; -} - -static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr) -{ - if (swiotlb) - return swiotlb_dma_mapping_error(hwdev, addr); - return 0; -} - -static const struct dma_map_ops arm64_swiotlb_dma_ops = { - .alloc = __dma_alloc, - .free = __dma_free, - .mmap = __swiotlb_mmap, - .get_sgtable = __swiotlb_get_sgtable, - .map_page = __swiotlb_map_page, - .unmap_page = __swiotlb_unmap_page, - .map_sg = __swiotlb_map_sg_attrs, - .unmap_sg = __swiotlb_unmap_sg_attrs, - .sync_single_for_cpu = __swiotlb_sync_single_for_cpu, - .sync_single_for_device = __swiotlb_sync_single_for_device, - .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu, - .sync_sg_for_device = __swiotlb_sync_sg_for_device, - .dma_supported = __swiotlb_dma_supported, - .mapping_error = __swiotlb_dma_mapping_error, -}; - static int __init atomic_pool_init(void) { pgprot_t prot = __pgprot(PROT_NORMAL_NC); @@ -500,10 +344,6 @@ EXPORT_SYMBOL(dummy_dma_ops); static int __init arm64_dma_init(void) { - if (swiotlb_force == SWIOTLB_FORCE || - max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) - swiotlb = 1; - WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(), TAINT_CPU_OUT_OF_SPEC, "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)", @@ -528,7 +368,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { - bool coherent = is_device_dma_coherent(dev); + bool coherent = dev_is_dma_coherent(dev); int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); size_t iosize = size; void *addr; @@ -569,7 +409,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, addr = NULL; } } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { - pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); + pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); struct page *page; page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, @@ -596,7 +436,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, size >> PAGE_SHIFT); } } else { - pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); + pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); struct page **pages; pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot, @@ -658,8 +498,7 @@ static int __iommu_mmap_attrs(struct device 
*dev, struct vm_area_struct *vma, struct vm_struct *area; int ret; - vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, - is_device_dma_coherent(dev)); + vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; @@ -709,11 +548,11 @@ static void __iommu_sync_single_for_cpu(struct device *dev, { phys_addr_t phys; - if (is_device_dma_coherent(dev)) + if (dev_is_dma_coherent(dev)) return; - phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr); - __dma_unmap_area(phys_to_virt(phys), size, dir); + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr); + arch_sync_dma_for_cpu(dev, phys, size, dir); } static void __iommu_sync_single_for_device(struct device *dev, @@ -722,11 +561,11 @@ static void __iommu_sync_single_for_device(struct device *dev, { phys_addr_t phys; - if (is_device_dma_coherent(dev)) + if (dev_is_dma_coherent(dev)) return; - phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr); - __dma_map_area(phys_to_virt(phys), size, dir); + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr); + arch_sync_dma_for_device(dev, phys, size, dir); } static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, @@ -734,13 +573,13 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, enum dma_data_direction dir, unsigned long attrs) { - bool coherent = is_device_dma_coherent(dev); + bool coherent = dev_is_dma_coherent(dev); int prot = dma_info_to_prot(dir, coherent, attrs); dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); - if (!iommu_dma_mapping_error(dev, dev_addr) && - (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - __iommu_sync_single_for_device(dev, dev_addr, size, dir); + if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + !iommu_dma_mapping_error(dev, dev_addr)) + __dma_map_area(page_address(page) + offset, size, dir); return dev_addr; } @@ -762,11 +601,11 @@ static void __iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg; int i; - if (is_device_dma_coherent(dev)) + if (dev_is_dma_coherent(dev)) return; for_each_sg(sgl, sg, nelems, i) - __dma_unmap_area(sg_virt(sg), sg->length, dir); + arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); } static void __iommu_sync_sg_for_device(struct device *dev, @@ -776,18 +615,18 @@ static void __iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg; int i; - if (is_device_dma_coherent(dev)) + if (dev_is_dma_coherent(dev)) return; for_each_sg(sgl, sg, nelems, i) - __dma_map_area(sg_virt(sg), sg->length, dir); + arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); } static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, unsigned long attrs) { - bool coherent = is_device_dma_coherent(dev); + bool coherent = dev_is_dma_coherent(dev); if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __iommu_sync_sg_for_device(dev, sgl, nelems, dir); @@ -879,9 +718,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { if (!dev->dma_ops) - dev->dma_ops = &arm64_swiotlb_dma_ops; + dev->dma_ops = &swiotlb_dma_ops; - dev->archdata.dma_coherent = coherent; + dev->dma_coherent = coherent; __iommu_setup_dma_ops(dev, dma_base, size, iommu); #ifdef CONFIG_XEN diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index d0e638ef3af6..7d9571f4ae3d 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -303,9 
+303,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, die_kernel_fault(msg, addr, esr, regs); } -static void __do_user_fault(struct siginfo *info, unsigned int esr) +static void set_thread_esr(unsigned long address, unsigned int esr) { - current->thread.fault_address = (unsigned long)info->si_addr; + current->thread.fault_address = address; /* * If the faulting address is in the kernel, we must sanitize the ESR. @@ -358,7 +358,6 @@ static void __do_user_fault(struct siginfo *info, unsigned int esr) } current->thread.fault_code = esr; - arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current); } static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) @@ -369,14 +368,10 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re */ if (user_mode(regs)) { const struct fault_info *inf = esr_to_fault_info(esr); - struct siginfo si; - clear_siginfo(&si); - si.si_signo = inf->sig; - si.si_code = inf->code; - si.si_addr = (void __user *)addr; - - __do_user_fault(&si, esr); + set_thread_esr(addr, esr); + arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr, + inf->name); } else { __do_kernel_fault(addr, esr, regs); } @@ -430,9 +425,9 @@ static bool is_el0_instruction_abort(unsigned int esr) static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs) { + const struct fault_info *inf; struct task_struct *tsk; struct mm_struct *mm; - struct siginfo si; vm_fault_t fault, major = 0; unsigned long vm_flags = VM_READ | VM_WRITE; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -568,37 +563,35 @@ retry: return 0; } - clear_siginfo(&si); - si.si_addr = (void __user *)addr; - + inf = esr_to_fault_info(esr); + set_thread_esr(addr, esr); if (fault & VM_FAULT_SIGBUS) { /* * We had some memory, but were unable to successfully fix up * this page fault. */ - si.si_signo = SIGBUS; - si.si_code = BUS_ADRERR; - } else if (fault & VM_FAULT_HWPOISON_LARGE) { - unsigned int hindex = VM_FAULT_GET_HINDEX(fault); - - si.si_signo = SIGBUS; - si.si_code = BUS_MCEERR_AR; - si.si_addr_lsb = hstate_index_to_shift(hindex); - } else if (fault & VM_FAULT_HWPOISON) { - si.si_signo = SIGBUS; - si.si_code = BUS_MCEERR_AR; - si.si_addr_lsb = PAGE_SHIFT; + arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr, + inf->name); + } else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) { + unsigned int lsb; + + lsb = PAGE_SHIFT; + if (fault & VM_FAULT_HWPOISON_LARGE) + lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); + + arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb, + inf->name); } else { /* * Something tried to access memory that isn't in our memory * map. */ - si.si_signo = SIGSEGV; - si.si_code = fault == VM_FAULT_BADACCESS ? - SEGV_ACCERR : SEGV_MAPERR; + arm64_force_sig_fault(SIGSEGV, + fault == VM_FAULT_BADACCESS ? 
SEGV_ACCERR : SEGV_MAPERR, + (void __user *)addr, + inf->name); } - __do_user_fault(&si, esr); return 0; no_context: @@ -631,8 +624,8 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - struct siginfo info; const struct fault_info *inf; + void __user *siaddr; inf = esr_to_fault_info(esr); @@ -651,15 +644,11 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) nmi_exit(); } - clear_siginfo(&info); - info.si_signo = inf->sig; - info.si_errno = 0; - info.si_code = inf->code; if (esr & ESR_ELx_FnV) - info.si_addr = NULL; + siaddr = NULL; else - info.si_addr = (void __user *)addr; - arm64_notify_die(inf->name, regs, &info, esr); + siaddr = (void __user *)addr; + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; } @@ -740,7 +729,6 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_fault_info(esr); - struct siginfo info; if (!inf->fn(addr, esr, regs)) return; @@ -751,12 +739,8 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, show_pte(addr); } - clear_siginfo(&info); - info.si_signo = inf->sig; - info.si_errno = 0; - info.si_code = inf->code; - info.si_addr = (void __user *)addr; - arm64_notify_die(inf->name, regs, &info, esr); + arm64_notify_die(inf->name, regs, + inf->sig, inf->code, (void __user *)addr, esr); } asmlinkage void __exception do_el0_irq_bp_hardening(void) @@ -786,20 +770,14 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - struct siginfo info; - if (user_mode(regs)) { if (instruction_pointer(regs) > TASK_SIZE) arm64_apply_bp_hardening(); local_daif_restore(DAIF_PROCCTX); } - clear_siginfo(&info); - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *)addr; - arm64_notify_die("SP/PC alignment exception", regs, &info, esr); + arm64_notify_die("SP/PC alignment exception", regs, + SIGBUS, BUS_ADRALN, (void __user *)addr, esr); } int __init early_brk64(unsigned long addr, unsigned int esr, @@ -853,14 +831,8 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, if (!inf->fn(addr, esr, regs)) { rv = 1; } else { - struct siginfo info; - - clear_siginfo(&info); - info.si_signo = inf->sig; - info.si_errno = 0; - info.si_code = inf->code; - info.si_addr = (void __user *)addr; - arm64_notify_die(inf->name, regs, &info, esr); + arm64_notify_die(inf->name, regs, + inf->sig, inf->code, (void __user *)addr, esr); rv = 0; } diff --git a/arch/c6x/Makefile b/arch/c6x/Makefile index 3fe8a948e94c..b7aa854f7008 100644 --- a/arch/c6x/Makefile +++ b/arch/c6x/Makefile @@ -40,9 +40,7 @@ boot := arch/$(ARCH)/boot DTB:=$(subst dtbImage.,,$(filter dtbImage.%, $(MAKECMDGOALS))) export DTB -ifneq ($(DTB),) core-y += $(boot)/dts/ -endif # With make 3.82 we cannot mix normal and wildcard targets diff --git a/arch/c6x/boot/dts/Makefile b/arch/c6x/boot/dts/Makefile index b212d278ebc4..f438285c3640 100644 --- a/arch/c6x/boot/dts/Makefile +++ b/arch/c6x/boot/dts/Makefile @@ -5,15 +5,12 @@ DTC_FLAGS ?= -p 1024 +dtb-$(CONFIG_SOC_TMS320C6455) += dsk6455.dtb +dtb-$(CONFIG_SOC_TMS320C6457) += evmc6457.dtb +dtb-$(CONFIG_SOC_TMS320C6472) += evmc6472.dtb +dtb-$(CONFIG_SOC_TMS320C6474) += evmc6474.dtb +dtb-$(CONFIG_SOC_TMS320C6678) += evmc6678.dtb + ifneq ($(DTB),) -obj-y += linked_dtb.o +obj-y += $(DTB).dtb.o 
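Back in the arch/arm64/mm/fault.c hunk, hwpoison faults are now reported through arm64_force_sig_mceerr(), so the victim process receives a SIGBUS whose si_code is BUS_MCEERR_AR and whose si_addr_lsb carries the poison granule (PAGE_SHIFT, or the huge-page shift). A small, hypothetical sketch of the receiving side using only the standard Linux signal API (the handler below is invented, not from the patch):

#define _GNU_SOURCE		/* BUS_MCEERR_AR may be gated in glibc headers */
#include <signal.h>
#include <unistd.h>

static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
{
	/* BUS_MCEERR_AR: a hardware memory error was consumed at si->si_addr;
	 * si->si_addr_lsb gives the log2 size of the poisoned region. */
	if (si->si_code == BUS_MCEERR_AR)
		write(STDERR_FILENO, "hwpoison SIGBUS\n", 16);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = sigbus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);
	pause();	/* a poisoned access would now land in the handler */
	return 0;
}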
endif - -quiet_cmd_cp = CP $< $@$2 - cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) - -# Generate builtin.dtb from $(DTB).dtb -$(obj)/builtin.dtb: $(obj)/$(DTB).dtb - $(call if_changed,cp) - -$(obj)/linked_dtb.o: $(obj)/builtin.dtb diff --git a/arch/c6x/boot/dts/linked_dtb.S b/arch/c6x/boot/dts/linked_dtb.S deleted file mode 100644 index cf347f1d16ce..000000000000 --- a/arch/c6x/boot/dts/linked_dtb.S +++ /dev/null @@ -1,2 +0,0 @@ -.section __fdt_blob,"a" -.incbin "arch/c6x/boot/dts/builtin.dtb" diff --git a/arch/c6x/include/asm/sections.h b/arch/c6x/include/asm/sections.h index d6c591ab5b7e..dc2f15eb3bde 100644 --- a/arch/c6x/include/asm/sections.h +++ b/arch/c6x/include/asm/sections.h @@ -8,6 +8,5 @@ extern char _vectors_start[]; extern char _vectors_end[]; extern char _data_lma[]; -extern char _fdt_start[], _fdt_end[]; #endif /* _ASM_C6X_SECTIONS_H */ diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h index 0d2daf7f9809..6b2fe792de9d 100644 --- a/arch/c6x/include/uapi/asm/unistd.h +++ b/arch/c6x/include/uapi/asm/unistd.h @@ -16,6 +16,7 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_CLONE /* Use the standard ABI for syscalls. */ diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c index 786e36e2f61d..05d96a9541b5 100644 --- a/arch/c6x/kernel/setup.c +++ b/arch/c6x/kernel/setup.c @@ -96,7 +96,7 @@ static void __init get_cpuinfo(void) unsigned long core_khz; u64 tmp; struct cpuinfo_c6x *p; - struct device_node *node, *np; + struct device_node *node; p = &per_cpu(cpu_data, smp_processor_id()); @@ -190,13 +190,8 @@ static void __init get_cpuinfo(void) p->core_id = get_coreid(); - node = of_find_node_by_name(NULL, "cpus"); - if (node) { - for_each_child_of_node(node, np) - if (!strcmp("cpu", np->name)) - ++c6x_num_cores; - of_node_put(node); - } + for_each_of_cpu_node(node) + ++c6x_num_cores; node = of_find_node_by_name(NULL, "soc"); if (node) { @@ -270,7 +265,7 @@ int __init c6x_add_memory(phys_addr_t start, unsigned long size) notrace void __init machine_init(unsigned long dt_ptr) { void *dtb = __va(dt_ptr); - void *fdt = _fdt_start; + void *fdt = __dtb_start; /* interrupts must be masked */ set_creg(IER, 2); @@ -363,7 +358,7 @@ void __init setup_arch(char **cmdline_p) memory_end >> PAGE_SHIFT); memblock_reserve(memory_start, bootmap_size); - unflatten_device_tree(); + unflatten_and_copy_device_tree(); c6x_cache_init(); diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S index 1fba5b421eee..584bab2bace6 100644 --- a/arch/c6x/kernel/vmlinux.lds.S +++ b/arch/c6x/kernel/vmlinux.lds.S @@ -90,16 +90,6 @@ SECTIONS *(.switch) } - . = ALIGN (8) ; - __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) - { - _fdt_start = . ; /* place for fdt blob */ - *(__fdt_blob) ; /* Any link-placed DTB */ - BYTE(0); /* section always has contents */ - . = _fdt_start + 0x4000; /* Pad up to 16kbyte */ - _fdt_end = . 
; - } - _etext = .; /* diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile index 58634e6bae92..4003ddc616e1 100644 --- a/arch/h8300/Makefile +++ b/arch/h8300/Makefile @@ -31,21 +31,12 @@ CROSS_COMPILE := h8300-unknown-linux- endif core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/ -ifneq '$(CONFIG_H8300_BUILTIN_DTB)' '""' -core-y += arch/h8300/boot/dts/ -endif +core-y += arch/$(ARCH)/boot/dts/ libs-y += arch/$(ARCH)/lib/ boot := arch/h8300/boot -%.dtb %.dtb.S %.dtb.o: | scripts - $(Q)$(MAKE) $(build)=arch/h8300/boot/dts arch/h8300/boot/dts/$@ - -PHONY += dtbs -dtbs: scripts - $(Q)$(MAKE) $(build)=arch/h8300/boot/dts - archmrproper: archclean: diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h index 7dd20ef7625a..628195823816 100644 --- a/arch/h8300/include/uapi/asm/unistd.h +++ b/arch/h8300/include/uapi/asm/unistd.h @@ -1,5 +1,6 @@ #define __ARCH_NOMMU #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_STAT64 #include <asm-generic/unistd.h> diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index 35716a3048de..49f716c0a1df 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -56,7 +56,6 @@ SECTIONS __init_begin = .; INIT_TEXT_SECTION(4) INIT_DATA_SECTION(4) - SECURITY_INIT __init_end = .; _edata = . ; _begin_data = LOADADDR(.data); diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 3ef46522e89f..7b25d7c8fa49 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -21,6 +21,9 @@ config HEXAGON select GENERIC_IRQ_SHOW select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK + select HAVE_MEMBLOCK + select ARCH_DISCARD_MEMBLOCK + select NO_BOOTMEM select NEED_SG_DMA_LENGTH select NO_IOPORT_MAP select GENERIC_IOMAP diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h index ea181e79162e..c91ca7d02461 100644 --- a/arch/hexagon/include/uapi/asm/unistd.h +++ b/arch/hexagon/include/uapi/asm/unistd.h @@ -29,6 +29,7 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index 1495d45e472d..d789b9cc0189 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/bootmem.h> +#include <linux/memblock.h> #include <asm/atomic.h> #include <linux/highmem.h> #include <asm/tlb.h> @@ -176,7 +177,6 @@ size_t hexagon_coherent_pool_size = (size_t) (DMA_RESERVE << 22); void __init setup_arch_memory(void) { - int bootmap_size; /* XXX Todo: this probably should be cleaned up */ u32 *segtable = (u32 *) &swapper_pg_dir[0]; u32 *segtable_end; @@ -195,18 +195,22 @@ void __init setup_arch_memory(void) bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) & ~((BIG_KERNEL_PAGE_SIZE) - 1)); + memblock_add(PHYS_OFFSET, + (bootmem_lastpg - ARCH_PFN_OFFSET) << PAGE_SHIFT); + + /* Reserve kernel text/data/bss */ + memblock_reserve(PHYS_OFFSET, + (bootmem_startpg - ARCH_PFN_OFFSET) << PAGE_SHIFT); /* * Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached) * memory allocation */ - max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES); min_low_pfn = ARCH_PFN_OFFSET; - bootmap_size = init_bootmem_node(NODE_DATA(0), bootmem_startpg, min_low_pfn, max_low_pfn); + memblock_reserve(PFN_PHYS(max_low_pfn), DMA_RESERVED_BYTES); printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg); printk(KERN_INFO 
"bootmem_lastpg: 0x%08lx\n", bootmem_lastpg); - printk(KERN_INFO "bootmap_size: %d\n", bootmap_size); printk(KERN_INFO "min_low_pfn: 0x%08lx\n", min_low_pfn); printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn); @@ -257,14 +261,6 @@ void __init setup_arch_memory(void) #endif /* - * Free all the memory that wasn't taken up by the bootmap, the DMA - * reserve, or kernel itself. - */ - free_bootmem(PFN_PHYS(bootmem_startpg) + bootmap_size, - PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size - - DMA_RESERVED_BYTES); - - /* * The bootmem allocator seemingly just lives to feed memory * to the paging system */ diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 663388a73d4e..7aeb48a18576 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -297,29 +297,29 @@ static void rs_unthrottle(struct tty_struct * tty) printk(KERN_INFO "simrs_unthrottle called\n"); } +static int rs_setserial(struct tty_struct *tty, struct serial_struct *ss) +{ + return 0; +} + +static int rs_getserial(struct tty_struct *tty, struct serial_struct *ss) +{ + return 0; +} + static int rs_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { - if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && - (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && - (cmd != TIOCMIWAIT)) { + if ((cmd != TIOCSERCONFIG) && (cmd != TIOCMIWAIT)) { if (tty_io_error(tty)) return -EIO; } switch (cmd) { - case TIOCGSERIAL: - case TIOCSSERIAL: - case TIOCSERGSTRUCT: case TIOCMIWAIT: return 0; case TIOCSERCONFIG: case TIOCSERGETLSR: /* Get line status register */ return -EINVAL; - case TIOCSERGWILD: - case TIOCSERSWILD: - /* "setserial -W" is called in Debian boot */ - printk (KERN_INFO "TIOCSER?WILD ioctl obsolete, ignored.\n"); - return 0; } return -ENOIOCTLCMD; } @@ -448,6 +448,8 @@ static const struct tty_operations hp_ops = { .throttle = rs_throttle, .unthrottle = rs_unthrottle, .send_xchar = rs_send_xchar, + .set_serial = rs_setserial, + .get_serial = rs_getserial, .hangup = rs_hangup, .proc_show = rs_proc_show, }; diff --git a/arch/ia64/include/asm/hugetlb.h b/arch/ia64/include/asm/hugetlb.h index 74d2a5540aaf..36cc0396b214 100644 --- a/arch/ia64/include/asm/hugetlb.h +++ b/arch/ia64/include/asm/hugetlb.h @@ -3,13 +3,13 @@ #define _ASM_IA64_HUGETLB_H #include <asm/page.h> -#include <asm-generic/hugetlb.h> - +#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); +#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE int prepare_hugepage_range(struct file *file, unsigned long addr, unsigned long len); @@ -21,53 +21,16 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE); } -static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - set_pte_at(mm, addr, ptep, pte); -} - -static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - return ptep_get_and_clear(mm, addr, ptep); -} - +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { } -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} - -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - ptep_set_wrprotect(mm, 
addr, ptep); -} - -static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t pte, int dirty) -{ - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); -} - -static inline pte_t huge_ptep_get(pte_t *ptep) -{ - return *ptep; -} - static inline void arch_clear_hugepage_flags(struct page *page) { } +#include <asm-generic/hugetlb.h> + #endif /* _ASM_IA64_HUGETLB_H */ diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 165827774bea..b1e7468eb65a 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -544,7 +544,6 @@ extern struct page *zero_page_memmap_ptr; # ifdef CONFIG_VIRTUAL_MEM_MAP /* arch mem_map init routine is needed due to holes in a virtual mem_map */ -# define __HAVE_ARCH_MEMMAP_INIT extern void memmap_init (unsigned long size, int nid, unsigned long zone, unsigned long start_pfn); # endif /* CONFIG_VIRTUAL_MEM_MAP */ diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index ffb705dc9c13..49e34db2529c 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -28,6 +28,9 @@ #define __IGNORE_vfork /* clone() */ #define __IGNORE_umount2 /* umount() */ +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SYS_UTIME + #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) #include <linux/types.h> diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h index 52b5af424511..796af1ccaa7e 100644 --- a/arch/ia64/include/uapi/asm/siginfo.h +++ b/arch/ia64/include/uapi/asm/siginfo.h @@ -9,8 +9,6 @@ #define _UAPI_ASM_IA64_SIGINFO_H -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) - #include <asm-generic/siginfo.h> #define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */ diff --git a/arch/ia64/kernel/brl_emu.c b/arch/ia64/kernel/brl_emu.c index a61f6c6a36f8..c0239bf77a09 100644 --- a/arch/ia64/kernel/brl_emu.c +++ b/arch/ia64/kernel/brl_emu.c @@ -58,11 +58,9 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec) unsigned long bundle[2]; unsigned long opcode, btype, qp, offset, cpl; unsigned long next_ip; - struct siginfo siginfo; struct illegal_op_return rv; long tmp_taken, unimplemented_address; - clear_siginfo(&siginfo); rv.fkt = (unsigned long) -1; /* @@ -198,39 +196,22 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec) * The target address contains unimplemented bits. */ printk(KERN_DEBUG "Woah! Unimplemented Instruction Address Trap!\n"); - siginfo.si_signo = SIGILL; - siginfo.si_errno = 0; - siginfo.si_flags = 0; - siginfo.si_isr = 0; - siginfo.si_imm = 0; - siginfo.si_code = ILL_BADIADDR; - force_sig_info(SIGILL, &siginfo, current); + force_sig_fault(SIGILL, ILL_BADIADDR, (void __user *)NULL, + 0, 0, 0, current); } else if (ia64_psr(regs)->tb) { /* * Branch Tracing is enabled. * Force a taken branch signal. */ - siginfo.si_signo = SIGTRAP; - siginfo.si_errno = 0; - siginfo.si_code = TRAP_BRANCH; - siginfo.si_flags = 0; - siginfo.si_isr = 0; - siginfo.si_addr = 0; - siginfo.si_imm = 0; - force_sig_info(SIGTRAP, &siginfo, current); + force_sig_fault(SIGTRAP, TRAP_BRANCH, (void __user *)NULL, + 0, 0, 0, current); } else if (ia64_psr(regs)->ss) { /* * Single Step is enabled. * Force a trace signal. 
*/ - siginfo.si_signo = SIGTRAP; - siginfo.si_errno = 0; - siginfo.si_code = TRAP_TRACE; - siginfo.si_flags = 0; - siginfo.si_isr = 0; - siginfo.si_addr = 0; - siginfo.si_imm = 0; - force_sig_info(SIGTRAP, &siginfo, current); + force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)NULL, + 0, 0, 0, current); } return rv; } diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index d1234a5ba4c5..9a960829a01d 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -110,7 +110,6 @@ ia64_rt_sigreturn (struct sigscratch *scr) { extern char ia64_strace_leave_kernel, ia64_leave_kernel; struct sigcontext __user *sc; - struct siginfo si; sigset_t set; long retval; @@ -153,14 +152,7 @@ ia64_rt_sigreturn (struct sigscratch *scr) return retval; give_sigsegv: - clear_siginfo(&si); - si.si_signo = SIGSEGV; - si.si_errno = 0; - si.si_code = SI_KERNEL; - si.si_pid = task_pid_vnr(current); - si.si_uid = from_kuid_munged(current_user_ns(), current_uid()); - si.si_addr = sc; - force_sig_info(SIGSEGV, &si, current); + force_sig(SIGSEGV, current); return retval; } @@ -232,37 +224,6 @@ rbs_on_sig_stack (unsigned long bsp) } static long -force_sigsegv_info (int sig, void __user *addr) -{ - unsigned long flags; - struct siginfo si; - - clear_siginfo(&si); - if (sig == SIGSEGV) { - /* - * Acquiring siglock around the sa_handler-update is almost - * certainly overkill, but this isn't a - * performance-critical path and I'd rather play it safe - * here than having to debug a nasty race if and when - * something changes in kernel/signal.c that would make it - * no longer safe to modify sa_handler without holding the - * lock. - */ - spin_lock_irqsave(&current->sighand->siglock, flags); - current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; - spin_unlock_irqrestore(&current->sighand->siglock, flags); - } - si.si_signo = SIGSEGV; - si.si_errno = 0; - si.si_code = SI_KERNEL; - si.si_pid = task_pid_vnr(current); - si.si_uid = from_kuid_munged(current_user_ns(), current_uid()); - si.si_addr = addr; - force_sig_info(SIGSEGV, &si, current); - return 1; -} - -static long setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr) { extern char __kernel_sigtramp[]; @@ -295,15 +256,18 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr) * instead so we will die with SIGSEGV. 
*/ check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN; - if (!likely(on_sig_stack(check_sp))) - return force_sigsegv_info(ksig->sig, (void __user *) - check_sp); + if (!likely(on_sig_stack(check_sp))) { + force_sigsegv(ksig->sig, current); + return 1; + } } } frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) - return force_sigsegv_info(ksig->sig, frame); + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) { + force_sigsegv(ksig->sig, current); + return 1; + } err = __put_user(ksig->sig, &frame->arg0); err |= __put_user(&frame->info, &frame->arg1); @@ -317,8 +281,10 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr) err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12); err |= setup_sigcontext(&frame->sc, set, scr); - if (unlikely(err)) - return force_sigsegv_info(ksig->sig, frame); + if (unlikely(err)) { + force_sigsegv(ksig->sig, current); + return 1; + } scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */ scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index c6f4932073a1..85d8616ac4f6 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -100,16 +100,8 @@ die_if_kernel (char *str, struct pt_regs *regs, long err) void __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) { - siginfo_t siginfo; int sig, code; - /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */ - clear_siginfo(&siginfo); - siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); - siginfo.si_imm = break_num; - siginfo.si_flags = 0; /* clear __ISR_VALID */ - siginfo.si_isr = 0; - switch (break_num) { case 0: /* unknown error (used by GCC for __builtin_abort()) */ if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP) @@ -182,10 +174,9 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) sig = SIGTRAP; code = TRAP_BRKPT; } } - siginfo.si_signo = sig; - siginfo.si_errno = 0; - siginfo.si_code = code; - force_sig_info(sig, &siginfo, current); + force_sig_fault(sig, code, + (void __user *) (regs->cr_iip + ia64_psr(regs)->ri), + break_num, 0 /* clear __ISR_VALID */, 0, current); } /* @@ -344,30 +335,25 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n"); return -1; } else { - struct siginfo siginfo; - /* is next instruction a trap? 
*/ + int si_code; + if (exception & 2) { ia64_increment_ip(regs); } - clear_siginfo(&siginfo); - siginfo.si_signo = SIGFPE; - siginfo.si_errno = 0; - siginfo.si_code = FPE_FLTUNK; /* default code */ - siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); + si_code = FPE_FLTUNK; /* default code */ if (isr & 0x11) { - siginfo.si_code = FPE_FLTINV; + si_code = FPE_FLTINV; } else if (isr & 0x22) { /* denormal operand gets the same si_code as underflow * see arch/i386/kernel/traps.c:math_error() */ - siginfo.si_code = FPE_FLTUND; + si_code = FPE_FLTUND; } else if (isr & 0x44) { - siginfo.si_code = FPE_FLTDIV; + si_code = FPE_FLTDIV; } - siginfo.si_isr = isr; - siginfo.si_flags = __ISR_VALID; - siginfo.si_imm = 0; - force_sig_info(SIGFPE, &siginfo, current); + force_sig_fault(SIGFPE, si_code, + (void __user *) (regs->cr_iip + ia64_psr(regs)->ri), + 0, __ISR_VALID, isr, current); } } else { if (exception == -1) { @@ -375,24 +361,19 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) return -1; } else if (exception != 0) { /* raise exception */ - struct siginfo siginfo; + int si_code; - clear_siginfo(&siginfo); - siginfo.si_signo = SIGFPE; - siginfo.si_errno = 0; - siginfo.si_code = FPE_FLTUNK; /* default code */ - siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); + si_code = FPE_FLTUNK; /* default code */ if (isr & 0x880) { - siginfo.si_code = FPE_FLTOVF; + si_code = FPE_FLTOVF; } else if (isr & 0x1100) { - siginfo.si_code = FPE_FLTUND; + si_code = FPE_FLTUND; } else if (isr & 0x2200) { - siginfo.si_code = FPE_FLTRES; + si_code = FPE_FLTRES; } - siginfo.si_isr = isr; - siginfo.si_flags = __ISR_VALID; - siginfo.si_imm = 0; - force_sig_info(SIGFPE, &siginfo, current); + force_sig_fault(SIGFPE, si_code, + (void __user *) (regs->cr_iip + ia64_psr(regs)->ri), + 0, __ISR_VALID, isr, current); } } return 0; @@ -408,7 +389,6 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3, struct pt_regs regs) { struct illegal_op_return rv; - struct siginfo si; char buf[128]; #ifdef CONFIG_IA64_BRL_EMU @@ -426,11 +406,9 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3, if (die_if_kernel(buf, &regs, 0)) return rv; - clear_siginfo(&si); - si.si_signo = SIGILL; - si.si_code = ILL_ILLOPC; - si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri); - force_sig_info(SIGILL, &si, current); + force_sig_fault(SIGILL, ILL_ILLOPC, + (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri), + 0, 0, 0, current); return rv; } @@ -441,7 +419,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, { unsigned long code, error = isr, iip; char buf[128]; - int result, sig; + int result, sig, si_code; static const char *reason[] = { "IA-64 Illegal Operation fault", "IA-64 Privileged Operation fault", @@ -490,7 +468,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, case 26: /* NaT Consumption */ if (user_mode(&regs)) { - struct siginfo siginfo; void __user *addr; if (((isr >> 4) & 0xf) == 2) { @@ -505,15 +482,8 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri); } - clear_siginfo(&siginfo); - siginfo.si_signo = sig; - siginfo.si_code = code; - siginfo.si_errno = 0; - siginfo.si_addr = addr; - siginfo.si_imm = vector; - siginfo.si_flags = __ISR_VALID; - siginfo.si_isr = isr; - force_sig_info(sig, &siginfo, current); + force_sig_fault(sig, code, addr, + vector, __ISR_VALID, isr, current); return; } else if
(ia64_done_with_exception(&regs)) return; @@ -522,17 +492,8 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, case 31: /* Unsupported Data Reference */ if (user_mode(&regs)) { - struct siginfo siginfo; - - clear_siginfo(&siginfo); - siginfo.si_signo = SIGILL; - siginfo.si_code = ILL_ILLOPN; - siginfo.si_errno = 0; - siginfo.si_addr = (void __user *) iip; - siginfo.si_imm = vector; - siginfo.si_flags = __ISR_VALID; - siginfo.si_isr = isr; - force_sig_info(SIGILL, &siginfo, current); + force_sig_fault(SIGILL, ILL_ILLOPN, (void __user *) iip, + vector, __ISR_VALID, isr, current); return; } sprintf(buf, "Unsupported data reference"); @@ -541,10 +502,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, case 29: /* Debug */ case 35: /* Taken Branch Trap */ case 36: /* Single Step Trap */ - { - struct siginfo siginfo; - - clear_siginfo(&siginfo); if (fsys_mode(current, &regs)) { extern char __kernel_syscall_via_break[]; /* @@ -568,7 +525,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, switch (vector) { default: case 29: - siginfo.si_code = TRAP_HWBKPT; + si_code = TRAP_HWBKPT; #ifdef CONFIG_ITANIUM /* * Erratum 10 (IFA may contain incorrect address) now has @@ -578,37 +535,22 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ifa = regs.cr_iip; #endif break; - case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break; - case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break; + case 35: si_code = TRAP_BRANCH; ifa = 0; break; + case 36: si_code = TRAP_TRACE; ifa = 0; break; } - if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, siginfo.si_code, SIGTRAP) + if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, si_code, SIGTRAP) == NOTIFY_STOP) return; - siginfo.si_signo = SIGTRAP; - siginfo.si_errno = 0; - siginfo.si_addr = (void __user *) ifa; - siginfo.si_imm = 0; - siginfo.si_flags = __ISR_VALID; - siginfo.si_isr = isr; - force_sig_info(SIGTRAP, &siginfo, current); + force_sig_fault(SIGTRAP, si_code, (void __user *) ifa, + 0, __ISR_VALID, isr, current); return; - } case 32: /* fp fault */ case 33: /* fp trap */ result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr); if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) { - struct siginfo siginfo; - - clear_siginfo(&siginfo); - siginfo.si_signo = SIGFPE; - siginfo.si_errno = 0; - siginfo.si_code = FPE_FLTINV; - siginfo.si_addr = (void __user *) iip; - siginfo.si_flags = __ISR_VALID; - siginfo.si_isr = isr; - siginfo.si_imm = 0; - force_sig_info(SIGFPE, &siginfo, current); + force_sig_fault(SIGFPE, FPE_FLTINV, (void __user *) iip, + 0, __ISR_VALID, isr, current); } return; @@ -634,17 +576,9 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, } else { /* Unimplemented Instr.
Address Trap */ if (user_mode(®s)) { - struct siginfo siginfo; - - clear_siginfo(&siginfo); - siginfo.si_signo = SIGILL; - siginfo.si_code = ILL_BADIADDR; - siginfo.si_errno = 0; - siginfo.si_flags = 0; - siginfo.si_isr = 0; - siginfo.si_imm = 0; - siginfo.si_addr = (void __user *) iip; - force_sig_info(SIGILL, &siginfo, current); + force_sig_fault(SIGILL, ILL_BADIADDR, + (void __user *) iip, + 0, 0, 0, current); return; } sprintf(buf, "Unimplemented Instruction Address fault"); diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index e309f9859acc..a167a3824b35 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c @@ -1298,7 +1298,6 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) mm_segment_t old_fs = get_fs(); unsigned long bundle[2]; unsigned long opcode; - struct siginfo si; const struct exception_table_entry *eh = NULL; union { unsigned long l; @@ -1537,14 +1536,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) /* NOT_REACHED */ } force_sigbus: - clear_siginfo(&si); - si.si_signo = SIGBUS; - si.si_errno = 0; - si.si_code = BUS_ADRALN; - si.si_addr = (void __user *) ifa; - si.si_flags = 0; - si.si_isr = 0; - si.si_imm = 0; - force_sig_info(SIGBUS, &si, current); + force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) ifa, + 0, 0, 0, current); goto done; } diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index a9d55ad8d67b..5baeb022f474 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -248,16 +248,8 @@ retry: return; } if (user_mode(regs)) { - struct siginfo si; - - clear_siginfo(&si); - si.si_signo = signal; - si.si_errno = 0; - si.si_code = code; - si.si_addr = (void __user *) address; - si.si_isr = isr; - si.si_flags = __ISR_VALID; - force_sig_info(signal, &si, current); + force_sig_fault(signal, code, (void __user *) address, + 0, __ISR_VALID, isr, current); return; } diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 1d5483f6e457..85904b73e261 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -621,7 +621,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -657,7 +656,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 52a0af127951..9b3818bbb68b 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -578,7 +578,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -614,7 +613,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index b3103e51268a..769677809945 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -599,7 +599,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m 
@@ -635,7 +634,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index fb7d651a4cab..7dd264ddf2ea 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 6b37f5537c39..515f7439c755 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -580,7 +580,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -616,7 +615,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index c717bf879449..8e1038ceb407 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -602,7 +602,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -638,7 +637,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 226c994ce794..62c8aaa15cc7 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -684,7 +684,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -720,7 +719,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index b383327fd77a..733973f91297 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 9783d3deb9e9..fee30cc9ac16 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y 
CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index a35d10ee10cb..eebf9c9088e7 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -593,7 +593,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -629,7 +628,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 573bf922d448..dabc54318c09 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -571,7 +571,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -607,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index efb27a7fcc55..0d9a5c2a311a 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -572,7 +572,6 @@ CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_AEGIS128=m @@ -608,7 +607,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_LZO=m diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 30d0d3fbd4ef..e680031bda7b 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -7,6 +7,7 @@ #define NR_syscalls 380 +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT #define __ARCH_WANT_STAT64 @@ -21,7 +22,6 @@ #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLD_GETRLIMIT #define __ARCH_WANT_SYS_OLD_MMAP diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 1b083c500b9a..ebb3b6d169ea 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -37,35 +37,6 @@ static void (*rom_reset)(void); #ifdef CONFIG_ADB_CUDA -static time64_t cuda_read_time(void) -{ - struct adb_request req; - time64_t time; - - if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0) - return 0; - while (!req.complete) - cuda_poll(); - - time = (u32)((req.reply[3] << 24) | (req.reply[4] << 16) | - (req.reply[5] << 8) | req.reply[6]); - - return time - RTC_OFFSET; -} - -static void cuda_write_time(time64_t time) -{ - struct adb_request req; - u32 data = lower_32_bits(time + RTC_OFFSET); - - if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME, - (data >> 24) & 0xFF, (data >> 16) 
& 0xFF, - (data >> 8) & 0xFF, data & 0xFF) < 0) - return; - while (!req.complete) - cuda_poll(); -} - static __u8 cuda_read_pram(int offset) { struct adb_request req; @@ -91,33 +62,6 @@ static void cuda_write_pram(int offset, __u8 data) #endif /* CONFIG_ADB_CUDA */ #ifdef CONFIG_ADB_PMU -static time64_t pmu_read_time(void) -{ - struct adb_request req; - time64_t time; - - if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) - return 0; - pmu_wait_complete(&req); - - time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) | - (req.reply[2] << 8) | req.reply[3]); - - return time - RTC_OFFSET; -} - -static void pmu_write_time(time64_t time) -{ - struct adb_request req; - u32 data = lower_32_bits(time + RTC_OFFSET); - - if (pmu_request(&req, NULL, 5, PMU_SET_RTC, - (data >> 24) & 0xFF, (data >> 16) & 0xFF, - (data >> 8) & 0xFF, data & 0xFF) < 0) - return; - pmu_wait_complete(&req); -} - static __u8 pmu_read_pram(int offset) { struct adb_request req; @@ -295,13 +239,17 @@ static time64_t via_read_time(void) * is basically any machine with Mac II-style ADB. */ -static void via_write_time(time64_t time) +static void via_set_rtc_time(struct rtc_time *tm) { union { __u8 cdata[4]; __u32 idata; } data; __u8 temp; + time64_t time; + + time = mktime64(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, + tm->tm_hour, tm->tm_min, tm->tm_sec); /* Clear the write protect bit */ @@ -641,12 +589,12 @@ int mac_hwclk(int op, struct rtc_time *t) #ifdef CONFIG_ADB_CUDA case MAC_ADB_EGRET: case MAC_ADB_CUDA: - now = cuda_read_time(); + now = cuda_get_time(); break; #endif #ifdef CONFIG_ADB_PMU case MAC_ADB_PB2: - now = pmu_read_time(); + now = pmu_get_time(); break; #endif default: @@ -665,24 +613,21 @@ int mac_hwclk(int op, struct rtc_time *t) __func__, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min, t->tm_sec); - now = mktime64(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, - t->tm_hour, t->tm_min, t->tm_sec); - switch (macintosh_config->adb_type) { case MAC_ADB_IOP: case MAC_ADB_II: case MAC_ADB_PB1: - via_write_time(now); + via_set_rtc_time(t); break; #ifdef CONFIG_ADB_CUDA case MAC_ADB_EGRET: case MAC_ADB_CUDA: - cuda_write_time(now); + cuda_set_rtc_time(t); break; #endif #ifdef CONFIG_ADB_PMU case MAC_ADB_PB2: - pmu_write_time(now); + pmu_set_rtc_time(t); break; #endif default: diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index 4f3ab5707265..0823d291fbeb 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -65,9 +65,7 @@ boot := arch/microblaze/boot # Are we making a simpleImage.<boardname> target? 
If so, crack out the boardname DTB:=$(subst simpleImage.,,$(filter simpleImage.%, $(MAKECMDGOALS))) -ifneq ($(DTB),) - core-y += $(boot)/dts/ -endif +core-y += $(boot)/dts/ # defines filename extension depending memory management type ifeq ($(CONFIG_MMU),) diff --git a/arch/microblaze/boot/dts/Makefile b/arch/microblaze/boot/dts/Makefile index 1f77913d404d..c7324e74f9ef 100644 --- a/arch/microblaze/boot/dts/Makefile +++ b/arch/microblaze/boot/dts/Makefile @@ -1,6 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 # +dtb-y := system.dtb + +ifneq ($(DTB),) obj-y += linked_dtb.o # Ensure system.dtb exists @@ -11,6 +14,7 @@ ifneq ($(DTB),system) $(obj)/system.dtb: $(obj)/$(DTB).dtb $(call if_changed,cp) endif +endif quiet_cmd_cp = CP $< $@$2 cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index a62d09420a47..f42c40f5001b 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -15,6 +15,7 @@ /* #define __ARCH_WANT_OLD_READDIR */ /* #define __ARCH_WANT_OLD_STAT */ +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME @@ -26,7 +27,6 @@ #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE /* #define __ARCH_WANT_SYS_OLD_GETRLIMIT */ #define __ARCH_WANT_SYS_OLDUMOUNT diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c index 96b3f26d16be..ef2f49471a2a 100644 --- a/arch/microblaze/kernel/cpu/cpuinfo.c +++ b/arch/microblaze/kernel/cpu/cpuinfo.c @@ -89,9 +89,9 @@ static struct device_node *cpu; void __init setup_cpuinfo(void) { - cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu"); + cpu = of_get_cpu_node(0, NULL); if (!cpu) - pr_err("You don't have cpu!!!\n"); + pr_err("You don't have cpu or are missing cpu reg property!!!\n"); pr_info("%s: initialising\n", __func__); @@ -117,6 +117,8 @@ void __init setup_cpuinfo(void) if (cpuinfo.mmu_privins) pr_warn("%s: Stream instructions enabled" " - USERSPACE CAN LOCK THIS KERNEL!\n", __func__); + + of_node_put(cpu); } void __init setup_cpuinfo_clk(void) diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index 289d0e7f3e3a..e1f3e8741292 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -117,8 +117,6 @@ SECTIONS { CON_INITCALL } - SECURITY_INIT - __init_end_before_initramfs = .; .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 77c022e56e6e..53e75ddbba1c 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -21,6 +21,7 @@ config MIPS select GENERIC_CLOCKEVENTS select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE + select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW select GENERIC_LIB_ASHLDI3 @@ -28,7 +29,6 @@ config MIPS select GENERIC_LIB_CMPDI2 select GENERIC_LIB_LSHRDI3 select GENERIC_LIB_UCMPDI2 - select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL @@ -78,6 +78,7 @@ config MIPS select RTC_LIB if !MACH_LOONGSON64 select SYSCTL_EXCEPTION_TRACE select VIRT_TO_BUS + select NO_BOOTMEM menu "Machine selection" @@ -132,6 +133,7 @@ config MIPS_GENERIC select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN select USE_OF + select UHI_BOOT 
help Select this to build a kernel which aims to support multiple boards, generally using a flattened device tree passed from the bootloader @@ -1149,6 +1151,7 @@ config NO_IOPORT_MAP config GENERIC_CSUM bool + default y if !CPU_HAS_LOAD_STORE_LR config GENERIC_ISA_DMA bool @@ -1367,6 +1370,7 @@ config CPU_LOONGSON3 select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_HUGEPAGES + select CPU_HAS_LOAD_STORE_LR select WEAK_ORDERING select WEAK_REORDERING_BEYOND_LLSC select MIPS_PGD_C0_CONTEXT @@ -1443,6 +1447,7 @@ config CPU_MIPS32_R1 bool "MIPS32 Release 1" depends on SYS_HAS_CPU_MIPS32_R1 select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM help @@ -1460,6 +1465,7 @@ config CPU_MIPS32_R2 bool "MIPS32 Release 2" depends on SYS_HAS_CPU_MIPS32_R2 select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA @@ -1478,7 +1484,6 @@ config CPU_MIPS32_R6 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA - select GENERIC_CSUM select HAVE_KVM select MIPS_O32_FP64_SUPPORT help @@ -1491,6 +1496,7 @@ config CPU_MIPS64_R1 bool "MIPS64 Release 1" depends on SYS_HAS_CPU_MIPS64_R1 select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM @@ -1510,6 +1516,7 @@ config CPU_MIPS64_R2 bool "MIPS64 Release 2" depends on SYS_HAS_CPU_MIPS64_R2 select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM @@ -1531,7 +1538,6 @@ config CPU_MIPS64_R6 select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA - select GENERIC_CSUM select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32 select HAVE_KVM help @@ -1544,6 +1550,7 @@ config CPU_R3000 bool "R3000" depends on SYS_HAS_CPU_R3000 select CPU_HAS_WB + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM help @@ -1558,12 +1565,14 @@ config CPU_TX39XX bool "R39XX" depends on SYS_HAS_CPU_TX39XX select CPU_SUPPORTS_32BIT_KERNEL + select CPU_HAS_LOAD_STORE_LR config CPU_VR41XX bool "R41xx" depends on SYS_HAS_CPU_VR41XX select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL + select CPU_HAS_LOAD_STORE_LR help The options selects support for the NEC VR4100 series of processors. Only choose this option if you have one of these processors as a @@ -1575,6 +1584,7 @@ config CPU_R4300 depends on SYS_HAS_CPU_R4300 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL + select CPU_HAS_LOAD_STORE_LR help MIPS Technologies R4300-series processors. @@ -1584,6 +1594,7 @@ config CPU_R4X00 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HUGEPAGES + select CPU_HAS_LOAD_STORE_LR help MIPS Technologies R4000-series processors other than 4300, including the R4000, R4400, R4600, and 4700. @@ -1592,6 +1603,7 @@ config CPU_TX49XX bool "R49XX" depends on SYS_HAS_CPU_TX49XX select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HUGEPAGES @@ -1602,6 +1614,7 @@ config CPU_R5000 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HUGEPAGES + select CPU_HAS_LOAD_STORE_LR help MIPS Technologies R5000-series processors other than the Nevada. 
@@ -1611,6 +1624,7 @@ config CPU_R5432 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HUGEPAGES + select CPU_HAS_LOAD_STORE_LR config CPU_R5500 bool "R5500" @@ -1618,6 +1632,7 @@ config CPU_R5500 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HUGEPAGES + select CPU_HAS_LOAD_STORE_LR help NEC VR5500 and VR5500A series processors implement 64-bit MIPS IV instruction set. @@ -1628,6 +1643,7 @@ config CPU_NEVADA select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HUGEPAGES + select CPU_HAS_LOAD_STORE_LR help QED / PMC-Sierra RM52xx-series ("Nevada") processors. @@ -1635,6 +1651,7 @@ config CPU_R8000 bool "R8000" depends on SYS_HAS_CPU_R8000 select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_64BIT_KERNEL help MIPS Technologies R8000 processors. Note these processors are @@ -1644,6 +1661,7 @@ config CPU_R10000 bool "R10000" depends on SYS_HAS_CPU_R10000 select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM @@ -1655,6 +1673,7 @@ config CPU_RM7000 bool "RM7000" depends on SYS_HAS_CPU_RM7000 select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM @@ -1663,6 +1682,7 @@ config CPU_RM7000 config CPU_SB1 bool "SB1" depends on SYS_HAS_CPU_SB1 + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM @@ -1673,6 +1693,7 @@ config CPU_CAVIUM_OCTEON bool "Cavium Octeon processor" depends on SYS_HAS_CPU_CAVIUM_OCTEON select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_64BIT_KERNEL select WEAK_ORDERING select CPU_SUPPORTS_HIGHMEM @@ -1702,6 +1723,7 @@ config CPU_BMIPS select WEAK_ORDERING select CPU_SUPPORTS_HIGHMEM select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_CPUFREQ select MIPS_EXTERNAL_TIMER help @@ -1710,6 +1732,7 @@ config CPU_BMIPS config CPU_XLR bool "Netlogic XLR SoC" depends on SYS_HAS_CPU_XLR + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM @@ -1728,6 +1751,7 @@ config CPU_XLP select WEAK_ORDERING select WEAK_REORDERING_BEYOND_LLSC select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_MIPSR2 select CPU_SUPPORTS_HUGEPAGES select MIPS_ASID_BITS_VARIABLE @@ -1833,12 +1857,14 @@ config CPU_LOONGSON2 select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_HUGEPAGES select ARCH_HAS_PHYS_TO_DMA + select CPU_HAS_LOAD_STORE_LR config CPU_LOONGSON1 bool select CPU_MIPS32 select CPU_MIPSR1 select CPU_HAS_PREFETCH + select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_CPUFREQ @@ -2452,6 +2478,13 @@ config XKS01 config CPU_HAS_RIXI bool +config CPU_HAS_LOAD_STORE_LR + bool + help + CPU has support for unaligned load and store instructions: + LWL, LWR, SWL, SWR (Load/store word left/right). + LDL, LDR, SDL, SDR (Load/store doubleword left/right, for 64bit systems). 
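As a rough illustration of the help text above (this aside is not part of the patch): on a configuration that cannot select CPU_HAS_LOAD_STORE_LR, an unaligned 32-bit load has to be assembled from individual byte accesses, which is broadly what the generic fallback paths do. The helper below is a hypothetical little-endian C sketch, not kernel code; the name load32_unaligned is made up for the example.

static inline unsigned int load32_unaligned(const unsigned char *p)
{
	/* Build the word a byte at a time so no alignment is assumed. */
	return (unsigned int)p[0] |
	       ((unsigned int)p[1] << 8) |
	       ((unsigned int)p[2] << 16) |
	       ((unsigned int)p[3] << 24);
}

Cores that do provide LWL/LWR (or LDL/LDR for doublewords) can perform the same access in two instructions, which is why GENERIC_CSUM above now defaults to y when a CPU does not select this option.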
+ # # Vectored interrupt mode is an R2 feature # @@ -2899,6 +2932,9 @@ config USE_OF select OF_EARLY_FLATTREE select IRQ_DOMAIN +config UHI_BOOT + bool + config BUILTIN_DTB bool diff --git a/arch/mips/Makefile b/arch/mips/Makefile index d74b3742fa5d..15a84cfd0719 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -13,6 +13,7 @@ # archscripts: scripts_basic + $(Q)$(MAKE) $(build)=arch/mips/tools elf-entry $(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs KBUILD_DEFCONFIG := 32r2el_defconfig @@ -230,6 +231,8 @@ toolchain-xpa := $(call cc-option-yn,$(xpa-cflags-y) -mxpa) cflags-$(toolchain-xpa) += -DTOOLCHAIN_SUPPORTS_XPA toolchain-crc := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mcrc) cflags-$(toolchain-crc) += -DTOOLCHAIN_SUPPORTS_CRC +toolchain-dsp := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mdsp) +cflags-$(toolchain-dsp) += -DTOOLCHAIN_SUPPORTS_DSP # # Firmware support @@ -257,13 +260,7 @@ ifdef CONFIG_PHYSICAL_START load-y = $(CONFIG_PHYSICAL_START) endif -# Sign-extend the entry point to 64 bits if retrieved as a 32-bit number. -entry-y = $(shell $(OBJDUMP) -f vmlinux 2>/dev/null \ - | sed -n '/^start address / { \ - s/^.* //; \ - s/0x\([0-7].......\)$$/0x00000000\1/; \ - s/0x\(........\)$$/0xffffffff\1/; p }') - +entry-y = $(shell $(objtree)/arch/mips/tools/elf-entry vmlinux) cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic drivers-$(CONFIG_PCI) += arch/mips/pci/ @@ -407,18 +404,7 @@ endif CLEAN_FILES += vmlinux.32 vmlinux.64 # device-trees -core-$(CONFIG_BUILTIN_DTB) += arch/mips/boot/dts/ - -%.dtb %.dtb.S %.dtb.o: | scripts - $(Q)$(MAKE) $(build)=arch/mips/boot/dts arch/mips/boot/dts/$@ - -PHONY += dtbs -dtbs: scripts - $(Q)$(MAKE) $(build)=arch/mips/boot/dts - -PHONY += dtbs_install -dtbs_install: - $(Q)$(MAKE) $(dtbinst)=arch/mips/boot/dts +core-y += arch/mips/boot/dts/ archprepare: ifdef CONFIG_MIPS32_N32 @@ -461,8 +447,6 @@ define archhelp echo ' uImage.lzma - U-Boot image (lzma)' echo ' uImage.lzo - U-Boot image (lzo)' echo ' uzImage.bin - U-Boot image (self-extracting)' - echo ' dtbs - Device-tree blobs for enabled boards' - echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)' echo echo ' These will be default as appropriate for a configured platform.' 
echo diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c index 1a8a07e7a563..46eddbec8d9f 100644 --- a/arch/mips/bcm47xx/workarounds.c +++ b/arch/mips/bcm47xx/workarounds.c @@ -5,9 +5,8 @@ #include <bcm47xx_board.h> #include <bcm47xx.h> -static void __init bcm47xx_workarounds_netgear_wnr3500l(void) +static void __init bcm47xx_workarounds_enable_usb_power(int usb_power) { - const int usb_power = 12; int err; err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power"); @@ -23,7 +22,10 @@ void __init bcm47xx_workarounds(void) switch (board) { case BCM47XX_BOARD_NETGEAR_WNR3500L: - bcm47xx_workarounds_netgear_wnr3500l(); + bcm47xx_workarounds_enable_usb_power(12); + break; + case BCM47XX_BOARD_NETGEAR_WNDR3400_V3: + bcm47xx_workarounds_enable_usb_power(21); break; default: /* No workaround(s) needed */ diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c index 231fc5ce375e..6329c5f780d6 100644 --- a/arch/mips/bmips/setup.c +++ b/arch/mips/bmips/setup.c @@ -153,8 +153,6 @@ void __init plat_time_init(void) mips_hpt_frequency = freq; } -extern const char __appended_dtb; - void __init plat_mem_setup(void) { void *dtb; @@ -164,15 +162,10 @@ void __init plat_mem_setup(void) ioport_resource.start = 0; ioport_resource.end = ~0; -#ifdef CONFIG_MIPS_ELF_APPENDED_DTB - if (!fdt_check_header(&__appended_dtb)) - dtb = (void *)&__appended_dtb; - else -#endif /* intended to somewhat resemble ARM; see Documentation/arm/Booting */ if (fw_arg0 == 0 && fw_arg1 == 0xffffffff) dtb = phys_to_virt(fw_arg2); - else if (fw_passed_dtb) /* UHI interface */ + else if (fw_passed_dtb) /* UHI interface or appended dtb */ dtb = (void *)fw_passed_dtb; else if (__dtb_start != __dtb_end) dtb = (void *)__dtb_start; diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi index 26c6b561d6f7..6fb16fd24035 100644 --- a/arch/mips/boot/dts/ingenic/jz4740.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi @@ -154,6 +154,21 @@ clock-names = "baud", "module"; }; + dmac: dma-controller@13020000 { + compatible = "ingenic,jz4740-dma"; + reg = <0x13020000 0xbc + 0x13020300 0x14>; + #dma-cells = <2>; + + interrupt-parent = <&intc>; + interrupts = <29>; + + clocks = <&cgu JZ4740_CLK_DMA>; + + /* Disable dmac until we have something that uses it */ + status = "disabled"; + }; + uhc: uhc@13030000 { compatible = "ingenic,jz4740-ohci", "generic-ohci"; reg = <0x13030000 0x1000>; diff --git a/arch/mips/boot/dts/ingenic/jz4770.dtsi b/arch/mips/boot/dts/ingenic/jz4770.dtsi index 7c2804f3f5f1..49ede6c14ff3 100644 --- a/arch/mips/boot/dts/ingenic/jz4770.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4770.dtsi @@ -196,6 +196,36 @@ status = "disabled"; }; + dmac0: dma-controller@13420000 { + compatible = "ingenic,jz4770-dma"; + reg = <0x13420000 0xC0 + 0x13420300 0x20>; + + #dma-cells = <1>; + + clocks = <&cgu JZ4770_CLK_DMA>; + interrupt-parent = <&intc>; + interrupts = <24>; + + /* Disable dmac0 until we have something that uses it */ + status = "disabled"; + }; + + dmac1: dma-controller@13420100 { + compatible = "ingenic,jz4770-dma"; + reg = <0x13420100 0xC0 + 0x13420400 0x20>; + + #dma-cells = <1>; + + clocks = <&cgu JZ4770_CLK_DMA>; + interrupt-parent = <&intc>; + interrupts = <23>; + + /* Disable dmac1 until we have something that uses it */ + status = "disabled"; + }; + uhc: uhc@13430000 { compatible = "generic-ohci"; reg = <0x13430000 0x1000>; diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi index ce93d57f1b4d..b03cdec56de9 100644 
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi @@ -266,7 +266,8 @@ dma: dma@13420000 { compatible = "ingenic,jz4780-dma"; - reg = <0x13420000 0x10000>; + reg = <0x13420000 0x400 + 0x13421000 0x40>; #dma-cells = <2>; interrupt-parent = <&intc>; diff --git a/arch/mips/boot/dts/lantiq/danube.dtsi b/arch/mips/boot/dts/lantiq/danube.dtsi index 2dd950181f8a..510be63c8bdf 100644 --- a/arch/mips/boot/dts/lantiq/danube.dtsi +++ b/arch/mips/boot/dts/lantiq/danube.dtsi @@ -10,12 +10,12 @@ }; }; - biu@1F800000 { + biu@1f800000 { #address-cells = <1>; #size-cells = <1>; compatible = "lantiq,biu", "simple-bus"; - reg = <0x1F800000 0x800000>; - ranges = <0x0 0x1F800000 0x7FFFFF>; + reg = <0x1f800000 0x800000>; + ranges = <0x0 0x1f800000 0x7fffff>; icu0: icu@80200 { #interrupt-cells = <1>; @@ -24,18 +24,18 @@ reg = <0x80200 0x120>; }; - watchdog@803F0 { + watchdog@803f0 { compatible = "lantiq,wdt"; - reg = <0x803F0 0x10>; + reg = <0x803f0 0x10>; }; }; - sram@1F000000 { + sram@1f000000 { #address-cells = <1>; #size-cells = <1>; compatible = "lantiq,sram"; - reg = <0x1F000000 0x800000>; - ranges = <0x0 0x1F000000 0x7FFFFF>; + reg = <0x1f000000 0x800000>; + ranges = <0x0 0x1f000000 0x7fffff>; eiu0: eiu@101000 { #interrupt-cells = <1>; @@ -66,41 +66,41 @@ #address-cells = <1>; #size-cells = <1>; compatible = "lantiq,fpi", "simple-bus"; - ranges = <0x0 0x10000000 0xEEFFFFF>; - reg = <0x10000000 0xEF00000>; + ranges = <0x0 0x10000000 0xeefffff>; + reg = <0x10000000 0xef00000>; - gptu@E100A00 { + gptu@e100a00 { compatible = "lantiq,gptu-xway"; - reg = <0xE100A00 0x100>; + reg = <0xe100a00 0x100>; }; - serial@E100C00 { + serial@e100c00 { compatible = "lantiq,asc"; - reg = <0xE100C00 0x400>; + reg = <0xe100c00 0x400>; interrupt-parent = <&icu0>; interrupts = <112 113 114>; }; - dma0: dma@E104100 { + dma0: dma@e104100 { compatible = "lantiq,dma-xway"; - reg = <0xE104100 0x800>; + reg = <0xe104100 0x800>; }; - ebu0: ebu@E105300 { + ebu0: ebu@e105300 { compatible = "lantiq,ebu-xway"; - reg = <0xE105300 0x100>; + reg = <0xe105300 0x100>; }; - pci0: pci@E105400 { + pci0: pci@e105400 { #address-cells = <3>; #size-cells = <2>; #interrupt-cells = <1>; compatible = "lantiq,pci-xway"; bus-range = <0x0 0x0>; ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000 /* pci memory */ - 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */ + 0x1000000 0 0x00000000 0xae00000 0 0x200000>; /* io space */ reg = <0x7000000 0x8000 /* config space */ - 0xE105400 0x400>; /* pci bridge */ + 0xe105400 0x400>; /* pci bridge */ }; }; }; diff --git a/arch/mips/boot/dts/lantiq/easy50712.dts b/arch/mips/boot/dts/lantiq/easy50712.dts index c37a33962f28..1ce20b7d05cb 100644 --- a/arch/mips/boot/dts/lantiq/easy50712.dts +++ b/arch/mips/boot/dts/lantiq/easy50712.dts @@ -52,14 +52,14 @@ }; }; - gpio: pinmux@E100B10 { + gpio: pinmux@e100b10 { compatible = "lantiq,danube-pinctrl"; pinctrl-names = "default"; pinctrl-0 = <&state_default>; #gpio-cells = <2>; gpio-controller; - reg = <0xE100B10 0xA0>; + reg = <0xe100b10 0xa0>; state_default: pinmux { stp { @@ -82,26 +82,26 @@ }; }; - etop@E180000 { + etop@e180000 { compatible = "lantiq,etop-xway"; - reg = <0xE180000 0x40000>; + reg = <0xe180000 0x40000>; interrupt-parent = <&icu0>; interrupts = <73 78>; phy-mode = "rmii"; mac-address = [ 00 11 22 33 44 55 ]; }; - stp0: stp@E100BB0 { + stp0: stp@e100bb0 { #gpio-cells = <2>; compatible = "lantiq,gpio-stp-xway"; gpio-controller; - reg = <0xE100BB0 0x40>; + reg = <0xe100bb0 0x40>; lantiq,shadow = <0xfff>; 
lantiq,groups = <0x3>; }; - pci@E105400 { + pci@e105400 { lantiq,bus-clock = <33333333>; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < diff --git a/arch/mips/boot/dts/mscc/Makefile b/arch/mips/boot/dts/mscc/Makefile index 9a9bb7ea0503..ec6f5b2bf093 100644 --- a/arch/mips/boot/dts/mscc/Makefile +++ b/arch/mips/boot/dts/mscc/Makefile @@ -1,3 +1,3 @@ -dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb +dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb ocelot_pcb120.dtb obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi index f7eb612b46ba..90c60d42f571 100644 --- a/arch/mips/boot/dts/mscc/ocelot.dtsi +++ b/arch/mips/boot/dts/mscc/ocelot.dtsi @@ -78,6 +78,19 @@ status = "disabled"; }; + i2c: i2c@100400 { + compatible = "mscc,ocelot-i2c", "snps,designware-i2c"; + pinctrl-0 = <&i2c_pins>; + pinctrl-names = "default"; + reg = <0x100400 0x100>, <0x198 0x8>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <8>; + clocks = <&ahb_clk>; + + status = "disabled"; + }; + uart2: serial@100800 { pinctrl-0 = <&uart2_pins>; pinctrl-names = "default"; @@ -107,7 +120,6 @@ reg = <0x1010000 0x10000>, <0x1030000 0x10000>, <0x1080000 0x100>, - <0x10d0000 0x10000>, <0x11e0000 0x100>, <0x11f0000 0x100>, <0x1200000 0x100>, @@ -121,10 +133,10 @@ <0x1280000 0x100>, <0x1800000 0x80000>, <0x1880000 0x10000>; - reg-names = "sys", "rew", "qs", "hsio", "port0", - "port1", "port2", "port3", "port4", "port5", - "port6", "port7", "port8", "port9", "port10", - "qsys", "ana"; + reg-names = "sys", "rew", "qs", "port0", "port1", + "port2", "port3", "port4", "port5", "port6", + "port7", "port8", "port9", "port10", "qsys", + "ana"; interrupts = <21 22>; interrupt-names = "xtr", "inj"; @@ -183,6 +195,11 @@ interrupts = <13>; #interrupt-cells = <2>; + i2c_pins: i2c-pins { + pins = "GPIO_16", "GPIO_17"; + function = "twi"; + }; + uart_pins: uart-pins { pins = "GPIO_6", "GPIO_7"; function = "uart"; @@ -197,6 +214,7 @@ pins = "GPIO_14", "GPIO_15"; function = "miim1"; }; + }; mdio0: mdio@107009c { @@ -231,5 +249,15 @@ pinctrl-0 = <&miim1>; status = "disabled"; }; + + hsio: syscon@10d0000 { + compatible = "mscc,ocelot-hsio", "syscon", "simple-mfd"; + reg = <0x10d0000 0x10000>; + + serdes: serdes { + compatible = "mscc,vsc7514-serdes"; + #phy-cells = <2>; + }; + }; }; }; diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb120.dts b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts new file mode 100644 index 000000000000..33991fd209f5 --- /dev/null +++ b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2017 Microsemi Corporation */ + +/dts-v1/; + +#include <dt-bindings/interrupt-controller/irq.h> +#include <dt-bindings/phy/phy-ocelot-serdes.h> +#include "ocelot.dtsi" + +/ { + compatible = "mscc,ocelot-pcb120", "mscc,ocelot"; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + memory@0 { + device_type = "memory"; + reg = <0x0 0x0e000000>; + }; +}; + +&gpio { + phy_int_pins: phy_int_pins { + pins = "GPIO_4"; + function = "gpio"; + }; +}; + +&mdio0 { + status = "okay"; +}; + +&mdio1 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&miim1>, <&phy_int_pins>; + + phy7: ethernet-phy@0 { + reg = <0>; + interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&gpio>; + }; + phy6: ethernet-phy@1 { + reg = <1>; + interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&gpio>; + }; + phy5: ethernet-phy@2 { + reg = <2>; + interrupts = <4 
IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&gpio>; + }; + phy4: ethernet-phy@3 { + reg = <3>; + interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; + interrupt-parent = <&gpio>; + }; +}; + +&port0 { + phy-handle = <&phy0>; +}; + +&port1 { + phy-handle = <&phy1>; +}; + +&port2 { + phy-handle = <&phy2>; +}; + +&port3 { + phy-handle = <&phy3>; +}; + +&port4 { + phy-handle = <&phy7>; + phy-mode = "sgmii"; + phys = <&serdes 4 SERDES1G(2)>; +}; + +&port5 { + phy-handle = <&phy4>; + phy-mode = "sgmii"; + phys = <&serdes 5 SERDES1G(5)>; +}; + +&port6 { + phy-handle = <&phy6>; + phy-mode = "sgmii"; + phys = <&serdes 6 SERDES1G(3)>; +}; + +&port9 { + phy-handle = <&phy5>; + phy-mode = "sgmii"; + phys = <&serdes 9 SERDES1G(4)>; +}; + +&uart0 { + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts index 2266027759f9..ef852f382da8 100644 --- a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts +++ b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts @@ -36,6 +36,12 @@ }; }; +&i2c { + clock-frequency = <100000>; + i2c-sda-hold-time-ns = <300>; + status = "okay"; +}; + &mdio0 { status = "okay"; }; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 8272d8c648ca..cc1d8525e651 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -1180,8 +1180,8 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d, type = IRQ_TYPE_LEVEL_LOW; break; default: - pr_err("Error: (%s) Invalid irq trigger specification: %x\n", - node->name, + pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n", + node, trigger); type = IRQ_TYPE_LEVEL_LOW; break; @@ -2271,8 +2271,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, parent_irq = irq_of_parse_and_map(ciu_node, 0); if (!parent_irq) { - pr_err("ERROR: Couldn't acquire parent_irq for %s\n", - ciu_node->name); + pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n", + ciu_node); return -EINVAL; } @@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, addr = of_get_address(ciu_node, 0, NULL, NULL); if (!addr) { - pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name); + pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node); return -EINVAL; } host_data->raw_reg = (u64)phys_to_virt( @@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, addr = of_get_address(ciu_node, 1, NULL, NULL); if (!addr) { - pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name); + pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node); return -EINVAL; } host_data->en_reg = (u64)phys_to_virt( @@ -2299,8 +2299,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); if (r) { - pr_err("ERROR: Couldn't read cavium,max-bits from %s\n", - ciu_node->name); + pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n", + ciu_node); return r; } host_data->max_bits = val; diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index c2426232db06..dfb95cffef3e 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -1161,15 +1161,12 @@ void __init device_tree_init(void) bool do_prune; bool fill_mac; -#ifdef CONFIG_MIPS_ELF_APPENDED_DTB - if (!fdt_check_header(&__appended_dtb)) { - fdt = &__appended_dtb; + if (fw_passed_dtb) { + fdt = (void *)fw_passed_dtb; do_prune = false; fill_mac = true; pr_info("Using appended 
Device Tree.\n"); - } else -#endif - if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) { + } else if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) { fdt = phys_to_virt(octeon_bootinfo->fdt_addr); if (fdt_check_header(fdt)) panic("Corrupt Device Tree passed to kernel."); diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 75e7c8625659..39f2a2ec1286 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -15,6 +15,7 @@ #include <linux/sched/task_stack.h> #include <linux/init.h> #include <linux/export.h> +#include <linux/kexec.h> #include <asm/mmu_context.h> #include <asm/time.h> @@ -424,6 +425,9 @@ const struct plat_smp_ops octeon_smp_ops = { .cpu_disable = octeon_cpu_disable, .cpu_die = octeon_cpu_die, #endif +#ifdef CONFIG_KEXEC + .kexec_nonboot_cpu = kexec_nonboot_cpu_jump, +#endif }; static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id) @@ -501,6 +505,9 @@ static const struct plat_smp_ops octeon_78xx_smp_ops = { .cpu_disable = octeon_cpu_disable, .cpu_die = octeon_cpu_die, #endif +#ifdef CONFIG_KEXEC + .kexec_nonboot_cpu = kexec_nonboot_cpu_jump, +#endif }; void __init octeon_setup_smp(void) diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config index aa815761d85e..f607888d2483 100644 --- a/arch/mips/configs/generic/board-ocelot.config +++ b/arch/mips/configs/generic/board-ocelot.config @@ -18,17 +18,25 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_GPIO_SYSFS=y +CONFIG_NETDEVICES=y +CONFIG_MSCC_OCELOT_SWITCH=y +CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y +CONFIG_MDIO_MSCC_MIIM=y +CONFIG_MICROSEMI_PHY=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y +CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_SPI=y CONFIG_SPI_BITBANG=y CONFIG_SPI_DESIGNWARE=y +CONFIG_SPI_DW_MMIO=y CONFIG_SPI_SPIDEV=y +CONFIG_GPIO_SYSFS=y + CONFIG_POWER_RESET=y CONFIG_POWER_RESET_OCELOT_RESET=y diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig index 08e33c6b2539..fd6019802657 100644 --- a/arch/mips/generic/Kconfig +++ b/arch/mips/generic/Kconfig @@ -65,11 +65,11 @@ config FIT_IMAGE_FDT_XILFPGA Enable this to include the FDT for the MIPSfpga platform from Imagination Technologies in the FIT kernel image. -config FIT_IMAGE_FDT_OCELOT_PCB123 - bool "Include FDT for Microsemi Ocelot PCB123" +config FIT_IMAGE_FDT_OCELOT + bool "Include FDT for Microsemi Ocelot development platforms" select MSCC_OCELOT help - Enable this to include the FDT for the Ocelot PCB123 platform + Enable this to include the FDT for the Ocelot development platforms from Microsemi in the FIT kernel image. This requires u-boot on the platform. 
diff --git a/arch/mips/generic/Makefile b/arch/mips/generic/Makefile index d03a36f869a4..181aa1335419 100644 --- a/arch/mips/generic/Makefile +++ b/arch/mips/generic/Makefile @@ -15,5 +15,4 @@ obj-y += proc.o obj-$(CONFIG_YAMON_DT_SHIM) += yamon-dt.o obj-$(CONFIG_LEGACY_BOARD_SEAD3) += board-sead3.o obj-$(CONFIG_LEGACY_BOARD_OCELOT) += board-ocelot.o -obj-$(CONFIG_KEXEC) += kexec.o obj-$(CONFIG_VIRT_BOARD_RANCHU) += board-ranchu.o diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform index 879cb80396c8..eaa19d189324 100644 --- a/arch/mips/generic/Platform +++ b/arch/mips/generic/Platform @@ -16,5 +16,5 @@ all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb its-y := vmlinux.its.S its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S -its-$(CONFIG_FIT_IMAGE_FDT_OCELOT_PCB123) += board-ocelot_pcb123.its.S +its-$(CONFIG_FIT_IMAGE_FDT_OCELOT) += board-ocelot.its.S its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S diff --git a/arch/mips/generic/board-ocelot_pcb123.its.S b/arch/mips/generic/board-ocelot.its.S index 5a7d5e1c878a..3da23988149a 100644 --- a/arch/mips/generic/board-ocelot_pcb123.its.S +++ b/arch/mips/generic/board-ocelot.its.S @@ -11,6 +11,17 @@ algo = "sha1"; }; }; + + fdt@ocelot_pcb120 { + description = "MSCC Ocelot PCB120 Device Tree"; + data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb"); + type = "flat_dt"; + arch = "mips"; + compression = "none"; + hash@0 { + algo = "sha1"; + }; + }; }; configurations { @@ -19,5 +30,11 @@ kernel = "kernel@0"; fdt = "fdt@ocelot_pcb123"; }; + + conf@ocelot_pcb120 { + description = "Ocelot Linux kernel"; + kernel = "kernel@0"; + fdt = "fdt@ocelot_pcb120"; + }; }; }; diff --git a/arch/mips/generic/kexec.c b/arch/mips/generic/kexec.c deleted file mode 100644 index 1ca409f58929..000000000000 --- a/arch/mips/generic/kexec.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (C) 2016 Imagination Technologies - * Author: Marcin Nowakowski <marcin.nowakowski@mips.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#include <linux/kexec.h> -#include <linux/libfdt.h> -#include <linux/uaccess.h> - -static int generic_kexec_prepare(struct kimage *image) -{ - int i; - - for (i = 0; i < image->nr_segments; i++) { - struct fdt_header fdt; - - if (image->segment[i].memsz <= sizeof(fdt)) - continue; - - if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt))) - continue; - - if (fdt_check_header(&fdt)) - continue; - - kexec_args[0] = -2; - kexec_args[1] = (unsigned long) - phys_to_virt((unsigned long)image->segment[i].mem); - break; - } - return 0; -} - -static int __init register_generic_kexec(void) -{ - _machine_kexec_prepare = generic_kexec_prepare; - return 0; -} -arch_initcall(register_generic_kexec); diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h index 1e38f0e1ea3e..d80be38c4144 100644 --- a/arch/mips/include/asm/asm-eva.h +++ b/arch/mips/include/asm/asm-eva.h @@ -15,6 +15,7 @@ /* Kernel variants */ #define kernel_cache(op, base) "cache " op ", " base "\n" +#define kernel_pref(hint, base) "pref " hint ", " base "\n" #define kernel_ll(reg, addr) "ll " reg ", " addr "\n" #define kernel_sc(reg, addr) "sc " reg ", " addr "\n" #define kernel_lw(reg, addr) "lw " reg ", " addr "\n" @@ -51,6 +52,7 @@ " .set pop\n" #define user_cache(op, base) __BUILD_EVA_INSN("cachee", op, base) +#define user_pref(hint, base) __BUILD_EVA_INSN("prefe", hint, base) #define user_ll(reg, addr) __BUILD_EVA_INSN("lle", reg, addr) #define user_sc(reg, addr) __BUILD_EVA_INSN("sce", reg, addr) #define user_lw(reg, addr) __BUILD_EVA_INSN("lwe", reg, addr) @@ -72,6 +74,7 @@ #else #define user_cache(op, base) kernel_cache(op, base) +#define user_pref(hint, base) kernel_pref(hint, base) #define user_ll(reg, addr) kernel_ll(reg, addr) #define user_sc(reg, addr) kernel_sc(reg, addr) #define user_lw(reg, addr) kernel_lw(reg, addr) @@ -99,6 +102,7 @@ #else /* __ASSEMBLY__ */ #define kernel_cache(op, base) cache op, base +#define kernel_pref(hint, base) pref hint, base #define kernel_ll(reg, addr) ll reg, addr #define kernel_sc(reg, addr) sc reg, addr #define kernel_lw(reg, addr) lw reg, addr @@ -135,6 +139,7 @@ .set pop; #define user_cache(op, base) __BUILD_EVA_INSN(cachee, op, base) +#define user_pref(hint, base) __BUILD_EVA_INSN(prefe, hint, base) #define user_ll(reg, addr) __BUILD_EVA_INSN(lle, reg, addr) #define user_sc(reg, addr) __BUILD_EVA_INSN(sce, reg, addr) #define user_lw(reg, addr) __BUILD_EVA_INSN(lwe, reg, addr) @@ -155,6 +160,7 @@ #else #define user_cache(op, base) kernel_cache(op, base) +#define user_pref(hint, base) kernel_pref(hint, base) #define user_ll(reg, addr) kernel_ll(reg, addr) #define user_sc(reg, addr) kernel_sc(reg, addr) #define user_lw(reg, addr) kernel_lw(reg, addr) diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 81fae23ce7cd..c23527ba65d0 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -20,32 +20,6 @@ #include <asm/sgidefs.h> #include <asm/asm-eva.h> -#ifndef CAT -#ifdef __STDC__ -#define __CAT(str1, str2) str1##str2 -#else -#define __CAT(str1, str2) str1/**/str2 -#endif -#define CAT(str1, str2) __CAT(str1, str2) -#endif - -/* - * PIC specific declarations - * Not used for the kernel but here seems to be the right place. 
- */ -#ifdef __PIC__ -#define CPRESTORE(register) \ - .cprestore register -#define CPADD(register) \ - .cpadd register -#define CPLOAD(register) \ - .cpload register -#else -#define CPRESTORE(register) -#define CPADD(register) -#define CPLOAD(register) -#endif - /* * LEAF - declare leaf routine */ @@ -130,96 +104,6 @@ symbol = value .popsection; /* - * Build text tables - */ -#define TTABLE(string) \ - .pushsection .text; \ - .word 1f; \ - .popsection \ - .pushsection .data; \ -1: .asciiz string; \ - .popsection - -/* - * MIPS IV pref instruction. - * Use with .set noreorder only! - * - * MIPS IV implementations are free to treat this as a nop. The R5000 - * is one of them. So we should have an option not to use this instruction. - */ -#ifdef CONFIG_CPU_HAS_PREFETCH - -#define PREF(hint,addr) \ - .set push; \ - .set arch=r5000; \ - pref hint, addr; \ - .set pop - -#define PREFE(hint, addr) \ - .set push; \ - .set mips0; \ - .set eva; \ - prefe hint, addr; \ - .set pop - -#define PREFX(hint,addr) \ - .set push; \ - .set arch=r5000; \ - prefx hint, addr; \ - .set pop - -#else /* !CONFIG_CPU_HAS_PREFETCH */ - -#define PREF(hint, addr) -#define PREFE(hint, addr) -#define PREFX(hint, addr) - -#endif /* !CONFIG_CPU_HAS_PREFETCH */ - -/* - * MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs. - */ -#if (_MIPS_ISA == _MIPS_ISA_MIPS1) -#define MOVN(rd, rs, rt) \ - .set push; \ - .set reorder; \ - beqz rt, 9f; \ - move rd, rs; \ - .set pop; \ -9: -#define MOVZ(rd, rs, rt) \ - .set push; \ - .set reorder; \ - bnez rt, 9f; \ - move rd, rs; \ - .set pop; \ -9: -#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */ -#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) -#define MOVN(rd, rs, rt) \ - .set push; \ - .set noreorder; \ - bnezl rt, 9f; \ - move rd, rs; \ - .set pop; \ -9: -#define MOVZ(rd, rs, rt) \ - .set push; \ - .set noreorder; \ - beqzl rt, 9f; \ - move rd, rs; \ - .set pop; \ -9: -#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */ -#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \ - (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64) -#define MOVN(rd, rs, rt) \ - movn rd, rs, rt -#define MOVZ(rd, rs, rt) \ - movz rd, rs, rt -#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */ - -/* * Stack alignment */ #if (_MIPS_SIM == _MIPS_SIM_ABI32) diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 78675f19440f..c99166eadbde 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -9,43 +9,25 @@ #include <asm/page.h> #include <asm/ptrace.h> +#include <asm-generic/compat.h> + #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "mips\0\0\0" -typedef u32 compat_size_t; -typedef s32 compat_ssize_t; -typedef s32 compat_clock_t; -typedef s32 compat_suseconds_t; - -typedef s32 compat_pid_t; typedef s32 __compat_uid_t; typedef s32 __compat_gid_t; typedef __compat_uid_t __compat_uid32_t; typedef __compat_gid_t __compat_gid32_t; typedef u32 compat_mode_t; -typedef u32 compat_ino_t; typedef u32 compat_dev_t; -typedef s32 compat_off_t; -typedef s64 compat_loff_t; typedef u32 compat_nlink_t; typedef s32 compat_ipc_pid_t; -typedef s32 compat_daddr_t; typedef s32 compat_caddr_t; typedef struct { s32 val[2]; } compat_fsid_t; -typedef s32 compat_timer_t; -typedef s32 compat_key_t; - -typedef s16 compat_short_t; -typedef s32 compat_int_t; -typedef s32 compat_long_t; typedef s64 compat_s64; -typedef u16 compat_ushort_t; -typedef u32 compat_uint_t; -typedef u32 compat_ulong_t; 
typedef u64 compat_u64; -typedef u32 compat_uptr_t; struct compat_stat { compat_dev_t st_dev; @@ -59,11 +41,11 @@ struct compat_stat { s32 st_pad2[2]; compat_off_t st_size; s32 st_pad3; - compat_time_t st_atime; + old_time32_t st_atime; s32 st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; s32 st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; s32 st_ctime_nsec; s32 st_blksize; s32 st_blocks; diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index 982bc0685330..425bb6fc3bda 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -10,8 +10,6 @@ #define __ASM_HUGETLB_H #include <asm/page.h> -#include <asm-generic/hugetlb.h> - static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, @@ -20,6 +18,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, return 0; } +#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE static inline int prepare_hugepage_range(struct file *file, unsigned long addr, unsigned long len) @@ -38,21 +37,7 @@ static inline int prepare_hugepage_range(struct file *file, return 0; } -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, - unsigned long end, - unsigned long floor, - unsigned long ceiling) -{ - free_pgd_range(tlb, addr, end, floor, ceiling); -} - -static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - set_pte_at(mm, addr, ptep, pte); -} - +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -64,29 +49,21 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, return pte; } +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma))); } +#define __HAVE_ARCH_HUGE_PTE_NONE static inline int huge_pte_none(pte_t pte) { unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL; return !val || (val == (unsigned long)invalid_pte_table); } -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - ptep_set_wrprotect(mm, addr, ptep); -} - +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, @@ -105,13 +82,10 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, return changed; } -static inline pte_t huge_ptep_get(pte_t *ptep) -{ - return *ptep; -} - static inline void arch_clear_hugepage_flags(struct page *page) { } +#include <asm-generic/hugetlb.h> + #endif /* __ASM_HUGETLB_H */ diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 54c730aed327..266257d56fb6 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -20,6 +20,7 @@ #include <linux/irqflags.h> #include <asm/addrspace.h> +#include <asm/barrier.h> #include <asm/bug.h> #include <asm/byteorder.h> #include <asm/cpu.h> @@ -34,11 +35,6 @@ #include <mangle-port.h> /* - * Slowdown I/O port space accesses for antique hardware. - */ -#undef CONF_SLOWDOWN_IO - -/* * Raw operations are never swapped in software. OTOH values that raw * operations are working on may or may not have been swapped by the bus * hardware. 
An example use would be for flash memory that's used for @@ -50,6 +46,11 @@ # define __raw_ioswabq(a, x) (x) # define ____raw_ioswabq(a, x) (x) +# define __relaxed_ioswabb ioswabb +# define __relaxed_ioswabw ioswabw +# define __relaxed_ioswabl ioswabl +# define __relaxed_ioswabq ioswabq + /* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */ #define IO_SPACE_LIMIT 0xffff @@ -80,31 +81,29 @@ static inline void set_io_port_base(unsigned long base) } /* - * Thanks to James van Artsdalen for a better timing-fix than - * the two short jumps: using outb's to a nonexistent port seems - * to guarantee better timings even on fast machines. - * - * On the other hand, I'd like to be sure of a non-existent port: - * I feel a bit unsafe about using 0x80 (should be safe, though) - * - * Linus - * + * Provide the necessary definitions for generic iomap. We make use of + * mips_io_port_base for iomap(), but we don't reserve any low addresses for + * use with I/O ports. */ -#define __SLOW_DOWN_IO \ - __asm__ __volatile__( \ - "sb\t$0,0x80(%0)" \ - : : "r" (mips_io_port_base)); +#define HAVE_ARCH_PIO_SIZE +#define PIO_OFFSET mips_io_port_base +#define PIO_MASK IO_SPACE_LIMIT +#define PIO_RESERVED 0x0UL -#ifdef CONF_SLOWDOWN_IO -#ifdef REALLY_SLOW_IO -#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; } -#else -#define SLOW_DOWN_IO __SLOW_DOWN_IO -#endif -#else -#define SLOW_DOWN_IO -#endif +/* + * Enforce in-order execution of data I/O. In the MIPS architecture + * these are equivalent to corresponding platform-specific memory + * barriers defined in <asm/barrier.h>. API pinched from PowerPC, + * with sync additionally defined. + */ +#define iobarrier_rw() mb() +#define iobarrier_r() rmb() +#define iobarrier_w() wmb() +#define iobarrier_sync() iob() + +/* Some callers use this older API instead. 
*/ +#define mmiowb() iobarrier_w() /* * virt_to_phys - map virtual addresses to physical @@ -172,11 +171,6 @@ static inline void *isa_bus_to_virt(unsigned long address) extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags); extern void __iounmap(const volatile void __iomem *addr); -#ifndef CONFIG_PCI -struct pci_dev; -static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} -#endif - static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size, unsigned long flags) { @@ -316,13 +310,13 @@ static inline void iounmap(const volatile void __iomem *addr) #undef __IS_KSEG1 } -#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT) +#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3) #define war_io_reorder_wmb() wmb() #else #define war_io_reorder_wmb() barrier() #endif -#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ +#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \ \ static inline void pfx##write##bwlq(type val, \ volatile void __iomem *mem) \ @@ -330,7 +324,10 @@ static inline void pfx##write##bwlq(type val, \ volatile type *__mem; \ type __val; \ \ - war_io_reorder_wmb(); \ + if (barrier) \ + iobarrier_rw(); \ + else \ + war_io_reorder_wmb(); \ \ __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ \ @@ -367,6 +364,9 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ \ __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ \ + if (barrier) \ + iobarrier_rw(); \ + \ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ __val = *__mem; \ else if (cpu_has_64bits) { \ @@ -390,18 +390,22 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ } \ \ /* prevent prefetching of coherent DMA data prematurely */ \ - rmb(); \ + if (!relax) \ + rmb(); \ return pfx##ioswab##bwlq(__mem, __val); \ } -#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \ +#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p) \ \ static inline void pfx##out##bwlq##p(type val, unsigned long port) \ { \ volatile type *__addr; \ type __val; \ \ - war_io_reorder_wmb(); \ + if (barrier) \ + iobarrier_rw(); \ + else \ + war_io_reorder_wmb(); \ \ __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ \ @@ -411,7 +415,6 @@ static inline void pfx##out##bwlq##p(type val, unsigned long port) \ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ \ *__addr = __val; \ - slow; \ } \ \ static inline type pfx##in##bwlq##p(unsigned long port) \ @@ -423,23 +426,27 @@ static inline type pfx##in##bwlq##p(unsigned long port) \ \ BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ \ + if (barrier) \ + iobarrier_rw(); \ + \ __val = *__addr; \ - slow; \ \ /* prevent prefetching of coherent DMA data prematurely */ \ - rmb(); \ + if (!relax) \ + rmb(); \ return pfx##ioswab##bwlq(__addr, __val); \ } -#define __BUILD_MEMORY_PFX(bus, bwlq, type) \ +#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \ \ -__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1) +__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1) #define BUILDIO_MEM(bwlq, type) \ \ -__BUILD_MEMORY_PFX(__raw_, bwlq, type) \ -__BUILD_MEMORY_PFX(, bwlq, type) \ -__BUILD_MEMORY_PFX(__mem_, bwlq, type) \ +__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0) \ +__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1) \ +__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0) \ +__BUILD_MEMORY_PFX(, bwlq, type, 0) BUILDIO_MEM(b, u8) BUILDIO_MEM(w, u16) @@ -447,8 +454,8 @@ BUILDIO_MEM(l, u32) 
BUILDIO_MEM(q, u64) #define __BUILD_IOPORT_PFX(bus, bwlq, type) \ - __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \ - __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO) + __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \ + __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p) #define BUILDIO_IOPORT(bwlq, type) \ __BUILD_IOPORT_PFX(, bwlq, type) \ @@ -463,19 +470,19 @@ BUILDIO_IOPORT(q, u64) #define __BUILDIO(bwlq, type) \ \ -__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0) +__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0) __BUILDIO(q, u64) -#define readb_relaxed readb -#define readw_relaxed readw -#define readl_relaxed readl -#define readq_relaxed readq +#define readb_relaxed __relaxed_readb +#define readw_relaxed __relaxed_readw +#define readl_relaxed __relaxed_readl +#define readq_relaxed __relaxed_readq -#define writeb_relaxed writeb -#define writew_relaxed writew -#define writel_relaxed writel -#define writeq_relaxed writeq +#define writeb_relaxed __relaxed_writeb +#define writew_relaxed __relaxed_writew +#define writel_relaxed __relaxed_writel +#define writeq_relaxed __relaxed_writeq #define readb_be(addr) \ __raw_readb((__force unsigned *)(addr)) @@ -561,14 +568,6 @@ BUILDSTRING(l, u32) BUILDSTRING(q, u64) #endif - -#ifdef CONFIG_CPU_CAVIUM_OCTEON -#define mmiowb() wmb() -#else -/* Depends on MIPS II instruction set */ -#define mmiowb() asm volatile ("sync" ::: "memory") -#endif - static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) { memset((void __force *) addr, val, count); diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h index 493a3cc7c39a..40795ca89961 100644 --- a/arch/mips/include/asm/kexec.h +++ b/arch/mips/include/asm/kexec.h @@ -12,11 +12,11 @@ #include <asm/stacktrace.h> /* Maximum physical address we can use pages from */ -#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000) +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) /* Maximum address we can reach in physical address mode */ -#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000) +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) /* Maximum address we can use for the control code buffer */ -#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000) +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) /* Reserve 3*4096 bytes for board-specific info */ #define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096) @@ -39,11 +39,12 @@ extern unsigned long kexec_args[4]; extern int (*_machine_kexec_prepare)(struct kimage *); extern void (*_machine_kexec_shutdown)(void); extern void (*_machine_crash_shutdown)(struct pt_regs *regs); -extern void default_machine_crash_shutdown(struct pt_regs *regs); +void default_machine_crash_shutdown(struct pt_regs *regs); +void kexec_nonboot_cpu_jump(void); +void kexec_reboot(void); #ifdef CONFIG_SMP extern const unsigned char kexec_smp_wait[]; extern unsigned long secondary_kexec_args[4]; -extern void (*relocated_kexec_smp_wait) (void *); extern atomic_t kexec_ready_to_reboot; extern void (*_crash_smp_send_stop)(void); #endif diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h index 3644b68c0ccc..be9f727a9328 100644 --- a/arch/mips/include/asm/mach-loongson64/irq.h +++ b/arch/mips/include/asm/mach-loongson64/irq.h @@ -10,7 +10,7 @@ #define MIPS_CPU_IRQ_BASE 56 #define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */ -#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */ +#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */ #define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */ #define 
LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h index 312739117bb0..cbac603ced19 100644 --- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h @@ -11,6 +11,8 @@ #ifndef __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H #define __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H +#include <asm/cpu.h> + /* * Override macros used in arch/mips/kernel/head.S. */ @@ -26,12 +28,15 @@ mfc0 t0, CP0_PAGEGRAIN or t0, (0x1 << 29) mtc0 t0, CP0_PAGEGRAIN -#ifdef CONFIG_LOONGSON3_ENHANCEMENT /* Enable STFill Buffer */ + mfc0 t0, CP0_PRID + andi t0, (PRID_IMP_MASK | PRID_REV_MASK) + slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2) + bnez t0, 1f mfc0 t0, CP0_CONFIG6 or t0, 0x100 mtc0 t0, CP0_CONFIG6 -#endif +1: _ehb .set pop #endif @@ -52,12 +57,15 @@ mfc0 t0, CP0_PAGEGRAIN or t0, (0x1 << 29) mtc0 t0, CP0_PAGEGRAIN -#ifdef CONFIG_LOONGSON3_ENHANCEMENT /* Enable STFill Buffer */ + mfc0 t0, CP0_PRID + andi t0, (PRID_IMP_MASK | PRID_REV_MASK) + slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2) + bnez t0, 1f mfc0 t0, CP0_CONFIG6 or t0, 0x100 mtc0 t0, CP0_CONFIG6 -#endif +1: _ehb .set pop #endif diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 01df9ad62fb8..341a02c92985 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -2287,13 +2287,14 @@ do { \ _write_32bit_cp1_register(dest, val, ) #endif -#ifdef HAVE_AS_DSP +#ifdef TOOLCHAIN_SUPPORTS_DSP #define rddsp(mask) \ ({ \ unsigned int __dspctl; \ \ __asm__ __volatile__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " rddsp %0, %x1 \n" \ " .set pop \n" \ @@ -2306,6 +2307,7 @@ do { \ do { \ __asm__ __volatile__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " wrdsp %0, %x1 \n" \ " .set pop \n" \ @@ -2318,6 +2320,7 @@ do { \ long mflo0; \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mflo %0, $ac0 \n" \ " .set pop \n" \ @@ -2330,6 +2333,7 @@ do { \ long mflo1; \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mflo %0, $ac1 \n" \ " .set pop \n" \ @@ -2342,6 +2346,7 @@ do { \ long mflo2; \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mflo %0, $ac2 \n" \ " .set pop \n" \ @@ -2354,6 +2359,7 @@ do { \ long mflo3; \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mflo %0, $ac3 \n" \ " .set pop \n" \ @@ -2366,6 +2372,7 @@ do { \ long mfhi0; \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mfhi %0, $ac0 \n" \ " .set pop \n" \ @@ -2378,6 +2385,7 @@ do { \ long mfhi1; \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mfhi %0, $ac1 \n" \ " .set pop \n" \ @@ -2390,6 +2398,7 @@ do { \ long mfhi2; \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mfhi %0, $ac2 \n" \ " .set pop \n" \ @@ -2402,6 +2411,7 @@ do { \ long mfhi3; \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mfhi %0, $ac3 \n" \ " .set pop \n" \ @@ -2414,6 +2424,7 @@ do { \ ({ \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mtlo %0, $ac0 \n" \ " .set pop \n" \ @@ -2425,6 +2436,7 @@ do { \ ({ \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mtlo %0, $ac1 \n" \ 
" .set pop \n" \ @@ -2436,6 +2448,7 @@ do { \ ({ \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mtlo %0, $ac2 \n" \ " .set pop \n" \ @@ -2447,6 +2460,7 @@ do { \ ({ \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mtlo %0, $ac3 \n" \ " .set pop \n" \ @@ -2458,6 +2472,7 @@ do { \ ({ \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mthi %0, $ac0 \n" \ " .set pop \n" \ @@ -2469,6 +2484,7 @@ do { \ ({ \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mthi %0, $ac1 \n" \ " .set pop \n" \ @@ -2480,6 +2496,7 @@ do { \ ({ \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mthi %0, $ac2 \n" \ " .set pop \n" \ @@ -2491,6 +2508,7 @@ do { \ ({ \ __asm__( \ " .set push \n" \ + " .set " MIPS_ISA_LEVEL " \n" \ " .set dsp \n" \ " mthi %0, $ac3 \n" \ " .set pop \n" \ diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 49d6046ca1d0..c373eb605040 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -81,7 +81,7 @@ extern unsigned int vced_count, vcei_count; #endif -#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M) +#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M) extern unsigned long mips_stack_top(void); #define STACK_TOP mips_stack_top() diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 7f12d7e27c94..d19b2d65336b 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -48,58 +48,14 @@ extern void (*r4k_blast_icache)(void); : \ : "i" (op), "R" (*(unsigned char *)(addr))) -#ifdef CONFIG_MIPS_MT - -#define __iflush_prologue \ - unsigned long redundance; \ - extern int mt_n_iflushes; \ - for (redundance = 0; redundance < mt_n_iflushes; redundance++) { - -#define __iflush_epilogue \ - } - -#define __dflush_prologue \ - unsigned long redundance; \ - extern int mt_n_dflushes; \ - for (redundance = 0; redundance < mt_n_dflushes; redundance++) { - -#define __dflush_epilogue \ - } - -#define __inv_dflush_prologue __dflush_prologue -#define __inv_dflush_epilogue __dflush_epilogue -#define __sflush_prologue { -#define __sflush_epilogue } -#define __inv_sflush_prologue __sflush_prologue -#define __inv_sflush_epilogue __sflush_epilogue - -#else /* CONFIG_MIPS_MT */ - -#define __iflush_prologue { -#define __iflush_epilogue } -#define __dflush_prologue { -#define __dflush_epilogue } -#define __inv_dflush_prologue { -#define __inv_dflush_epilogue } -#define __sflush_prologue { -#define __sflush_epilogue } -#define __inv_sflush_prologue { -#define __inv_sflush_epilogue } - -#endif /* CONFIG_MIPS_MT */ - static inline void flush_icache_line_indexed(unsigned long addr) { - __iflush_prologue cache_op(Index_Invalidate_I, addr); - __iflush_epilogue } static inline void flush_dcache_line_indexed(unsigned long addr) { - __dflush_prologue cache_op(Index_Writeback_Inv_D, addr); - __dflush_epilogue } static inline void flush_scache_line_indexed(unsigned long addr) @@ -109,7 +65,6 @@ static inline void flush_scache_line_indexed(unsigned long addr) static inline void flush_icache_line(unsigned long addr) { - __iflush_prologue switch (boot_cpu_type()) { case CPU_LOONGSON2: cache_op(Hit_Invalidate_I_Loongson2, addr); @@ -119,21 +74,16 @@ static inline void flush_icache_line(unsigned long addr) cache_op(Hit_Invalidate_I, addr); break; } - __iflush_epilogue } static inline void 
flush_dcache_line(unsigned long addr) { - __dflush_prologue cache_op(Hit_Writeback_Inv_D, addr); - __dflush_epilogue } static inline void invalidate_dcache_line(unsigned long addr) { - __dflush_prologue cache_op(Hit_Invalidate_D, addr); - __dflush_epilogue } static inline void invalidate_scache_line(unsigned long addr) @@ -586,13 +536,9 @@ static inline void extra##blast_##pfx##cache##lsize(void) \ current_cpu_data.desc.waybit; \ unsigned long ws, addr; \ \ - __##pfx##flush_prologue \ - \ for (ws = 0; ws < ws_end; ws += ws_inc) \ for (addr = start; addr < end; addr += lsize * 32) \ cache##lsize##_unroll32(addr|ws, indexop); \ - \ - __##pfx##flush_epilogue \ } \ \ static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \ @@ -600,14 +546,10 @@ static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \ unsigned long start = page; \ unsigned long end = page + PAGE_SIZE; \ \ - __##pfx##flush_prologue \ - \ do { \ cache##lsize##_unroll32(start, hitop); \ start += lsize * 32; \ } while (start < end); \ - \ - __##pfx##flush_epilogue \ } \ \ static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ @@ -620,13 +562,9 @@ static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long current_cpu_data.desc.waybit; \ unsigned long ws, addr; \ \ - __##pfx##flush_prologue \ - \ for (ws = 0; ws < ws_end; ws += ws_inc) \ for (addr = start; addr < end; addr += lsize * 32) \ cache##lsize##_unroll32(addr|ws, indexop); \ - \ - __##pfx##flush_epilogue \ } __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, ) @@ -656,14 +594,10 @@ static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \ unsigned long start = page; \ unsigned long end = page + PAGE_SIZE; \ \ - __##pfx##flush_prologue \ - \ do { \ cache##lsize##_unroll32_user(start, hitop); \ start += lsize * 32; \ } while (start < end); \ - \ - __##pfx##flush_epilogue \ } __BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, @@ -685,16 +619,12 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, unsigned long addr = start & ~(lsize - 1); \ unsigned long aend = (end - 1) & ~(lsize - 1); \ \ - __##pfx##flush_prologue \ - \ while (1) { \ prot##cache_op(hitop, addr); \ if (addr == aend) \ break; \ addr += lsize; \ } \ - \ - __##pfx##flush_epilogue \ } #ifndef CONFIG_EVA @@ -712,8 +642,6 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\ unsigned long addr = start & ~(lsize - 1); \ unsigned long aend = (end - 1) & ~(lsize - 1); \ \ - __##pfx##flush_prologue \ - \ if (!uaccess_kernel()) { \ while (1) { \ protected_cachee_op(hitop, addr); \ @@ -730,7 +658,6 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\ } \ \ } \ - __##pfx##flush_epilogue \ } __BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D) diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index 53b2cb8e5966..b7123f9c0785 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -33,6 +33,9 @@ struct plat_smp_ops { int (*cpu_disable)(void); void (*cpu_die)(unsigned int cpu); #endif +#ifdef CONFIG_KEXEC + void (*kexec_nonboot_cpu)(void); +#endif }; extern void register_smp_ops(const struct plat_smp_ops *ops); diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 056a6bf13491..7990c1c70471 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -91,6 
+91,22 @@ static inline void __cpu_die(unsigned int cpu) extern void play_dead(void); #endif +#ifdef CONFIG_KEXEC +static inline void kexec_nonboot_cpu(void) +{ + extern const struct plat_smp_ops *mp_ops; /* private */ + + return mp_ops->kexec_nonboot_cpu(); +} + +static inline void *kexec_nonboot_cpu_func(void) +{ + extern const struct plat_smp_ops *mp_ops; /* private */ + + return mp_ops->kexec_nonboot_cpu; +} +#endif + /* * This function will set up the necessary IPIs for Linux to communicate * with the CPUs in mask. diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index 3c09450908aa..c68b8ae3efcb 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h @@ -24,16 +24,17 @@ #ifndef __ASSEMBLY__ +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLD_UNAME #define __ARCH_WANT_SYS_OLDUMOUNT diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index 262504bd59a5..c34c7eef0a1c 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -14,17 +14,6 @@ #define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int)) #undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */ -/* - * Careful to keep union _sifields from shifting ... - */ -#if _MIPS_SZLONG == 32 -#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int)) -#elif _MIPS_SZLONG == 64 -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#else -#error _MIPS_SZLONG neither 32 nor 64 -#endif - #define __ARCH_HAS_SWAPPED_SIGINFO #include <asm-generic/siginfo.h> diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f10e1e15e1c6..210c2802cf4d 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -113,22 +113,4 @@ obj-$(CONFIG_MIPS_CPC) += mips-cpc.o obj-$(CONFIG_CPU_PM) += pm.o obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o -# -# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not -# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches -# here because the compiler may use DSP ASE instructions (such as lwx) in -# code paths where we cannot check that the CPU we are running on supports it. -# Proper abstraction using HAVE_AS_DSP and macros is done in -# arch/mips/include/asm/mipsregs.h. 
-# -ifeq ($(CONFIG_CPU_MIPSR2), y) -CFLAGS_DSP = -DHAVE_AS_DSP - -CFLAGS_signal.o = $(CFLAGS_DSP) -CFLAGS_signal32.o = $(CFLAGS_DSP) -CFLAGS_process.o = $(CFLAGS_DSP) -CFLAGS_branch.o = $(CFLAGS_DSP) -CFLAGS_ptrace.o = $(CFLAGS_DSP) -endif - CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index 89b234844534..7a12763d553a 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -54,10 +54,10 @@ struct elf_prstatus32 pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; - struct compat_timeval pr_utime; /* User time */ - struct compat_timeval pr_stime; /* System time */ - struct compat_timeval pr_cutime;/* Cumulative user time */ - struct compat_timeval pr_cstime;/* Cumulative system time */ + struct old_timeval32 pr_utime; /* User time */ + struct old_timeval32 pr_stime; /* System time */ + struct old_timeval32 pr_cutime;/* Cumulative user time */ + struct old_timeval32 pr_cstime;/* Cumulative system time */ elf_gregset_t pr_reg; /* GP registers */ int pr_fpvalid; /* True if math co-processor being used. */ }; @@ -81,9 +81,9 @@ struct elf_prpsinfo32 #define elf_caddr_t u32 #define init_elf_binfmt init_elfn32_binfmt -#define jiffies_to_timeval jiffies_to_compat_timeval +#define jiffies_to_timeval jiffies_to_old_timeval32 static __inline__ void -jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) +jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value) { /* * Convert jiffies to nanoseconds and separate with @@ -101,6 +101,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) #define TASK_SIZE TASK_SIZE32 #undef ns_to_timeval -#define ns_to_timeval ns_to_compat_timeval +#define ns_to_timeval ns_to_old_timeval32 #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index a88c59db3d48..e6db06a1d31a 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -59,10 +59,10 @@ struct elf_prstatus32 pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; - struct compat_timeval pr_utime; /* User time */ - struct compat_timeval pr_stime; /* System time */ - struct compat_timeval pr_cutime;/* Cumulative user time */ - struct compat_timeval pr_cstime;/* Cumulative system time */ + struct old_timeval32 pr_utime; /* User time */ + struct old_timeval32 pr_stime; /* System time */ + struct old_timeval32 pr_cutime;/* Cumulative user time */ + struct old_timeval32 pr_cstime;/* Cumulative system time */ elf_gregset_t pr_reg; /* GP registers */ int pr_fpvalid; /* True if math co-processor being used. 
*/ }; @@ -86,9 +86,9 @@ struct elf_prpsinfo32 #define elf_caddr_t u32 #define init_elf_binfmt init_elf32_binfmt -#define jiffies_to_timeval jiffies_to_compat_timeval +#define jiffies_to_timeval jiffies_to_old_timeval32 static inline void -jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) +jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value) { /* * Convert jiffies to nanoseconds and separate with @@ -104,6 +104,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) #define TASK_SIZE TASK_SIZE32 #undef ns_to_timeval -#define ns_to_timeval ns_to_compat_timeval +#define ns_to_timeval ns_to_old_timeval32 #include "../../../fs/binfmt_elf.c" diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index d455363d51c3..2c7288041a99 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -36,6 +36,9 @@ static void crash_shutdown_secondary(void *passed_regs) if (!cpu_online(cpu)) return; + /* We won't be sent IPIs any more. */ + set_cpu_online(cpu, false); + local_irq_disable(); if (!cpumask_test_cpu(cpu, &cpus_in_crash)) crash_save_cpu(regs, cpu); @@ -43,7 +46,9 @@ static void crash_shutdown_secondary(void *passed_regs) while (!atomic_read(&kexec_ready_to_reboot)) cpu_relax(); - relocated_kexec_smp_wait(NULL); + + kexec_reboot(); + /* NOTREACHED */ } diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index d1bb506adc10..351d40fe0859 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -77,7 +77,7 @@ EXPORT(_stext) */ FEXPORT(__kernel_entry) j kernel_entry -#endif +#endif /* CONFIG_BOOT_RAW */ __REF @@ -94,24 +94,26 @@ NESTED(kernel_entry, 16, sp) # kernel entry point 0: #ifdef CONFIG_USE_OF -#ifdef CONFIG_MIPS_RAW_APPENDED_DTB +#if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \ + defined(CONFIG_MIPS_ELF_APPENDED_DTB) + PTR_LA t2, __appended_dtb #ifdef CONFIG_CPU_BIG_ENDIAN li t1, 0xd00dfeed -#else +#else /* !CONFIG_CPU_BIG_ENDIAN */ li t1, 0xedfe0dd0 -#endif +#endif /* !CONFIG_CPU_BIG_ENDIAN */ lw t0, (t2) beq t0, t1, dtb_found -#endif +#endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */ li t1, -2 move t2, a1 beq a0, t1, dtb_found li t2, 0 dtb_found: -#endif +#endif /* CONFIG_USE_OF */ PTR_LA t0, __bss_start # clear .bss LONG_S zero, (t0) PTR_LA t1, __bss_stop - LONGSIZE @@ -156,9 +158,9 @@ dtb_found: * newly sync'd icache. 
*/ jr.hb v0 -#else +#else /* !CONFIG_RELOCATABLE */ j start_kernel -#endif +#endif /* !CONFIG_RELOCATABLE */ END(kernel_entry) #ifdef CONFIG_SMP diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 8b574bcd39ba..93936dce04d6 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c @@ -9,6 +9,7 @@ #include <linux/kexec.h> #include <linux/mm.h> #include <linux/delay.h> +#include <linux/libfdt.h> #include <asm/cacheflush.h> #include <asm/page.h> @@ -19,15 +20,18 @@ extern const size_t relocate_new_kernel_size; extern unsigned long kexec_start_address; extern unsigned long kexec_indirection_page; -int (*_machine_kexec_prepare)(struct kimage *) = NULL; -void (*_machine_kexec_shutdown)(void) = NULL; -void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL; +static unsigned long reboot_code_buffer; + #ifdef CONFIG_SMP -void (*relocated_kexec_smp_wait) (void *); +static void (*relocated_kexec_smp_wait)(void *); + atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0); void (*_crash_smp_send_stop)(void) = NULL; #endif +void (*_machine_kexec_shutdown)(void) = NULL; +void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL; + static void kexec_image_info(const struct kimage *kimage) { unsigned long i; @@ -48,13 +52,59 @@ static void kexec_image_info(const struct kimage *kimage) } } +#ifdef CONFIG_UHI_BOOT + +static int uhi_machine_kexec_prepare(struct kimage *kimage) +{ + int i; + + /* + * In case DTB file is not passed to the new kernel, a flat device + * tree will be created by kexec tool. It holds modified command + * line for the new kernel. + */ + for (i = 0; i < kimage->nr_segments; i++) { + struct fdt_header fdt; + + if (kimage->segment[i].memsz <= sizeof(fdt)) + continue; + + if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt))) + continue; + + if (fdt_check_header(&fdt)) + continue; + + kexec_args[0] = -2; + kexec_args[1] = (unsigned long) + phys_to_virt((unsigned long)kimage->segment[i].mem); + break; + } + + return 0; +} + +int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare; + +#else + +int (*_machine_kexec_prepare)(struct kimage *) = NULL; + +#endif /* CONFIG_UHI_BOOT */ + int machine_kexec_prepare(struct kimage *kimage) { +#ifdef CONFIG_SMP + if (!kexec_nonboot_cpu_func()) + return -EINVAL; +#endif + kexec_image_info(kimage); if (_machine_kexec_prepare) return _machine_kexec_prepare(kimage); + return 0; } @@ -63,11 +113,41 @@ machine_kexec_cleanup(struct kimage *kimage) { } +#ifdef CONFIG_SMP +static void kexec_shutdown_secondary(void *param) +{ + int cpu = smp_processor_id(); + + if (!cpu_online(cpu)) + return; + + /* We won't be sent IPIs any more. 
*/ + set_cpu_online(cpu, false); + + local_irq_disable(); + while (!atomic_read(&kexec_ready_to_reboot)) + cpu_relax(); + + kexec_reboot(); + + /* NOTREACHED */ +} +#endif + void machine_shutdown(void) { if (_machine_kexec_shutdown) _machine_kexec_shutdown(); + +#ifdef CONFIG_SMP + smp_call_function(kexec_shutdown_secondary, NULL, 0); + + while (num_online_cpus() > 1) { + cpu_relax(); + mdelay(1); + } +#endif } void @@ -79,12 +159,57 @@ machine_crash_shutdown(struct pt_regs *regs) default_machine_crash_shutdown(regs); } -typedef void (*noretfun_t)(void) __noreturn; +#ifdef CONFIG_SMP +void kexec_nonboot_cpu_jump(void) +{ + local_flush_icache_range((unsigned long)relocated_kexec_smp_wait, + reboot_code_buffer + relocate_new_kernel_size); + + relocated_kexec_smp_wait(NULL); +} +#endif + +void kexec_reboot(void) +{ + void (*do_kexec)(void) __noreturn; + + /* + * We know we were online, and there will be no incoming IPIs at + * this point. Mark online again before rebooting so that the crash + * analysis tool will see us correctly. + */ + set_cpu_online(smp_processor_id(), true); + + /* Ensure remote CPUs observe that we're online before rebooting. */ + smp_mb__after_atomic(); + +#ifdef CONFIG_SMP + if (smp_processor_id() > 0) { + /* + * Instead of cpu_relax() or wait, this is needed for kexec + * smp reboot. Kdump usually doesn't require an smp new + * kernel, but kexec may do. + */ + kexec_nonboot_cpu(); + + /* NOTREACHED */ + } +#endif + + /* + * Make sure we get correct instructions written by the + * machine_kexec() CPU. + */ + local_flush_icache_range(reboot_code_buffer, + reboot_code_buffer + relocate_new_kernel_size); + + do_kexec = (void *)reboot_code_buffer; + do_kexec(); +} void machine_kexec(struct kimage *image) { - unsigned long reboot_code_buffer; unsigned long entry; unsigned long *ptr; @@ -118,6 +243,9 @@ machine_kexec(struct kimage *image) *ptr = (unsigned long) phys_to_virt(*ptr); } + /* Mark offline BEFORE disabling local irq. */ + set_cpu_online(smp_processor_id(), false); + /* * we do not want to be bothered. */ @@ -125,6 +253,7 @@ machine_kexec(struct kimage *image) printk("Will call new kernel at %08lx\n", image->start); printk("Bye ...\n"); + /* Make reboot code buffer available to the boot CPU. 
*/ __flush_cache_all(); #ifdef CONFIG_SMP /* All secondary cpus now may jump to kexec_wait cycle */ @@ -133,5 +262,5 @@ machine_kexec(struct kimage *image) smp_wmb(); atomic_set(&kexec_ready_to_reboot, 1); #endif - ((noretfun_t) reboot_code_buffer)(); + kexec_reboot(); } diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index efaa2527657d..9f85b98d24ac 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -154,40 +154,6 @@ static int __init config7_set(char *str) } __setup("config7=", config7_set); -/* Experimental cache flush control parameters that should go away some day */ -int mt_protiflush; -int mt_protdflush; -int mt_n_iflushes = 1; -int mt_n_dflushes = 1; - -static int __init set_protiflush(char *s) -{ - mt_protiflush = 1; - return 1; -} -__setup("protiflush", set_protiflush); - -static int __init set_protdflush(char *s) -{ - mt_protdflush = 1; - return 1; -} -__setup("protdflush", set_protdflush); - -static int __init niflush(char *s) -{ - get_option(&s, &mt_n_iflushes); - return 1; -} -__setup("niflush=", niflush); - -static int __init ndflush(char *s) -{ - get_option(&s, &mt_n_dflushes); - return 1; -} -__setup("ndflush=", ndflush); - static unsigned int itc_base; static int __init set_itc_base(char *str) @@ -232,16 +198,6 @@ void mips_mt_set_cpuoptions(void) printk("Config7: 0x%08x\n", read_c0_config7()); } - /* Report Cache management debug options */ - if (mt_protiflush) - printk("I-cache flushes single-threaded\n"); - if (mt_protdflush) - printk("D-cache flushes single-threaded\n"); - if (mt_n_iflushes != 1) - printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes); - if (mt_n_dflushes != 1) - printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes); - if (itc_base != 0) { /* * Configure ITC mapping. This code is very @@ -283,21 +239,6 @@ void mips_mt_set_cpuoptions(void) } } -/* - * Function to protect cache flushes from concurrent execution - * depends on MP software model chosen. 
- */ - -void mt_cflush_lockdown(void) -{ - /* FILL IN VSMP and AP/SP VERSIONS HERE */ -} - -void mt_cflush_release(void) -{ - /* FILL IN VSMP and AP/SP VERSIONS HERE */ -} - struct class *mt_class; static int __init mt_init(void) diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c index cbf4cc0b0b6c..3d80a51256de 100644 --- a/arch/mips/kernel/relocate.c +++ b/arch/mips/kernel/relocate.c @@ -146,7 +146,7 @@ int __init do_relocations(void *kbase_old, void *kbase_new, long offset) break; type = (*r >> 24) & 0xff; - loc_orig = (void *)(kbase_old + ((*r & 0x00ffffff) << 2)); + loc_orig = kbase_old + ((*r & 0x00ffffff) << 2); loc_new = RELOCATED(loc_orig); if (reloc_handlers_rel[type] == NULL) { diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index e64b9e8bb002..01a5ff4c41ff 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -333,7 +333,7 @@ static void __init finalize_initrd(void) maybe_bswap_initrd(); - reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT); + memblock_reserve(__pa(initrd_start), size); initrd_below_start_ok = 1; pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n", @@ -370,20 +370,10 @@ static void __init bootmem_init(void) #else /* !CONFIG_SGI_IP27 */ -static unsigned long __init bootmap_bytes(unsigned long pages) -{ - unsigned long bytes = DIV_ROUND_UP(pages, 8); - - return ALIGN(bytes, sizeof(long)); -} - static void __init bootmem_init(void) { unsigned long reserved_end; - unsigned long mapstart = ~0UL; - unsigned long bootmap_size; phys_addr_t ramstart = PHYS_ADDR_MAX; - bool bootmap_valid = false; int i; /* @@ -395,6 +385,8 @@ static void __init bootmem_init(void) init_initrd(); reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end)); + memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT); + /* * max_low_pfn is not a number of pages. The number of pages * of the system is given by 'max_low_pfn - min_low_pfn'. @@ -442,9 +434,6 @@ static void __init bootmem_init(void) if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) continue; #endif - if (start >= mapstart) - continue; - mapstart = max(reserved_end, start); } if (min_low_pfn >= max_low_pfn) @@ -456,9 +445,11 @@ static void __init bootmem_init(void) /* * Reserve any memory between the start of RAM and PHYS_OFFSET */ - if (ramstart > PHYS_OFFSET) + if (ramstart > PHYS_OFFSET) { add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET, BOOT_MEM_RESERVED); + memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET); + } if (min_low_pfn > ARCH_PFN_OFFSET) { pr_info("Wasting %lu bytes for tracking %lu unused pages\n", @@ -483,52 +474,6 @@ static void __init bootmem_init(void) max_low_pfn = PFN_DOWN(HIGHMEM_START); } -#ifdef CONFIG_BLK_DEV_INITRD - /* - * mapstart should be after initrd_end - */ - if (initrd_end) - mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); -#endif - - /* - * check that mapstart doesn't overlap with any of - * memory regions that have been reserved through eg. 
DTB - */ - bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn); - - bootmap_valid = memory_region_available(PFN_PHYS(mapstart), - bootmap_size); - for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) { - unsigned long mapstart_addr; - - switch (boot_mem_map.map[i].type) { - case BOOT_MEM_RESERVED: - mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr + - boot_mem_map.map[i].size); - if (PHYS_PFN(mapstart_addr) < mapstart) - break; - - bootmap_valid = memory_region_available(mapstart_addr, - bootmap_size); - if (bootmap_valid) - mapstart = PHYS_PFN(mapstart_addr); - break; - default: - break; - } - } - - if (!bootmap_valid) - panic("No memory area to place a bootmap bitmap"); - - /* - * Initialize the boot-time allocator with low memory only. - */ - if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart, - min_low_pfn, max_low_pfn)) - panic("Unexpected memory size required for bootmap"); - for (i = 0; i < boot_mem_map.nr_map; i++) { unsigned long start, end; @@ -577,9 +522,9 @@ static void __init bootmem_init(void) default: /* Not usable memory */ if (start > min_low_pfn && end < max_low_pfn) - reserve_bootmem(boot_mem_map.map[i].addr, - boot_mem_map.map[i].size, - BOOTMEM_DEFAULT); + memblock_reserve(boot_mem_map.map[i].addr, + boot_mem_map.map[i].size); + continue; } @@ -602,15 +547,9 @@ static void __init bootmem_init(void) size = end - start; /* Register lowmem ranges */ - free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT); memory_present(0, start, end); } - /* - * Reserve the bootmap memory. - */ - reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT); - #ifdef CONFIG_RELOCATABLE /* * The kernel reserves all memory below its _end symbol as bootmem, @@ -642,29 +581,6 @@ static void __init bootmem_init(void) #endif /* CONFIG_SGI_IP27 */ -/* - * arch_mem_init - initialize memory management subsystem - * - * o plat_mem_setup() detects the memory configuration and will record detected - * memory areas using add_memory_region. - * - * At this stage the memory configuration of the system is known to the - * kernel but generic memory management system is still entirely uninitialized. - * - * o bootmem_init() - * o sparse_init() - * o paging_init() - * o dma_contiguous_reserve() - * - * At this stage the bootmem allocator is ready to use. - * - * NOTE: historically plat_mem_setup did the entire platform initialization. - * This was rather impractical because it meant plat_mem_setup had to - * get away without any kind of memory allocator. To keep old code from - * breaking plat_setup was just renamed to plat_mem_setup and a second platform - * initialization hook for anything else was introduced. - */ - static int usermem __initdata; static int __init early_parse_mem(char *p) @@ -841,6 +757,28 @@ static void __init request_crashkernel(struct resource *res) #define BUILTIN_EXTEND_WITH_PROM \ IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND) +/* + * arch_mem_init - initialize memory management subsystem + * + * o plat_mem_setup() detects the memory configuration and will record detected + * memory areas using add_memory_region. + * + * At this stage the memory configuration of the system is known to the + * kernel but generic memory management system is still entirely uninitialized. + * + * o bootmem_init() + * o sparse_init() + * o paging_init() + * o dma_contiguous_reserve() + * + * At this stage the bootmem allocator is ready to use. + * + * NOTE: historically plat_mem_setup did the entire platform initialization. 
+ * This was rather impractical because it meant plat_mem_setup had to + * get away without any kind of memory allocator. To keep old code from + * breaking plat_setup was just renamed to plat_mem_setup and a second platform + * initialization hook for anything else was introduced. + */ static void __init arch_mem_init(char **cmdline_p) { struct memblock_region *reg; @@ -916,21 +854,29 @@ static void __init arch_mem_init(char **cmdline_p) early_init_fdt_scan_reserved_mem(); bootmem_init(); + + /* + * Prevent memblock from allocating high memory. + * This cannot be done before max_low_pfn is detected, so up + * to this point is possible to only reserve physical memory + * with memblock_reserve; memblock_virt_alloc* can be used + * only after this point + */ + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + #ifdef CONFIG_PROC_VMCORE if (setup_elfcorehdr && setup_elfcorehdr_size) { printk(KERN_INFO "kdump reserved memory at %lx-%lx\n", setup_elfcorehdr, setup_elfcorehdr_size); - reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size, - BOOTMEM_DEFAULT); + memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size); } #endif mips_parse_crashkernel(); #ifdef CONFIG_KEXEC if (crashk_res.start != crashk_res.end) - reserve_bootmem(crashk_res.start, - crashk_res.end - crashk_res.start + 1, - BOOTMEM_DEFAULT); + memblock_reserve(crashk_res.start, + crashk_res.end - crashk_res.start + 1); #endif device_tree_init(); sparse_init(); @@ -940,7 +886,7 @@ static void __init arch_mem_init(char **cmdline_p) /* Tell bootmem about cma reserved memblock section */ for_each_memblock(reserved, reg) if (reg->size != 0) - reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); + memblock_reserve(reg->base, reg->size); reserve_bootmem_region(__pa_symbol(&__nosave_begin), __pa_symbol(&__nosave_end)); /* Reserve for hibernation */ diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 159e83add4bb..76fae9b79f13 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -25,6 +25,7 @@ #include <linux/linkage.h> #include <linux/bug.h> #include <linux/kernel.h> +#include <linux/kexec.h> #include <asm/time.h> #include <asm/pgtable.h> @@ -423,6 +424,9 @@ const struct plat_smp_ops bmips43xx_smp_ops = { .cpu_disable = bmips_cpu_disable, .cpu_die = bmips_cpu_die, #endif +#ifdef CONFIG_KEXEC + .kexec_nonboot_cpu = kexec_nonboot_cpu_jump, +#endif }; const struct plat_smp_ops bmips5000_smp_ops = { @@ -437,6 +441,9 @@ const struct plat_smp_ops bmips5000_smp_ops = { .cpu_disable = bmips_cpu_disable, .cpu_die = bmips_cpu_die, #endif +#ifdef CONFIG_KEXEC + .kexec_nonboot_cpu = kexec_nonboot_cpu_jump, +#endif }; #endif /* CONFIG_SMP */ diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index 03f1026ad148..faccfa4b280b 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -398,6 +398,55 @@ static void cps_smp_finish(void) local_irq_enable(); } +#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC) + +enum cpu_death { + CPU_DEATH_HALT, + CPU_DEATH_POWER, +}; + +static void cps_shutdown_this_cpu(enum cpu_death death) +{ + unsigned int cpu, core, vpe_id; + + cpu = smp_processor_id(); + core = cpu_core(&cpu_data[cpu]); + + if (death == CPU_DEATH_HALT) { + vpe_id = cpu_vpe_id(&cpu_data[cpu]); + + pr_debug("Halting core %d VP%d\n", core, vpe_id); + if (cpu_has_mipsmt) { + /* Halt this TC */ + write_c0_tchalt(TCHALT_H); + instruction_hazard(); + } else if (cpu_has_vp) { + write_cpc_cl_vp_stop(1 << vpe_id); + + /* Ensure that the VP_STOP register is 
written */ + wmb(); + } + } else { + pr_debug("Gating power to core %d\n", core); + /* Power down the core */ + cps_pm_enter_state(CPS_PM_POWER_GATED); + } +} + +#ifdef CONFIG_KEXEC + +static void cps_kexec_nonboot_cpu(void) +{ + if (cpu_has_mipsmt || cpu_has_vp) + cps_shutdown_this_cpu(CPU_DEATH_HALT); + else + cps_shutdown_this_cpu(CPU_DEATH_POWER); +} + +#endif /* CONFIG_KEXEC */ + +#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */ + #ifdef CONFIG_HOTPLUG_CPU static int cps_cpu_disable(void) @@ -421,19 +470,15 @@ static int cps_cpu_disable(void) } static unsigned cpu_death_sibling; -static enum { - CPU_DEATH_HALT, - CPU_DEATH_POWER, -} cpu_death; +static enum cpu_death cpu_death; void play_dead(void) { - unsigned int cpu, core, vpe_id; + unsigned int cpu; local_irq_disable(); idle_task_exit(); cpu = smp_processor_id(); - core = cpu_core(&cpu_data[cpu]); cpu_death = CPU_DEATH_POWER; pr_debug("CPU%d going offline\n", cpu); @@ -456,25 +501,7 @@ void play_dead(void) /* This CPU has chosen its way out */ (void)cpu_report_death(); - if (cpu_death == CPU_DEATH_HALT) { - vpe_id = cpu_vpe_id(&cpu_data[cpu]); - - pr_debug("Halting core %d VP%d\n", core, vpe_id); - if (cpu_has_mipsmt) { - /* Halt this TC */ - write_c0_tchalt(TCHALT_H); - instruction_hazard(); - } else if (cpu_has_vp) { - write_cpc_cl_vp_stop(1 << vpe_id); - - /* Ensure that the VP_STOP register is written */ - wmb(); - } - } else { - pr_debug("Gating power to core %d\n", core); - /* Power down the core */ - cps_pm_enter_state(CPS_PM_POWER_GATED); - } + cps_shutdown_this_cpu(cpu_death); /* This should never be reached */ panic("Failed to offline CPU %u", cpu); @@ -593,6 +620,9 @@ static const struct plat_smp_ops cps_smp_ops = { .cpu_disable = cps_cpu_disable, .cpu_die = cps_cpu_die, #endif +#ifdef CONFIG_KEXEC + .kexec_nonboot_cpu = cps_kexec_nonboot_cpu, +#endif }; bool mips_cps_smp_in_use(void) diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 9dab0ed1b227..5feef28deac8 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -29,6 +29,7 @@ #include <linux/spinlock.h> #include <linux/kallsyms.h> #include <linux/bootmem.h> +#include <linux/memblock.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/kgdb.h> @@ -348,7 +349,7 @@ static void __show_regs(const struct pt_regs *regs) */ void show_regs(struct pt_regs *regs) { - __show_regs((struct pt_regs *)regs); + __show_regs(regs); dump_stack(); } @@ -2260,8 +2261,10 @@ void __init trap_init(void) unsigned long size = 0x200 + VECTORSPACING*64; phys_addr_t ebase_pa; + memblock_set_bottom_up(true); ebase = (unsigned long) __alloc_bootmem(size, 1 << fls(size), 0); + memblock_set_bottom_up(false); /* * Try to ensure ebase resides in KSeg0 if possible. 
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 2d0b912f9e3e..ce446eed62d2 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -130,7 +130,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR #define _LoadW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -151,8 +151,8 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#else -/* MIPSR6 has no lwl instruction */ +#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +/* For CPUs without lwl instruction */ #define _LoadW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -186,7 +186,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ #define _LoadHWU(addr, value, res, type) \ do { \ @@ -212,7 +212,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR #define _LoadWU(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -255,8 +255,8 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#else -/* MIPSR6 has not lwl and ldl instructions */ +#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +/* For CPUs without lwl and ldl instructions */ #define _LoadWU(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -339,7 +339,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ #define _StoreHW(addr, value, res, type) \ @@ -365,7 +365,7 @@ do { \ : "r" (value), "r" (addr), "i" (-EFAULT));\ } while(0) -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR #define _StoreW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -406,8 +406,7 @@ do { \ : "r" (value), "r" (addr), "i" (-EFAULT)); \ } while(0) -#else -/* MIPSR6 has no swl and sdl instructions */ +#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ #define _StoreW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -483,7 +482,7 @@ do { \ : "memory"); \ } while(0) -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ #else /* __BIG_ENDIAN */ @@ -509,7 +508,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR #define _LoadW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -530,8 +529,8 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#else -/* MIPSR6 has no lwl instruction */ +#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +/* For CPUs without lwl instruction */ #define _LoadW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -565,7 +564,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ #define _LoadHWU(addr, value, res, type) \ @@ -592,7 +591,7 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR #define _LoadWU(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -635,8 +634,8 @@ do { \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#else -/* MIPSR6 has not lwl and ldl instructions */ +#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +/* For CPUs without lwl and ldl instructions */ #define _LoadWU(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -718,7 +717,7 @@ do { \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); \ } while(0) -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ #define _StoreHW(addr, value, res, type) \ 
do { \ @@ -743,7 +742,7 @@ do { \ : "r" (value), "r" (addr), "i" (-EFAULT));\ } while(0) -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR #define _StoreW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -784,8 +783,8 @@ do { \ : "r" (value), "r" (addr), "i" (-EFAULT)); \ } while(0) -#else -/* MIPSR6 has no swl and sdl instructions */ +#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ +/* For CPUs without swl and sdl instructions */ #define _StoreW(addr, value, res, type) \ do { \ __asm__ __volatile__ ( \ @@ -861,7 +860,7 @@ do { \ : "memory"); \ } while(0) -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ #endif #define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel) diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c index 664f2f7f55c1..982859f2b2a3 100644 --- a/arch/mips/lantiq/xway/dma.c +++ b/arch/mips/lantiq/xway/dma.c @@ -106,7 +106,6 @@ ltq_dma_open(struct ltq_dma_channel *ch) spin_lock_irqsave(<q_dma_lock, flag); ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL); - ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); spin_unlock_irqrestore(<q_dma_lock, flag); } EXPORT_SYMBOL_GPL(ltq_dma_open); diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index e0af39b33e28..fe25c99089b7 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -505,7 +505,7 @@ void __init ltq_soc_init(void) clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI); clkdev_add_pmu("1a800000.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI); clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL); - clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP); + clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP); clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); } else if (of_machine_is_compatible("lantiq,ar10")) { @@ -513,11 +513,11 @@ void __init ltq_soc_init(void) ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz()); clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0); clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1); - clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | + clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP | PMU_PPE_TC); clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); - clkdev_add_pmu("1f203020.gphy", NULL, 1, 0, PMU_GPHY); - clkdev_add_pmu("1f203068.gphy", NULL, 1, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); @@ -536,12 +536,12 @@ void __init ltq_soc_init(void) clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS); clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF); - clkdev_add_pmu("1e108000.eth", NULL, 0, 0, + clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM | PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 | PMU_PPE_QSB | PMU_PPE_TOP); - clkdev_add_pmu("1f203020.gphy", NULL, 0, 0, PMU_GPHY); - clkdev_add_pmu("1f203068.gphy", NULL, 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY); + clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY); clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO); clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU); clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE); diff --git 
a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index 6537e022ef62..479f50559c83 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -7,7 +7,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ mips-atomic.o strncpy_user.o \ strnlen_user.o uncached.o -obj-y += iomap.o iomap_copy.o +obj-y += iomap_copy.o obj-$(CONFIG_PCI) += iomap-pci.o lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y)) diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c index 4850509c5534..210f5a95ecb1 100644 --- a/arch/mips/lib/iomap-pci.c +++ b/arch/mips/lib/iomap-pci.c @@ -44,10 +44,3 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev, } #endif /* CONFIG_PCI_DRIVERS_LEGACY */ - -void pci_iounmap(struct pci_dev *dev, void __iomem * addr) -{ - iounmap(addr); -} - -EXPORT_SYMBOL(pci_iounmap); diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c deleted file mode 100644 index 9b31653f318c..000000000000 --- a/arch/mips/lib/iomap.c +++ /dev/null @@ -1,227 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Implement the default iomap interfaces - * - * (C) Copyright 2004 Linus Torvalds - * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org> - * (C) Copyright 2007 MIPS Technologies, Inc. - * written by Ralf Baechle <ralf@linux-mips.org> - */ -#include <linux/export.h> -#include <asm/io.h> - -/* - * Read/write from/to an (offsettable) iomem cookie. It might be a PIO - * access or a MMIO access, these functions don't care. The info is - * encoded in the hardware mapping set up by the mapping functions - * (or the cookie itself, depending on implementation and hw). - * - * The generic routines don't assume any hardware mappings, and just - * encode the PIO/MMIO as part of the cookie. They coldly assume that - * the MMIO IO mappings are not in the low address range. - * - * Architectures for which this is not true can't use this generic - * implementation and should do their own copy. - */ - -#define PIO_MASK 0x0ffffUL - -unsigned int ioread8(void __iomem *addr) -{ - return readb(addr); -} - -EXPORT_SYMBOL(ioread8); - -unsigned int ioread16(void __iomem *addr) -{ - return readw(addr); -} - -EXPORT_SYMBOL(ioread16); - -unsigned int ioread16be(void __iomem *addr) -{ - return be16_to_cpu(__raw_readw(addr)); -} - -EXPORT_SYMBOL(ioread16be); - -unsigned int ioread32(void __iomem *addr) -{ - return readl(addr); -} - -EXPORT_SYMBOL(ioread32); - -unsigned int ioread32be(void __iomem *addr) -{ - return be32_to_cpu(__raw_readl(addr)); -} - -EXPORT_SYMBOL(ioread32be); - -void iowrite8(u8 val, void __iomem *addr) -{ - writeb(val, addr); -} - -EXPORT_SYMBOL(iowrite8); - -void iowrite16(u16 val, void __iomem *addr) -{ - writew(val, addr); -} - -EXPORT_SYMBOL(iowrite16); - -void iowrite16be(u16 val, void __iomem *addr) -{ - __raw_writew(cpu_to_be16(val), addr); -} - -EXPORT_SYMBOL(iowrite16be); - -void iowrite32(u32 val, void __iomem *addr) -{ - writel(val, addr); -} - -EXPORT_SYMBOL(iowrite32); - -void iowrite32be(u32 val, void __iomem *addr) -{ - __raw_writel(cpu_to_be32(val), addr); -} - -EXPORT_SYMBOL(iowrite32be); - -/* - * These are the "repeat MMIO read/write" functions. - * Note the "__mem" accesses, since we want to convert - * to CPU byte order if the host bus happens to not match the - * endianness of PCI/ISA (see mach-generic/mangle-port.h). 
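The comment in the removed iomap.c describes the convention the generic iomap code relies on instead: whether a cookie names a PIO port or an MMIO address is encoded in the cookie value itself, on the assumption that MMIO mappings never sit in the low address range. A rough illustration of that check, reusing the PIO_MASK value from the deleted file (the helper name is made up):

static int cookie_is_pio(unsigned long cookie)
{
	/* Cookies at or below PIO_MASK are treated as port numbers;
	 * anything above is assumed to be a memory-mapped address. */
	return cookie <= 0x0ffffUL;	/* PIO_MASK from the removed file */
}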
- */ -static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) -{ - while (--count >= 0) { - u8 data = __mem_readb(addr); - *dst = data; - dst++; - } -} - -static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) -{ - while (--count >= 0) { - u16 data = __mem_readw(addr); - *dst = data; - dst++; - } -} - -static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) -{ - while (--count >= 0) { - u32 data = __mem_readl(addr); - *dst = data; - dst++; - } -} - -static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count) -{ - while (--count >= 0) { - __mem_writeb(*src, addr); - src++; - } -} - -static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count) -{ - while (--count >= 0) { - __mem_writew(*src, addr); - src++; - } -} - -static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) -{ - while (--count >= 0) { - __mem_writel(*src, addr); - src++; - } -} - -void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) -{ - mmio_insb(addr, dst, count); -} - -EXPORT_SYMBOL(ioread8_rep); - -void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) -{ - mmio_insw(addr, dst, count); -} - -EXPORT_SYMBOL(ioread16_rep); - -void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) -{ - mmio_insl(addr, dst, count); -} - -EXPORT_SYMBOL(ioread32_rep); - -void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) -{ - mmio_outsb(addr, src, count); -} - -EXPORT_SYMBOL(iowrite8_rep); - -void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) -{ - mmio_outsw(addr, src, count); -} - -EXPORT_SYMBOL(iowrite16_rep); - -void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) -{ - mmio_outsl(addr, src, count); -} - -EXPORT_SYMBOL(iowrite32_rep); - -/* - * Create a virtual mapping cookie for an IO port range - * - * This uses the same mapping are as the in/out family which has to be setup - * by the platform initialization code. - * - * Just to make matters somewhat more interesting on MIPS systems with - * multiple host bridge each will have it's own ioport address space. - */ -static void __iomem *ioport_map_legacy(unsigned long port, unsigned int nr) -{ - return (void __iomem *) (mips_io_port_base + port); -} - -void __iomem *ioport_map(unsigned long port, unsigned int nr) -{ - if (port > PIO_MASK) - return NULL; - - return ioport_map_legacy(port, nr); -} - -EXPORT_SYMBOL(ioport_map); - -void ioport_unmap(void __iomem *addr) -{ - /* Nothing to do */ -} - -EXPORT_SYMBOL(ioport_unmap); diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 03e3304d6ae5..cdd19d8561e8 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -204,9 +204,10 @@ #define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler) #define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler) -#define _PREF(hint, addr, type) \ +#ifdef CONFIG_CPU_HAS_PREFETCH +# define _PREF(hint, addr, type) \ .if \mode == LEGACY_MODE; \ - PREF(hint, addr); \ + kernel_pref(hint, addr); \ .else; \ .if ((\from == USEROP) && (type == SRC_PREFETCH)) || \ ((\to == USEROP) && (type == DST_PREFETCH)); \ @@ -218,12 +219,15 @@ * used later on. Therefore use $v1. 
\ */ \ .set at=v1; \ - PREFE(hint, addr); \ + user_pref(hint, addr); \ .set noat; \ .else; \ - PREF(hint, addr); \ + kernel_pref(hint, addr); \ .endif; \ .endif +#else +# define _PREF(hint, addr, type) +#endif #define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH) #define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH) @@ -297,7 +301,7 @@ and t0, src, ADDRMASK PREFS( 0, 2*32(src) ) PREFD( 1, 2*32(dst) ) -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR bnez t1, .Ldst_unaligned\@ nop bnez t0, .Lsrc_unaligned_dst_aligned\@ @@ -385,7 +389,7 @@ bne rem, len, 1b .set noreorder -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) * A loop would do only a byte at a time with possible branch @@ -487,7 +491,7 @@ bne len, rem, 1b .set noreorder -#endif /* !CONFIG_CPU_MIPSR6 */ +#endif /* CONFIG_CPU_HAS_LOAD_STORE_LR */ .Lcopy_bytes_checklen\@: beqz len, .Ldone\@ nop @@ -516,7 +520,7 @@ jr ra nop -#ifdef CONFIG_CPU_MIPSR6 +#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR .Lcopy_unaligned_bytes\@: 1: COPY_BYTE(0) @@ -530,7 +534,7 @@ ADD src, src, 8 b 1b ADD dst, dst, 8 -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ .if __memcpy == 1 END(memcpy) .set __memcpy, 0 diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index 069acec3df9f..418611ef13cf 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -78,7 +78,6 @@ #endif .endm - .set noreorder .align 5 /* @@ -94,13 +93,16 @@ .endif sltiu t0, a2, STORSIZE /* very small region? */ + .set noreorder bnez t0, .Lsmall_memset\@ andi t0, a0, STORMASK /* aligned? */ + .set reorder #ifdef CONFIG_CPU_MICROMIPS move t8, a1 /* used by 'swp' instruction */ move t9, a1 #endif + .set noreorder #ifndef CONFIG_CPU_DADDI_WORKAROUNDS beqz t0, 1f PTR_SUBU t0, STORSIZE /* alignment in bytes */ @@ -111,8 +113,9 @@ PTR_SUBU t0, AT /* alignment in bytes */ .set at #endif + .set reorder -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ @@ -122,11 +125,13 @@ PTR_SUBU a0, t0 /* long align ptr */ PTR_ADDU a2, t0 /* correct size */ -#else /* CONFIG_CPU_MIPSR6 */ +#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ #define STORE_BYTE(N) \ EX(sb, a1, N(a0), .Lbyte_fixup\@); \ + .set noreorder; \ beqz t0, 0f; \ - PTR_ADDU t0, 1; + PTR_ADDU t0, 1; \ + .set reorder; PTR_ADDU a2, t0 /* correct size */ PTR_ADDU t0, 1 @@ -145,19 +150,17 @@ ori a0, STORMASK xori a0, STORMASK PTR_ADDIU a0, STORSIZE -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f + andi t0, a2, 0x40-STORSIZE beqz t1, .Lmemset_partial\@ /* no block to fill */ - andi t0, a2, 0x40-STORSIZE PTR_ADDU t1, a0 /* end address */ - .set reorder 1: PTR_ADDIU a0, 64 R10KCBARRIER(0(ra)) f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode bne t1, a0, 1b - .set noreorder .Lmemset_partial\@: R10KCBARRIER(0(ra)) @@ -173,20 +176,18 @@ PTR_SUBU t1, AT .set at #endif + PTR_ADDU a0, t0 /* dest ptr */ jr t1 - PTR_ADDU a0, t0 /* dest ptr */ - .set push - .set noreorder - .set nomacro /* ... but first do longs ... 
*/ f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode -2: .set pop - andi a2, STORMASK /* At most one long to go */ +2: andi a2, STORMASK /* At most one long to go */ + .set noreorder beqz a2, 1f -#ifndef CONFIG_CPU_MIPSR6 +#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR PTR_ADDU a0, a2 /* What's left */ + .set reorder R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) @@ -195,6 +196,7 @@ #endif #else PTR_SUBU t0, $0, a2 + .set reorder move a2, zero /* No remaining longs */ PTR_ADDIU t0, 1 STORE_BYTE(0) @@ -210,41 +212,42 @@ #endif 0: #endif -1: jr ra - move a2, zero +1: move a2, zero + jr ra .Lsmall_memset\@: + PTR_ADDU t1, a0, a2 beqz a2, 2f - PTR_ADDU t1, a0, a2 1: PTR_ADDIU a0, 1 /* fill bytewise */ R10KCBARRIER(0(ra)) + .set noreorder bne t1, a0, 1b EX(sb, a1, -1(a0), .Lsmall_fixup\@) + .set reorder -2: jr ra /* done */ - move a2, zero +2: move a2, zero + jr ra /* done */ .if __memset == 1 END(memset) .set __memset, 0 .hidden __memset .endif -#ifdef CONFIG_CPU_MIPSR6 +#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR .Lbyte_fixup\@: /* * unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1 * a2 = a2 - t0 + 1 */ PTR_SUBU a2, t0 + PTR_ADDIU a2, 1 jr ra - PTR_ADDIU a2, 1 -#endif /* CONFIG_CPU_MIPSR6 */ +#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ .Lfirst_fixup\@: /* unset_bytes already in a2 */ jr ra - nop .Lfwd_fixup\@: /* @@ -255,8 +258,8 @@ andi a2, 0x3f LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, t1 + LONG_SUBU a2, t0 jr ra - LONG_SUBU a2, t0 .Lpartial_fixup\@: /* @@ -267,24 +270,21 @@ andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, a0 + LONG_SUBU a2, t0 jr ra - LONG_SUBU a2, t0 .Llast_fixup\@: /* unset_bytes already in a2 */ jr ra - nop .Lsmall_fixup\@: /* * unset_bytes = end_addr - current_addr + 1 * a2 = t1 - a0 + 1 */ - .set reorder PTR_SUBU a2, t1, a0 PTR_ADDIU a2, 1 jr ra - .set noreorder .endm @@ -298,8 +298,8 @@ LEAF(memset) EXPORT_SYMBOL(memset) + move v0, a0 /* result */ beqz a1, 1f - move v0, a0 /* result */ andi a1, 0xff /* spread fillword */ LONG_SLL t1, a1, 8 diff --git a/arch/mips/loongson64/common/Makefile b/arch/mips/loongson64/common/Makefile index 57ee03022941..684624f61f5a 100644 --- a/arch/mips/loongson64/common/Makefile +++ b/arch/mips/loongson64/common/Makefile @@ -6,7 +6,6 @@ obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \ bonito-irq.o mem.o machtype.o platform.o serial.o obj-$(CONFIG_PCI) += pci.o -obj-$(CONFIG_CPU_LOONGSON2) += dma.o # # Serial port support diff --git a/arch/mips/loongson64/fuloong-2e/Makefile b/arch/mips/loongson64/fuloong-2e/Makefile index b7622720c1ad..0a9a472bec0a 100644 --- a/arch/mips/loongson64/fuloong-2e/Makefile +++ b/arch/mips/loongson64/fuloong-2e/Makefile @@ -2,4 +2,4 @@ # Makefile for Lemote Fuloong2e mini-PC board. 
# -obj-y += irq.o reset.o +obj-y += irq.o reset.o dma.o diff --git a/arch/mips/loongson64/fuloong-2e/dma.c b/arch/mips/loongson64/fuloong-2e/dma.c new file mode 100644 index 000000000000..e122292bf666 --- /dev/null +++ b/arch/mips/loongson64/fuloong-2e/dma.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/dma-direct.h> + +dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr | 0x80000000; +} + +phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr & 0x7fffffff; +} diff --git a/arch/mips/loongson64/lemote-2f/Makefile b/arch/mips/loongson64/lemote-2f/Makefile index 08b8abcbfef5..b5792c334cd5 100644 --- a/arch/mips/loongson64/lemote-2f/Makefile +++ b/arch/mips/loongson64/lemote-2f/Makefile @@ -2,7 +2,7 @@ # Makefile for lemote loongson2f family machines # -obj-y += clock.o machtype.o irq.o reset.o ec_kb3310b.o +obj-y += clock.o machtype.o irq.o reset.o dma.o ec_kb3310b.o # # Suspend Support diff --git a/arch/mips/loongson64/common/dma.c b/arch/mips/loongson64/lemote-2f/dma.c index 48f04126bde2..abf0e39d7e46 100644 --- a/arch/mips/loongson64/common/dma.c +++ b/arch/mips/loongson64/lemote-2f/dma.c @@ -8,11 +8,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) { -#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT) if (dma_addr > 0x8fffffff) return dma_addr; return dma_addr & 0x0fffffff; -#else - return dma_addr & 0x7fffffff; -#endif } diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c index cbeb20f9fc95..5605061f5f98 100644 --- a/arch/mips/loongson64/loongson-3/irq.c +++ b/arch/mips/loongson64/loongson-3/irq.c @@ -96,51 +96,8 @@ void mach_irq_dispatch(unsigned int pending) } } -static struct irqaction cascade_irqaction = { - .handler = no_action, - .flags = IRQF_NO_SUSPEND, - .name = "cascade", -}; - -static inline void mask_loongson_irq(struct irq_data *d) -{ - clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); - irq_disable_hazard(); - - /* Workaround: UART IRQ may deliver to any core */ - if (d->irq == LOONGSON_UART_IRQ) { - int cpu = smp_processor_id(); - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; - u64 intenclr_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_INTENCLR); - u64 introuter_lpc_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_LPC); - - *(volatile u32 *)intenclr_addr = 1 << 10; - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id); - } -} - -static inline void unmask_loongson_irq(struct irq_data *d) -{ - /* Workaround: UART IRQ may deliver to any core */ - if (d->irq == LOONGSON_UART_IRQ) { - int cpu = smp_processor_id(); - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node; - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node; - u64 intenset_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_INTENSET); - u64 introuter_lpc_addr = smp_group[node_id] | - (u64)(&LOONGSON_INT_ROUTER_LPC); - - *(volatile u32 *)intenset_addr = 1 << 10; - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id); - } - - set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); - irq_enable_hazard(); -} +static inline void mask_loongson_irq(struct irq_data *d) { } +static inline void unmask_loongson_irq(struct irq_data *d) { } /* For MIPS IRQs which shared by all cores */ static struct irq_chip loongson_irq_chip = { @@ -183,12 
+140,11 @@ void __init mach_init_irq(void) chip->irq_set_affinity = plat_set_irq_affinity; irq_set_chip_and_handler(LOONGSON_UART_IRQ, - &loongson_irq_chip, handle_level_irq); - - /* setup HT1 irq */ - setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction); + &loongson_irq_chip, handle_percpu_irq); + irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ, + &loongson_irq_chip, handle_percpu_irq); - set_c0_status(STATUSF_IP2 | STATUSF_IP6); + set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c index 9717106de4a5..c1e6ec52c614 100644 --- a/arch/mips/loongson64/loongson-3/numa.c +++ b/arch/mips/loongson64/loongson-3/numa.c @@ -180,43 +180,39 @@ static void __init szmem(unsigned int node) static void __init node_mem_init(unsigned int node) { - unsigned long bootmap_size; unsigned long node_addrspace_offset; - unsigned long start_pfn, end_pfn, freepfn; + unsigned long start_pfn, end_pfn; node_addrspace_offset = nid_to_addroffset(node); pr_info("Node%d's addrspace_offset is 0x%lx\n", node, node_addrspace_offset); get_pfn_range_for_nid(node, &start_pfn, &end_pfn); - freepfn = start_pfn; - if (node == 0) - freepfn = PFN_UP(__pa_symbol(&_end)); /* kernel end address */ - pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx, freepfn=0x%lx\n", - node, start_pfn, end_pfn, freepfn); + pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n", + node, start_pfn, end_pfn); __node_data[node] = prealloc__node_data + node; - NODE_DATA(node)->bdata = &bootmem_node_data[node]; NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; - bootmap_size = init_bootmem_node(NODE_DATA(node), freepfn, - start_pfn, end_pfn); free_bootmem_with_active_regions(node, end_pfn); - if (node == 0) /* used by finalize_initrd() */ + + if (node == 0) { + /* kernel end address */ + unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end)); + + /* used by finalize_initrd() */ max_low_pfn = end_pfn; - /* This is reserved for the kernel and bdata->node_bootmem_map */ - reserve_bootmem_node(NODE_DATA(node), start_pfn << PAGE_SHIFT, - ((freepfn - start_pfn) << PAGE_SHIFT) + bootmap_size, - BOOTMEM_DEFAULT); + /* Reserve the kernel text/data/bss */ + memblock_reserve(start_pfn << PAGE_SHIFT, + ((kernel_end_pfn - start_pfn) << PAGE_SHIFT)); - if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) { /* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */ - reserve_bootmem_node(NODE_DATA(node), - (node_addrspace_offset | 0xfe000000), - 32 << 20, BOOTMEM_DEFAULT); + if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) + memblock_reserve((node_addrspace_offset | 0xfe000000), + 32 << 20); } sparse_memory_present_with_active_regions(node); diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index fea95d003269..b5c1e0aa955e 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -21,6 +21,7 @@ #include <linux/sched/task_stack.h> #include <linux/smp.h> #include <linux/cpufreq.h> +#include <linux/kexec.h> #include <asm/processor.h> #include <asm/time.h> #include <asm/clock.h> @@ -349,7 +350,7 @@ static void loongson3_smp_finish(void) write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); local_irq_enable(); loongson3_ipi_write64(0, - (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0)); + ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0); pr_info("CPU#%d finished, CP0_ST=%x\n", smp_processor_id(), 
read_c0_status()); } @@ -416,13 +417,13 @@ static int loongson3_boot_secondary(int cpu, struct task_struct *idle) cpu, startargs[0], startargs[1], startargs[2]); loongson3_ipi_write64(startargs[3], - (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x18)); + ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x18); loongson3_ipi_write64(startargs[2], - (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x10)); + ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x10); loongson3_ipi_write64(startargs[1], - (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x8)); + ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x8); loongson3_ipi_write64(startargs[0], - (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0)); + ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0); return 0; } @@ -749,4 +750,7 @@ const struct plat_smp_ops loongson3_smp_ops = { .cpu_disable = loongson3_cpu_disable, .cpu_die = loongson3_cpu_die, #endif +#ifdef CONFIG_KEXEC + .kexec_nonboot_cpu = kexec_nonboot_cpu_jump, +#endif }; diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 400676ce03f4..15cae0f11880 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -32,7 +32,6 @@ #include <linux/kcore.h> #include <linux/initrd.h> -#include <asm/asm-offsets.h> #include <asm/bootinfo.h> #include <asm/cachectl.h> #include <asm/cpu.h> @@ -521,17 +520,13 @@ unsigned long pgd_current[NR_CPUS]; #endif /* - * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER - * are constants. So we use the variants from asm-offset.h until that gcc - * will officially be retired. - * * Align swapper_pg_dir in to 64K, allows its address to be loaded * with a single LUI instruction in the TLB handlers. If we used * __aligned(64K), its size would get rounded up to the alignment * size, and waste space. So we place it in its own section and align * it in the linker script. 
*/ -pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir); +pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir); #ifndef __PAGETABLE_PUD_FOLDED pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss; #endif diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c index f4961bc9a61d..cf33dd8a487e 100644 --- a/arch/mips/netlogic/common/irq.c +++ b/arch/mips/netlogic/common/irq.c @@ -291,7 +291,7 @@ static int __init xlp_of_pic_init(struct device_node *node, /* we need a hack to get the PIC's SoC chip id */ ret = of_address_to_resource(node, 0, &res); if (ret < 0) { - pr_err("PIC %s: reg property not found!\n", node->name); + pr_err("PIC %pOFn: reg property not found!\n", node); return -EINVAL; } @@ -304,21 +304,21 @@ static int __init xlp_of_pic_init(struct device_node *node, break; } if (socid == NLM_NR_NODES) { - pr_err("PIC %s: Node mapping for bus %d not found!\n", - node->name, bus); + pr_err("PIC %pOFn: Node mapping for bus %d not found!\n", + node, bus); return -EINVAL; } } else { socid = (res.start >> 18) & 0x3; if (!nlm_node_present(socid)) { - pr_err("PIC %s: node %d does not exist!\n", - node->name, socid); + pr_err("PIC %pOFn: node %d does not exist!\n", + node, socid); return -EINVAL; } } if (!nlm_node_present(socid)) { - pr_err("PIC %s: node %d does not exist!\n", node->name, socid); + pr_err("PIC %pOFn: node %d does not exist!\n", node, socid); return -EINVAL; } @@ -326,7 +326,7 @@ static int __init xlp_of_pic_init(struct device_node *node, nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE, &xlp_pic_irq_domain_ops, NULL); if (xlp_pic_domain == NULL) { - pr_err("PIC %s: Creating legacy domain failed!\n", node->name); + pr_err("PIC %pOFn: Creating legacy domain failed!\n", node); return -EINVAL; } pr_info("Node %d: IRQ domain created for PIC@%pR\n", socid, &res); diff --git a/arch/mips/pci/ops-loongson3.c b/arch/mips/pci/ops-loongson3.c index 9e118431e226..2f6ad36bdea6 100644 --- a/arch/mips/pci/ops-loongson3.c +++ b/arch/mips/pci/ops-loongson3.c @@ -18,22 +18,36 @@ static int loongson3_pci_config_access(unsigned char access_type, int where, u32 *data) { unsigned char busnum = bus->number; - u_int64_t addr, type; - void *addrp; - int device = PCI_SLOT(devfn); int function = PCI_FUNC(devfn); + int device = PCI_SLOT(devfn); int reg = where & ~3; + void *addrp; + u64 addr; + + if (where < PCI_CFG_SPACE_SIZE) { /* standard config */ + addr = (busnum << 16) | (device << 11) | (function << 8) | reg; + if (busnum == 0) { + if (device > 31) + return PCIBIOS_DEVICE_NOT_FOUND; + addrp = (void *)TO_UNCAC(HT1LO_PCICFG_BASE | addr); + } else { + addrp = (void *)TO_UNCAC(HT1LO_PCICFG_BASE_TP1 | addr); + } + } else if (where < PCI_CFG_SPACE_EXP_SIZE) { /* extended config */ + struct pci_dev *rootdev; + + rootdev = pci_get_domain_bus_and_slot(0, 0, 0); + if (!rootdev) + return PCIBIOS_DEVICE_NOT_FOUND; - addr = (busnum << 16) | (device << 11) | (function << 8) | reg; - if (busnum == 0) { - if (device > 31) + addr = pci_resource_start(rootdev, 3); + if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; - addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE) | (addr & 0xffff)); - type = 0; + addr |= busnum << 20 | device << 15 | function << 12 | reg; + addrp = (void *)TO_UNCAC(addr); } else { - addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE_TP1) | (addr)); - type = 0x10000; + return PCIBIOS_DEVICE_NOT_FOUND; } if (access_type == PCI_ACCESS_WRITE) diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c index f1e92bf743c2..3c3b1e6abb53 100644 --- 
a/arch/mips/pci/pci-legacy.c +++ b/arch/mips/pci/pci-legacy.c @@ -127,8 +127,12 @@ static void pcibios_scanbus(struct pci_controller *hose) if (pci_has_flag(PCI_PROBE_ONLY)) { pci_bus_claim_resources(bus); } else { + struct pci_bus *child; + pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); } pci_bus_add_devices(bus); } diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c index 711cdccdf65b..f376a1df326a 100644 --- a/arch/mips/pci/pci-rt2880.c +++ b/arch/mips/pci/pci-rt2880.c @@ -246,6 +246,8 @@ static int rt288x_pci_probe(struct platform_device *pdev) rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); + rt2880_pci_controller.of_node = pdev->dev.of_node; + register_pci_controller(&rt2880_pci_controller); return 0; } diff --git a/arch/mips/pmcs-msp71xx/msp_usb.c b/arch/mips/pmcs-msp71xx/msp_usb.c index c87c5f810cd1..d38ac70b5a2e 100644 --- a/arch/mips/pmcs-msp71xx/msp_usb.c +++ b/arch/mips/pmcs-msp71xx/msp_usb.c @@ -133,13 +133,13 @@ static int __init msp_usb_setup(void) * "D" for device-mode. If it works for Ethernet, why not USB... * -- hammtrev, 2007/03/22 */ - snprintf((char *)&envstr[0], sizeof(envstr), "usbmode"); + snprintf(&envstr[0], sizeof(envstr), "usbmode"); /* set default host mode */ val = 1; /* get environment string */ - strp = prom_getenv((char *)&envstr[0]); + strp = prom_getenv(&envstr[0]); if (strp) { /* compare string */ if (!strcmp(strp, "device")) diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c index 92f284d2b802..61a08943eb2f 100644 --- a/arch/mips/ralink/cevt-rt3352.c +++ b/arch/mips/ralink/cevt-rt3352.c @@ -134,7 +134,7 @@ static int __init ralink_systick_init(struct device_node *np) systick.dev.min_delta_ticks = 0x3; systick.dev.irq = irq_of_parse_and_map(np, 0); if (!systick.dev.irq) { - pr_err("%s: request_irq failed", np->name); + pr_err("%pOFn: request_irq failed", np); return -EINVAL; } @@ -146,8 +146,8 @@ static int __init ralink_systick_init(struct device_node *np) clockevents_register_device(&systick.dev); - pr_info("%s: running - mult: %d, shift: %d\n", - np->name, systick.dev.mult, systick.dev.shift); + pr_info("%pOFn: running - mult: %d, shift: %d\n", + np, systick.dev.mult, systick.dev.shift); return 0; } diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c index 765d5ba98fa2..fc056f2acfeb 100644 --- a/arch/mips/ralink/ill_acc.c +++ b/arch/mips/ralink/ill_acc.c @@ -62,7 +62,7 @@ static int __init ill_acc_of_setup(void) pdev = of_find_device_by_node(np); if (!pdev) { - pr_err("%s: failed to lookup pdev\n", np->name); + pr_err("%pOFn: failed to lookup pdev\n", np); return -EINVAL; } diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c index 93d472c60ce4..0f2264e0cf76 100644 --- a/arch/mips/ralink/rt305x.c +++ b/arch/mips/ralink/rt305x.c @@ -49,6 +49,10 @@ static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) }; static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) }; static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) }; static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) }; +static struct rt2880_pmx_func rt3352_cs1_func[] = { + FUNC("spi_cs1", 0, 45, 1), + FUNC("wdg_cs1", 1, 45, 1), +}; static struct rt2880_pmx_group rt3050_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), @@ -75,6 +79,7 @@ static struct rt2880_pmx_group rt3352_pinmux_data[] 
= { GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA), GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA), GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED), + GRP("spi_cs1", rt3352_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1), { 0 } }; diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c index 2ed8e4990b7a..082541d33161 100644 --- a/arch/mips/sgi-ip22/ip28-berr.c +++ b/arch/mips/sgi-ip22/ip28-berr.c @@ -464,7 +464,7 @@ void ip22_be_interrupt(int irq) die_if_kernel("Oops", regs); force_sig(SIGBUS, current); } else if (debug_be_interrupt) - show_regs((struct pt_regs *)regs); + show_regs(regs); } static int ip28_be_handler(struct pt_regs *regs, int is_fixup) diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 59133d0abc83..6f7bef052b7f 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -389,7 +389,6 @@ static void __init node_mem_init(cnodeid_t node) { unsigned long slot_firstpfn = slot_getbasepfn(node, 0); unsigned long slot_freepfn = node_getfirstfree(node); - unsigned long bootmap_size; unsigned long start_pfn, end_pfn; get_pfn_range_for_nid(node, &start_pfn, &end_pfn); @@ -400,7 +399,6 @@ static void __init node_mem_init(cnodeid_t node) __node_data[node] = __va(slot_freepfn << PAGE_SHIFT); memset(__node_data[node], 0, PAGE_SIZE); - NODE_DATA(node)->bdata = &bootmem_node_data[node]; NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; @@ -409,12 +407,11 @@ static void __init node_mem_init(cnodeid_t node) slot_freepfn += PFN_UP(sizeof(struct pglist_data) + sizeof(struct hub_data)); - bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn, - start_pfn, end_pfn); free_bootmem_with_active_regions(node, end_pfn); - reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT, - ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size, - BOOTMEM_DEFAULT); + + memblock_reserve(slot_firstpfn << PAGE_SHIFT, + ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT)); + sparse_memory_present_with_active_regions(node); } diff --git a/arch/mips/tools/.gitignore b/arch/mips/tools/.gitignore new file mode 100644 index 000000000000..56d34ccccce4 --- /dev/null +++ b/arch/mips/tools/.gitignore @@ -0,0 +1 @@ +elf-entry diff --git a/arch/mips/tools/Makefile b/arch/mips/tools/Makefile new file mode 100644 index 000000000000..3baee4bc6775 --- /dev/null +++ b/arch/mips/tools/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +hostprogs-y := elf-entry +PHONY += elf-entry +elf-entry: $(obj)/elf-entry + @: diff --git a/arch/mips/tools/elf-entry.c b/arch/mips/tools/elf-entry.c new file mode 100644 index 000000000000..adde79ce7fc0 --- /dev/null +++ b/arch/mips/tools/elf-entry.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <byteswap.h> +#include <elf.h> +#include <endian.h> +#include <inttypes.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#ifdef be32toh +/* If libc provides [bl]e{32,64}toh() then we'll use them */ +#elif BYTE_ORDER == LITTLE_ENDIAN +# define be32toh(x) bswap_32(x) +# define le32toh(x) (x) +# define be64toh(x) bswap_64(x) +# define le64toh(x) (x) +#elif BYTE_ORDER == BIG_ENDIAN +# define be32toh(x) (x) +# define le32toh(x) bswap_32(x) +# define be64toh(x) (x) +# define le64toh(x) bswap_64(x) +#endif + +__attribute__((noreturn)) +static void die(const char *msg) +{ + fputs(msg, stderr); + exit(EXIT_FAILURE); +} + +int main(int argc, const char *argv[]) +{ + uint64_t entry; + 
size_t nread; + FILE *file; + union { + Elf32_Ehdr ehdr32; + Elf64_Ehdr ehdr64; + } hdr; + + if (argc != 2) + die("Usage: elf-entry <elf-file>\n"); + + file = fopen(argv[1], "r"); + if (!file) { + perror("Unable to open input file"); + return EXIT_FAILURE; + } + + nread = fread(&hdr, 1, sizeof(hdr), file); + if (nread != sizeof(hdr)) { + perror("Unable to read input file"); + return EXIT_FAILURE; + } + + if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG)) + die("Input is not an ELF\n"); + + switch (hdr.ehdr32.e_ident[EI_CLASS]) { + case ELFCLASS32: + switch (hdr.ehdr32.e_ident[EI_DATA]) { + case ELFDATA2LSB: + entry = le32toh(hdr.ehdr32.e_entry); + break; + case ELFDATA2MSB: + entry = be32toh(hdr.ehdr32.e_entry); + break; + default: + die("Invalid ELF encoding\n"); + } + + /* Sign extend to form a canonical address */ + entry = (int64_t)(int32_t)entry; + break; + + case ELFCLASS64: + switch (hdr.ehdr32.e_ident[EI_DATA]) { + case ELFDATA2LSB: + entry = le64toh(hdr.ehdr64.e_entry); + break; + case ELFDATA2MSB: + entry = be64toh(hdr.ehdr64.e_entry); + break; + default: + die("Invalid ELF encoding\n"); + } + break; + + default: + die("Invalid ELF class\n"); + } + + printf("0x%016" PRIx64 "\n", entry); + return EXIT_SUCCESS; +} diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index f6d9182ef82a..70a1ab66d252 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -960,12 +960,11 @@ void __init txx9_sramc_init(struct resource *r) goto exit_put; err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr); if (err) { - device_unregister(&dev->dev); iounmap(dev->base); - kfree(dev); + device_unregister(&dev->dev); } return; exit_put: + iounmap(dev->base); put_device(&dev->dev); - return; } diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile index 3509fac10491..9f525ed70049 100644 --- a/arch/nds32/Makefile +++ b/arch/nds32/Makefile @@ -47,7 +47,7 @@ CHECKFLAGS += -D__NDS32_EB__ endif boot := arch/nds32/boot -core-$(BUILTIN_DTB) += $(boot)/dts/ +core-y += $(boot)/dts/ .PHONY: FORCE diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h index 6e95901cabe3..603e826e0449 100644 --- a/arch/nds32/include/uapi/asm/unistd.h +++ b/arch/nds32/include/uapi/asm/unistd.h @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2005-2017 Andes Technology Corporation +#define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYNC_FILE_RANGE2 /* Use the standard ABI for syscalls */ diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 03965692fbfe..2df0c57f2833 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -23,6 +23,9 @@ config NIOS2 select SPARSE_IRQ select USB_ARCH_HAS_HCD if USB_SUPPORT select CPU_NO_EFFICIENT_FFS + select HAVE_MEMBLOCK + select ARCH_DISCARD_MEMBLOCK + select NO_BOOTMEM config GENERIC_CSUM def_bool y diff --git a/arch/nios2/Makefile b/arch/nios2/Makefile index 8673a79dca9c..52c03e60b114 100644 --- a/arch/nios2/Makefile +++ b/arch/nios2/Makefile @@ -49,21 +49,13 @@ BOOT_TARGETS = vmImage zImage PHONY += $(BOOT_TARGETS) install KBUILD_IMAGE := $(nios2-boot)/vmImage -ifneq ($(CONFIG_NIOS2_DTB_SOURCE),"") - core-y += $(nios2-boot)/ -endif +core-y += $(nios2-boot)/dts/ all: vmImage archclean: $(Q)$(MAKE) $(clean)=$(nios2-boot) -%.dtb: | scripts - $(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@ - -dtbs: - $(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@ - $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@ @@ -76,5 +68,4 @@ define archhelp echo ' (your) 
~/bin/$(INSTALLKERNEL) or' echo ' (distribution) /sbin/$(INSTALLKERNEL) or' echo ' install to $$(INSTALL_PATH)' - echo ' dtbs - Build device tree blobs for enabled boards' endef diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile index 2ba23a679732..37dfc7e584bc 100644 --- a/arch/nios2/boot/Makefile +++ b/arch/nios2/boot/Makefile @@ -31,27 +31,5 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(obj)/compressed/vmlinux: $(obj)/vmlinux.gz FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ -# Rule to build device tree blobs -DTB_SRC := $(patsubst "%",%,$(CONFIG_NIOS2_DTB_SOURCE)) - -# Make sure the generated dtb gets removed during clean -extra-$(CONFIG_NIOS2_DTB_SOURCE_BOOL) += system.dtb - -$(obj)/system.dtb: $(DTB_SRC) FORCE - $(call cmd,dtc) - -# Ensure system.dtb exists -$(obj)/linked_dtb.o: $(obj)/system.dtb - -obj-$(CONFIG_NIOS2_DTB_SOURCE_BOOL) += linked_dtb.o - -targets += $(dtb-y) - -# Rule to build device tree blobs with make command -$(obj)/%.dtb: $(src)/dts/%.dts FORCE - $(call if_changed_dep,dtc) - -$(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y)) - install: sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)" diff --git a/arch/nios2/boot/dts/Makefile b/arch/nios2/boot/dts/Makefile new file mode 100644 index 000000000000..a91a0b09be63 --- /dev/null +++ b/arch/nios2/boot/dts/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-y := $(patsubst "%.dts",%.dtb.o,$(CONFIG_NIOS2_DTB_SOURCE)) + +dtstree := $(srctree)/$(src) +dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) diff --git a/arch/nios2/boot/linked_dtb.S b/arch/nios2/boot/linked_dtb.S deleted file mode 100644 index 071f922db338..000000000000 --- a/arch/nios2/boot/linked_dtb.S +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. 
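The elf-entry.c host tool added earlier among these hunks prints a kernel image's entry point, sign-extending 32-bit entries into canonical 64-bit addresses. A standalone sketch of that sign-extension (the example address is only illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t entry32 = 0x80100000u;	/* a typical KSEG0 entry point */

	/* Same cast sequence elf-entry uses for ELFCLASS32 images */
	uint64_t entry64 = (uint64_t)(int64_t)(int32_t)entry32;

	printf("0x%016" PRIx64 "\n", entry64);	/* prints 0xffffffff80100000 */
	return 0;
}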
- * - */ -.section .dtb.init.rodata,"a" -.incbin "arch/nios2/boot/system.dtb" diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h index b6bdae04bc84..d9948d88790b 100644 --- a/arch/nios2/include/uapi/asm/unistd.h +++ b/arch/nios2/include/uapi/asm/unistd.h @@ -19,6 +19,7 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_STAT64 /* Use the standard ABI for syscalls */ #include <asm-generic/unistd.h> diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c index 93207718bb22..ccc1d2a15a0a 100644 --- a/arch/nios2/kernel/cpuinfo.c +++ b/arch/nios2/kernel/cpuinfo.c @@ -47,7 +47,7 @@ void __init setup_cpuinfo(void) const char *str; int len; - cpu = of_find_node_by_type(NULL, "cpu"); + cpu = of_get_cpu_node(0, NULL); if (!cpu) panic("%s: No CPU found in devicetree!\n", __func__); @@ -120,6 +120,8 @@ void __init setup_cpuinfo(void) cpuinfo.reset_addr = fcpu(cpu, "altr,reset-addr"); cpuinfo.exception_addr = fcpu(cpu, "altr,exception-addr"); cpuinfo.fast_tlb_miss_exc_addr = fcpu(cpu, "altr,fast-tlb-miss-addr"); + + of_node_put(cpu); } #ifdef CONFIG_PROC_FS diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c index 8d7446a4b475..a6d4f7530247 100644 --- a/arch/nios2/kernel/prom.c +++ b/arch/nios2/kernel/prom.c @@ -32,23 +32,6 @@ #include <asm/sections.h> -void __init early_init_dt_add_memory_arch(u64 base, u64 size) -{ - u64 kernel_start = (u64)virt_to_phys(_text); - - if (!memory_size && - (kernel_start >= base) && (kernel_start < (base + size))) - memory_size = size; - -} - -int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, - bool nomap) -{ - reserve_bootmem(base, size, BOOTMEM_DEFAULT); - return 0; -} - void __init early_init_devtree(void *params) { __be32 *dtb = (u32 *)__dtb_start; diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index 926a02b17b31..2d0011ddd4d5 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -17,6 +17,7 @@ #include <linux/sched/task.h> #include <linux/console.h> #include <linux/bootmem.h> +#include <linux/memblock.h> #include <linux/initrd.h> #include <linux/of_fdt.h> #include <linux/screen_info.h> @@ -143,10 +144,12 @@ asmlinkage void __init nios2_boot_init(unsigned r4, unsigned r5, unsigned r6, void __init setup_arch(char **cmdline_p) { - int bootmap_size; + int dram_start; console_verbose(); + dram_start = memblock_start_of_DRAM(); + memory_size = memblock_phys_mem_size(); memory_start = PAGE_ALIGN((unsigned long)__pa(_end)); memory_end = (unsigned long) CONFIG_NIOS2_MEM_BASE + memory_size; @@ -163,39 +166,11 @@ void __init setup_arch(char **cmdline_p) max_low_pfn = PFN_DOWN(memory_end); max_mapnr = max_low_pfn; - /* - * give all the memory to the bootmap allocator, tell it to put the - * boot mem_map at the start of memory - */ - pr_debug("init_bootmem_node(?,%#lx, %#x, %#lx)\n", - min_low_pfn, PFN_DOWN(PHYS_OFFSET), max_low_pfn); - bootmap_size = init_bootmem_node(NODE_DATA(0), - min_low_pfn, PFN_DOWN(PHYS_OFFSET), - max_low_pfn); - - /* - * free the usable memory, we have to make sure we do not free - * the bootmem bitmap so we then reserve it after freeing it :-) - */ - pr_debug("free_bootmem(%#lx, %#lx)\n", - memory_start, memory_end - memory_start); - free_bootmem(memory_start, memory_end - memory_start); - - /* - * Reserve the bootmem bitmap itself as well. 
We do this in two - * steps (first step was init_bootmem()) because this catches - * the (very unlikely) case of us accidentally initializing the - * bootmem allocator with an invalid RAM area. - * - * Arguments are start, size - */ - pr_debug("reserve_bootmem(%#lx, %#x)\n", memory_start, bootmap_size); - reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT); - + memblock_reserve(dram_start, memory_start - dram_start); #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) { - reserve_bootmem(virt_to_phys((void *)initrd_start), - initrd_end - initrd_start, BOOTMEM_DEFAULT); + memblock_reserve(virt_to_phys((void *)initrd_start), + initrd_end - initrd_start); } #endif /* CONFIG_BLK_DEV_INITRD */ diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c index ab88b6dd4679..54467d0085a1 100644 --- a/arch/nios2/kernel/time.c +++ b/arch/nios2/kernel/time.c @@ -214,12 +214,12 @@ static int __init nios2_timer_get_base_and_freq(struct device_node *np, { *base = of_iomap(np, 0); if (!*base) { - pr_crit("Unable to map reg for %s\n", np->name); + pr_crit("Unable to map reg for %pOFn\n", np); return -ENXIO; } if (of_property_read_u32(np, "clock-frequency", freq)) { - pr_crit("Unable to get %s clock frequency\n", np->name); + pr_crit("Unable to get %pOFn clock frequency\n", np); return -EINVAL; } diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h index 11c5a58ab333..ec37df18d8ed 100644 --- a/arch/openrisc/include/uapi/asm/unistd.h +++ b/arch/openrisc/include/uapi/asm/unistd.h @@ -20,6 +20,7 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_CLONE diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index 9d28ab14d139..e17fcd83120f 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -158,9 +158,8 @@ static struct device_node *setup_find_cpu_node(int cpu) { u32 hwid; struct device_node *cpun; - struct device_node *cpus = of_find_node_by_path("/cpus"); - for_each_available_child_of_node(cpus, cpun) { + for_each_of_cpu_node(cpun) { if (of_property_read_u32(cpun, "reg", &hwid)) continue; if (hwid == cpu) diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 5ce030266e7d..d047a09d660f 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -156,12 +156,3 @@ define archhelp @echo ' copy to $$(INSTALL_PATH)' @echo ' zinstall - Install compressed vmlinuz kernel' endef - -# we require gcc 3.3 or above to compile the kernel -archprepare: checkbin -checkbin: - @if test "$(cc-version)" -lt "0303"; then \ - echo -n "Sorry, GCC v3.3 or above is required to build " ; \ - echo "the kernel." 
; \ - false ; \ - fi diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile index 7d7e594bda36..777533cdea31 100644 --- a/arch/parisc/boot/compressed/Makefile +++ b/arch/parisc/boot/compressed/Makefile @@ -14,7 +14,7 @@ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks +KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -fno-builtin-printf KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os ifndef CONFIG_64BIT KBUILD_CFLAGS += -mfast-indirect-calls @@ -22,7 +22,6 @@ endif OBJECTS += $(obj)/head.o $(obj)/real2.o $(obj)/firmware.o $(obj)/misc.o $(obj)/piggy.o -# LDFLAGS_vmlinux := -X --whole-archive -e startup -T LDFLAGS_vmlinux := -X -e startup --as-needed -T $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(LIBGCC) $(call if_changed,ld) @@ -55,7 +54,6 @@ $(obj)/misc.o: $(obj)/sizes.h CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER $(obj)/vmlinux.lds: $(obj)/sizes.h -OBJCOPYFLAGS_vmlinux.bin := -O binary -R .comment -S $(obj)/vmlinux.bin: vmlinux $(call if_changed,objcopy) diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c index f57118e1f6b4..2556bb181813 100644 --- a/arch/parisc/boot/compressed/misc.c +++ b/arch/parisc/boot/compressed/misc.c @@ -5,6 +5,7 @@ */ #include <linux/uaccess.h> +#include <linux/elf.h> #include <asm/unaligned.h> #include <asm/page.h> #include "sizes.h" @@ -227,13 +228,62 @@ static void flush_data_cache(char *start, unsigned long length) asm ("sync"); } +static void parse_elf(void *output) +{ +#ifdef CONFIG_64BIT + Elf64_Ehdr ehdr; + Elf64_Phdr *phdrs, *phdr; +#else + Elf32_Ehdr ehdr; + Elf32_Phdr *phdrs, *phdr; +#endif + void *dest; + int i; + + memcpy(&ehdr, output, sizeof(ehdr)); + if (ehdr.e_ident[EI_MAG0] != ELFMAG0 || + ehdr.e_ident[EI_MAG1] != ELFMAG1 || + ehdr.e_ident[EI_MAG2] != ELFMAG2 || + ehdr.e_ident[EI_MAG3] != ELFMAG3) { + error("Kernel is not a valid ELF file"); + return; + } + +#ifdef DEBUG + printf("Parsing ELF... "); +#endif + + phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum); + if (!phdrs) + error("Failed to allocate space for phdrs"); + + memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum); + + for (i = 0; i < ehdr.e_phnum; i++) { + phdr = &phdrs[i]; + + switch (phdr->p_type) { + case PT_LOAD: + dest = (void *)((unsigned long) phdr->p_paddr & + (__PAGE_OFFSET_DEFAULT-1)); + memmove(dest, output + phdr->p_offset, phdr->p_filesz); + break; + default: + break; + } + } + + free(phdrs); +} + unsigned long decompress_kernel(unsigned int started_wide, unsigned int command_line, const unsigned int rd_start, const unsigned int rd_end) { char *output; - unsigned long len, len_all; + unsigned long vmlinux_addr, vmlinux_len; + unsigned long kernel_addr, kernel_len; #ifdef CONFIG_64BIT parisc_narrow_firmware = 0; @@ -241,27 +291,29 @@ unsigned long decompress_kernel(unsigned int started_wide, set_firmware_width_unlocked(); - putchar('U'); /* if you get this p and no more, string storage */ + putchar('D'); /* if you get this D and no more, string storage */ /* in $GLOBAL$ is wrong or %dp is wrong */ - puts("ncompressing ...\n"); - - output = (char *) KERNEL_BINARY_TEXT_START; - len_all = __pa(SZ_end) - __pa(SZparisc_kernel_start); + puts("ecompressing Linux... 
"); - if ((unsigned long) &_startcode_end > (unsigned long) output) + /* where the final bits are stored */ + kernel_addr = KERNEL_BINARY_TEXT_START; + kernel_len = __pa(SZ_end) - __pa(SZparisc_kernel_start); + if ((unsigned long) &_startcode_end > kernel_addr) error("Bootcode overlaps kernel code"); - len = get_unaligned_le32(&output_len); - if (len > len_all) - error("Output len too big."); - else - memset(&output[len], 0, len_all - len); + /* + * Calculate addr to where the vmlinux ELF file shall be decompressed. + * Assembly code in head.S positioned the stack directly behind bss, so + * leave 2 MB for the stack. + */ + vmlinux_addr = (unsigned long) &_ebss + 2*1024*1024; + vmlinux_len = get_unaligned_le32(&output_len); + output = (char *) vmlinux_addr; /* * Initialize free_mem_ptr and free_mem_end_ptr. */ - free_mem_ptr = (unsigned long) &_ebss; - free_mem_ptr += 2*1024*1024; /* leave 2 MB for stack */ + free_mem_ptr = vmlinux_addr + vmlinux_len; /* Limit memory for bootoader to 1GB */ #define ARTIFICIAL_LIMIT (1*1024*1024*1024) @@ -275,7 +327,11 @@ unsigned long decompress_kernel(unsigned int started_wide, free_mem_end_ptr = rd_start; #endif + if (free_mem_ptr >= free_mem_end_ptr) + error("Kernel too big for machine."); + #ifdef DEBUG + printf("\n"); printf("startcode_end = %x\n", &_startcode_end); printf("commandline = %x\n", command_line); printf("rd_start = %x\n", rd_start); @@ -287,16 +343,19 @@ unsigned long decompress_kernel(unsigned int started_wide, printf("input_data = %x\n", input_data); printf("input_len = %x\n", input_len); printf("output = %x\n", output); - printf("output_len = %x\n", len); - printf("output_max = %x\n", len_all); + printf("output_len = %x\n", vmlinux_len); + printf("kernel_addr = %x\n", kernel_addr); + printf("kernel_len = %x\n", kernel_len); #endif __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); + parse_elf(output); - flush_data_cache(output, len); + output = (char *) kernel_addr; + flush_data_cache(output, kernel_len); - printf("Booting kernel ...\n\n"); + printf("done.\nBooting the kernel.\n"); return (unsigned long) output; } diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S index 4ebd4e65524c..bfd7872739a3 100644 --- a/arch/parisc/boot/compressed/vmlinux.lds.S +++ b/arch/parisc/boot/compressed/vmlinux.lds.S @@ -42,6 +42,12 @@ SECTIONS #endif _startcode_end = .; + /* vmlinux.bin.gz is here */ + . = ALIGN(8); + .rodata.compressed : { + *(.rodata.compressed) + } + /* bootloader code and data starts behind area of extracted kernel */ . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START); @@ -68,10 +74,6 @@ SECTIONS _erodata = . ; } . = ALIGN(8); - .rodata.compressed : { - *(.rodata.compressed) - } - . = ALIGN(8); .bss : { _bss = . 
; *(.bss) diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h new file mode 100644 index 000000000000..bf485a94d0b4 --- /dev/null +++ b/arch/parisc/include/asm/alternative.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_PARISC_ALTERNATIVE_H +#define __ASM_PARISC_ALTERNATIVE_H + +#define ALT_COND_NO_SMP 0x01 /* when running UP instead of SMP */ +#define ALT_COND_NO_DCACHE 0x02 /* if system has no d-cache */ +#define ALT_COND_NO_ICACHE 0x04 /* if system has no i-cache */ +#define ALT_COND_NO_SPLIT_TLB 0x08 /* if split_tlb == 0 */ +#define ALT_COND_NO_IOC_FDC 0x10 /* if I/O cache does not need flushes */ + +#define INSN_PxTLB 0x02 /* modify pdtlb, pitlb */ +#define INSN_NOP 0x08000240 /* nop */ + +#ifndef __ASSEMBLY__ + +#include <linux/init.h> +#include <linux/types.h> +#include <linux/stddef.h> +#include <linux/stringify.h> + +struct alt_instr { + s32 orig_offset; /* offset to original instructions */ + u32 len; /* end of original instructions */ + u32 cond; /* see ALT_COND_XXX */ + u32 replacement; /* replacement instruction or code */ +}; + +void set_kernel_text_rw(int enable_read_write); + +/* Alternative SMP implementation. */ +#define ALTERNATIVE(cond, replacement) "!0:" \ + ".section .altinstructions, \"aw\" !" \ + ".word (0b-4-.), 1, " __stringify(cond) "," \ + __stringify(replacement) " !" \ + ".previous" + +#else + +#define ALTERNATIVE(from, to, cond, replacement)\ + .section .altinstructions, "aw" ! \ + .word (from - .), (to - from)/4 ! \ + .word cond, replacement ! \ + .previous + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_PARISC_ALTERNATIVE_H */ diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index e9c6385ef0d1..c17ec0ee6e7c 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -129,15 +129,8 @@ .macro debug value .endm - - /* Shift Left - note the r and t can NOT be the same! 
*/ - .macro shl r, sa, t - dep,z \r, 31-(\sa), 32-(\sa), \t - .endm - - /* The PA 2.0 shift left */ .macro shlw r, sa, t - depw,z \r, 31-(\sa), 32-(\sa), \t + zdep \r, 31-(\sa), 32-(\sa), \t .endm /* And the PA 2.0W shift left */ diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index 150b7f30ea90..006fb939cac8 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -6,6 +6,7 @@ #ifndef __ARCH_PARISC_CACHE_H #define __ARCH_PARISC_CACHE_H +#include <asm/alternative.h> /* * PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors @@ -41,9 +42,24 @@ extern int icache_stride; extern struct pdc_cache_info cache_info; void parisc_setup_cache_timing(void); -#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr)); -#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr)); -#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" : : "r" (addr)); +#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \ + ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ + : : "r" (addr)) +#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \ + ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ + ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \ + : : "r" (addr)) +#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \ + ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \ + : : "r" (addr)) + +#define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \ + ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \ + ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \ + : : "r" (addr)) +#define asm_io_sync() asm volatile("sync" \ + ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \ + ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: ) #endif /* ! __ASSEMBLY__ */ diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index ab8a54771507..e03e3c849f40 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -8,36 +8,22 @@ #include <linux/sched.h> #include <linux/thread_info.h> +#include <asm-generic/compat.h> + #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "parisc\0\0" -typedef u32 compat_size_t; -typedef s32 compat_ssize_t; -typedef s32 compat_clock_t; -typedef s32 compat_pid_t; typedef u32 __compat_uid_t; typedef u32 __compat_gid_t; typedef u32 __compat_uid32_t; typedef u32 __compat_gid32_t; typedef u16 compat_mode_t; -typedef u32 compat_ino_t; typedef u32 compat_dev_t; -typedef s32 compat_off_t; -typedef s64 compat_loff_t; typedef u16 compat_nlink_t; typedef u16 compat_ipc_pid_t; -typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; -typedef s32 compat_key_t; -typedef s32 compat_timer_t; - -typedef s32 compat_int_t; -typedef s32 compat_long_t; typedef s64 compat_s64; -typedef u32 compat_uint_t; -typedef u32 compat_ulong_t; typedef u64 compat_u64; -typedef u32 compat_uptr_t; struct compat_stat { compat_dev_t st_dev; /* dev_t is 32 bits on parisc */ @@ -48,11 +34,11 @@ struct compat_stat { u16 st_reserved2; /* old st_gid */ compat_dev_t st_rdev; compat_off_t st_size; - compat_time_t st_atime; + old_time32_t st_atime; u32 st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; u32 st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; u32 st_ctime_nsec; s32 st_blksize; s32 st_blocks; diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h index 58e0f4620426..7cb595dcb7d7 100644 --- a/arch/parisc/include/asm/hugetlb.h +++ b/arch/parisc/include/asm/hugetlb.h @@ -3,12 +3,12 @@ #define _ASM_PARISC64_HUGETLB_H #include <asm/page.h> -#include <asm-generic/hugetlb.h> - +#define 
__HAVE_ARCH_HUGE_SET_HUGE_PTE_AT void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); @@ -22,6 +22,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, * If the arch doesn't supply something else, assume that hugepage * size aligned regions are ok without further preparation. */ +#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE static inline int prepare_hugepage_range(struct file *file, unsigned long addr, unsigned long len) { @@ -32,43 +33,25 @@ static inline int prepare_hugepage_range(struct file *file, return 0; } -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, - unsigned long ceiling) -{ - free_pgd_range(tlb, addr, end, floor, ceiling); -} - +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { } -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} - -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty); -static inline pte_t huge_ptep_get(pte_t *ptep) -{ - return *ptep; -} - static inline void arch_clear_hugepage_flags(struct page *page) { } +#include <asm-generic/hugetlb.h> + #endif /* _ASM_PARISC64_HUGETLB_H */ diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index af00fe9bf846..b77f49ce6220 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -117,14 +117,16 @@ extern int npmem_ranges; /* This governs the relationship between virtual and physical addresses. 
* If you alter it, make sure to take care of our various fixed mapping * segments in fixmap.h */ -#if defined(BOOTLOADER) -#define __PAGE_OFFSET (0) /* bootloader uses physical addresses */ -#else #ifdef CONFIG_64BIT -#define __PAGE_OFFSET (0x40000000) /* 1GB */ +#define __PAGE_OFFSET_DEFAULT (0x40000000) /* 1GB */ #else -#define __PAGE_OFFSET (0x10000000) /* 256MB */ +#define __PAGE_OFFSET_DEFAULT (0x10000000) /* 256MB */ #endif + +#if defined(BOOTLOADER) +#define __PAGE_OFFSET (0) /* bootloader uses physical addresses */ +#else +#define __PAGE_OFFSET __PAGE_OFFSET_DEFAULT #endif /* BOOTLOADER */ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 339e83ddb39e..5b187d40d604 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -11,6 +11,7 @@ extern int parisc_narrow_firmware; extern int pdc_type; extern unsigned long parisc_cell_num; /* cell number the CPU runs on (PAT) */ extern unsigned long parisc_cell_loc; /* cell location of CPU (PAT) */ +extern unsigned long parisc_pat_pdc_cap; /* PDC capabilities (PAT) */ /* Values for pdc_type */ #define PDC_TYPE_ILLEGAL -1 diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h index a468a172ee33..bce9ee1c1c99 100644 --- a/arch/parisc/include/asm/pdcpat.h +++ b/arch/parisc/include/asm/pdcpat.h @@ -173,6 +173,16 @@ /* PDC PAT PD */ #define PDC_PAT_PD 74L /* Protection Domain Info */ #define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */ +#define PDC_PAT_PD_GET_PDC_INTERF_REV 1L /* Get PDC Interface Revisions */ + +#define PDC_PAT_CAPABILITY_BIT_PDC_SERIALIZE (1UL << 0) +#define PDC_PAT_CAPABILITY_BIT_PDC_POLLING (1UL << 1) +#define PDC_PAT_CAPABILITY_BIT_PDC_NBC (1UL << 2) /* non-blocking calls */ +#define PDC_PAT_CAPABILITY_BIT_PDC_UFO (1UL << 3) +#define PDC_PAT_CAPABILITY_BIT_PDC_IODC_32 (1UL << 4) +#define PDC_PAT_CAPABILITY_BIT_PDC_IODC_64 (1UL << 5) +#define PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ (1UL << 6) +#define PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB (1UL << 7) /* PDC_PAT_PD_GET_ADDR_MAP entry types */ #define PAT_MEMORY_DESCRIPTOR 1 @@ -186,6 +196,14 @@ #define PAT_MEMUSE_GI 128 #define PAT_MEMUSE_GNI 129 +/* PDC PAT REGISTER TOC */ +#define PDC_PAT_REGISTER_TOC 75L +#define PDC_PAT_TOC_REGISTER_VECTOR 0L /* Register TOC Vector */ +#define PDC_PAT_TOC_READ_VECTOR 1L /* Read TOC Vector */ + +/* PDC PAT SYSTEM_INFO */ +#define PDC_PAT_SYSTEM_INFO 76L +/* PDC_PAT_SYSTEM_INFO uses the same options as PDC_SYSTEM_INFO function. 
*/ #ifndef __ASSEMBLY__ #include <linux/types.h> @@ -297,18 +315,29 @@ struct pdc_pat_pd_addr_map_entry { ** PDC_PAT_CELL_GET_INFO return block */ typedef struct pdc_pat_cell_info_rtn_block { - unsigned long cpu_info; - unsigned long cell_info; - unsigned long cell_location; - unsigned long reo_location; - unsigned long mem_size; - unsigned long dimm_status; unsigned long pdc_rev; - unsigned long fabric_info0; - unsigned long fabric_info1; - unsigned long fabric_info2; - unsigned long fabric_info3; - unsigned long reserved[21]; + unsigned long capabilities; /* see PDC_PAT_CAPABILITY_BIT_* */ + unsigned long reserved0[2]; + unsigned long cell_info; /* 0x20 */ + unsigned long cell_phys_location; + unsigned long cpu_info; + unsigned long cpu_speed; + unsigned long io_chassis_phys_location; + unsigned long cell_io_information; + unsigned long reserved1[2]; + unsigned long io_slot_info_size; /* 0x60 */ + struct { + unsigned long header, info0, info1; + unsigned long phys_loc, hw_path; + } io_slot[16]; + unsigned long cell_mem_size; /* 0x2e8 */ + unsigned long cell_dimm_info_size; + unsigned long dimm_info[16]; + unsigned long fabric_info_size; /* 0x3f8 */ + struct { /* 0x380 */ + unsigned long fabric_info_xbc_port; + unsigned long rc_attached_to_xbc; + } xbc[8*4]; } pdc_pat_cell_info_rtn_block_t; @@ -326,12 +355,19 @@ typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t; extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data); extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info); -extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod, unsigned long view_type, void *mem_addr); +extern int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info, + unsigned long *actcnt, unsigned long offset, + unsigned long cell_number); +extern int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, + unsigned long mod, unsigned long view_type, void *mem_addr); extern int pdc_pat_cell_num_to_loc(void *, unsigned long); extern int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa); -extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, unsigned long count, unsigned long offset); +extern int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, + unsigned long count, unsigned long offset); +extern int pdc_pat_pd_get_pdc_revisions(unsigned long *legacy_rev, + unsigned long *pat_rev, unsigned long *pdc_cap); extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *val); extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val); diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index fa6b7c78f18a..b941ac7d4e70 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -43,8 +43,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) { mtsp(mm->context, 1); pdtlb(addr); - if (unlikely(split_tlb)) - pitlb(addr); + pitlb(addr); } /* Certain architectures need to do special things when PTEs @@ -56,19 +55,14 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) *(pteptr) = (pteval); \ } while(0) -#define pte_inserted(x) \ - ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \ - == (_PAGE_PRESENT|_PAGE_ACCESSED)) - #define set_pte_at(mm, addr, ptep, pteval) \ do { \ pte_t old_pte; \ unsigned long flags; \ spin_lock_irqsave(&pa_tlb_lock, flags); \ old_pte = *ptep; \ - if 
(pte_inserted(old_pte)) \ - purge_tlb_entries(mm, addr); \ set_pte(ptep, pteval); \ + purge_tlb_entries(mm, addr); \ spin_unlock_irqrestore(&pa_tlb_lock, flags); \ } while (0) @@ -202,7 +196,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT)) #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED) #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC) @@ -227,22 +221,22 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #ifndef __ASSEMBLY__ -#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER) +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE) /* Others seem to make this executable, I don't know if that's correct or not. The stack is mapped this way though so this is necessary in the short term - dhd@linuxcare.com, 2000-08-08 */ -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) -#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED) -#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ) +#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE) +#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC) #define PAGE_COPY PAGE_EXECREAD -#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) +#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX) #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) -#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ) +#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY| _PAGE_READ) /* @@ -479,8 +473,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned spin_unlock_irqrestore(&pa_tlb_lock, flags); return 0; } - purge_tlb_entries(vma->vm_mm, addr); set_pte(ptep, pte_mkold(pte)); + purge_tlb_entries(vma->vm_mm, addr); spin_unlock_irqrestore(&pa_tlb_lock, flags); return 1; } @@ -493,9 +487,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, spin_lock_irqsave(&pa_tlb_lock, flags); old_pte = *ptep; - if (pte_inserted(old_pte)) - purge_tlb_entries(mm, addr); set_pte(ptep, __pte(0)); + purge_tlb_entries(mm, addr); spin_unlock_irqrestore(&pa_tlb_lock, flags); return old_pte; @@ -505,8 +498,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, { unsigned long flags; spin_lock_irqsave(&pa_tlb_lock, flags); - purge_tlb_entries(mm, addr); set_pte(ptep, pte_wrprotect(*ptep)); + purge_tlb_entries(mm, addr); 
spin_unlock_irqrestore(&pa_tlb_lock, flags); } diff --git a/arch/parisc/include/asm/sections.h b/arch/parisc/include/asm/sections.h index 5a40b51df80c..bb52aea0cb21 100644 --- a/arch/parisc/include/asm/sections.h +++ b/arch/parisc/include/asm/sections.h @@ -5,6 +5,8 @@ /* nothing to see, move along */ #include <asm-generic/sections.h> +extern char __alt_instructions[], __alt_instructions_end[]; + #ifdef CONFIG_64BIT #define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1 diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 8a63515f03bf..16aec9ba2580 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x) volatile unsigned int *a; a = __ldcw_align(x); - mb(); - *a = 1; + /* Release with ordered store. */ + __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory"); } static inline int arch_spin_trylock(arch_spinlock_t *x) diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h index 14668bd52d60..6804374efa66 100644 --- a/arch/parisc/include/asm/tlbflush.h +++ b/arch/parisc/include/asm/tlbflush.h @@ -85,8 +85,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, purge_tlb_start(flags); mtsp(sid, 1); pdtlb(addr); - if (unlikely(split_tlb)) - pitlb(addr); + pitlb(addr); purge_tlb_end(flags); } #endif diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index 3d507d04eb4c..bc37a4953eaa 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -141,6 +141,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \ } +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM @@ -151,11 +152,11 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ #define __ARCH_WANT_COMPAT_SYS_TIME #define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL #define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild index 286ef5a5904b..adb5c64831c7 100644 --- a/arch/parisc/include/uapi/asm/Kbuild +++ b/arch/parisc/include/uapi/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += kvm_para.h generic-y += param.h generic-y += poll.h generic-y += resource.h +generic-y += siginfo.h diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h deleted file mode 100644 index 4a1062e05aaf..000000000000 --- a/arch/parisc/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _PARISC_SIGINFO_H -#define _PARISC_SIGINFO_H - -#if defined(__LP64__) -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#endif - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index bddd2acebdcc..804880efa11e 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -36,6 +36,7 @@ EXPORT_SYMBOL(dcache_stride); void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); EXPORT_SYMBOL(flush_dcache_page_asm); +void 
purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); @@ -303,6 +304,17 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, preempt_enable(); } +static inline void +__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, + unsigned long physaddr) +{ + preempt_disable(); + purge_dcache_page_asm(physaddr, vmaddr); + if (vma->vm_flags & VM_EXEC) + flush_icache_page_asm(physaddr, vmaddr); + preempt_enable(); +} + void flush_dcache_page(struct page *page) { struct address_space *mapping = page_mapping_file(page); @@ -364,7 +376,7 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm); #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; -#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */ +#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */ static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD; void __init parisc_setup_cache_timing(void) @@ -404,10 +416,6 @@ void __init parisc_setup_cache_timing(void) goto set_tlb_threshold; } - alltime = mfctl(16); - flush_tlb_all(); - alltime = mfctl(16) - alltime; - size = 0; start = (unsigned long) _text; rangetime = mfctl(16); @@ -418,13 +426,19 @@ void __init parisc_setup_cache_timing(void) } rangetime = mfctl(16) - rangetime; - printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", + alltime = mfctl(16); + flush_tlb_all(); + alltime = mfctl(16) - alltime; + + printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n", alltime, size, rangetime); - threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime); + threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime); + printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n", + threshold/1024); set_tlb_threshold: - if (threshold) + if (threshold > parisc_tlb_flush_threshold) parisc_tlb_flush_threshold = threshold; printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", parisc_tlb_flush_threshold/1024); @@ -477,18 +491,6 @@ int __flush_tlb_range(unsigned long sid, unsigned long start, /* Purge TLB entries for small ranges using the pdtlb and pitlb instructions. These instructions execute locally but cause a purge request to be broadcast to other TLBs. 
*/ - if (likely(!split_tlb)) { - while (start < end) { - purge_tlb_start(flags); - mtsp(sid, 1); - pdtlb(start); - purge_tlb_end(flags); - start += PAGE_SIZE; - } - return 0; - } - - /* split TLB case */ while (start < end) { purge_tlb_start(flags); mtsp(sid, 1); @@ -573,9 +575,12 @@ void flush_cache_mm(struct mm_struct *mm) pfn = pte_pfn(*ptep); if (!pfn_valid(pfn)) continue; - if (unlikely(mm->context)) + if (unlikely(mm->context)) { flush_tlb_page(vma, addr); - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + } else { + __purge_cache_page(vma, addr, PFN_PHYS(pfn)); + } } } } @@ -610,9 +615,12 @@ void flush_cache_range(struct vm_area_struct *vma, continue; pfn = pte_pfn(*ptep); if (pfn_valid(pfn)) { - if (unlikely(vma->vm_mm->context)) + if (unlikely(vma->vm_mm->context)) { flush_tlb_page(vma, addr); - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + } else { + __purge_cache_page(vma, addr, PFN_PHYS(pfn)); + } } } } @@ -621,9 +629,12 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { if (pfn_valid(pfn)) { - if (likely(vma->vm_mm->context)) + if (likely(vma->vm_mm->context)) { flush_tlb_page(vma, vmaddr); - __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); + } else { + __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn)); + } } } diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 242c5ab65611..1c60408a64ad 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -38,6 +38,7 @@ #include <asm/ldcw.h> #include <asm/traps.h> #include <asm/thread_info.h> +#include <asm/alternative.h> #include <linux/linkage.h> @@ -186,7 +187,7 @@ bv,n 0(%r3) nop .word 0 /* checksum (will be patched) */ - .word PA(os_hpmc) /* address of handler */ + .word 0 /* address of handler */ .word 0 /* length of handler */ .endm @@ -426,13 +427,10 @@ ldw,s \index(\pmd),\pmd bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ - copy \pmd,%r9 - SHLREG %r9,PxD_VALUE_SHIFT,\pmd + SHLREG \pmd,PxD_VALUE_SHIFT,\pmd extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */ - LDREG %r0(\pmd),\pte - bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault .endm /* Look up PTE in a 3-Level scheme. @@ -448,7 +446,6 @@ .macro L3_ptep pgd,pte,index,va,fault #if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */ extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index - copy %r0,\pte extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 ldw,s \index(\pgd),\pgd extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0 @@ -463,36 +460,39 @@ L2_ptep \pgd,\pte,\index,\va,\fault .endm - /* Acquire pa_tlb_lock lock and recheck page is still present. */ + /* Acquire pa_tlb_lock lock and check page is present. */ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault #ifdef CONFIG_SMP - cmpib,COND(=),n 0,\spc,2f +98: cmpib,COND(=),n 0,\spc,2f load_pa_tlb_lock \tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop LDREG 0(\ptp),\pte - bb,<,n \pte,_PAGE_PRESENT_BIT,2f + bb,<,n \pte,_PAGE_PRESENT_BIT,3f b \fault - stw \spc,0(\tmp) -2: + stw,ma \spc,0(\tmp) +99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif +2: LDREG 0(\ptp),\pte + bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault +3: .endm /* Release pa_tlb_lock lock without reloading lock address. 
*/ .macro tlb_unlock0 spc,tmp #ifdef CONFIG_SMP - or,COND(=) %r0,\spc,%r0 - sync - or,COND(=) %r0,\spc,%r0 - stw \spc,0(\tmp) +98: or,COND(=) %r0,\spc,%r0 + stw,ma \spc,0(\tmp) +99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm /* Release pa_tlb_lock lock. */ .macro tlb_unlock1 spc,tmp #ifdef CONFIG_SMP - load_pa_tlb_lock \tmp +98: load_pa_tlb_lock \tmp +99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) tlb_unlock0 \spc,\tmp #endif .endm @@ -1658,7 +1658,7 @@ dbit_fault: itlb_fault: b intr_save - ldi 6,%r8 + ldi PARISC_ITLB_TRAP,%r8 nadtlb_fault: b intr_save diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 6d471c00c71a..e6f3b49f2fd7 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -1326,6 +1326,36 @@ int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long } /** + * pdc_pat_cell_info - Retrieve the cell's information. + * @info: The pointer to a struct pdc_pat_cell_info_rtn_block. + * @actcnt: The number of bytes which should be written to info. + * @offset: offset of the structure. + * @cell_number: The cell number which should be asked, or -1 for current cell. + * + * This PDC call returns information about the given cell (or all cells). + */ +int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info, + unsigned long *actcnt, unsigned long offset, + unsigned long cell_number) +{ + int retval; + unsigned long flags; + struct pdc_pat_cell_info_rtn_block result; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_GET_INFO, + __pa(pdc_result), __pa(&result), *actcnt, + offset, cell_number); + if (!retval) { + *actcnt = pdc_result[0]; + memcpy(info, &result, *actcnt); + } + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} + +/** * pdc_pat_cpu_get_number - Retrieve the cpu number. * @cpu_info: The return buffer. * @hpa: The Hard Physical Address of the CPU. @@ -1413,6 +1443,33 @@ int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr, } /** + * pdc_pat_pd_get_PDC_interface_revisions - Retrieve PDC interface revisions. + * @legacy_rev: The legacy revision. + * @pat_rev: The PAT revision. + * @pdc_cap: The PDC capabilities. + * + */ +int pdc_pat_pd_get_pdc_revisions(unsigned long *legacy_rev, + unsigned long *pat_rev, unsigned long *pdc_cap) +{ + int retval; + unsigned long flags; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_PAT_PD, PDC_PAT_PD_GET_PDC_INTERF_REV, + __pa(pdc_result)); + if (retval == PDC_OK) { + *legacy_rev = pdc_result[0]; + *pat_rev = pdc_result[1]; + *pdc_cap = pdc_result[2]; + } + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} + + +/** * pdc_pat_io_pci_cfg_read - Read PCI configuration space. * @pci_addr: PCI configuration space address for which the read request is being made. * @pci_size: Size of read in bytes. Valid values are 1, 2, and 4. diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index 781c3b9a3e46..fde654115564 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S @@ -85,7 +85,7 @@ END(hpmc_pim_data) .import intr_save, code .align 16 -ENTRY_CFI(os_hpmc) +ENTRY(os_hpmc) .os_hpmc: /* @@ -302,7 +302,6 @@ os_hpmc_6: b . 
nop .align 16 /* make function length multiple of 16 bytes */ -ENDPROC_CFI(os_hpmc) .os_hpmc_end: diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index b0fe19ac4d78..35d05fdd7483 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c @@ -43,6 +43,7 @@ int pdc_type __read_mostly = PDC_TYPE_ILLEGAL; /* cell number and location (PAT firmware only) */ unsigned long parisc_cell_num __read_mostly; unsigned long parisc_cell_loc __read_mostly; +unsigned long parisc_pat_pdc_cap __read_mostly; void __init setup_pdc(void) @@ -81,12 +82,21 @@ void __init setup_pdc(void) #ifdef CONFIG_64BIT status = pdc_pat_cell_get_number(&cell_info); if (status == PDC_OK) { + unsigned long legacy_rev, pat_rev; pdc_type = PDC_TYPE_PAT; pr_cont("64 bit PAT.\n"); parisc_cell_num = cell_info.cell_num; parisc_cell_loc = cell_info.cell_loc; pr_info("PAT: Running on cell %lu and location %lu.\n", parisc_cell_num, parisc_cell_loc); + status = pdc_pat_pd_get_pdc_revisions(&legacy_rev, + &pat_rev, &parisc_pat_pdc_cap); + pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n", + legacy_rev, pat_rev, parisc_pat_pdc_cap, + parisc_pat_pdc_cap + & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0, + parisc_pat_pdc_cap + & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ ? 1:0); return; } #endif diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index f33bf2d306d6..187f032c9dd8 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -37,6 +37,7 @@ #include <asm/pgtable.h> #include <asm/cache.h> #include <asm/ldcw.h> +#include <asm/alternative.h> #include <linux/linkage.h> #include <linux/init.h> @@ -190,7 +191,7 @@ ENDPROC_CFI(flush_tlb_all_local) .import cache_info,data ENTRY_CFI(flush_instruction_cache_local) - load32 cache_info, %r1 +88: load32 cache_info, %r1 /* Flush Instruction Cache */ @@ -243,6 +244,7 @@ fioneloop2: fisync: sync mtsm %r22 /* restore I-bit */ +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) bv %r0(%r2) nop ENDPROC_CFI(flush_instruction_cache_local) @@ -250,7 +252,7 @@ ENDPROC_CFI(flush_instruction_cache_local) .import cache_info, data ENTRY_CFI(flush_data_cache_local) - load32 cache_info, %r1 +88: load32 cache_info, %r1 /* Flush Data Cache */ @@ -304,6 +306,7 @@ fdsync: syncdma sync mtsm %r22 /* restore I-bit */ +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) bv %r0(%r2) nop ENDPROC_CFI(flush_data_cache_local) @@ -312,6 +315,7 @@ ENDPROC_CFI(flush_data_cache_local) .macro tlb_lock la,flags,tmp #ifdef CONFIG_SMP +98: #if __PA_LDCW_ALIGNMENT > 4 load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la depi 0,31,__PA_LDCW_ALIGN_ORDER, \la @@ -326,15 +330,17 @@ ENDPROC_CFI(flush_data_cache_local) nop b,n 2b 3: +99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm .macro tlb_unlock la,flags,tmp #ifdef CONFIG_SMP - ldi 1,\tmp +98: ldi 1,\tmp sync stw \tmp,0(\la) mtsm \flags +99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP) #endif .endm @@ -596,9 +602,11 @@ ENTRY_CFI(copy_user_page_asm) pdtlb,l %r0(%r29) #else tlb_lock %r20,%r21,%r22 - pdtlb %r0(%r28) - pdtlb %r0(%r29) +0: pdtlb %r0(%r28) +1: pdtlb %r0(%r29) tlb_unlock %r20,%r21,%r22 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) + ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB) #endif #ifdef CONFIG_64BIT @@ -736,8 +744,9 @@ ENTRY_CFI(clear_user_page_asm) pdtlb,l %r0(%r28) #else tlb_lock %r20,%r21,%r22 - pdtlb %r0(%r28) +0: pdtlb %r0(%r28) tlb_unlock %r20,%r21,%r22 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, 
INSN_PxTLB) #endif #ifdef CONFIG_64BIT @@ -813,11 +822,12 @@ ENTRY_CFI(flush_dcache_page_asm) pdtlb,l %r0(%r28) #else tlb_lock %r20,%r21,%r22 - pdtlb %r0(%r28) +0: pdtlb %r0(%r28) tlb_unlock %r20,%r21,%r22 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) #endif - ldil L%dcache_stride, %r1 +88: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), r31 #ifdef CONFIG_64BIT @@ -828,8 +838,7 @@ ENTRY_CFI(flush_dcache_page_asm) add %r28, %r25, %r25 sub %r25, r31, %r25 - -1: fdc,m r31(%r28) +1: fdc,m r31(%r28) fdc,m r31(%r28) fdc,m r31(%r28) fdc,m r31(%r28) @@ -844,14 +853,76 @@ ENTRY_CFI(flush_dcache_page_asm) fdc,m r31(%r28) fdc,m r31(%r28) fdc,m r31(%r28) - cmpb,COND(<<) %r28, %r25,1b + cmpb,COND(>>) %r25, %r28, 1b /* predict taken */ fdc,m r31(%r28) +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) sync bv %r0(%r2) nop ENDPROC_CFI(flush_dcache_page_asm) +ENTRY_CFI(purge_dcache_page_asm) + ldil L%(TMPALIAS_MAP_START), %r28 +#ifdef CONFIG_64BIT +#if (TMPALIAS_MAP_START >= 0x80000000) + depdi 0, 31,32, %r28 /* clear any sign extension */ +#endif + convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ + depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ + depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ +#else + extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ + depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ + depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ +#endif + + /* Purge any old translation */ + +#ifdef CONFIG_PA20 + pdtlb,l %r0(%r28) +#else + tlb_lock %r20,%r21,%r22 +0: pdtlb %r0(%r28) + tlb_unlock %r20,%r21,%r22 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) +#endif + +88: ldil L%dcache_stride, %r1 + ldw R%dcache_stride(%r1), r31 + +#ifdef CONFIG_64BIT + depdi,z 1, 63-PAGE_SHIFT,1, %r25 +#else + depwi,z 1, 31-PAGE_SHIFT,1, %r25 +#endif + add %r28, %r25, %r25 + sub %r25, r31, %r25 + +1: pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + pdc,m r31(%r28) + cmpb,COND(>>) %r25, %r28, 1b /* predict taken */ + pdc,m r31(%r28) + +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) + sync + bv %r0(%r2) + nop +ENDPROC_CFI(purge_dcache_page_asm) + ENTRY_CFI(flush_icache_page_asm) ldil L%(TMPALIAS_MAP_START), %r28 #ifdef CONFIG_64BIT @@ -874,15 +945,19 @@ ENTRY_CFI(flush_icache_page_asm) #ifdef CONFIG_PA20 pdtlb,l %r0(%r28) - pitlb,l %r0(%sr4,%r28) +1: pitlb,l %r0(%sr4,%r28) + ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP) #else tlb_lock %r20,%r21,%r22 - pdtlb %r0(%r28) - pitlb %r0(%sr4,%r28) +0: pdtlb %r0(%r28) +1: pitlb %r0(%sr4,%r28) tlb_unlock %r20,%r21,%r22 + ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB) + ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB) + ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP) #endif - ldil L%icache_stride, %r1 +88: ldil L%icache_stride, %r1 ldw R%icache_stride(%r1), %r31 #ifdef CONFIG_64BIT @@ -893,7 +968,6 @@ ENTRY_CFI(flush_icache_page_asm) add %r28, %r25, %r25 sub %r25, %r31, %r25 - /* fic only has the type 26 form on PA1.1, requiring an * explicit space specification, so use %sr4 */ 1: fic,m %r31(%sr4,%r28) @@ -911,16 +985,17 @@ ENTRY_CFI(flush_icache_page_asm) fic,m %r31(%sr4,%r28) fic,m %r31(%sr4,%r28) fic,m %r31(%sr4,%r28) - cmpb,COND(<<) %r28, %r25,1b + cmpb,COND(>>) %r25, %r28, 1b /* predict taken */ fic,m %r31(%sr4,%r28) +89: 
ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) sync bv %r0(%r2) nop ENDPROC_CFI(flush_icache_page_asm) ENTRY_CFI(flush_kernel_dcache_page_asm) - ldil L%dcache_stride, %r1 +88: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 #ifdef CONFIG_64BIT @@ -931,7 +1006,6 @@ ENTRY_CFI(flush_kernel_dcache_page_asm) add %r26, %r25, %r25 sub %r25, %r23, %r25 - 1: fdc,m %r23(%r26) fdc,m %r23(%r26) fdc,m %r23(%r26) @@ -947,16 +1021,17 @@ ENTRY_CFI(flush_kernel_dcache_page_asm) fdc,m %r23(%r26) fdc,m %r23(%r26) fdc,m %r23(%r26) - cmpb,COND(<<) %r26, %r25,1b + cmpb,COND(>>) %r25, %r26, 1b /* predict taken */ fdc,m %r23(%r26) +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) sync bv %r0(%r2) nop ENDPROC_CFI(flush_kernel_dcache_page_asm) ENTRY_CFI(purge_kernel_dcache_page_asm) - ldil L%dcache_stride, %r1 +88: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 #ifdef CONFIG_64BIT @@ -982,74 +1057,183 @@ ENTRY_CFI(purge_kernel_dcache_page_asm) pdc,m %r23(%r26) pdc,m %r23(%r26) pdc,m %r23(%r26) - cmpb,COND(<<) %r26, %r25, 1b + cmpb,COND(>>) %r25, %r26, 1b /* predict taken */ pdc,m %r23(%r26) +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) sync bv %r0(%r2) nop ENDPROC_CFI(purge_kernel_dcache_page_asm) ENTRY_CFI(flush_user_dcache_range_asm) - ldil L%dcache_stride, %r1 +88: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 ldo -1(%r23), %r21 ANDCM %r26, %r21, %r26 -1: cmpb,COND(<<),n %r26, %r25, 1b +#ifdef CONFIG_64BIT + depd,z %r23, 59, 60, %r21 +#else + depw,z %r23, 27, 28, %r21 +#endif + add %r26, %r21, %r22 + cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ +1: add %r22, %r21, %r22 + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + fdc,m %r23(%sr3, %r26) + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ fdc,m %r23(%sr3, %r26) +2: cmpb,COND(>>),n %r25, %r26, 2b + fdc,m %r23(%sr3, %r26) + +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) sync bv %r0(%r2) nop ENDPROC_CFI(flush_user_dcache_range_asm) ENTRY_CFI(flush_kernel_dcache_range_asm) - ldil L%dcache_stride, %r1 +88: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 ldo -1(%r23), %r21 ANDCM %r26, %r21, %r26 -1: cmpb,COND(<<),n %r26, %r25,1b +#ifdef CONFIG_64BIT + depd,z %r23, 59, 60, %r21 +#else + depw,z %r23, 27, 28, %r21 +#endif + add %r26, %r21, %r22 + cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ +1: add %r22, %r21, %r22 + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + fdc,m %r23(%r26) + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ + fdc,m %r23(%r26) + +2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */ fdc,m %r23(%r26) sync +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) syncdma bv %r0(%r2) nop ENDPROC_CFI(flush_kernel_dcache_range_asm) ENTRY_CFI(purge_kernel_dcache_range_asm) - ldil L%dcache_stride, %r1 +88: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 ldo -1(%r23), %r21 ANDCM %r26, %r21, %r26 -1: cmpb,COND(<<),n %r26, %r25,1b +#ifdef CONFIG_64BIT + depd,z %r23, 59, 60, %r21 +#else + depw,z %r23, 27, 28, %r21 +#endif + add %r26, %r21, %r22 + 
cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ +1: add %r22, %r21, %r22 + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + pdc,m %r23(%r26) + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ + pdc,m %r23(%r26) + +2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */ pdc,m %r23(%r26) sync +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP) syncdma bv %r0(%r2) nop ENDPROC_CFI(purge_kernel_dcache_range_asm) ENTRY_CFI(flush_user_icache_range_asm) - ldil L%icache_stride, %r1 +88: ldil L%icache_stride, %r1 ldw R%icache_stride(%r1), %r23 ldo -1(%r23), %r21 ANDCM %r26, %r21, %r26 -1: cmpb,COND(<<),n %r26, %r25,1b +#ifdef CONFIG_64BIT + depd,z %r23, 59, 60, %r21 +#else + depw,z %r23, 27, 28, %r21 +#endif + add %r26, %r21, %r22 + cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ +1: add %r22, %r21, %r22 + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + fic,m %r23(%sr3, %r26) + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ + fic,m %r23(%sr3, %r26) + +2: cmpb,COND(>>),n %r25, %r26, 2b fic,m %r23(%sr3, %r26) +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) sync bv %r0(%r2) nop ENDPROC_CFI(flush_user_icache_range_asm) ENTRY_CFI(flush_kernel_icache_page) - ldil L%icache_stride, %r1 +88: ldil L%icache_stride, %r1 ldw R%icache_stride(%r1), %r23 #ifdef CONFIG_64BIT @@ -1076,23 +1260,51 @@ ENTRY_CFI(flush_kernel_icache_page) fic,m %r23(%sr4, %r26) fic,m %r23(%sr4, %r26) fic,m %r23(%sr4, %r26) - cmpb,COND(<<) %r26, %r25, 1b + cmpb,COND(>>) %r25, %r26, 1b /* predict taken */ fic,m %r23(%sr4, %r26) +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) sync bv %r0(%r2) nop ENDPROC_CFI(flush_kernel_icache_page) ENTRY_CFI(flush_kernel_icache_range_asm) - ldil L%icache_stride, %r1 +88: ldil L%icache_stride, %r1 ldw R%icache_stride(%r1), %r23 ldo -1(%r23), %r21 ANDCM %r26, %r21, %r26 -1: cmpb,COND(<<),n %r26, %r25, 1b +#ifdef CONFIG_64BIT + depd,z %r23, 59, 60, %r21 +#else + depw,z %r23, 27, 28, %r21 +#endif + add %r26, %r21, %r22 + cmpb,COND(>>),n %r22, %r25, 2f /* predict not taken */ +1: add %r22, %r21, %r22 + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + cmpb,COND(<<=) %r22, %r25, 1b /* predict taken */ + fic,m %r23(%sr4, %r26) + +2: cmpb,COND(>>),n %r25, %r26, 2b /* predict taken */ fic,m %r23(%sr4, %r26) +89: ALTERNATIVE(88b, 89b, ALT_COND_NO_ICACHE, INSN_NOP) sync bv %r0(%r2) nop diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 755e89ec828a..cd227f1cf629 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -305,6 +305,86 @@ static int __init parisc_init_resources(void) return 0; } +static int no_alternatives __initdata; +static int __init setup_no_alternatives(char *str) +{ + no_alternatives = 1; 
+ return 1; +} +__setup("no-alternatives", setup_no_alternatives); + +static void __init apply_alternatives_all(void) +{ + struct alt_instr *entry; + int index = 0, applied = 0; + + + pr_info("alternatives: %spatching kernel code\n", + no_alternatives ? "NOT " : ""); + if (no_alternatives) + return; + + set_kernel_text_rw(1); + + for (entry = (struct alt_instr *) &__alt_instructions; + entry < (struct alt_instr *) &__alt_instructions_end; + entry++, index++) { + + u32 *from, len, cond, replacement; + + from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset); + len = entry->len; + cond = entry->cond; + replacement = entry->replacement; + + WARN_ON(!cond); + pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n", + index, cond, len, from, replacement); + + if ((cond & ALT_COND_NO_SMP) && (num_online_cpus() != 1)) + continue; + if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0)) + continue; + if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0)) + continue; + + /* + * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit + * set (bit #61, big endian), we have to flush and sync every + * time IO-PDIR is changed in Ike/Astro. + */ + if ((cond & ALT_COND_NO_IOC_FDC) && + (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)) + continue; + + /* Want to replace pdtlb by a pdtlb,l instruction? */ + if (replacement == INSN_PxTLB) { + replacement = *from; + if (boot_cpu_data.cpu_type >= pcxu) /* >= pa2.0 ? */ + replacement |= (1 << 10); /* set el bit */ + } + + /* + * Replace instruction with NOPs? + * For long distance insert a branch instruction instead. + */ + if (replacement == INSN_NOP && len > 1) + replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */ + + pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n", + index, cond, len, from, replacement); + + /* Replace instruction */ + *from = replacement; + applied++; + } + + pr_info("alternatives: applied %d out of %d patches\n", applied, index); + + set_kernel_text_rw(0); +} + + extern void gsc_init(void); extern void processor_init(void); extern void ccio_init(void); @@ -346,6 +426,7 @@ static int __init parisc_init(void) boot_cpu_data.cpu_hz / 1000000, boot_cpu_data.cpu_hz % 1000000 ); + apply_alternatives_all(); parisc_setup_cache_timing(); /* These are in a non-obvious order, will fix when we have an iotree */ diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 342073f44d3f..848c1934680b 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -65,7 +65,6 @@ #define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */ #define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */ #define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */ -#define INSN_NOP 0x08000240 /* nop */ /* For debugging */ #define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */ diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index f453997a7b8f..f5f22ea9b97e 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -640,8 +640,7 @@ cas_action: sub,<> %r28, %r25, %r0 2: stw %r24, 0(%r26) /* Free lock */ - sync - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG /* Clear thread register indicator */ stw %r0, 4(%sr2,%r20) @@ -655,8 +654,7 @@ cas_action: 3: /* Error occurred on load or store */ /* Free lock */ - sync - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG stw %r0, 4(%sr2,%r20) #endif @@ -857,8 +855,7 @@ cas2_action: 
cas2_end: /* Free lock */ - sync - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) /* Enable interrupts */ ssm PSW_SM_I, %r0 /* Return to userspace, set no error */ @@ -868,8 +865,7 @@ cas2_end: 22: /* Error occurred on load or store */ /* Free lock */ - sync - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) ssm PSW_SM_I, %r0 ldo 1(%r0),%r28 b lws_exit diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 68f10f87073d..472a818e8c17 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -430,8 +430,8 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o } printk("\n"); - pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n", - msg, code, trap_name(code), regs, offset); + pr_crit("%s: Code=%d (%s) at addr " RFMT "\n", + msg, code, trap_name(code), offset); show_regs(regs); spin_unlock(&terminate_lock); @@ -802,7 +802,8 @@ void __init initialize_ivt(const void *iva) * the Length/4 words starting at Address is zero. */ - /* Compute Checksum for HPMC handler */ + /* Setup IVA and compute checksum for HPMC handler */ + ivap[6] = (u32)__pa(os_hpmc); length = os_hpmc_size; ivap[7] = length; diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index da2e31190efa..c3b1b9c24ede 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -61,6 +61,12 @@ SECTIONS EXIT_DATA } PERCPU_SECTION(8) + . = ALIGN(4); + .altinstructions : { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } . = ALIGN(HUGEPAGE_SIZE); __init_end = .; /* freed after init ends here */ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 74842d28a7a1..e7e626bcd0be 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -494,12 +494,8 @@ static void __init map_pages(unsigned long start_vaddr, pte = pte_mkhuge(pte); } - if (address >= end_paddr) { - if (force) - break; - else - pte_val(pte) = 0; - } + if (address >= end_paddr) + break; set_pte(pg_table, pte); @@ -515,6 +511,21 @@ static void __init map_pages(unsigned long start_vaddr, } } +void __init set_kernel_text_rw(int enable_read_write) +{ + unsigned long start = (unsigned long)_stext; + unsigned long end = (unsigned long)_etext; + + map_pages(start, __pa(start), end-start, + PAGE_KERNEL_RWX, enable_read_write ? 
1:0); + + /* force the kernel to see the new TLB entries */ + __flush_tlb_range(0, start, end); + + /* dump old cached instructions */ + flush_icache_range(start, end); +} + void __ref free_initmem(void) { unsigned long init_begin = (unsigned long)__init_begin; diff --git a/arch/powerpc/Kbuild b/arch/powerpc/Kbuild new file mode 100644 index 000000000000..1625a06802ca --- /dev/null +++ b/arch/powerpc/Kbuild @@ -0,0 +1,16 @@ +subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror + +obj-y += kernel/ +obj-y += mm/ +obj-y += lib/ +obj-y += sysdev/ +obj-y += platforms/ +obj-y += math-emu/ +obj-y += crypto/ +obj-y += net/ + +obj-$(CONFIG_XMON) += xmon/ +obj-$(CONFIG_KVM) += kvm/ + +obj-$(CONFIG_PERF_EVENTS) += perf/ +obj-$(CONFIG_KEXEC_FILE) += purgatory/ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a80669209155..e84943d24e5c 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -137,7 +137,7 @@ config PPC select ARCH_HAS_PMEM_API if PPC64 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_MEMBARRIER_CALLBACKS - select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE + select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64 select ARCH_HAS_SG_CHAIN select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION) select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST @@ -180,6 +180,8 @@ config PPC select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_CBPF_JIT if !PPC64 + select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13) + select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2) select HAVE_CONTEXT_TRACKING if PPC64 select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW @@ -188,6 +190,7 @@ config PPC select HAVE_EBPF_JIT if PPC64 select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC @@ -285,12 +288,10 @@ config ARCH_MAY_HAVE_PC_FDC config PPC_UDBG_16550 bool - default n config GENERIC_TBSYNC bool default y if PPC32 && SMP - default n config AUDIT_ARCH bool @@ -309,13 +310,11 @@ config EPAPR_BOOT bool help Used to allow a board to specify it wants an ePAPR compliant wrapper. - default n config DEFAULT_UIMAGE bool help Used to allow a board to specify it wants a uImage built by default - default n config ARCH_HIBERNATION_POSSIBLE bool @@ -329,11 +328,9 @@ config ARCH_SUSPEND_POSSIBLE config PPC_DCR_NATIVE bool - default n config PPC_DCR_MMIO bool - default n config PPC_DCR bool @@ -344,7 +341,6 @@ config PPC_OF_PLATFORM_PCI bool depends on PCI depends on PPC64 # not supported on 32 bits yet - default n config ARCH_SUPPORTS_DEBUG_PAGEALLOC depends on PPC32 || PPC_BOOK3S_64 @@ -447,14 +443,12 @@ config PPC_TRANSACTIONAL_MEM depends on SMP select ALTIVEC select VSX - default n ---help--- Support user-mode Transactional Memory on POWERPC. 
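The PPC_TRANSACTIONAL_MEM help text above refers to user-mode transactions; the kernel side only saves and restores the transactional state. As a rough illustration (not part of this patch) of what such user code looks like, assuming GCC's PowerPC HTM builtins, <htmintrin.h> and a build with -mhtm on TM-capable hardware:

/* Illustrative user-space sketch only, not from this commit. */
#include <htmintrin.h>
#include <stdio.h>

static volatile long counter;

int main(void)
{
	if (__builtin_tbegin(0)) {
		/* Transaction started: updates are speculative until tend. */
		counter++;
		__builtin_tend(0);
	} else {
		/* Transaction aborted: take a non-transactional fallback. */
		__sync_fetch_and_add(&counter, 1);
	}
	printf("counter=%ld\n", counter);
	return 0;
}

The explicit fallback path matters because hardware transactions can abort for many reasons (conflicts, capacity, interrupts), so transactional code is an optimization over, not a replacement for, a conventional synchronization path.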
config LD_HEAD_STUB_CATCH bool "Reserve 256 bytes to cope with linker stubs in HEAD text" if EXPERT depends on PPC64 - default n help Very large kernels can cause linker branch stubs to be generated by code in head_64.S, which moves the head text sections out of their @@ -557,7 +551,6 @@ config RELOCATABLE config RELOCATABLE_TEST bool "Test relocatable kernel" depends on (PPC64 && RELOCATABLE) - default n help This runs the relocatable kernel at the address it was initially loaded at, which tends to be non-zero and therefore test the @@ -769,7 +762,6 @@ config PPC_SUBPAGE_PROT config PPC_COPRO_BASE bool - default n config SCHED_SMT bool "SMT (Hyperthreading) scheduler support" @@ -892,7 +884,6 @@ config PPC_INDIRECT_PCI bool depends on PCI default y if 40x || 44x - default n config EISA bool @@ -989,7 +980,6 @@ source "drivers/pcmcia/Kconfig" config HAS_RAPIDIO bool - default n config RAPIDIO tristate "RapidIO support" @@ -1012,7 +1002,6 @@ endmenu config NONSTATIC_KERNEL bool - default n menu "Advanced setup" depends on PPC32 diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index fd63cd914a74..f4961fbcb48d 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -2,7 +2,6 @@ config PPC_DISABLE_WERROR bool "Don't build arch/powerpc code with -Werror" - default n help This option tells the compiler NOT to build the code under arch/powerpc with the -Werror flag (which means warnings @@ -56,7 +55,6 @@ config PPC_EMULATED_STATS config CODE_PATCHING_SELFTEST bool "Run self-tests of the code-patching code" depends on DEBUG_KERNEL - default n config JUMP_LABEL_FEATURE_CHECKS bool "Enable use of jump label for cpu/mmu_has_feature()" @@ -70,7 +68,6 @@ config JUMP_LABEL_FEATURE_CHECKS config JUMP_LABEL_FEATURE_CHECK_DEBUG bool "Do extra check on feature fixup calls" depends on DEBUG_KERNEL && JUMP_LABEL_FEATURE_CHECKS - default n help This tries to catch incorrect usage of cpu_has_feature() and mmu_has_feature() in the code. 
@@ -80,16 +77,13 @@ config JUMP_LABEL_FEATURE_CHECK_DEBUG config FTR_FIXUP_SELFTEST bool "Run self-tests of the feature-fixup code" depends on DEBUG_KERNEL - default n config MSI_BITMAP_SELFTEST bool "Run self-tests of the MSI bitmap code" depends on DEBUG_KERNEL - default n config PPC_IRQ_SOFT_MASK_DEBUG bool "Include extra checks for powerpc irq soft masking" - default n config XMON bool "Include xmon kernel debugger" diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 11a1acba164a..17be664dafa2 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -112,6 +112,13 @@ KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION) KBUILD_ARFLAGS += --target=elf$(BITS)-$(GNUTARGET) endif +cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard=tls +ifdef CONFIG_PPC64 +cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r13 +else +cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r2 +endif + LDFLAGS_vmlinux-y := -Bstatic LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y) @@ -160,8 +167,17 @@ else CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif +ifdef CONFIG_FUNCTION_TRACER +CC_FLAGS_FTRACE := -pg ifdef CONFIG_MPROFILE_KERNEL - CC_FLAGS_FTRACE := -pg -mprofile-kernel +CC_FLAGS_FTRACE += -mprofile-kernel +endif +# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8 +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199 +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828 +ifneq ($(cc-name),clang) +CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog) +endif endif CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU)) @@ -229,16 +245,15 @@ ifdef CONFIG_6xx KBUILD_CFLAGS += -mcpu=powerpc endif -# Work around a gcc code-gen bug with -fno-omit-frame-pointer. -ifdef CONFIG_FUNCTION_TRACER -KBUILD_CFLAGS += -mno-sched-epilog -endif - cpu-as-$(CONFIG_4xx) += -Wa,-m405 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) cpu-as-$(CONFIG_E200) += -Wa,-me200 cpu-as-$(CONFIG_E500) += -Wa,-me500 -cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 + +# When using '-many -mpower4' gas will first try and find a matching power4 +# mnemonic and failing that it will allow any valid mnemonic that GAS knows +# about. GCC will pass -many to GAS when assembling, clang does not. 
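The -mstack-protector-guard=tls and -mstack-protector-guard-reg flags added in this Makefile change where the compiler reads the stack canary from: rather than a single global guard value, it is loaded at a fixed offset from a reserved register (r13, the paca pointer, on 64-bit; r2, current, on 32-bit), with the offset taken from the generated asm-offsets (PACA_CANARY / TASK_CANARY, see the stack_protector_prepare rule below). A rough C sketch of the check the compiler emits around each instrumented function — the offset and helper names here are placeholders, not kernel symbols — looks like this:

/* Rough sketch only, not from this commit. */
#define GUARD_OFFSET 0	/* placeholder; the real offset comes from asm-offsets.h */

extern void __stack_chk_fail(void);

static unsigned long read_guard(const char *guard_base)
{
	/* guard_base stands in for r13 (paca) on ppc64 or r2 (current) on ppc32 */
	return *(const unsigned long *)(guard_base + GUARD_OFFSET);
}

void instrumented(const char *guard_base)
{
	unsigned long canary = read_guard(guard_base);	/* prologue: stash canary */
	char buf[64];

	/* ... normal function body that uses buf ... */
	buf[0] = 0;

	if (read_guard(guard_base) != canary)		/* epilogue: recheck */
		__stack_chk_fail();			/* canary was clobbered */
}

Using a per-task/per-CPU register as the guard base gives each task its own canary instead of one kernel-wide value, which is why the guard offset has to be derived from asm-offsets at prepare time.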
+cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 -Wa,-many cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc) KBUILD_AFLAGS += $(cpu-as-y) @@ -258,18 +273,8 @@ head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o head-$(CONFIG_ALTIVEC) += arch/powerpc/kernel/vector.o head-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += arch/powerpc/kernel/prom_init.o -core-y += arch/powerpc/kernel/ \ - arch/powerpc/mm/ \ - arch/powerpc/lib/ \ - arch/powerpc/sysdev/ \ - arch/powerpc/platforms/ \ - arch/powerpc/math-emu/ \ - arch/powerpc/crypto/ \ - arch/powerpc/net/ -core-$(CONFIG_XMON) += arch/powerpc/xmon/ -core-$(CONFIG_KVM) += arch/powerpc/kvm/ -core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/ -core-$(CONFIG_KEXEC_FILE) += arch/powerpc/purgatory/ +# See arch/powerpc/Kbuild for content of core part of the kernel +core-y += arch/powerpc/ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ @@ -293,9 +298,6 @@ $(BOOT_TARGETS2): vmlinux bootwrapper_install: $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@) -%.dtb: scripts - $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@) - # Used to create 'merged defconfigs' # To use it $(call) it with the first argument as the base defconfig # and the second argument as a space separated list of .config files to merge, @@ -400,40 +402,20 @@ archclean: archprepare: checkbin -# Use the file '.tmp_gas_check' for binutils tests, as gas won't output -# to stdout and these checks are run even on install targets. -TOUT := .tmp_gas_check +ifdef CONFIG_STACKPROTECTOR +prepare: stack_protector_prepare + +stack_protector_prepare: prepare0 +ifdef CONFIG_PPC64 + $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h)) +else + $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h)) +endif +endif -# Check gcc and binutils versions: -# - gcc-3.4 and binutils-2.14 are a fatal combination -# - Require gcc 4.0 or above on 64-bit -# - gcc-4.2.0 has issues compiling modules on 64-bit +# Check toolchain versions: +# - gcc-4.6 is the minimum kernel-wide version so nothing required. checkbin: - @if test "$(cc-name)" != "clang" \ - && test "$(cc-version)" = "0304" ; then \ - if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \ - echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \ - echo 'correctly with gcc-3.4 and your version of binutils.'; \ - echo '*** Please upgrade your binutils or downgrade your gcc'; \ - false; \ - fi ; \ - fi - @if test "$(cc-name)" != "clang" \ - && test "$(cc-version)" -lt "0400" \ - && test "x${CONFIG_PPC64}" = "xy" ; then \ - echo -n "Sorry, GCC v4.0 or above is required to build " ; \ - echo "the 64-bit powerpc kernel." ; \ - false ; \ - fi - @if test "$(cc-name)" != "clang" \ - && test "$(cc-fullversion)" = "040200" \ - && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \ - echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \ - echo 'kernel with modules enabled.' ; \ - echo -n '*** Please use a different GCC version or ' ; \ - echo 'disable kernel modules' ; \ - false ; \ - fi @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \ && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \ echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \ @@ -441,7 +423,3 @@ checkbin: echo -n '*** Please use a different binutils version.' 
; \ false ; \ fi - - -CLEAN_FILES += $(TOUT) - diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore index f92d0530ceb1..32034a0cc554 100644 --- a/arch/powerpc/boot/.gitignore +++ b/arch/powerpc/boot/.gitignore @@ -44,4 +44,5 @@ fdt_sw.c fdt_wip.c libfdt.h libfdt_internal.h +autoconf.h diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 0fb96c26136f..39354365f54a 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -32,8 +32,8 @@ else endif BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ - -fno-strict-aliasing -Os -msoft-float -pipe \ - -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ + -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \ + -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ -D$(compress-y) ifdef CONFIG_PPC64_BOOT_WRAPPER @@ -197,9 +197,14 @@ $(obj)/empty.c: $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S $(Q)cp $< $@ +$(obj)/serial.c: $(obj)/autoconf.h + +$(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/% + $(Q)cp $< $@ + clean-files := $(zlib-) $(zlibheader-) $(zliblinuxheader-) \ $(zlib-decomp-) $(libfdt) $(libfdtheader) \ - empty.c zImage.coff.lds zImage.ps3.lds zImage.lds + autoconf.h empty.c zImage.coff.lds zImage.ps3.lds zImage.lds quiet_cmd_bootcc = BOOTCC $@ cmd_bootcc = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $< @@ -304,9 +309,9 @@ image-$(CONFIG_PPC_ADDER875) += cuImage.adder875-uboot \ dtbImage.adder875-redboot # Board ports in arch/powerpc/platform/52xx/Kconfig -image-$(CONFIG_PPC_LITE5200) += cuImage.lite5200 lite5200.dtb -image-$(CONFIG_PPC_LITE5200) += cuImage.lite5200b lite5200b.dtb -image-$(CONFIG_PPC_MEDIA5200) += cuImage.media5200 media5200.dtb +image-$(CONFIG_PPC_LITE5200) += cuImage.lite5200 +image-$(CONFIG_PPC_LITE5200) += cuImage.lite5200b +image-$(CONFIG_PPC_MEDIA5200) += cuImage.media5200 # Board ports in arch/powerpc/platform/82xx/Kconfig image-$(CONFIG_MPC8272_ADS) += cuImage.mpc8272ads @@ -381,11 +386,11 @@ $(addprefix $(obj)/, $(sort $(filter zImage.%, $(image-y)))): vmlinux $(wrapperb $(call if_changed,wrap,$(subst $(obj)/zImage.,,$@)) # dtbImage% - a dtbImage is a zImage with an embedded device tree blob -$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE - $(call if_changed,wrap,$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) +$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/dts/%.dtb FORCE + $(call if_changed,wrap,$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz) -$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE - $(call if_changed,wrap,$*,,$(obj)/$*.dtb) +$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/dts/%.dtb FORCE + $(call if_changed,wrap,$*,,$(obj)/dts/$*.dtb) # This cannot be in the root of $(src) as the zImage rule always adds a $(obj) # prefix @@ -395,36 +400,33 @@ $(obj)/vmlinux.strip: vmlinux $(obj)/uImage: vmlinux $(wrapperbits) FORCE $(call if_changed,wrap,uboot) -$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE - $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) - -$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE - $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb) +$(obj)/uImage.initrd.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE + $(call if_changed,wrap,uboot-$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz) -$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE - $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) +$(obj)/uImage.%: vmlinux $(obj)/dts/%.dtb 
$(wrapperbits) FORCE + $(call if_changed,wrap,uboot-$*,,$(obj)/dts/$*.dtb) -$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE - $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb) +$(obj)/cuImage.initrd.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE + $(call if_changed,wrap,cuboot-$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz) -$(obj)/simpleImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE - $(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) +$(obj)/cuImage.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE + $(call if_changed,wrap,cuboot-$*,,$(obj)/dts/$*.dtb) -$(obj)/simpleImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE - $(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb) +$(obj)/simpleImage.initrd.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE + $(call if_changed,wrap,simpleboot-$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz) -$(obj)/treeImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE - $(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz) +$(obj)/simpleImage.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE + $(call if_changed,wrap,simpleboot-$*,,$(obj)/dts/$*.dtb) -$(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE - $(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb) +$(obj)/treeImage.initrd.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE + $(call if_changed,wrap,treeboot-$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz) -# Rule to build device tree blobs -$(obj)/%.dtb: $(src)/dts/%.dts FORCE - $(call if_changed_dep,dtc) +$(obj)/treeImage.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE + $(call if_changed,wrap,treeboot-$*,,$(obj)/dts/$*.dtb) -$(obj)/%.dtb: $(src)/dts/fsl/%.dts FORCE - $(call if_changed_dep,dtc) +# Needed for the above targets to work with dts/fsl/ files +$(obj)/dts/%.dtb: $(obj)/dts/fsl/%.dtb + @cp $< $@ # If there isn't a platform selected then just strip the vmlinux. ifeq (,$(image-y)) diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S index dcf2f15e6797..32dfe6d083f3 100644 --- a/arch/powerpc/boot/crt0.S +++ b/arch/powerpc/boot/crt0.S @@ -47,8 +47,10 @@ p_end: .long _end p_pstack: .long _platform_stack_top #endif - .weak _zimage_start .globl _zimage_start + /* Clang appears to require the .weak directive to be after the symbol + * is defined. 
See https://bugs.llvm.org/show_bug.cgi?id=38921 */ + .weak _zimage_start _zimage_start: .globl _zimage_start_lib _zimage_start_lib: diff --git a/arch/powerpc/boot/dts/Makefile b/arch/powerpc/boot/dts/Makefile new file mode 100644 index 000000000000..fb335d05aae8 --- /dev/null +++ b/arch/powerpc/boot/dts/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 + +subdir-y += fsl + +dtstree := $(srctree)/$(src) +dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) diff --git a/arch/powerpc/boot/dts/fsl/Makefile b/arch/powerpc/boot/dts/fsl/Makefile new file mode 100644 index 000000000000..3bae982641e9 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 + +dtstree := $(srctree)/$(src) +dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h index 2a0c8b1bf147..2abc8e83b95e 100644 --- a/arch/powerpc/boot/libfdt_env.h +++ b/arch/powerpc/boot/libfdt_env.h @@ -5,6 +5,8 @@ #include <types.h> #include <string.h> +#define INT_MAX ((int)(~0U>>1)) + #include "of.h" typedef unsigned long uintptr_t; diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c index 0272570d02de..dfb199ef5b94 100644 --- a/arch/powerpc/boot/opal.c +++ b/arch/powerpc/boot/opal.c @@ -13,8 +13,6 @@ #include <libfdt.h> #include "../include/asm/opal-api.h" -#ifdef CONFIG_PPC64_BOOT_WRAPPER - /* Global OPAL struct used by opal-call.S */ struct opal { u64 base; @@ -101,9 +99,3 @@ int opal_console_init(void *devp, struct serial_console_data *scdp) return 0; } -#else -int opal_console_init(void *devp, struct serial_console_data *scdp) -{ - return -1; -} -#endif /* __powerpc64__ */ diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index 48e3743faedf..f045f8494bf9 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -18,6 +18,7 @@ #include "stdio.h" #include "io.h" #include "ops.h" +#include "autoconf.h" static int serial_open(void) { diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 67c39f4acede..f686cc1eac0b 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -262,3 +262,4 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_HW is not set +CONFIG_PRINTK_TIME=y diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 59e47ec85336..f71eddafb02f 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -112,3 +112,4 @@ CONFIG_PPC_EARLY_DEBUG=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m # CONFIG_CRYPTO_HW is not set +CONFIG_PRINTK_TIME=y diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 6ab34e60495f..ef2ef98d3f28 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -44,6 +44,9 @@ CONFIG_PPC_MEMTRACE=y # CONFIG_PPC_PSERIES is not set # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_IDLE=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m @@ -350,3 +353,4 @@ CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=m CONFIG_VHOST_NET=m +CONFIG_PRINTK_TIME=y diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 
5033e630afea..f2515674a1e2 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -40,6 +40,9 @@ CONFIG_PS3_LPM=m CONFIG_PPC_IBM_CELL_BLADE=y CONFIG_RTAS_FLASH=m CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_PMAC64=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m @@ -365,3 +368,4 @@ CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=m CONFIG_VHOST_NET=m +CONFIG_PRINTK_TIME=y diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index 187e2f7c12c8..cf8d55f67272 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -171,3 +171,4 @@ CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_LZO=m +CONFIG_PRINTK_TIME=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 0dd5cf7b566d..5e09a40cbcbf 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -325,3 +325,4 @@ CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=m CONFIG_VHOST_NET=m +CONFIG_PRINTK_TIME=y diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig index 6bd5e7261335..cfdd08897a06 100644 --- a/arch/powerpc/configs/skiroot_defconfig +++ b/arch/powerpc/configs/skiroot_defconfig @@ -3,20 +3,17 @@ CONFIG_ALTIVEC=y CONFIG_VSX=y CONFIG_NR_CPUS=2048 CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_KERNEL_XZ=y # CONFIG_SWAP is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y # CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_CPU_ISOLATION is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=20 -CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_GZIP is not set # CONFIG_RD_BZIP2 is not set @@ -24,8 +21,14 @@ CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_LZO is not set # CONFIG_RD_LZ4 is not set CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_EXPERT=y +# CONFIG_SGETMASK_SYSCALL is not set +# CONFIG_SYSFS_SYSCALL is not set +# CONFIG_SHMEM is not set +# CONFIG_AIO is not set CONFIG_PERF_EVENTS=y # CONFIG_COMPAT_BRK is not set +CONFIG_SLAB_FREELIST_HARDENED=y CONFIG_JUMP_LABEL=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_MODULES=y @@ -35,7 +38,9 @@ CONFIG_MODULE_SIG_FORCE=y CONFIG_MODULE_SIG_SHA512=y CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_PPC_VAS is not set # CONFIG_PPC_PSERIES is not set +# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y CONFIG_CPU_IDLE=y CONFIG_HZ_100=y @@ -48,8 +53,9 @@ CONFIG_NUMA=y CONFIG_PPC_64K_PAGES=y CONFIG_SCHED_SMT=y CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="console=tty0 console=hvc0 powersave=off" +CONFIG_CMDLINE="console=tty0 console=hvc0 ipr.fast_reboot=1 quiet" # CONFIG_SECCOMP is not set +# CONFIG_PPC_MEM_KEYS is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -60,7 +66,6 @@ CONFIG_SYN_COOKIES=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_IPV6 is not set CONFIG_DNS_RESOLVER=y # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -73,8 +78,10 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_NVME=m -CONFIG_EEPROM_AT24=y +CONFIG_NVME_MULTIPATH=y +CONFIG_EEPROM_AT24=m # CONFIG_CXL is 
not set +# CONFIG_OCXL is not set CONFIG_BLK_DEV_SD=m CONFIG_BLK_DEV_SR=m CONFIG_BLK_DEV_SR_VENDOR=y @@ -85,7 +92,6 @@ CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_CXGB3_ISCSI=m CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m -CONFIG_BE2ISCSI=m CONFIG_SCSI_AACRAID=m CONFIG_MEGARAID_NEWGEN=y CONFIG_MEGARAID_MM=m @@ -102,7 +108,7 @@ CONFIG_SCSI_VIRTIO=m CONFIG_SCSI_DH=y CONFIG_SCSI_DH_ALUA=m CONFIG_ATA=y -CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI=m # CONFIG_ATA_SFF is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=m @@ -119,25 +125,72 @@ CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y -CONFIG_TIGON3=y +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_TIGON3=m CONFIG_BNX2X=m -CONFIG_CHELSIO_T1=y +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_CHELSIO_T1=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set CONFIG_BE2NET=m -CONFIG_S2IO=m -CONFIG_E100=m +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_E1000=m -CONFIG_E1000E=m +CONFIG_IGB=m CONFIG_IXGB=m CONFIG_IXGBE=m +CONFIG_I40E=m +CONFIG_S2IO=m +# CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set CONFIG_MLX5_CORE=m -CONFIG_MLX5_CORE_EN=y +# CONFIG_NET_VENDOR_MICREL is not set CONFIG_MYRI10GE=m +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set CONFIG_QLGE=m CONFIG_NETXEN_NIC=m +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SFC=m +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_PHYLIB=y # CONFIG_USB_NET_DRIVERS is not set # CONFIG_WLAN is not set CONFIG_INPUT_EVDEV=y @@ -149,39 +202,51 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_IPMI_HANDLER=y CONFIG_IPMI_DEVICE_INTERFACE=y CONFIG_IPMI_POWERNV=y +CONFIG_IPMI_WATCHDOG=y CONFIG_HW_RANDOM=y +CONFIG_TCG_TPM=y CONFIG_TCG_TIS_I2C_NUVOTON=y +CONFIG_I2C=y # CONFIG_I2C_COMPAT is not set CONFIG_I2C_CHARDEV=y # CONFIG_I2C_HELPER_AUTO is not set -CONFIG_DRM=y -CONFIG_DRM_RADEON=y +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_OPAL=m +CONFIG_PPS=y +CONFIG_SENSORS_IBMPOWERNV=m +CONFIG_DRM=m CONFIG_DRM_AST=m +CONFIG_FB=y CONFIG_FIRMWARE_EDID=y -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_OF=y -CONFIG_FB_MATROX=y -CONFIG_FB_MATROX_MILLENIUM=y -CONFIG_FB_MATROX_MYSTIQUE=y 
-CONFIG_FB_MATROX_G=y -# CONFIG_LCD_CLASS_DEVICE is not set -# CONFIG_BACKLIGHT_GENERIC is not set # CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_HID_GENERIC=m +CONFIG_HID_A4TECH=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_EZKEY=y +CONFIG_HID_ITE=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y CONFIG_USB_HIDDEV=y -CONFIG_USB=y -CONFIG_USB_MON=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_EHCI_HCD=y +CONFIG_USB=m +CONFIG_USB_XHCI_HCD=m +CONFIG_USB_EHCI_HCD=m # CONFIG_USB_EHCI_HCD_PPC_OF is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_STORAGE=y +CONFIG_USB_OHCI_HCD=m +CONFIG_USB_STORAGE=m CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_OPAL=m CONFIG_RTC_DRV_GENERIC=m CONFIG_VIRT_DRIVERS=y -CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI=m # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT4_FS=m CONFIG_EXT4_FS_POSIX_ACL=y @@ -195,10 +260,9 @@ CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y # CONFIG_MISC_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS=y CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y @@ -207,26 +271,24 @@ CONFIG_NLS_UTF8=y CONFIG_CRC16=y CONFIG_CRC_ITU_T=y CONFIG_LIBCRC32C=y +# CONFIG_XZ_DEC_X86 is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set CONFIG_PRINTK_TIME=y CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y CONFIG_WQ_WATCHDOG=y -CONFIG_SCHEDSTATS=y +# CONFIG_SCHED_DEBUG is not set # CONFIG_FTRACE is not set +# CONFIG_RUNTIME_TESTING_MENU is not set CONFIG_XMON=y CONFIG_XMON_DEFAULT=y -CONFIG_SECURITY=y -CONFIG_IMA=y -CONFIG_EVM=y +CONFIG_ENCRYPTED_KEYS=y # CONFIG_CRYPTO_ECHAINIV is not set -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_CMAC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_DES=y # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/include/asm/accounting.h b/arch/powerpc/include/asm/accounting.h index 3abcf98ed2e0..c607c5d835cc 100644 --- a/arch/powerpc/include/asm/accounting.h +++ b/arch/powerpc/include/asm/accounting.h @@ -15,8 +15,10 @@ struct cpu_accounting_data { /* Accumulated cputime values to flush on ticks*/ unsigned long utime; unsigned long stime; +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME unsigned long utime_scaled; unsigned long stime_scaled; +#endif unsigned long gtime; unsigned long hardirq_time; unsigned long softirq_time; @@ -25,8 +27,10 @@ struct cpu_accounting_data { /* Internal counters */ unsigned long starttime; /* TB value snapshot */ unsigned long starttime_user; /* TB value on exit to usermode */ +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME unsigned long startspurr; /* SPURR value snapshot */ unsigned long utime_sspurr; /* ->user_time when ->startspurr set */ +#endif }; #endif diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 1f4691ce4126..ec691d489656 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -63,7 +63,6 @@ void program_check_exception(struct pt_regs *regs); void alignment_exception(struct pt_regs *regs); void slb_miss_bad_addr(struct pt_regs *regs); 
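
The accounting.h hunk above makes the scaled-cputime fields exist only under CONFIG_ARCH_HAS_SCALED_CPUTIME, so any code naming them has to live under the same guard (the cputime.h hunk later in this patch drops an unconditional startspurr copy accordingly). A standalone sketch of the two resulting struct shapes; the config macro and scaffolding here are local stand-ins, not kernel code:

#include <stdio.h>

#define MODEL_HAS_SCALED_CPUTIME 0	/* flip to 1 to model CONFIG_ARCH_HAS_SCALED_CPUTIME=y */

struct model_accounting {
	unsigned long utime;
	unsigned long stime;
#if MODEL_HAS_SCALED_CPUTIME
	unsigned long utime_scaled;
	unsigned long stime_scaled;
#endif
	unsigned long gtime;
	unsigned long starttime;
	unsigned long starttime_user;
#if MODEL_HAS_SCALED_CPUTIME
	unsigned long startspurr;
	unsigned long utime_sspurr;
#endif
};

int main(void)
{
	/* four unsigned longs smaller when the scaled-cputime fields are compiled out */
	printf("sizeof(struct model_accounting) = %zu\n", sizeof(struct model_accounting));
	return 0;
}
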
void StackOverflow(struct pt_regs *regs); -void nonrecoverable_exception(struct pt_regs *regs); void kernel_fp_unavailable_exception(struct pt_regs *regs); void altivec_unavailable_exception(struct pt_regs *regs); void vsx_unavailable_exception(struct pt_regs *regs); @@ -78,6 +77,8 @@ void kernel_bad_stack(struct pt_regs *regs); void system_reset_exception(struct pt_regs *regs); void machine_check_exception(struct pt_regs *regs); void emulation_assist_interrupt(struct pt_regs *regs); +long do_slb_fault(struct pt_regs *regs, unsigned long ea); +void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err); /* signals, syscalls and interrupts */ long sys_swapcontext(struct ucontext __user *old_ctx, @@ -150,4 +151,25 @@ extern s32 patch__memset_nocache, patch__memcpy_nocache; extern long flush_count_cache; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); +void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); +#else +static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, + bool preserve_nv) { } +static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, + bool preserve_nv) { } +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + +void kvmhv_save_host_pmu(void); +void kvmhv_load_host_pmu(void); +void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use); +void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu); + +int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu); + +long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr); +long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr, + unsigned long dabrx); + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 751cf931bb3f..c21d33704633 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -8,7 +8,97 @@ #include <asm/book3s/32/hash.h> /* And here we include common definitions */ -#include <asm/pte-common.h> + +#define _PAGE_KERNEL_RO 0 +#define _PAGE_KERNEL_ROX 0 +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW) + +#define _PAGE_HPTEFLAGS _PAGE_HASHPTE + +#ifndef __ASSEMBLY__ + +static inline bool pte_user(pte_t pte) +{ + return pte_val(pte) & _PAGE_USER; +} +#endif /* __ASSEMBLY__ */ + +/* + * Location of the PFN in the PTE. Most 32-bit platforms use the same + * as _PAGE_SHIFT here (ie, naturally aligned). + * Platform who don't just pre-define the value so we don't override it here. + */ +#define PTE_RPN_SHIFT (PAGE_SHIFT) + +/* + * The mask covered by the RPN must be a ULL on 32-bit platforms with + * 64-bit PTEs. + */ +#ifdef CONFIG_PTE_64BIT +#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1)) +#else +#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) +#endif + +/* + * _PAGE_CHG_MASK masks of bits that are to be preserved across + * pgprot changes. + */ +#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \ + _PAGE_ACCESSED | _PAGE_SPECIAL) + +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. + */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) + +/* + * Permission masks used to generate the __P and __S table. 
+ * + * Note:__pgprot is defined in arch/powerpc/include/asm/page.h + * + * Write permissions imply read permissions for now. + */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER) + +/* Permission masks used for kernel mappings */ +#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) +#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ + _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) +#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) +#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) + +/* + * Protection used for kernel text. We want the debuggers to be able to + * set breakpoints anywhere, so don't write protect the kernel text + * on platforms where such control is possible. + */ +#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ + defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) +#define PAGE_KERNEL_TEXT PAGE_KERNEL_X +#else +#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX +#endif + +/* Make modules code happy. We don't set RO yet */ +#define PAGE_KERNEL_EXEC PAGE_KERNEL_X + +/* Advertise special mapping type for AGP */ +#define PAGE_AGP (PAGE_KERNEL_NC) +#define HAVE_PAGE_AGP #define PTE_INDEX_SIZE PTE_SHIFT #define PMD_INDEX_SIZE 0 @@ -219,14 +309,8 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO); + pte_update(ptep, _PAGE_RW, 0); } -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - ptep_set_wrprotect(mm, addr, ptep); -} - static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, @@ -234,10 +318,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, int psize) { unsigned long set = pte_val(entry) & - (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); - unsigned long clr = ~pte_val(entry) & _PAGE_RO; + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW); - pte_update(ptep, clr, set); + pte_update(ptep, 0, set); flush_tlb_page(vma, address); } @@ -292,7 +375,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) -int map_kernel_page(unsigned long va, phys_addr_t pa, int flags); +int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); /* Generic accessors to PTE bits */ static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);} @@ -301,13 +384,28 @@ static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); } static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); } static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } -static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & 
PAGE_PROT_BITS); } +static inline bool pte_exec(pte_t pte) { return true; } static inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; } +static inline bool pte_hw_valid(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +static inline bool pte_hashpte(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_HASHPTE); +} + +static inline bool pte_ci(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_NO_CACHE); +} + /* * We only find page table entry in the last level * Hence no need for other accessors @@ -315,17 +413,14 @@ static inline int pte_present(pte_t pte) #define pte_access_permitted pte_access_permitted static inline bool pte_access_permitted(pte_t pte, bool write) { - unsigned long pteval = pte_val(pte); /* * A read-only access is controlled by _PAGE_USER bit. * We have _PAGE_READ set for WRITE and EXECUTE */ - unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER; - - if (write) - need_pte_bits |= _PAGE_WRITE; + if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte)) + return false; - if ((pteval & need_pte_bits) != need_pte_bits) + if (write && !pte_write(pte)) return false; return true; @@ -354,6 +449,11 @@ static inline pte_t pte_wrprotect(pte_t pte) return __pte(pte_val(pte) & ~_PAGE_RW); } +static inline pte_t pte_exprotect(pte_t pte) +{ + return pte; +} + static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~_PAGE_DIRTY); @@ -364,6 +464,16 @@ static inline pte_t pte_mkold(pte_t pte) return __pte(pte_val(pte) & ~_PAGE_ACCESSED); } +static inline pte_t pte_mkexec(pte_t pte) +{ + return pte; +} + +static inline pte_t pte_mkpte(pte_t pte) +{ + return pte; +} + static inline pte_t pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | _PAGE_RW); @@ -389,6 +499,16 @@ static inline pte_t pte_mkhuge(pte_t pte) return pte; } +static inline pte_t pte_mkprivileged(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_USER); +} + +static inline pte_t pte_mkuser(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_USER); +} + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 9a3798660cef..15bc16b1dc9c 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -66,7 +66,7 @@ static inline int hash__hugepd_ok(hugepd_t hpd) * if it is not a pte and have hugepd shift mask * set, then it is a hugepd directory pointer */ - if (!(hpdval & _PAGE_PTE) && + if (!(hpdval & _PAGE_PTE) && (hpdval & _PAGE_PRESENT) && ((hpdval & HUGEPD_SHIFT_MASK) != 0)) return true; return false; diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index d52a51b2ce7b..247aff9cc6ba 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -18,6 +18,11 @@ #include <asm/book3s/64/hash-4k.h> #endif +/* Bits to set in a PMD/PUD/PGD entry valid bit*/ +#define HASH_PMD_VAL_BITS (0x8000000000000000UL) +#define HASH_PUD_VAL_BITS (0x8000000000000000UL) +#define HASH_PGD_VAL_BITS (0x8000000000000000UL) + /* * Size of EA range mapped by our pagetables. 
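
For the reworked 32-bit pte_access_permitted() above, a small standalone model of the check it now performs: present, user and readable for any access, plus the write bit for writes. The bit names and values below are invented for the model, not the real _PAGE_* encoding:

#include <stdbool.h>
#include <stdio.h>

#define M_PRESENT 0x1UL
#define M_USER    0x2UL
#define M_READ    0x4UL
#define M_WRITE   0x8UL

static bool model_access_permitted(unsigned long pte, bool write)
{
	if (!(pte & M_PRESENT) || !(pte & M_USER) || !(pte & M_READ))
		return false;
	if (write && !(pte & M_WRITE))
		return false;
	return true;
}

int main(void)
{
	unsigned long user_ro = M_PRESENT | M_USER | M_READ;
	unsigned long kernel  = M_PRESENT | M_READ | M_WRITE;

	printf("%d\n", model_access_permitted(user_ro, false));	/* 1: read of a user page */
	printf("%d\n", model_access_permitted(user_ro, true));		/* 0: write bit missing */
	printf("%d\n", model_access_permitted(kernel, false));		/* 0: not a user page */
	return 0;
}
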
*/ @@ -196,8 +201,7 @@ static inline void hpte_do_hugepage_flush(struct mm_struct *mm, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -extern int hash__map_kernel_page(unsigned long ea, unsigned long pa, - unsigned long flags); +int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); extern int __meminit hash__vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys); diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index 50888388a359..5b0177733994 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -39,4 +39,7 @@ static inline bool gigantic_page_supported(void) } #endif +/* hugepd entry valid bit */ +#define HUGEPD_VAL_BITS (0x8000000000000000UL) + #endif diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index b3520b549cba..12e522807f9f 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -30,7 +30,7 @@ * SLB */ -#define SLB_NUM_BOLTED 3 +#define SLB_NUM_BOLTED 2 #define SLB_CACHE_ENTRIES 8 #define SLB_MIN_SIZE 32 @@ -203,6 +203,18 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) BUG(); } +static inline unsigned int ap_to_shift(unsigned long ap) +{ + int psize; + + for (psize = 0; psize < MMU_PAGE_COUNT; psize++) { + if (mmu_psize_defs[psize].ap == ap) + return mmu_psize_defs[psize].shift; + } + + return -1; +} + static inline unsigned long get_sllp_encoding(int psize) { unsigned long sllp; @@ -487,6 +499,8 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend, extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages); extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr); +extern void hash__setup_new_exec(void); + #ifdef CONFIG_PPC_PSERIES void hpte_init_pseries(void); #else @@ -495,11 +509,18 @@ static inline void hpte_init_pseries(void) { } extern void hpte_init_native(void); +struct slb_entry { + u64 esid; + u64 vsid; +}; + extern void slb_initialize(void); -extern void slb_flush_and_rebolt(void); +void slb_flush_and_restore_bolted(void); void slb_flush_all_realmode(void); void __slb_restore_bolted_realmode(void); void slb_restore_bolted_realmode(void); +void slb_save_contents(struct slb_entry *slb_ptr); +void slb_dump_contents(struct slb_entry *slb_ptr); extern void slb_vmalloc_update(void); extern void slb_set_size(u16 size); @@ -512,13 +533,9 @@ extern void slb_set_size(u16 size); * from mmu context id and effective segment id of the address. * * For user processes max context id is limited to MAX_USER_CONTEXT. - - * For kernel space, we use context ids 1-4 to map addresses as below: - * NOTE: each context only support 64TB now. - * 0x00001 - [ 0xc000000000000000 - 0xc0003fffffffffff ] - * 0x00002 - [ 0xd000000000000000 - 0xd0003fffffffffff ] - * 0x00003 - [ 0xe000000000000000 - 0xe0003fffffffffff ] - * 0x00004 - [ 0xf000000000000000 - 0xf0003fffffffffff ] + * more details in get_user_context + * + * For kernel space get_kernel_context * * The proto-VSIDs are then scrambled into real VSIDs with the * multiplicative hash: @@ -559,6 +576,21 @@ extern void slb_set_size(u16 size); #define ESID_BITS_1T_MASK ((1 << ESID_BITS_1T) - 1) /* + * Now certain config support MAX_PHYSMEM more than 512TB. Hence we will need + * to use more than one context for linear mapping the kernel. 
+ * For vmalloc and memmap, we use just one context with 512TB. With 64 byte + * struct page size, we need ony 32 TB in memmap for 2PB (51 bits (MAX_PHYSMEM_BITS)). + */ +#if (MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT) +#define MAX_KERNEL_CTX_CNT (1UL << (MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT)) +#else +#define MAX_KERNEL_CTX_CNT 1 +#endif + +#define MAX_VMALLOC_CTX_CNT 1 +#define MAX_MEMMAP_CTX_CNT 1 + +/* * 256MB segment * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts @@ -568,12 +600,13 @@ extern void slb_set_size(u16 size); * We also need to avoid the last segment of the last context, because that * would give a protovsid of 0x1fffffffff. That will result in a VSID 0 * because of the modulo operation in vsid scramble. + * + * We add one extra context to MIN_USER_CONTEXT so that we can map kernel + * context easily. The +1 is to map the unused 0xe region mapping. */ #define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2) -#define MIN_USER_CONTEXT (5) - -/* Would be nice to use KERNEL_REGION_ID here */ -#define KERNEL_REGION_CONTEXT_OFFSET (0xc - 1) +#define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \ + MAX_MEMMAP_CTX_CNT + 2) /* * For platforms that support on 65bit VA we limit the context bits @@ -734,6 +767,39 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea, } /* + * For kernel space, we use context ids as below + * below. Range is 512TB per context. + * + * 0x00001 - [ 0xc000000000000000 - 0xc001ffffffffffff] + * 0x00002 - [ 0xc002000000000000 - 0xc003ffffffffffff] + * 0x00003 - [ 0xc004000000000000 - 0xc005ffffffffffff] + * 0x00004 - [ 0xc006000000000000 - 0xc007ffffffffffff] + + * 0x00005 - [ 0xd000000000000000 - 0xd001ffffffffffff ] + * 0x00006 - Not used - Can map 0xe000000000000000 range. + * 0x00007 - [ 0xf000000000000000 - 0xf001ffffffffffff ] + * + * So we can compute the context from the region (top nibble) by + * subtracting 11, or 0xc - 1. + */ +static inline unsigned long get_kernel_context(unsigned long ea) +{ + unsigned long region_id = REGION_ID(ea); + unsigned long ctx; + /* + * For linear mapping we do support multiple context + */ + if (region_id == KERNEL_REGION_ID) { + /* + * We already verified ea to be not beyond the addr limit. + */ + ctx = 1 + ((ea & ~REGION_MASK) >> MAX_EA_BITS_PER_CONTEXT); + } else + ctx = (region_id - 0xc) + MAX_KERNEL_CTX_CNT; + return ctx; +} + +/* * This is only valid for addresses >= PAGE_OFFSET */ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) @@ -743,20 +809,7 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) if (!is_kernel_addr(ea)) return 0; - /* - * For kernel space, we use context ids 1-4 to map the address space as - * below: - * - * 0x00001 - [ 0xc000000000000000 - 0xc0003fffffffffff ] - * 0x00002 - [ 0xd000000000000000 - 0xd0003fffffffffff ] - * 0x00003 - [ 0xe000000000000000 - 0xe0003fffffffffff ] - * 0x00004 - [ 0xf000000000000000 - 0xf0003fffffffffff ] - * - * So we can compute the context from the region (top nibble) by - * subtracting 11, or 0xc - 1. 
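
A standalone model of the get_kernel_context() arithmetic added above, using the 512TB-per-context layout described in the comment; the 49 EA bits per context and the four linear-map contexts (from the 51-bit MAX_PHYSMEM_BITS mentioned there) are taken as assumptions for this sketch:

#include <stdio.h>

#define MODEL_EA_BITS_PER_CONTEXT 49UL			/* assumed: 512TB per context */
#define MODEL_KERNEL_CTX_CNT      4UL			/* assumed: 51-bit MAX_PHYSMEM_BITS */
#define MODEL_REGION_MASK         (0xfUL << 60)

static unsigned long model_kernel_context(unsigned long ea)
{
	unsigned long region_id = ea >> 60;

	if (region_id == 0xc)	/* linear map: one context per 512TB slice */
		return 1 + ((ea & ~MODEL_REGION_MASK) >> MODEL_EA_BITS_PER_CONTEXT);
	return (region_id - 0xc) + MODEL_KERNEL_CTX_CNT;
}

int main(void)
{
	/* expected per the table above: 1, 2, 5 (vmalloc), 7 (0xf region) */
	printf("%lu\n", model_kernel_context(0xc000000000000000UL));
	printf("%lu\n", model_kernel_context(0xc002000000000000UL));
	printf("%lu\n", model_kernel_context(0xd000000000000000UL));
	printf("%lu\n", model_kernel_context(0xf000000000000000UL));
	return 0;
}
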
- */ - context = (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET; - + context = get_kernel_context(ea); return get_vsid(context, ea, ssize); } diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 9c8c669a6b6a..6328857f259f 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -208,7 +208,7 @@ extern void radix_init_pseries(void); static inline void radix_init_pseries(void) { }; #endif -static inline int get_ea_context(mm_context_t *ctx, unsigned long ea) +static inline int get_user_context(mm_context_t *ctx, unsigned long ea) { int index = ea >> MAX_EA_BITS_PER_CONTEXT; @@ -223,7 +223,7 @@ static inline int get_ea_context(mm_context_t *ctx, unsigned long ea) static inline unsigned long get_user_vsid(mm_context_t *ctx, unsigned long ea, int ssize) { - unsigned long context = get_ea_context(ctx, ea); + unsigned long context = get_user_context(ctx, ea); return get_vsid(context, ea, ssize); } diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h index d7ee249d6890..e3d4dd4ae2fa 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h @@ -10,6 +10,9 @@ * * Defined in such a way that we can optimize away code block at build time * if CONFIG_HUGETLB_PAGE=n. + * + * returns true for pmd migration entries, THP, devmap, hugetlb + * But compile time dependent on CONFIG_HUGETLB_PAGE */ static inline int pmd_huge(pmd_t pmd) { diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 2a2486526d1f..c4a726c10af5 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -14,10 +14,6 @@ */ #define _PAGE_BIT_SWAP_TYPE 0 -#define _PAGE_NA 0 -#define _PAGE_RO 0 -#define _PAGE_USER 0 - #define _PAGE_EXEC 0x00001 /* execute permission */ #define _PAGE_WRITE 0x00002 /* write access allowed */ #define _PAGE_READ 0x00004 /* read access allowed */ @@ -123,10 +119,6 @@ #define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \ _PAGE_RW | _PAGE_EXEC) /* - * No page size encoding in the linux PTE - */ -#define _PAGE_PSIZE 0 -/* * _PAGE_CHG_MASK masks of bits that are to be preserved across * pgprot changes */ @@ -137,19 +129,12 @@ #define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \ H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4) /* - * Mask of bits returned by pte_pgprot() - */ -#define PAGE_PROT_BITS (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \ - H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \ - _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \ - _PAGE_SOFT_DIRTY | H_PTE_PKEY) -/* * We define 2 sets of base prot bits, one for basic pages (ie, * cacheable kernel and user pages) and one for non cacheable * pages. We always set _PAGE_COHERENT when SMP is enabled or * the processor might need it for DMA coherency. */ -#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) #define _PAGE_BASE (_PAGE_BASE_NC) /* Permission masks used to generate the __P and __S table, @@ -159,8 +144,6 @@ * Write permissions imply read permissions for now (we could make write-only * pages on BookE but we don't bother for now). 
Execute permission control is * possible on platforms that define _PAGE_EXEC - * - * Note due to the way vm flags are laid out, the bits are XWR */ #define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_PRIVILEGED) #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW) @@ -170,24 +153,6 @@ #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ) #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC) -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_X -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY_X -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_X -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED_X -#define __S111 PAGE_SHARED_X - /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ @@ -461,6 +426,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0); } +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -519,7 +485,11 @@ static inline int pte_special(pte_t pte) return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL)); } -static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } +static inline bool pte_exec(pte_t pte) +{ + return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC)); +} + #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY static inline bool pte_soft_dirty(pte_t pte) @@ -529,12 +499,12 @@ static inline bool pte_soft_dirty(pte_t pte) static inline pte_t pte_mksoft_dirty(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY); + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SOFT_DIRTY)); } static inline pte_t pte_clear_soft_dirty(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY); + return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SOFT_DIRTY)); } #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ @@ -555,7 +525,7 @@ static inline pte_t pte_mk_savedwrite(pte_t pte) */ VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) != cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED)); - return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED); + return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED)); } #define pte_clear_savedwrite pte_clear_savedwrite @@ -565,14 +535,14 @@ static inline pte_t pte_clear_savedwrite(pte_t pte) * Used by KSM subsystem to make a protnone pte readonly. 
*/ VM_BUG_ON(!pte_protnone(pte)); - return __pte(pte_val(pte) | _PAGE_PRIVILEGED); + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED)); } #else #define pte_clear_savedwrite pte_clear_savedwrite static inline pte_t pte_clear_savedwrite(pte_t pte) { VM_WARN_ON(1); - return __pte(pte_val(pte) & ~_PAGE_WRITE); + return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE)); } #endif /* CONFIG_NUMA_BALANCING */ @@ -587,6 +557,11 @@ static inline int pte_present(pte_t pte) return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)); } +static inline bool pte_hw_valid(pte_t pte) +{ + return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT)); +} + #ifdef CONFIG_PPC_MEM_KEYS extern bool arch_pte_access_permitted(u64 pte, bool write, bool execute); #else @@ -596,25 +571,22 @@ static inline bool arch_pte_access_permitted(u64 pte, bool write, bool execute) } #endif /* CONFIG_PPC_MEM_KEYS */ +static inline bool pte_user(pte_t pte) +{ + return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED)); +} + #define pte_access_permitted pte_access_permitted static inline bool pte_access_permitted(pte_t pte, bool write) { - unsigned long pteval = pte_val(pte); - /* Also check for pte_user */ - unsigned long clear_pte_bits = _PAGE_PRIVILEGED; /* * _PAGE_READ is needed for any access and will be * cleared for PROT_NONE */ - unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_READ; - - if (write) - need_pte_bits |= _PAGE_WRITE; - - if ((pteval & need_pte_bits) != need_pte_bits) + if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte)) return false; - if ((pteval & clear_pte_bits) == clear_pte_bits) + if (write && !pte_write(pte)) return false; return arch_pte_access_permitted(pte_val(pte), write, 0); @@ -643,17 +615,32 @@ static inline pte_t pte_wrprotect(pte_t pte) { if (unlikely(pte_savedwrite(pte))) return pte_clear_savedwrite(pte); - return __pte(pte_val(pte) & ~_PAGE_WRITE); + return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE)); +} + +static inline pte_t pte_exprotect(pte_t pte) +{ + return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_EXEC)); } static inline pte_t pte_mkclean(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_DIRTY); + return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_DIRTY)); } static inline pte_t pte_mkold(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_ACCESSED); + return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_ACCESSED)); +} + +static inline pte_t pte_mkexec(pte_t pte) +{ + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC)); +} + +static inline pte_t pte_mkpte(pte_t pte) +{ + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE)); } static inline pte_t pte_mkwrite(pte_t pte) @@ -661,22 +648,22 @@ static inline pte_t pte_mkwrite(pte_t pte) /* * write implies read, hence set both */ - return __pte(pte_val(pte) | _PAGE_RW); + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_RW)); } static inline pte_t pte_mkdirty(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY); + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY | _PAGE_SOFT_DIRTY)); } static inline pte_t pte_mkyoung(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_ACCESSED); + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_ACCESSED)); } static inline pte_t pte_mkspecial(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_SPECIAL); + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL)); } static inline pte_t pte_mkhuge(pte_t pte) @@ -686,7 +673,17 @@ static inline pte_t pte_mkhuge(pte_t pte) static inline pte_t pte_mkdevmap(pte_t pte) { - return __pte(pte_val(pte) | 
_PAGE_SPECIAL|_PAGE_DEVMAP); + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP)); +} + +static inline pte_t pte_mkprivileged(pte_t pte) +{ + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED)); +} + +static inline pte_t pte_mkuser(pte_t pte) +{ + return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED)); } /* @@ -705,12 +702,8 @@ static inline int pte_devmap(pte_t pte) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { /* FIXME!! check whether this need to be a conditional */ - return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); -} - -static inline bool pte_user(pte_t pte) -{ - return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED)); + return __pte_raw((pte_raw(pte) & cpu_to_be64(_PAGE_CHG_MASK)) | + cpu_to_be64(pgprot_val(newprot))); } /* Encode and de-code a swap entry */ @@ -741,6 +734,8 @@ static inline bool pte_user(pte_t pte) */ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE }) #define __swp_entry_to_pte(x) __pte((x).val | _PAGE_PTE) +#define __pmd_to_swp_entry(pmd) (__pte_to_swp_entry(pmd_pte(pmd))) +#define __swp_entry_to_pmd(x) (pte_pmd(__swp_entry_to_pte(x))) #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SWP_SOFT_DIRTY (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE)) @@ -751,7 +746,7 @@ static inline bool pte_user(pte_t pte) #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY static inline pte_t pte_swp_mksoft_dirty(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY); + return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_SOFT_DIRTY)); } static inline bool pte_swp_soft_dirty(pte_t pte) @@ -761,7 +756,7 @@ static inline bool pte_swp_soft_dirty(pte_t pte) static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) { - return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY); + return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_SOFT_DIRTY)); } #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ @@ -850,10 +845,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t prot) */ static inline bool pte_ci(pte_t pte) { - unsigned long pte_v = pte_val(pte); + __be64 pte_v = pte_raw(pte); - if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) || - ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)) + if (((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_TOLERANT)) || + ((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_NON_IDEMPOTENT))) return true; return false; } @@ -875,8 +870,16 @@ static inline int pmd_none(pmd_t pmd) static inline int pmd_present(pmd_t pmd) { + /* + * A pmd is considerent present if _PAGE_PRESENT is set. + * We also need to consider the pmd present which is marked + * invalid during a split. Hence we look for _PAGE_INVALID + * if we find _PAGE_PRESENT cleared. 
+ */ + if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) + return true; - return !pmd_none(pmd); + return false; } static inline int pmd_bad(pmd_t pmd) @@ -903,7 +906,7 @@ static inline int pud_none(pud_t pud) static inline int pud_present(pud_t pud) { - return !pud_none(pud); + return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); } extern struct page *pud_page(pud_t pud); @@ -950,7 +953,7 @@ static inline int pgd_none(pgd_t pgd) static inline int pgd_present(pgd_t pgd) { - return !pgd_none(pgd); + return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); } static inline pte_t pgd_pte(pgd_t pgd) @@ -1020,17 +1023,16 @@ extern struct page *pgd_page(pgd_t pgd); #define pgd_ERROR(e) \ pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) -static inline int map_kernel_page(unsigned long ea, unsigned long pa, - unsigned long flags) +static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) { if (radix_enabled()) { #if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM) unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift; WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE"); #endif - return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE); + return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE); } - return hash__map_kernel_page(ea, pa, flags); + return hash__map_kernel_page(ea, pa, prot); } static inline int __meminit vmemmap_create_mapping(unsigned long start, @@ -1082,6 +1084,12 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd) #define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd)) #define pmd_mksoft_dirty(pmd) pte_pmd(pte_mksoft_dirty(pmd_pte(pmd))) #define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd))) + +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION +#define pmd_swp_mksoft_dirty(pmd) pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd))) +#define pmd_swp_soft_dirty(pmd) pte_swp_soft_dirty(pmd_pte(pmd)) +#define pmd_swp_clear_soft_dirty(pmd) pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd))) +#endif #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ #ifdef CONFIG_NUMA_BALANCING @@ -1127,6 +1135,10 @@ pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set); } +/* + * returns true for pmd migration entries, THP, devmap, hugetlb + * But compile time dependent on THP config + */ static inline int pmd_large(pmd_t pmd) { return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)); @@ -1161,8 +1173,22 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED); } +/* + * Only returns true for a THP. False for pmd migration entry. + * We also need to return true when we come across a pte that + * in between a thp split. While splitting THP, we mark the pmd + * invalid (pmdp_invalidate()) before we set it with pte page + * address. A pmd_trans_huge() check against a pmd entry during that time + * should return true. + * We should not call this on a hugetlb entry. 
We should check for HugeTLB + * entry using vma->vm_flags + * The page table walk rule is explained in Documentation/vm/transhuge.rst + */ static inline int pmd_trans_huge(pmd_t pmd) { + if (!pmd_present(pmd)) + return false; + if (radix_enabled()) return radix__pmd_trans_huge(pmd); return hash__pmd_trans_huge(pmd); diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 1154a6dc6d26..671316f9e95d 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -53,6 +53,7 @@ extern void radix__flush_tlb_lpid_page(unsigned int lpid, unsigned long addr, unsigned long page_size); extern void radix__flush_pwc_lpid(unsigned int lpid); +extern void radix__flush_tlb_lpid(unsigned int lpid); extern void radix__local_flush_tlb_lpid(unsigned int lpid); extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid); diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index fd06dbe7d7d3..fed7e6241349 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -133,7 +133,7 @@ struct pt_regs; extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); extern void bad_page_fault(struct pt_regs *, unsigned long, int); extern void _exception(int, struct pt_regs *, int, unsigned long); -extern void _exception_pkey(int, struct pt_regs *, int, unsigned long, int); +extern void _exception_pkey(struct pt_regs *, unsigned long, int); extern void die(const char *, struct pt_regs *, long); extern bool die_will_crash(void); extern void panic_flush_kmsg_start(void); diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 85c8af2bb272..74d0db511099 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -8,6 +8,8 @@ #include <linux/types.h> #include <linux/sched.h> +#include <asm-generic/compat.h> + #define COMPAT_USER_HZ 100 #ifdef __BIG_ENDIAN__ #define COMPAT_UTS_MACHINE "ppc\0\0" @@ -15,34 +17,18 @@ #define COMPAT_UTS_MACHINE "ppcle\0\0" #endif -typedef u32 compat_size_t; -typedef s32 compat_ssize_t; -typedef s32 compat_clock_t; -typedef s32 compat_pid_t; typedef u32 __compat_uid_t; typedef u32 __compat_gid_t; typedef u32 __compat_uid32_t; typedef u32 __compat_gid32_t; typedef u32 compat_mode_t; -typedef u32 compat_ino_t; typedef u32 compat_dev_t; -typedef s32 compat_off_t; -typedef s64 compat_loff_t; typedef s16 compat_nlink_t; typedef u16 compat_ipc_pid_t; -typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s32 compat_key_t; -typedef s32 compat_timer_t; - -typedef s32 compat_int_t; -typedef s32 compat_long_t; typedef s64 compat_s64; -typedef u32 compat_uint_t; -typedef u32 compat_ulong_t; typedef u64 compat_u64; -typedef u32 compat_uptr_t; struct compat_stat { compat_dev_t st_dev; @@ -55,11 +41,11 @@ struct compat_stat { compat_off_t st_size; compat_off_t st_blksize; compat_off_t st_blocks; - compat_time_t st_atime; + old_time32_t st_atime; u32 st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; u32 st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; u32 st_ctime_nsec; u32 __unused4[2]; }; diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h index d71a90924f3b..deb99fd6e060 100644 --- a/arch/powerpc/include/asm/cputhreads.h +++ b/arch/powerpc/include/asm/cputhreads.h @@ -23,11 +23,13 @@ extern int threads_per_core; extern int 
threads_per_subcore; extern int threads_shift; +extern bool has_big_cores; extern cpumask_t threads_core_mask; #else #define threads_per_core 1 #define threads_per_subcore 1 #define threads_shift 0 +#define has_big_cores 0 #define threads_core_mask (*get_cpu_mask(0)) #endif diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 133672744b2e..ae73dc8da2d4 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -61,7 +61,6 @@ static inline void arch_vtime_task_switch(struct task_struct *prev) struct cpu_accounting_data *acct0 = get_accounting(prev); acct->starttime = acct0->starttime; - acct->startspurr = acct0->startspurr; } #endif diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h index ce242b9ea8c6..7c1d8e74b25d 100644 --- a/arch/powerpc/include/asm/drmem.h +++ b/arch/powerpc/include/asm/drmem.h @@ -99,4 +99,9 @@ void __init walk_drmem_lmbs_early(unsigned long node, void (*func)(struct drmem_lmb *, const __be32 **)); #endif +static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb) +{ + lmb->aa_index = 0xffffffff; +} + #endif /* _ASM_POWERPC_LMB_H */ diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 219637ea69a1..8b596d096ebe 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -43,7 +43,6 @@ struct pci_dn; #define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */ #define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */ #define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */ -#define EEH_POSTPONED_PROBE 0x80 /* Powernv may postpone device probe */ /* * Delay for PE reset, all in ms @@ -99,13 +98,13 @@ struct eeh_pe { atomic_t pass_dev_cnt; /* Count of passed through devs */ struct eeh_pe *parent; /* Parent PE */ void *data; /* PE auxillary data */ - struct list_head child_list; /* Link PE to the child list */ - struct list_head edevs; /* Link list of EEH devices */ - struct list_head child; /* Child PEs */ + struct list_head child_list; /* List of PEs below this PE */ + struct list_head child; /* Memb. 
child_list/eeh_phb_pe */ + struct list_head edevs; /* List of eeh_dev in this PE */ }; #define eeh_pe_for_each_dev(pe, edev, tmp) \ - list_for_each_entry_safe(edev, tmp, &pe->edevs, list) + list_for_each_entry_safe(edev, tmp, &pe->edevs, entry) #define eeh_for_each_pe(root, pe) \ for (pe = root; pe; pe = eeh_pe_next(pe, root)) @@ -142,13 +141,12 @@ struct eeh_dev { int aer_cap; /* Saved AER capability */ int af_cap; /* Saved AF capability */ struct eeh_pe *pe; /* Associated PE */ - struct list_head list; /* Form link list in the PE */ - struct list_head rmv_list; /* Record the removed edevs */ + struct list_head entry; /* Membership in eeh_pe.edevs */ + struct list_head rmv_entry; /* Membership in rmv_list */ struct pci_dn *pdn; /* Associated PCI device node */ struct pci_dev *pdev; /* Associated PCI device */ bool in_error; /* Error flag for edev */ struct pci_dev *physfn; /* Associated SRIOV PF */ - struct pci_bus *bus; /* PCI bus for partial hotplug */ }; static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev) @@ -207,9 +205,8 @@ struct eeh_ops { void* (*probe)(struct pci_dn *pdn, void *data); int (*set_option)(struct eeh_pe *pe, int option); int (*get_pe_addr)(struct eeh_pe *pe); - int (*get_state)(struct eeh_pe *pe, int *state); + int (*get_state)(struct eeh_pe *pe, int *delay); int (*reset)(struct eeh_pe *pe, int option); - int (*wait_state)(struct eeh_pe *pe, int max_wait); int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len); int (*configure_bridge)(struct eeh_pe *pe); int (*err_inject)(struct eeh_pe *pe, int type, int func, @@ -243,11 +240,7 @@ static inline bool eeh_has_flag(int flag) static inline bool eeh_enabled(void) { - if (eeh_has_flag(EEH_FORCE_DISABLED) || - !eeh_has_flag(EEH_ENABLED)) - return false; - - return true; + return eeh_has_flag(EEH_ENABLED) && !eeh_has_flag(EEH_FORCE_DISABLED); } static inline void eeh_serialize_lock(unsigned long *flags) @@ -270,6 +263,7 @@ typedef void *(*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag); typedef void *(*eeh_pe_traverse_func)(struct eeh_pe *pe, void *flag); void eeh_set_pe_aux_size(int size); int eeh_phb_pe_create(struct pci_controller *phb); +int eeh_wait_state(struct eeh_pe *pe, int max_wait); struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root); struct eeh_pe *eeh_pe_get(struct pci_controller *phb, diff --git a/arch/powerpc/include/asm/error-injection.h b/arch/powerpc/include/asm/error-injection.h new file mode 100644 index 000000000000..62fd24739852 --- /dev/null +++ b/arch/powerpc/include/asm/error-injection.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _ASM_ERROR_INJECTION_H +#define _ASM_ERROR_INJECTION_H + +#include <linux/compiler.h> +#include <linux/linkage.h> +#include <asm/ptrace.h> +#include <asm-generic/error-injection.h> + +void override_function_with_return(struct pt_regs *regs); + +#endif /* _ASM_ERROR_INJECTION_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index a86feddddad0..3b4767ed3ec5 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -61,14 +61,6 @@ #define MAX_MCE_DEPTH 4 /* - * EX_LR is only used in EXSLB and where it does not overlap with EX_DAR - * EX_CCR similarly with DSISR, but being 4 byte registers there is a hole - * in the save area so it's not necessary to overlap them. 
Could be used - * for future savings though if another 4 byte register was to be saved. - */ -#define EX_LR EX_DAR - -/* * EX_R3 is only used by the bad_stack handler. bad_stack reloads and * saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap * with EX_DAR. @@ -236,11 +228,10 @@ * PPR save/restore macros used in exceptions_64s.S * Used for P7 or later processors */ -#define SAVE_PPR(area, ra, rb) \ +#define SAVE_PPR(area, ra) \ BEGIN_FTR_SECTION_NESTED(940) \ - ld ra,PACACURRENT(r13); \ - ld rb,area+EX_PPR(r13); /* Read PPR from paca */ \ - std rb,TASKTHREADPPR(ra); \ + ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \ + std ra,_PPR(r1); \ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940) #define RESTORE_PPR_PACA(area, ra) \ @@ -508,7 +499,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) 3: EXCEPTION_PROLOG_COMMON_1(); \ beq 4f; /* if from kernel mode */ \ ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \ - SAVE_PPR(area, r9, r10); \ + SAVE_PPR(area, r9); \ 4: EXCEPTION_PROLOG_COMMON_2(area) \ EXCEPTION_PROLOG_COMMON_3(n) \ ACCOUNT_STOLEN_TIME diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index 7a051bd21f87..00bc42d95679 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h @@ -52,6 +52,8 @@ #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) #define FW_FEATURE_DRMEM_V2 ASM_CONST(0x0000000400000000) #define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000800000000) +#define FW_FEATURE_BLOCK_REMOVE ASM_CONST(0x0000001000000000) +#define FW_FEATURE_PAPR_SCM ASM_CONST(0x0000002000000000) #ifndef __ASSEMBLY__ @@ -69,7 +71,8 @@ enum { FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN | FW_FEATURE_HPT_RESIZE | FW_FEATURE_DRMEM_V2 | - FW_FEATURE_DRC_INFO, + FW_FEATURE_DRC_INFO | FW_FEATURE_BLOCK_REMOVE | + FW_FEATURE_PAPR_SCM, FW_FEATURE_PSERIES_ALWAYS = 0, FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL, FW_FEATURE_POWERNV_ALWAYS = 0, diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 41cc15c14eee..b9fbed84ddca 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -72,7 +72,7 @@ enum fixed_addresses { static inline void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) { - map_kernel_page(fix_to_virt(idx), phys, pgprot_val(flags)); + map_kernel_page(fix_to_virt(idx), phys, flags); } #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 2d00cc530083..383da1ab9e23 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -4,7 +4,6 @@ #ifdef CONFIG_HUGETLB_PAGE #include <asm/page.h> -#include <asm-generic/hugetlb.h> extern struct kmem_cache *hugepte_cache; @@ -110,31 +109,12 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma, void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); #endif +#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); -/* - * If the arch doesn't supply something else, assume that hugepage - * size aligned regions are ok without further preparation. 
- */ -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - struct hstate *h = hstate_file(file); - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (addr & ~huge_page_mask(h)) - return -EINVAL; - return 0; -} - -static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - set_pte_at(mm, addr, ptep, pte); -} - +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -145,6 +125,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, #endif } +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { @@ -153,29 +134,17 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, flush_hugetlb_page(vma, addr); } -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} - -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty); -static inline pte_t huge_ptep_get(pte_t *ptep) -{ - return *ptep; -} - static inline void arch_clear_hugepage_flags(struct page *page) { } +#include <asm-generic/hugetlb.h> + #else /* ! CONFIG_HUGETLB_PAGE */ static inline void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index a0b17f9f1ea4..33a4fc891947 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -278,6 +278,7 @@ #define H_COP 0x304 #define H_GET_MPP_X 0x314 #define H_SET_MODE 0x31C +#define H_BLOCK_REMOVE 0x328 #define H_CLEAR_HPT 0x358 #define H_REQUEST_VMC 0x360 #define H_RESIZE_HPT_PREPARE 0x36C @@ -295,7 +296,15 @@ #define H_INT_ESB 0x3C8 #define H_INT_SYNC 0x3CC #define H_INT_RESET 0x3D0 -#define MAX_HCALL_OPCODE H_INT_RESET +#define H_SCM_READ_METADATA 0x3E4 +#define H_SCM_WRITE_METADATA 0x3E8 +#define H_SCM_BIND_MEM 0x3EC +#define H_SCM_UNBIND_MEM 0x3F0 +#define H_SCM_QUERY_BLOCK_MEM_BINDING 0x3F4 +#define H_SCM_QUERY_LOGICAL_MEM_BINDING 0x3F8 +#define H_SCM_MEM_QUERY 0x3FC +#define H_SCM_BLOCK_CLEAR 0x400 +#define MAX_HCALL_OPCODE H_SCM_BLOCK_CLEAR /* H_VIOCTL functions */ #define H_GET_VIOA_DUMP_SIZE 0x01 @@ -322,6 +331,11 @@ #define H_GET_24X7_DATA 0xF07C #define H_GET_PERF_COUNTER_INFO 0xF080 +/* Platform-specific hcalls used for nested HV KVM */ +#define H_SET_PARTITION_TABLE 0xF800 +#define H_ENTER_NESTED 0xF804 +#define H_TLB_INVALIDATE 0xF808 + /* Values for 2nd argument to H_SET_MODE */ #define H_SET_MODE_RESOURCE_SET_CIABR 1 #define H_SET_MODE_RESOURCE_SET_DAWR 2 @@ -461,6 +475,42 @@ struct h_cpu_char_result { u64 behaviour; }; +/* Register state for entering a nested guest with H_ENTER_NESTED */ +struct hv_guest_state { + u64 version; /* version of this structure layout */ + u32 lpid; + u32 vcpu_token; + /* These registers are hypervisor privileged (at least for writing) */ + u64 lpcr; + u64 pcr; + u64 amor; + u64 dpdes; + u64 hfscr; + s64 tb_offset; + u64 dawr0; + u64 dawrx0; + u64 ciabr; + u64 hdec_expiry; + u64 purr; + u64 spurr; + u64 ic; + u64 vtb; + u64 hdar; + u64 hdsisr; + u64 heir; + u64 asdr; + /* These are OS privileged but need to be set late in guest entry */ + u64 srr0; + u64 srr1; + u64 sprg[4]; + u64 pidr; + u64 
cfar; + u64 ppr; +}; + +/* Latest version of hv_guest_state structure */ +#define HV_GUEST_STATE_VERSION 1 + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HVCALL_H */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index e0331e754568..3ef40b703c4a 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -3,6 +3,9 @@ #ifdef __KERNEL__ #define ARCH_HAS_IOREMAP_WC +#ifdef CONFIG_PPC32 +#define ARCH_HAS_IOREMAP_WT +#endif /* * This program is free software; you can redistribute it and/or @@ -108,25 +111,6 @@ extern bool isa_io_special; #define IO_SET_SYNC_FLAG() #endif -/* gcc 4.0 and older doesn't have 'Z' constraint */ -#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0) -#define DEF_MMIO_IN_X(name, size, insn) \ -static inline u##size name(const volatile u##size __iomem *addr) \ -{ \ - u##size ret; \ - __asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync" \ - : "=r" (ret) : "r" (addr), "m" (*addr) : "memory"); \ - return ret; \ -} - -#define DEF_MMIO_OUT_X(name, size, insn) \ -static inline void name(volatile u##size __iomem *addr, u##size val) \ -{ \ - __asm__ __volatile__("sync;"#insn" %1,0,%2" \ - : "=m" (*addr) : "r" (val), "r" (addr) : "memory"); \ - IO_SET_SYNC_FLAG(); \ -} -#else /* newer gcc */ #define DEF_MMIO_IN_X(name, size, insn) \ static inline u##size name(const volatile u##size __iomem *addr) \ { \ @@ -143,7 +127,6 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \ : "=Z" (*addr) : "r" (val) : "memory"); \ IO_SET_SYNC_FLAG(); \ } -#endif #define DEF_MMIO_IN_D(name, size, insn) \ static inline u##size name(const volatile u##size __iomem *addr) \ @@ -746,6 +729,10 @@ static inline void iosync(void) * * * ioremap_wc enables write combining * + * * ioremap_wt enables write through + * + * * ioremap_coherent maps coherent cached memory + * * * iounmap undoes such a mapping and can be hooked * * * __ioremap_at (and the pending __iounmap_at) are low level functions to @@ -767,6 +754,8 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size); extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, unsigned long flags); extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size); +void __iomem *ioremap_wt(phys_addr_t address, unsigned long size); +void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size); #define ioremap_nocache(addr, size) ioremap((addr), (size)) #define ioremap_uc(addr, size) ioremap((addr), (size)) #define ioremap_cache(addr, size) \ @@ -777,12 +766,12 @@ extern void iounmap(volatile void __iomem *addr); extern void __iomem *__ioremap(phys_addr_t, unsigned long size, unsigned long flags); extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size, - unsigned long flags, void *caller); + pgprot_t prot, void *caller); extern void __iounmap(volatile void __iomem *addr); extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea, - unsigned long size, unsigned long flags); + unsigned long size, pgprot_t prot); extern void __iounmap_at(void *ea, unsigned long size); /* diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 3d4b88cb8599..35db0cbc9222 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -126,7 +126,7 @@ struct iommu_table { int it_nid; }; -#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \ +#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \ ((tbl)->it_ops->useraddrptr((tbl), (entry), false)) #define 
IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \ ((tbl)->it_ops->useraddrptr((tbl), (entry), true)) diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h index 9db24e77b9f4..a9e098a3b881 100644 --- a/arch/powerpc/include/asm/kgdb.h +++ b/arch/powerpc/include/asm/kgdb.h @@ -26,9 +26,12 @@ #define BREAK_INSTR_SIZE 4 #define BUFMAX ((NUMREGBYTES * 2) + 512) #define OUTBUFMAX ((NUMREGBYTES * 2) + 512) + +#define BREAK_INSTR 0x7d821008 /* twge r2, r2 */ + static inline void arch_kgdb_breakpoint(void) { - asm(".long 0x7d821008"); /* twge r2, r2 */ + asm(stringify_in_c(.long BREAK_INSTR)); } #define CACHE_FLUSH_IS_SAFE 1 #define DBG_MAX_REG_NUM 70 diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index a790d5cf6ea3..1f321914676d 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -84,7 +84,6 @@ #define BOOK3S_INTERRUPT_INST_STORAGE 0x400 #define BOOK3S_INTERRUPT_INST_SEGMENT 0x480 #define BOOK3S_INTERRUPT_EXTERNAL 0x500 -#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501 #define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502 #define BOOK3S_INTERRUPT_ALIGNMENT 0x600 #define BOOK3S_INTERRUPT_PROGRAM 0x700 @@ -134,8 +133,7 @@ #define BOOK3S_IRQPRIO_EXTERNAL 14 #define BOOK3S_IRQPRIO_DECREMENTER 15 #define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 16 -#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 17 -#define BOOK3S_IRQPRIO_MAX 18 +#define BOOK3S_IRQPRIO_MAX 17 #define BOOK3S_HFLAG_DCBZ32 0x1 #define BOOK3S_HFLAG_SLB 0x2 diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 83a9aa3cf689..09f8e9ba69bc 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -188,14 +188,37 @@ extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc); extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr); +extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, u64 root, + u64 *pte_ret_p); +extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, u64 table, + int table_index, u64 *pte_ret_p); extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite); +extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, + unsigned int shift, struct kvm_memory_slot *memslot, + unsigned int lpid); +extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, + bool writing, unsigned long gpa, + unsigned int lpid); +extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, + unsigned long gpa, + struct kvm_memory_slot *memslot, + bool writing, bool kvm_ro, + pte_t *inserted_pte, unsigned int *levelp); extern int kvmppc_init_vm_radix(struct kvm *kvm); extern void kvmppc_free_radix(struct kvm *kvm); +extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, + unsigned int lpid); extern int kvmppc_radix_init(void); extern void kvmppc_radix_exit(void); extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn); +extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, + unsigned long gpa, unsigned int shift, + struct kvm_memory_slot *memslot, + unsigned int lpid); extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn); extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, @@ -271,6 +294,21 @@ static inline 
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {} static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {} #endif +long kvmhv_nested_init(void); +void kvmhv_nested_exit(void); +void kvmhv_vm_nested_init(struct kvm *kvm); +long kvmhv_set_partition_table(struct kvm_vcpu *vcpu); +void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1); +void kvmhv_release_all_nested(struct kvm *kvm); +long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu); +long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu); +int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu, + u64 time_limit, unsigned long lpcr); +void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr); +void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, + struct hv_guest_state *hr); +long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu); + void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); extern int kvm_irq_bypass; @@ -301,12 +339,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { - vcpu->arch.cr = val; + vcpu->arch.regs.ccr = val; } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr; + return vcpu->arch.regs.ccr; } static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) @@ -384,9 +422,6 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu); /* TO = 31 for unconditional trap */ #define INS_TW 0x7fe00008 -/* LPIDs we support with this build -- runtime limit may be lower */ -#define KVMPPC_NR_LPIDS (LPID_RSVD + 1) - #define SPLIT_HACK_MASK 0xff000000 #define SPLIT_HACK_OFFS 0xfb000000 diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index dc435a5af7d6..6d298145d564 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -23,6 +23,108 @@ #include <linux/string.h> #include <asm/bitops.h> #include <asm/book3s/64/mmu-hash.h> +#include <asm/cpu_has_feature.h> +#include <asm/ppc-opcode.h> + +#ifdef CONFIG_PPC_PSERIES +static inline bool kvmhv_on_pseries(void) +{ + return !cpu_has_feature(CPU_FTR_HVMODE); +} +#else +static inline bool kvmhv_on_pseries(void) +{ + return false; +} +#endif + +/* + * Structure for a nested guest, that is, for a guest that is managed by + * one of our guests. 
+ */ +struct kvm_nested_guest { + struct kvm *l1_host; /* L1 VM that owns this nested guest */ + int l1_lpid; /* lpid L1 guest thinks this guest is */ + int shadow_lpid; /* real lpid of this nested guest */ + pgd_t *shadow_pgtable; /* our page table for this guest */ + u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */ + u64 process_table; /* process table entry for this guest */ + long refcnt; /* number of pointers to this struct */ + struct mutex tlb_lock; /* serialize page faults and tlbies */ + struct kvm_nested_guest *next; + cpumask_t need_tlb_flush; + cpumask_t cpu_in_guest; + short prev_cpu[NR_CPUS]; +}; + +/* + * We define a nested rmap entry as a single 64-bit quantity + * 0xFFF0000000000000 12-bit lpid field + * 0x000FFFFFFFFFF000 40-bit guest 4k page frame number + * 0x0000000000000001 1-bit single entry flag + */ +#define RMAP_NESTED_LPID_MASK 0xFFF0000000000000UL +#define RMAP_NESTED_LPID_SHIFT (52) +#define RMAP_NESTED_GPA_MASK 0x000FFFFFFFFFF000UL +#define RMAP_NESTED_IS_SINGLE_ENTRY 0x0000000000000001UL + +/* Structure for a nested guest rmap entry */ +struct rmap_nested { + struct llist_node list; + u64 rmap; +}; + +/* + * for_each_nest_rmap_safe - iterate over the list of nested rmap entries + * safe against removal of the list entry or NULL list + * @pos: a (struct rmap_nested *) to use as a loop cursor + * @node: pointer to the first entry + * NOTE: this can be NULL + * @rmapp: an (unsigned long *) in which to return the rmap entries on each + * iteration + * NOTE: this must point to already allocated memory + * + * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the + * rmap entry in the memslot. The list is always terminated by a "single entry" + * stored in the list element of the final entry of the llist. If there is ONLY + * a single entry then this is itself in the rmap entry of the memslot, not a + * llist head pointer. + * + * Note that the iterator below assumes that a nested rmap entry is always + * non-zero. This is true for our usage because the LPID field is always + * non-zero (zero is reserved for the host). + * + * This should be used to iterate over the list of rmap_nested entries with + * processing done on the u64 rmap value given by each iteration. This is safe + * against removal of list entries and it is always safe to call free on (pos). + * + * e.g. + * struct rmap_nested *cursor; + * struct llist_node *first; + * unsigned long rmap; + * for_each_nest_rmap_safe(cursor, first, &rmap) { + * do_something(rmap); + * free(cursor); + * } + */ +#define for_each_nest_rmap_safe(pos, node, rmapp) \ + for ((pos) = llist_entry((node), typeof(*(pos)), list); \ + (node) && \ + (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \ + ((u64) (node)) : ((pos)->rmap))) && \ + (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? 
\ + ((struct llist_node *) ((pos) = NULL)) : \ + (pos)->list.next)), true); \ + (pos) = llist_entry((node), typeof(*(pos)), list)) + +struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, + bool create); +void kvmhv_put_nested(struct kvm_nested_guest *gp); +int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid); + +/* Encoding of first parameter for H_TLB_INVALIDATE */ +#define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \ + ___PPC_R(r)) /* Power architecture requires HPT is at least 256kiB, at most 64TiB */ #define PPC_MIN_HPT_ORDER 18 @@ -435,6 +537,7 @@ static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm) } extern void kvmppc_mmu_debugfs_init(struct kvm *kvm); +extern void kvmhv_radix_debugfs_init(struct kvm *kvm); extern void kvmhv_rm_send_ipi(int cpu); @@ -482,7 +585,7 @@ static inline u64 sanitize_msr(u64 msr) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) { - vcpu->arch.cr = vcpu->arch.cr_tm; + vcpu->arch.regs.ccr = vcpu->arch.cr_tm; vcpu->arch.regs.xer = vcpu->arch.xer_tm; vcpu->arch.regs.link = vcpu->arch.lr_tm; vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; @@ -499,7 +602,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu) static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) { - vcpu->arch.cr_tm = vcpu->arch.cr; + vcpu->arch.cr_tm = vcpu->arch.regs.ccr; vcpu->arch.xer_tm = vcpu->arch.regs.xer; vcpu->arch.lr_tm = vcpu->arch.regs.link; vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; @@ -515,6 +618,17 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu) } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ +extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, + unsigned long gpa, unsigned int level, + unsigned long mmu_seq, unsigned int lpid, + unsigned long *rmapp, struct rmap_nested **n_rmap); +extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, + struct rmap_nested **n_rmap); +extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm, + struct kvm_memory_slot *memslot, + unsigned long gpa, unsigned long hpa, + unsigned long nbytes); + #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index d978fdf698af..eb3ba6390108 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -25,6 +25,9 @@ #define XICS_MFRR 0xc #define XICS_IPI 2 /* interrupt source # for IPIs */ +/* LPIDs we support with this build -- runtime limit may be lower */ +#define KVMPPC_NR_LPIDS (LPID_RSVD + 1) + /* Maximum number of threads per physical core */ #define MAX_SMT_THREADS 8 diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index d513e3ed1c65..f0cef625f17c 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) { - vcpu->arch.cr = val; + vcpu->arch.regs.ccr = val; } static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) { - return vcpu->arch.cr; + return vcpu->arch.regs.ccr; } static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 906bcbdfd2a1..fac6f631ed29 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ 
-46,6 +46,7 @@ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */ #define KVM_MAX_VCPU_ID (MAX_SMT_THREADS * KVM_MAX_VCORES) +#define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS #else #define KVM_MAX_VCPU_ID KVM_MAX_VCPUS @@ -94,6 +95,7 @@ struct dtl_entry; struct kvmppc_vcpu_book3s; struct kvmppc_book3s_shadow_vcpu; +struct kvm_nested_guest; struct kvm_vm_stat { ulong remote_tlb_flush; @@ -287,10 +289,12 @@ struct kvm_arch { u8 radix; u8 fwnmi_enabled; bool threads_indep; + bool nested_enable; pgd_t *pgtable; u64 process_table; struct dentry *debugfs_dir; struct dentry *htab_dentry; + struct dentry *radix_dentry; struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */ #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE @@ -311,6 +315,9 @@ struct kvm_arch { #endif struct kvmppc_ops *kvm_ops; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + u64 l1_ptcr; + int max_nested_lpid; + struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS]; /* This array can grow quite large, keep it at the end */ struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; #endif @@ -360,7 +367,9 @@ struct kvmppc_pte { bool may_write : 1; bool may_execute : 1; unsigned long wimg; + unsigned long rc; u8 page_size; /* MMU_PAGE_xxx */ + u8 page_shift; }; struct kvmppc_mmu { @@ -537,8 +546,6 @@ struct kvm_vcpu_arch { ulong tar; #endif - u32 cr; - #ifdef CONFIG_PPC_BOOK3S ulong hflags; ulong guest_owned_ext; @@ -707,6 +714,7 @@ struct kvm_vcpu_arch { u8 hcall_needed; u8 epr_flags; /* KVMPPC_EPR_xxx */ u8 epr_needed; + u8 external_oneshot; /* clear external irq after delivery */ u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ @@ -781,6 +789,10 @@ struct kvm_vcpu_arch { u32 emul_inst; u32 online; + + /* For support of nested guests */ + struct kvm_nested_guest *nested; + u32 nested_vcpu_id; #endif #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index e991821dd7fa..9b89b1918dfc 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -194,9 +194,7 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table( (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \ (stt)->size, (ioba), (npages)) ? 
\ H_PARAMETER : H_SUCCESS) -extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt, - unsigned long tce); -extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, +extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, unsigned long *ua, unsigned long **prmap); extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt, unsigned long idx, unsigned long tce); @@ -327,6 +325,7 @@ struct kvmppc_ops { int (*set_smt_mode)(struct kvm *kvm, unsigned long mode, unsigned long flags); void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr); + int (*enable_nested)(struct kvm *kvm); }; extern struct kvmppc_ops *kvmppc_hv_ops; @@ -585,6 +584,7 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval); extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); +extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu); #else static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority) { return -1; } @@ -607,6 +607,7 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status) { return -ENODEV; } +static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { } #endif /* CONFIG_KVM_XIVE */ /* @@ -652,6 +653,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr); int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr); +void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu); /* * Host-side operations we want to set up while running in real diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index a47de82fb8e2..8311869005fa 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -35,7 +35,7 @@ struct machdep_calls { char *name; #ifdef CONFIG_PPC64 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, - unsigned long flags, void *caller); + pgprot_t prot, void *caller); void (*iounmap)(volatile void __iomem *token); #ifdef CONFIG_PM @@ -108,6 +108,7 @@ struct machdep_calls { /* Early exception handlers called in realmode */ int (*hmi_exception_early)(struct pt_regs *regs); + long (*machine_check_early)(struct pt_regs *regs); /* Called during machine check exception to retrieve fixup address. 
*/ bool (*mce_check_early_recovery)(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index 3a1226e9b465..a8b8903e1844 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -210,4 +210,7 @@ extern void release_mce_event(void); extern void machine_check_queue_event(void); extern void machine_check_print_event_info(struct machine_check_event *evt, bool user_mode); +#ifdef CONFIG_PPC_BOOK3S_64 +void flush_and_reload_slb(void); +#endif /* CONFIG_PPC_BOOK3S_64 */ #endif /* __ASM_PPC64_MCE_H__ */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 13ea441ac531..eb20eb3b8fb0 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -309,6 +309,21 @@ static inline u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address) */ #define MMU_PAGE_COUNT 16 +/* + * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS + * if we increase SECTIONS_WIDTH we will not store node details in page->flags and + * page_to_nid does a page->section->node lookup + * Hence only increase for VMEMMAP. Further depending on SPARSEMEM_EXTREME reduce + * memory requirements with large number of sections. + * 51 bits is the max physical real address on POWER9 + */ +#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \ + defined (CONFIG_PPC_64K_PAGES) +#define MAX_PHYSMEM_BITS 51 +#else +#define MAX_PHYSMEM_BITS 46 +#endif + #ifdef CONFIG_PPC_BOOK3S_64 #include <asm/book3s/64/mmu.h> #else /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index b694d6af1150..0381394a425b 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -82,7 +82,7 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea) { int context_id; - context_id = get_ea_context(&mm->context, ea); + context_id = get_user_context(&mm->context, ea); if (!context_id) return true; return false; diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index fad8ddd697ac..0abf2e7fd222 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h @@ -393,7 +393,14 @@ extern struct bus_type mpic_subsys; #define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */ /* Get the version of primary MPIC */ +#ifdef CONFIG_MPIC extern u32 fsl_mpic_primary_get_version(void); +#else +static inline u32 fsl_mpic_primary_get_version(void) +{ + return 0; +} +#endif /* Allocate the controller structure and setup the linux irq descs * for the range if interrupts passed in. No HW initialization is diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index a507a65b0866..3ffb0ff5a038 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -128,14 +128,65 @@ extern int icache_44x_need_flush; #include <asm/nohash/32/pte-8xx.h> #endif -/* And here we include common definitions */ -#include <asm/pte-common.h> +/* + * Location of the PFN in the PTE. Most 32-bit platforms use the same + * as _PAGE_SHIFT here (ie, naturally aligned). + * Platform who don't just pre-define the value so we don't override it here. + */ +#ifndef PTE_RPN_SHIFT +#define PTE_RPN_SHIFT (PAGE_SHIFT) +#endif + +/* + * The mask covered by the RPN must be a ULL on 32-bit platforms with + * 64-bit PTEs. 
+ */ +#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) +#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1)) +#else +#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) +#endif + +/* + * _PAGE_CHG_MASK masks of bits that are to be preserved across + * pgprot changes. + */ +#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL) #ifndef __ASSEMBLY__ #define pte_clear(mm, addr, ptep) \ do { pte_update(ptep, ~0, 0); } while (0) +#ifndef pte_mkwrite +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RW); +} +#endif + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +#ifndef pte_wrprotect +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RW); +} +#endif + +static inline pte_t pte_mkexec(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_EXEC); +} + #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) @@ -244,23 +295,21 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO); -} -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - ptep_set_wrprotect(mm, addr, ptep); -} + unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0))); + unsigned long set = pte_val(pte_wrprotect(__pte(0))); + pte_update(ptep, clr, set); +} static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t entry, unsigned long address, int psize) { - unsigned long set = pte_val(entry) & - (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); - unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA); + pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0))))); + pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0))))); + unsigned long set = pte_val(entry) & pte_val(pte_set); + unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr); pte_update(ptep, clr, set); @@ -323,7 +372,7 @@ static inline int pte_young(pte_t pte) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) -int map_kernel_page(unsigned long va, phys_addr_t pa, int flags); +int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h index bb4b3a4b92a0..661f4599f2fc 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-40x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h @@ -50,13 +50,56 @@ #define _PAGE_EXEC 0x200 /* hardware: EX permission */ #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ +/* No page size encoding in the linux PTE */ +#define _PAGE_PSIZE 0 + +/* cache related flags non existing on 40x */ +#define _PAGE_COHERENT 0 + +#define _PAGE_KERNEL_RO 0 +#define _PAGE_KERNEL_ROX _PAGE_EXEC +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC) + #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ +#define _PMD_PRESENT_MASK _PMD_PRESENT #define _PMD_BAD 0x802 #define _PMD_SIZE_4M 0x0c0 #define 
_PMD_SIZE_16M 0x0e0 +#define _PMD_USER 0 + +#define _PTE_NONE_MASK 0 /* Until my rework is finished, 40x still needs atomic PTE updates */ #define PTE_ATOMIC_UPDATES 1 +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) +#define _PAGE_BASE (_PAGE_BASE_NC) + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + +#ifndef __ASSEMBLY__ +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE)); +} + +#define pte_wrprotect pte_wrprotect + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE)); +} + +#define pte_mkclean pte_mkclean +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h index f812c0272364..78bc304f750e 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-44x.h +++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h @@ -85,14 +85,44 @@ #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ +/* No page size encoding in the linux PTE */ +#define _PAGE_PSIZE 0 + +#define _PAGE_KERNEL_RO 0 +#define _PAGE_KERNEL_ROX _PAGE_EXEC +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) + /* TODO: Add large page lowmem mapping support */ #define _PMD_PRESENT 0 #define _PMD_PRESENT_MASK (PAGE_MASK) #define _PMD_BAD (~PAGE_MASK) +#define _PMD_USER 0 /* ERPN in a PTE never gets cleared, ignore it */ #define _PTE_NONE_MASK 0xffffffff00000000ULL +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. + */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) +#if defined(CONFIG_SMP) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) +#else +#define _PAGE_BASE (_PAGE_BASE_NC) +#endif + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index f04cb46ae8a1..6bfe041ef59d 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -29,10 +29,10 @@ */ /* Definitions for 8xx embedded chips. 
*/ -#define _PAGE_PRESENT 0x0001 /* Page is valid */ -#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */ -#define _PAGE_PRIVILEGED 0x0004 /* No ASID (context) compare */ -#define _PAGE_HUGE 0x0008 /* SPS: Small Page Size (1 if 16k, 512k or 8M)*/ +#define _PAGE_PRESENT 0x0001 /* V: Page is valid */ +#define _PAGE_NO_CACHE 0x0002 /* CI: cache inhibit */ +#define _PAGE_SH 0x0004 /* SH: No ASID (context) compare */ +#define _PAGE_SPS 0x0008 /* SPS: Small Page Size (1 if 16k, 512k or 8M)*/ #define _PAGE_DIRTY 0x0100 /* C: page changed */ /* These 4 software bits must be masked out when the L2 entry is loaded @@ -46,18 +46,95 @@ #define _PAGE_NA 0x0200 /* Supervisor NA, User no access */ #define _PAGE_RO 0x0600 /* Supervisor RO, User no access */ +/* cache related flags non existing on 8xx */ +#define _PAGE_COHERENT 0 +#define _PAGE_WRITETHRU 0 + +#define _PAGE_KERNEL_RO (_PAGE_SH | _PAGE_RO) +#define _PAGE_KERNEL_ROX (_PAGE_SH | _PAGE_RO | _PAGE_EXEC) +#define _PAGE_KERNEL_RW (_PAGE_SH | _PAGE_DIRTY) +#define _PAGE_KERNEL_RWX (_PAGE_SH | _PAGE_DIRTY | _PAGE_EXEC) + #define _PMD_PRESENT 0x0001 +#define _PMD_PRESENT_MASK _PMD_PRESENT #define _PMD_BAD 0x0fd0 #define _PMD_PAGE_MASK 0x000c #define _PMD_PAGE_8M 0x000c #define _PMD_PAGE_512K 0x0004 #define _PMD_USER 0x0020 /* APG 1 */ +#define _PTE_NONE_MASK 0 + /* Until my rework is finished, 8xx still needs atomic PTE updates */ #define PTE_ATOMIC_UPDATES 1 #ifdef CONFIG_PPC_16K_PAGES -#define _PAGE_PSIZE _PAGE_HUGE +#define _PAGE_PSIZE _PAGE_SPS +#else +#define _PAGE_PSIZE 0 +#endif + +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#define _PAGE_BASE (_PAGE_BASE_NC) + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA) +#define PAGE_SHARED __pgprot(_PAGE_BASE) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_RO) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC) + +#ifndef __ASSEMBLY__ +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RO); +} + +#define pte_wrprotect pte_wrprotect + +static inline int pte_write(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_RO); +} + +#define pte_write pte_write + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RO); +} + +#define pte_mkwrite pte_mkwrite + +static inline bool pte_user(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_SH); +} + +#define pte_user pte_user + +static inline pte_t pte_mkprivileged(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SH); +} + +#define pte_mkprivileged pte_mkprivileged + +static inline pte_t pte_mkuser(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_SH); +} + +#define pte_mkuser pte_mkuser + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPS); +} + +#define pte_mkhuge pte_mkhuge #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h index d1ee24e9e137..0fc1bd42bb3e 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h +++ b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h @@ -31,11 +31,44 @@ #define _PAGE_WRITETHRU 0x00400 /* H: W bit */ #define _PAGE_SPECIAL 0x00800 /* S: Special page */ +#define _PAGE_KERNEL_RO 0 +#define _PAGE_KERNEL_ROX _PAGE_EXEC +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | 
_PAGE_RW) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) + +/* No page size encoding in the linux PTE */ +#define _PAGE_PSIZE 0 + #define _PMD_PRESENT 0 #define _PMD_PRESENT_MASK (PAGE_MASK) #define _PMD_BAD (~PAGE_MASK) +#define _PMD_USER 0 + +#define _PTE_NONE_MASK 0 #define PTE_WIMGE_SHIFT (6) +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. + */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) +#else +#define _PAGE_BASE (_PAGE_BASE_NC) +#endif + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 7cd6809f4d33..67421f74efcf 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -89,11 +89,47 @@ * Include the PTE bits definitions */ #include <asm/nohash/pte-book3e.h> -#include <asm/pte-common.h> + +#define _PAGE_SAO 0 + +#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) + +/* + * _PAGE_CHG_MASK masks of bits that are to be preserved across + * pgprot changes. 
+ */ +#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL) + +#define H_PAGE_4K_PFN 0 #ifndef __ASSEMBLY__ /* pte_clear moved to later in this file */ +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RW); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RW); +} + +static inline pte_t pte_mkexec(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_EXEC); +} + #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) @@ -239,6 +275,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); } +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -327,8 +364,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) #define __swp_entry_to_pte(x) __pte((x).val) -extern int map_kernel_page(unsigned long ea, unsigned long pa, - unsigned long flags); +int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); extern int __meminit vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys); diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index b321c82b3624..70ff23974b59 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -8,18 +8,50 @@ #include <asm/nohash/32/pgtable.h> #endif +/* Permission masks used for kernel mappings */ +#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) +#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ + _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) +#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) +#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) + +/* + * Protection used for kernel text. We want the debuggers to be able to + * set breakpoints anywhere, so don't write protect the kernel text + * on platforms where such control is possible. + */ +#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ + defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) +#define PAGE_KERNEL_TEXT PAGE_KERNEL_X +#else +#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX +#endif + +/* Make modules code happy. 
We don't set RO yet */ +#define PAGE_KERNEL_EXEC PAGE_KERNEL_X + +/* Advertise special mapping type for AGP */ +#define PAGE_AGP (PAGE_KERNEL_NC) +#define HAVE_PAGE_AGP + #ifndef __ASSEMBLY__ /* Generic accessors to PTE bits */ +#ifndef pte_write static inline int pte_write(pte_t pte) { - return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; + return pte_val(pte) & _PAGE_RW; } +#endif static inline int pte_read(pte_t pte) { return 1; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } -static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } +static inline bool pte_hashpte(pte_t pte) { return false; } +static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; } +static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } #ifdef CONFIG_NUMA_BALANCING /* @@ -29,8 +61,7 @@ static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PA */ static inline int pte_protnone(pte_t pte) { - return (pte_val(pte) & - (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT; + return pte_present(pte) && !pte_user(pte); } static inline int pmd_protnone(pmd_t pmd) @@ -44,6 +75,23 @@ static inline int pte_present(pte_t pte) return pte_val(pte) & _PAGE_PRESENT; } +static inline bool pte_hw_valid(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +/* + * Don't just check for any non zero bits in __PAGE_USER, since for book3e + * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in + * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too. + */ +#ifndef pte_user +static inline bool pte_user(pte_t pte) +{ + return (pte_val(pte) & _PAGE_USER) == _PAGE_USER; +} +#endif + /* * We only find page table entry in the last level * Hence no need for other accessors @@ -77,53 +125,53 @@ static inline unsigned long pte_pfn(pte_t pte) { return pte_val(pte) >> PTE_RPN_SHIFT; } /* Generic modifiers for PTE bits */ -static inline pte_t pte_wrprotect(pte_t pte) +static inline pte_t pte_exprotect(pte_t pte) { - pte_basic_t ptev; - - ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE); - ptev |= _PAGE_RO; - return __pte(ptev); + return __pte(pte_val(pte) & ~_PAGE_EXEC); } +#ifndef pte_mkclean static inline pte_t pte_mkclean(pte_t pte) { - return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE)); + return __pte(pte_val(pte) & ~_PAGE_DIRTY); } +#endif static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~_PAGE_ACCESSED); } -static inline pte_t pte_mkwrite(pte_t pte) +static inline pte_t pte_mkpte(pte_t pte) { - pte_basic_t ptev; - - ptev = pte_val(pte) & ~_PAGE_RO; - ptev |= _PAGE_RW; - return __pte(ptev); + return pte; } -static inline pte_t pte_mkdirty(pte_t pte) +static inline pte_t pte_mkspecial(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_DIRTY); + return __pte(pte_val(pte) | _PAGE_SPECIAL); } -static inline pte_t pte_mkyoung(pte_t pte) +#ifndef pte_mkhuge +static inline pte_t pte_mkhuge(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_ACCESSED); + return __pte(pte_val(pte)); } +#endif -static inline pte_t pte_mkspecial(pte_t pte) +#ifndef pte_mkprivileged +static inline pte_t pte_mkprivileged(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_SPECIAL); + return __pte(pte_val(pte) & ~_PAGE_USER); } +#endif -static inline pte_t pte_mkhuge(pte_t pte) +#ifndef pte_mkuser +static inline 
pte_t pte_mkuser(pte_t pte) { - return __pte(pte_val(pte) | _PAGE_HUGE); + return __pte(pte_val(pte) | _PAGE_USER); } +#endif static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { @@ -197,6 +245,8 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre #if _PAGE_WRITETHRU != 0 #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ _PAGE_COHERENT | _PAGE_WRITETHRU)) +#else +#define pgprot_cached_wthru(prot) pgprot_noncached(prot) #endif #define pgprot_cached_noncoherent(prot) \ diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h index 12730b81cd98..dd40d200f274 100644 --- a/arch/powerpc/include/asm/nohash/pte-book3e.h +++ b/arch/powerpc/include/asm/nohash/pte-book3e.h @@ -77,7 +77,48 @@ #define _PMD_PRESENT 0 #define _PMD_PRESENT_MASK (PAGE_MASK) #define _PMD_BAD (~PAGE_MASK) +#define _PMD_USER 0 +#else +#define _PTE_NONE_MASK 0 +#endif + +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. + */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#if defined(CONFIG_SMP) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) +#else +#define _PAGE_BASE (_PAGE_BASE_NC) #endif +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + +#ifndef __ASSEMBLY__ +static inline pte_t pte_mkprivileged(pte_t pte) +{ + return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED); +} + +#define pte_mkprivileged pte_mkprivileged + +static inline pte_t pte_mkuser(pte_t pte) +{ + return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER); +} + +#define pte_mkuser pte_mkuser +#endif /* __ASSEMBLY__ */ + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */ diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 8365353330b4..870fb7b239ea 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -1050,6 +1050,7 @@ enum OpalSysCooling { enum { OPAL_REBOOT_NORMAL = 0, OPAL_REBOOT_PLATFORM_ERROR = 1, + OPAL_REBOOT_FULL_IPL = 2, }; /* Argument to OPAL_PCI_TCE_KILL */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index ad4f16164619..e843bc5d1a0f 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -113,7 +113,13 @@ struct paca_struct { * on the linear mapping */ /* SLB related definitions */ u16 vmalloc_sllp; - u16 slb_cache_ptr; + u8 slb_cache_ptr; + u8 stab_rr; /* stab/slb round-robin counter */ +#ifdef CONFIG_DEBUG_VM + u8 in_kernel_slb_handler; +#endif + u32 slb_used_bitmap; /* Bitmaps for first 32 SLB entries. 
*/ + u32 slb_kern_bitmap; u32 slb_cache[SLB_CACHE_ENTRIES]; #endif /* CONFIG_PPC_BOOK3S_64 */ @@ -160,7 +166,6 @@ struct paca_struct { */ struct task_struct *__current; /* Pointer to current */ u64 kstack; /* Saved Kernel stack addr */ - u64 stab_rr; /* stab/slb round-robin counter */ u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */ u64 saved_msr; /* MSR saved here by enter_rtas */ u16 trap_save; /* Used when bad stack is encountered */ @@ -250,6 +255,15 @@ struct paca_struct { #ifdef CONFIG_PPC_PSERIES u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */ #endif /* CONFIG_PPC_PSERIES */ + +#ifdef CONFIG_PPC_BOOK3S_64 + /* Capture SLB related old contents in MCE handler. */ + struct slb_entry *mce_faulty_slbs; + u16 slb_save_cache_ptr; +#endif /* CONFIG_PPC_BOOK3S_64 */ +#ifdef CONFIG_STACKPROTECTOR + unsigned long canary; +#endif } ____cacheline_aligned; extern void copy_mm_to_paca(struct mm_struct *mm); diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 14c79a7dc855..9679b7519a35 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -20,6 +20,25 @@ struct mm_struct; #include <asm/nohash/pgtable.h> #endif /* !CONFIG_PPC_BOOK3S */ +/* Note due to the way vm flags are laid out, the bits are XWR */ +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY_X +#define __P101 PAGE_READONLY_X +#define __P110 PAGE_COPY_X +#define __P111 PAGE_COPY_X + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY_X +#define __S101 PAGE_READONLY_X +#define __S110 PAGE_SHARED_X +#define __S111 PAGE_SHARED_X + #ifndef __ASSEMBLY__ #include <asm/tlbflush.h> @@ -27,6 +46,16 @@ struct mm_struct; /* Keep these as a macros to avoid include dependency mess */ #define pte_page(x) pfn_to_page(pte_pfn(x)) #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) +/* + * Select all bits except the pfn + */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pte_flags; + + pte_flags = pte_val(pte) & ~PTE_RPN_MASK; + return __pgprot(pte_flags); +} /* * ZERO_PAGE is a global shared page that is always zero: used diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h index 7f627e3f4da4..630eb8b1b7ed 100644 --- a/arch/powerpc/include/asm/pnv-pci.h +++ b/arch/powerpc/include/asm/pnv-pci.h @@ -54,7 +54,6 @@ void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs, struct pnv_php_slot { struct hotplug_slot slot; - struct hotplug_slot_info slot_info; uint64_t id; char *name; int slot_no; @@ -72,6 +71,7 @@ struct pnv_php_slot { struct pci_dev *pdev; struct pci_bus *bus; bool power_state_check; + u8 attention_state; void *fdt; void *dt; struct of_changeset ocs; diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 665af14850e4..6093bc8f74e5 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -104,6 +104,7 @@ #define OP_31_XOP_LHZUX 311 #define OP_31_XOP_MSGSNDP 142 #define OP_31_XOP_MSGCLRP 174 +#define OP_31_XOP_TLBIE 306 #define OP_31_XOP_MFSPR 339 #define OP_31_XOP_LWAX 341 #define OP_31_XOP_LHAX 343 diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index 726288048652..f67da277d652 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h @@ -58,6 +58,7 @@ void 
eeh_save_bars(struct eeh_dev *edev); int rtas_write_config(struct pci_dn *, int where, int size, u32 val); int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); void eeh_pe_state_mark(struct eeh_pe *pe, int state); +void eeh_pe_mark_isolated(struct eeh_pe *pe); void eeh_pe_state_clear(struct eeh_pe *pe, int state); void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state); void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode); diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 52fadded5c1e..7d04d60a39c9 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -32,9 +32,9 @@ /* Default SMT priority is set to 3. Use 11- 13bits to save priority. */ #define PPR_PRIORITY 3 #ifdef __ASSEMBLY__ -#define INIT_PPR (PPR_PRIORITY << 50) +#define DEFAULT_PPR (PPR_PRIORITY << 50) #else -#define INIT_PPR ((u64)PPR_PRIORITY << 50) +#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50) #endif /* __ASSEMBLY__ */ #endif /* CONFIG_PPC64 */ @@ -273,6 +273,7 @@ struct thread_struct { #endif /* CONFIG_HAVE_HW_BREAKPOINT */ struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */ unsigned long trap_nr; /* last trap # on this thread */ + u8 load_slb; /* Ages out SLB preload cache entries */ u8 load_fp; #ifdef CONFIG_ALTIVEC u8 load_vec; @@ -341,7 +342,6 @@ struct thread_struct { * onwards. */ int dscr_inherit; - unsigned long ppr; /* used to save/restore SMT priority */ unsigned long tidr; #endif #ifdef CONFIG_PPC_BOOK3S_64 @@ -389,7 +389,6 @@ struct thread_struct { .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ .addr_limit = KERNEL_DS, \ .fpexc_mode = 0, \ - .ppr = INIT_PPR, \ .fscr = FSCR_TAR | FSCR_EBB \ } #endif diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h deleted file mode 100644 index bef56141a549..000000000000 --- a/arch/powerpc/include/asm/pte-common.h +++ /dev/null @@ -1,219 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Included from asm/pgtable-*.h only ! */ - -/* - * Some bits are only used on some cpu families... 
Make sure that all - * the undefined gets a sensible default - */ -#ifndef _PAGE_HASHPTE -#define _PAGE_HASHPTE 0 -#endif -#ifndef _PAGE_HWWRITE -#define _PAGE_HWWRITE 0 -#endif -#ifndef _PAGE_EXEC -#define _PAGE_EXEC 0 -#endif -#ifndef _PAGE_ENDIAN -#define _PAGE_ENDIAN 0 -#endif -#ifndef _PAGE_COHERENT -#define _PAGE_COHERENT 0 -#endif -#ifndef _PAGE_WRITETHRU -#define _PAGE_WRITETHRU 0 -#endif -#ifndef _PAGE_4K_PFN -#define _PAGE_4K_PFN 0 -#endif -#ifndef _PAGE_SAO -#define _PAGE_SAO 0 -#endif -#ifndef _PAGE_PSIZE -#define _PAGE_PSIZE 0 -#endif -/* _PAGE_RO and _PAGE_RW shall not be defined at the same time */ -#ifndef _PAGE_RO -#define _PAGE_RO 0 -#else -#define _PAGE_RW 0 -#endif - -#ifndef _PAGE_PTE -#define _PAGE_PTE 0 -#endif -/* At least one of _PAGE_PRIVILEGED or _PAGE_USER must be defined */ -#ifndef _PAGE_PRIVILEGED -#define _PAGE_PRIVILEGED 0 -#else -#ifndef _PAGE_USER -#define _PAGE_USER 0 -#endif -#endif -#ifndef _PAGE_NA -#define _PAGE_NA 0 -#endif -#ifndef _PAGE_HUGE -#define _PAGE_HUGE 0 -#endif - -#ifndef _PMD_PRESENT_MASK -#define _PMD_PRESENT_MASK _PMD_PRESENT -#endif -#ifndef _PMD_USER -#define _PMD_USER 0 -#endif -#ifndef _PAGE_KERNEL_RO -#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_RO) -#endif -#ifndef _PAGE_KERNEL_ROX -#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_RO | _PAGE_EXEC) -#endif -#ifndef _PAGE_KERNEL_RW -#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \ - _PAGE_HWWRITE) -#endif -#ifndef _PAGE_KERNEL_RWX -#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \ - _PAGE_HWWRITE | _PAGE_EXEC) -#endif -#ifndef _PAGE_HPTEFLAGS -#define _PAGE_HPTEFLAGS _PAGE_HASHPTE -#endif -#ifndef _PTE_NONE_MASK -#define _PTE_NONE_MASK _PAGE_HPTEFLAGS -#endif - -#ifndef __ASSEMBLY__ - -/* - * Don't just check for any non zero bits in __PAGE_USER, since for book3e - * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in - * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too. - */ -static inline bool pte_user(pte_t pte) -{ - return (pte_val(pte) & (_PAGE_USER | _PAGE_PRIVILEGED)) == _PAGE_USER; -} -#endif /* __ASSEMBLY__ */ - -/* Location of the PFN in the PTE. Most 32-bit platforms use the same - * as _PAGE_SHIFT here (ie, naturally aligned). - * Platform who don't just pre-define the value so we don't override it here - */ -#ifndef PTE_RPN_SHIFT -#define PTE_RPN_SHIFT (PAGE_SHIFT) -#endif - -/* The mask covered by the RPN must be a ULL on 32-bit platforms with - * 64-bit PTEs - */ -#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) -#define PTE_RPN_MASK (~((1ULL<<PTE_RPN_SHIFT)-1)) -#else -#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1)) -#endif - -/* _PAGE_CHG_MASK masks of bits that are to be preserved across - * pgprot changes - */ -#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_SPECIAL) - -/* Mask of bits returned by pte_pgprot() */ -#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ - _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \ - _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | _PAGE_NA | \ - _PAGE_PRIVILEGED | \ - _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC) - -/* - * We define 2 sets of base prot bits, one for basic pages (ie, - * cacheable kernel and user pages) and one for non cacheable - * pages. We always set _PAGE_COHERENT when SMP is enabled or - * the processor might need it for DMA coherency. 
- */ -#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) -#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) || \ - defined(CONFIG_PPC_E500MC) -#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) -#else -#define _PAGE_BASE (_PAGE_BASE_NC) -#endif - -/* Permission masks used to generate the __P and __S table, - * - * Note:__pgprot is defined in arch/powerpc/include/asm/page.h - * - * Write permissions imply read permissions for now (we could make write-only - * pages on BookE but we don't bother for now). Execute permission control is - * possible on platforms that define _PAGE_EXEC - * - * Note due to the way vm flags are laid out, the bits are XWR - */ -#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA) -#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \ - _PAGE_EXEC) -#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO) -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \ - _PAGE_EXEC) -#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO) -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \ - _PAGE_EXEC) - -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_X -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY_X -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_X -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED_X -#define __S111 PAGE_SHARED_X - -/* Permission masks used for kernel mappings */ -#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) -#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NO_CACHE) -#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NO_CACHE | _PAGE_GUARDED) -#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) -#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) -#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) - -/* Protection used for kernel text. We want the debuggers to be able to - * set breakpoints anywhere, so don't write protect the kernel text - * on platforms where such control is possible. - */ -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ - defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) -#define PAGE_KERNEL_TEXT PAGE_KERNEL_X -#else -#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX -#endif - -/* Make modules code happy. 
We don't set RO yet */ -#define PAGE_KERNEL_EXEC PAGE_KERNEL_X - -/* Advertise special mapping type for AGP */ -#define PAGE_AGP (PAGE_KERNEL_NC) -#define HAVE_PAGE_AGP - -#ifndef _PAGE_READ -/* if not defined, we should not find _PAGE_WRITE too */ -#define _PAGE_READ 0 -#define _PAGE_WRITE _PAGE_RW -#endif - -#ifndef H_PAGE_4K_PFN -#define H_PAGE_4K_PFN 0 -#endif diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 447cbd1bee99..f73886a1a7f5 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -26,6 +26,37 @@ #include <uapi/asm/ptrace.h> #include <asm/asm-const.h> +#ifndef __ASSEMBLY__ +struct pt_regs +{ + union { + struct user_pt_regs user_regs; + struct { + unsigned long gpr[32]; + unsigned long nip; + unsigned long msr; + unsigned long orig_gpr3; + unsigned long ctr; + unsigned long link; + unsigned long xer; + unsigned long ccr; +#ifdef CONFIG_PPC64 + unsigned long softe; +#else + unsigned long mq; +#endif + unsigned long trap; + unsigned long dar; + unsigned long dsisr; + unsigned long result; + }; + }; + +#ifdef CONFIG_PPC64 + unsigned long ppr; +#endif +}; +#endif #ifdef __powerpc64__ @@ -102,6 +133,11 @@ static inline long regs_return_value(struct pt_regs *regs) return -regs->gpr[3]; } +static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) +{ + regs->gpr[3] = rc; +} + #ifdef __powerpc64__ #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1) #else @@ -149,7 +185,7 @@ do { \ #define arch_has_single_step() (1) #define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601)) -#define ARCH_HAS_USER_SINGLE_STEP_INFO +#define ARCH_HAS_USER_SINGLE_STEP_REPORT /* * kprobe-based event tracer support diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e5b314ed054e..de52c3166ba4 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -118,11 +118,16 @@ #define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */ #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ -#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */ #define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */ #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? 
*/ +#else +#define MSR_TM_ACTIVE(x) 0 +#endif + #if defined(CONFIG_PPC_BOOK3S_64) #define MSR_64BIT MSR_SF @@ -415,6 +420,7 @@ #define HFSCR_DSCR __MASK(FSCR_DSCR_LG) #define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG) #define HFSCR_FP __MASK(FSCR_FP_LG) +#define HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */ #define SPRN_TAR 0x32f /* Target Address Register */ #define SPRN_LPCR 0x13E /* LPAR Control Register */ #define LPCR_VPM0 ASM_CONST(0x8000000000000000) @@ -766,6 +772,7 @@ #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ #define HSRR1_DENORM 0x00100000 /* Denorm exception */ +#define HSRR1_HISI_WRITE 0x00010000 /* HISI bcs couldn't update mem */ #define SPRN_TBCTL 0x35f /* PA6T Timebase control register */ #define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 71e393c46a49..bb38dd67d47d 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -125,6 +125,7 @@ struct rtas_suspend_me_data { #define RTAS_TYPE_INFO 0xE2 #define RTAS_TYPE_DEALLOC 0xE3 #define RTAS_TYPE_DUMP 0xE4 +#define RTAS_TYPE_HOTPLUG 0xE5 /* I don't add PowerMGM events right now, this is a different topic */ #define RTAS_TYPE_PMGM_POWER_SW_ON 0x60 #define RTAS_TYPE_PMGM_POWER_SW_OFF 0x61 @@ -185,11 +186,23 @@ static inline uint8_t rtas_error_disposition(const struct rtas_error_log *elog) return (elog->byte1 & 0x18) >> 3; } +static inline +void rtas_set_disposition_recovered(struct rtas_error_log *elog) +{ + elog->byte1 &= ~0x18; + elog->byte1 |= (RTAS_DISP_FULLY_RECOVERED << 3); +} + static inline uint8_t rtas_error_extended(const struct rtas_error_log *elog) { return (elog->byte1 & 0x04) >> 2; } +static inline uint8_t rtas_error_initiator(const struct rtas_error_log *elog) +{ + return (elog->byte2 & 0xf0) >> 4; +} + #define rtas_error_type(x) ((x)->byte3) static inline @@ -275,6 +288,7 @@ inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log) #define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H') #define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D') #define PSERIES_ELOG_SECT_ID_HOTPLUG (('H' << 8) | 'P') +#define PSERIES_ELOG_SECT_ID_MCE (('M' << 8) | 'C') /* Vendor specific Platform Event Log Format, Version 6, section header */ struct pseries_errorlog { @@ -316,6 +330,7 @@ struct pseries_hp_errorlog { #define PSERIES_HP_ELOG_RESOURCE_MEM 2 #define PSERIES_HP_ELOG_RESOURCE_SLOT 3 #define PSERIES_HP_ELOG_RESOURCE_PHB 4 +#define PSERIES_HP_ELOG_RESOURCE_PMEM 6 #define PSERIES_HP_ELOG_ACTION_ADD 1 #define PSERIES_HP_ELOG_ACTION_REMOVE 2 diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h index e40406cf5628..a595461c9cb0 100644 --- a/arch/powerpc/include/asm/slice.h +++ b/arch/powerpc/include/asm/slice.h @@ -32,6 +32,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start, unsigned long len, unsigned int psize); void slice_init_new_context_exec(struct mm_struct *mm); +void slice_setup_new_exec(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 95b66a0c639b..41695745032c 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -100,6 +100,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys) DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map); DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map); DECLARE_PER_CPU(cpumask_var_t, cpu_core_map); 
+DECLARE_PER_CPU(cpumask_var_t, cpu_smallcore_map); static inline struct cpumask *cpu_sibling_mask(int cpu) { @@ -116,6 +117,11 @@ static inline struct cpumask *cpu_l2_cache_mask(int cpu) return per_cpu(cpu_l2_cache_map, cpu); } +static inline struct cpumask *cpu_smallcore_mask(int cpu) +{ + return per_cpu(cpu_smallcore_map, cpu); +} + extern int cpu_to_core_id(int cpu); /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. @@ -166,6 +172,11 @@ static inline const struct cpumask *cpu_sibling_mask(int cpu) return cpumask_of(cpu); } +static inline const struct cpumask *cpu_smallcore_mask(int cpu) +{ + return cpumask_of(cpu); +} + #endif /* CONFIG_SMP */ #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h index 28f5dae25db6..68da49320592 100644 --- a/arch/powerpc/include/asm/sparsemem.h +++ b/arch/powerpc/include/asm/sparsemem.h @@ -9,17 +9,6 @@ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space */ #define SECTION_SIZE_BITS 24 -/* - * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS - * if we increase SECTIONS_WIDTH we will not store node details in page->flags and - * page_to_nid does a page->section->node lookup - * Hence only increase for VMEMMAP. - */ -#ifdef CONFIG_SPARSEMEM_VMEMMAP -#define MAX_PHYSMEM_BITS 47 -#else -#define MAX_PHYSMEM_BITS 46 -#endif #endif /* CONFIG_SPARSEMEM */ diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h new file mode 100644 index 000000000000..1c8460e23583 --- /dev/null +++ b/arch/powerpc/include/asm/stackprotector.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GCC stack protector support. + * + */ + +#ifndef _ASM_STACKPROTECTOR_H +#define _ASM_STACKPROTECTOR_H + +#include <linux/random.h> +#include <linux/version.h> +#include <asm/reg.h> +#include <asm/current.h> +#include <asm/paca.h> + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. */ + canary = get_random_canary(); + canary ^= mftb(); + canary ^= LINUX_VERSION_CODE; + canary &= CANARY_MASK; + + current->stack_canary = canary; +#ifdef CONFIG_PPC64 + get_paca()->canary = canary; +#endif +} + +#endif /* _ASM_STACKPROTECTOR_H */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 3c0002044bc9..544cac0474cb 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -29,6 +29,7 @@ #include <asm/page.h> #include <asm/accounting.h> +#define SLB_PRELOAD_NR 16U /* * low level task data. 
*/ @@ -44,6 +45,10 @@ struct thread_info { #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC32) struct cpu_accounting_data accounting; #endif + unsigned char slb_preload_nr; + unsigned char slb_preload_tail; + u32 slb_preload_esid[SLB_PRELOAD_NR]; + /* low level flags - has atomic operations done on it */ unsigned long flags ____cacheline_aligned_in_smp; }; @@ -72,6 +77,12 @@ static inline struct thread_info *current_thread_info(void) } extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); + +#ifdef CONFIG_PPC_BOOK3S_64 +void arch_setup_new_exec(void); +#define arch_setup_new_exec arch_setup_new_exec +#endif + #endif /* __ASSEMBLY__ */ /* @@ -81,7 +92,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_FSCHECK 3 /* Check FS is USER_DS on return */ -#define TIF_32BIT 4 /* 32 bit binary */ +#define TIF_SYSCALL_EMU 4 /* syscall emulation active */ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_PATCH_PENDING 6 /* pending live patching update */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ @@ -100,6 +111,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src #define TIF_ELF2ABI 18 /* function descriptors must die! */ #endif #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ +#define TIF_32BIT 20 /* 32 bit binary */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) @@ -120,9 +132,10 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) #define _TIF_NOHZ (1<<TIF_NOHZ) #define _TIF_FSCHECK (1<<TIF_FSCHECK) +#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU) #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ - _TIF_NOHZ) + _TIF_NOHZ | _TIF_SYSCALL_EMU) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h index d018e8602694..58ef8c43a89d 100644 --- a/arch/powerpc/include/asm/trace.h +++ b/arch/powerpc/include/asm/trace.h @@ -201,6 +201,21 @@ TRACE_EVENT(tlbie, __entry->r) ); +TRACE_EVENT(tlbia, + + TP_PROTO(unsigned long id), + TP_ARGS(id), + TP_STRUCT__entry( + __field(unsigned long, id) + ), + + TP_fast_assign( + __entry->id = id; + ), + + TP_printk("ctx.id=0x%lx", __entry->id) +); + #endif /* _TRACE_POWERPC_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index bac225bb7f64..15bea9a0f260 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -260,7 +260,7 @@ do { \ ({ \ long __gu_err; \ __long_type(*(ptr)) __gu_val; \ - const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __chk_user_ptr(ptr); \ if (!is_kernel_addr((unsigned long)__gu_addr)) \ might_fault(); \ @@ -274,7 +274,7 @@ do { \ ({ \ long __gu_err = -EFAULT; \ __long_type(*(ptr)) __gu_val = 0; \ - const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ might_fault(); \ if (access_ok(VERIFY_READ, __gu_addr, (size))) { \ barrier_nospec(); \ @@ -288,7 +288,7 @@ do { \ ({ \ long __gu_err; \ __long_type(*(ptr)) __gu_val; \ - const 
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __chk_user_ptr(ptr); \ barrier_nospec(); \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index c19379f0a32e..b0de85b477e1 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -22,6 +22,7 @@ #include <linux/compiler.h> #include <linux/linkage.h> +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM @@ -35,7 +36,6 @@ #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLD_GETRLIMIT #define __ARCH_WANT_SYS_OLD_UNAME @@ -47,6 +47,7 @@ #endif #ifdef CONFIG_PPC64 #define __ARCH_WANT_COMPAT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_NEWFSTATAT #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif diff --git a/arch/powerpc/include/asm/user.h b/arch/powerpc/include/asm/user.h index 5c0e082eae7b..99443b8594e7 100644 --- a/arch/powerpc/include/asm/user.h +++ b/arch/powerpc/include/asm/user.h @@ -31,7 +31,7 @@ * to write an integer number of pages. */ struct user { - struct pt_regs regs; /* entire machine state */ + struct user_pt_regs regs; /* entire machine state */ size_t u_tsize; /* text size (pages) */ size_t u_dsize; /* data size (pages) */ size_t u_ssize; /* stack size (pages) */ diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild index 1a6ed5919ffd..a658091a19f9 100644 --- a/arch/powerpc/include/uapi/asm/Kbuild +++ b/arch/powerpc/include/uapi/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += poll.h generic-y += resource.h generic-y += sockios.h generic-y += statfs.h +generic-y += siginfo.h diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 1b32b56a03d3..8c876c166ef2 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -634,6 +634,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) #define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) +#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h index 5e3edc2a7634..f5f1ccc740fc 100644 --- a/arch/powerpc/include/uapi/asm/ptrace.h +++ b/arch/powerpc/include/uapi/asm/ptrace.h @@ -29,7 +29,12 @@ #ifndef __ASSEMBLY__ -struct pt_regs { +#ifdef __KERNEL__ +struct user_pt_regs +#else +struct pt_regs +#endif +{ unsigned long gpr[32]; unsigned long nip; unsigned long msr; @@ -160,6 +165,10 @@ struct pt_regs { #define PTRACE_GETVSRREGS 0x1b #define PTRACE_SETVSRREGS 0x1c +/* Syscall emulation defines */ +#define PTRACE_SYSEMU 0x1d +#define PTRACE_SYSEMU_SINGLESTEP 0x1e + /* * Get or set a debug register. The first 16 are DABR registers and the * second 16 are IABR registers. 
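The PTRACE_SYSEMU and PTRACE_SYSEMU_SINGLESTEP requests added above pair with the TIF_SYSCALL_EMU flag and the regs_set_return_value() hunks earlier in this diff: a SYSEMU-traced task stops at syscall entry and the kernel skips the call, so the tracer can emulate it and supply the result itself. The following is only an illustrative userspace sketch, not part of the patch; it assumes the 0x1d request value defined above and the usual powerpc user-area offsets (gpr[0] holds the syscall number, gpr[3] the return value), with error handling omitted.

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_SYSEMU
#define PTRACE_SYSEMU 0x1d	/* request value introduced by this patch */
#endif

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the parent take control */
		getpid();			/* any syscall; the tracer will emulate it */
		_exit(0);
	}

	waitpid(child, NULL, 0);		/* child stopped by SIGSTOP */
	ptrace(PTRACE_SYSEMU, child, NULL, NULL);
	waitpid(child, NULL, 0);		/* stopped at syscall entry, call not executed */

	/* gpr[0] is the syscall number; offset 0 in the user area (PT_R0) */
	long scno = ptrace(PTRACE_PEEKUSER, child, (void *)(0 * sizeof(long)), NULL);
	printf("intercepted syscall %ld\n", scno);

	/* fake a return value in gpr[3], mirroring what regs_set_return_value() does kernel-side */
	ptrace(PTRACE_POKEUSER, child, (void *)(3 * sizeof(long)), (void *)42L);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}

PTRACE_SYSEMU_SINGLESTEP behaves the same at the entry stop but then single-steps, which is what the _TIF_SYSCALL_EMU bit added to _TIF_SYSCALL_DOTRACE in the thread_info.h hunk wires into the syscall entry path.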
diff --git a/arch/powerpc/include/uapi/asm/sigcontext.h b/arch/powerpc/include/uapi/asm/sigcontext.h index 2fbe485acdb4..630aeda56d59 100644 --- a/arch/powerpc/include/uapi/asm/sigcontext.h +++ b/arch/powerpc/include/uapi/asm/sigcontext.h @@ -22,7 +22,11 @@ struct sigcontext { #endif unsigned long handler; unsigned long oldmask; - struct pt_regs __user *regs; +#ifdef __KERNEL__ + struct user_pt_regs __user *regs; +#else + struct pt_regs *regs; +#endif #ifdef __powerpc64__ elf_gregset_t gp_regs; elf_fpregset_t fp_regs; diff --git a/arch/powerpc/include/uapi/asm/siginfo.h b/arch/powerpc/include/uapi/asm/siginfo.h deleted file mode 100644 index 1d51d9b88221..000000000000 --- a/arch/powerpc/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ -#ifndef _ASM_POWERPC_SIGINFO_H -#define _ASM_POWERPC_SIGINFO_H - -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifdef __powerpc64__ -# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#endif - -#include <asm-generic/siginfo.h> - -#endif /* _ASM_POWERPC_SIGINFO_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 3b66f2c19c84..53d4b8d5b54d 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -5,7 +5,8 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror +# Disable clang warning for using setjmp without setjmp.h header +CFLAGS_crash.o += $(call cc-disable-warning, builtin-requires-header) ifdef CONFIG_PPC64 CFLAGS_prom_init.o += $(NO_MINIMAL_TOC) @@ -20,12 +21,14 @@ CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) +CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector) + ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code -CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_cputable.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_prom_init.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE) endif obj-y := cputable.o ptrace.o syscalls.o \ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 89cf15566c4e..9ffc72ded73a 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -79,11 +79,16 @@ int main(void) { OFFSET(THREAD, task_struct, thread); OFFSET(MM, task_struct, mm); +#ifdef CONFIG_STACKPROTECTOR + OFFSET(TASK_CANARY, task_struct, stack_canary); +#ifdef CONFIG_PPC64 + OFFSET(PACA_CANARY, paca_struct, canary); +#endif +#endif OFFSET(MMCONTEXTID, mm_struct, context.id); #ifdef CONFIG_PPC64 DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); - OFFSET(TASKTHREADPPR, task_struct, thread.ppr); #else OFFSET(THREAD_INFO, task_struct, stack); DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16)); @@ -173,7 +178,6 @@ int main(void) OFFSET(PACAKSAVE, paca_struct, kstack); OFFSET(PACACURRENT, paca_struct, __current); OFFSET(PACASAVEDMSR, paca_struct, saved_msr); - OFFSET(PACASTABRR, paca_struct, stab_rr); 
OFFSET(PACAR1, paca_struct, saved_r1); OFFSET(PACATOC, paca_struct, kernel_toc); OFFSET(PACAKBASE, paca_struct, kernelbase); @@ -212,6 +216,7 @@ int main(void) #ifdef CONFIG_PPC_BOOK3S_64 OFFSET(PACASLBCACHE, paca_struct, slb_cache); OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr); + OFFSET(PACASTABRR, paca_struct, stab_rr); OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp); #ifdef CONFIG_PPC_MM_SLICES OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp); @@ -274,11 +279,6 @@ int main(void) /* Interrupt register frame */ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); -#ifdef CONFIG_PPC64 - /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */ - DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); - DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); -#endif /* CONFIG_PPC64 */ STACK_PT_REGS_OFFSET(GPR0, gpr[0]); STACK_PT_REGS_OFFSET(GPR1, gpr[1]); STACK_PT_REGS_OFFSET(GPR2, gpr[2]); @@ -322,10 +322,7 @@ int main(void) STACK_PT_REGS_OFFSET(_ESR, dsisr); #else /* CONFIG_PPC64 */ STACK_PT_REGS_OFFSET(SOFTE, softe); - - /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */ - DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)); - DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); + STACK_PT_REGS_OFFSET(_PPR, ppr); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_PPC32) @@ -387,12 +384,12 @@ int main(void) OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64); OFFSET(TVAL64_TV_SEC, timeval, tv_sec); OFFSET(TVAL64_TV_USEC, timeval, tv_usec); - OFFSET(TVAL32_TV_SEC, compat_timeval, tv_sec); - OFFSET(TVAL32_TV_USEC, compat_timeval, tv_usec); + OFFSET(TVAL32_TV_SEC, old_timeval32, tv_sec); + OFFSET(TVAL32_TV_USEC, old_timeval32, tv_usec); OFFSET(TSPC64_TV_SEC, timespec, tv_sec); OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec); - OFFSET(TSPC32_TV_SEC, compat_timespec, tv_sec); - OFFSET(TSPC32_TV_NSEC, compat_timespec, tv_nsec); + OFFSET(TSPC32_TV_SEC, old_timespec32, tv_sec); + OFFSET(TSPC32_TV_NSEC, old_timespec32, tv_nsec); #else OFFSET(TVAL32_TV_SEC, timeval, tv_sec); OFFSET(TVAL32_TV_USEC, timeval, tv_usec); @@ -438,7 +435,7 @@ int main(void) #ifdef CONFIG_PPC_BOOK3S OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); #endif - OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); @@ -503,6 +500,7 @@ int main(void) OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr); OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty); OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst); + OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested); OFFSET(VCPU_CPU, kvm_vcpu, cpu); OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu); #endif @@ -695,7 +693,7 @@ int main(void) #endif /* CONFIG_PPC_BOOK3S_64 */ #else /* CONFIG_PPC_BOOK3S */ - OFFSET(VCPU_CR, kvm_vcpu, arch.cr); + OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr); diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index b2072d5bbf2b..b4241ed1456e 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -163,7 +163,7 @@ void btext_map(void) offset = ((unsigned long) dispDeviceBase) - base; size = dispDeviceRowBytes * dispDeviceRect[3] + offset + dispDeviceRect[0]; - vbase = __ioremap(base, size, pgprot_val(pgprot_noncached_wc(__pgprot(0)))); + 
vbase = ioremap_wc(base, size); if (!vbase) return; logicalDisplayBase = vbase + offset; diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index a8f20e5928e1..be57bd07596d 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -20,6 +20,8 @@ #include <linux/percpu.h> #include <linux/slab.h> #include <asm/prom.h> +#include <asm/cputhreads.h> +#include <asm/smp.h> #include "cacheinfo.h" @@ -627,17 +629,48 @@ static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char * static struct kobj_attribute cache_level_attr = __ATTR(level, 0444, level_show, NULL); +static unsigned int index_dir_to_cpu(struct cache_index_dir *index) +{ + struct kobject *index_dir_kobj = &index->kobj; + struct kobject *cache_dir_kobj = index_dir_kobj->parent; + struct kobject *cpu_dev_kobj = cache_dir_kobj->parent; + struct device *dev = kobj_to_dev(cpu_dev_kobj); + + return dev->id; +} + +/* + * On big-core systems, each core has two groups of CPUs each of which + * has its own L1-cache. The thread-siblings which share l1-cache with + * @cpu can be obtained via cpu_smallcore_mask(). + */ +static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache) +{ + if (cache->level == 1) + return cpu_smallcore_mask(cpu); + + return &cache->shared_cpu_map; +} + static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf) { struct cache_index_dir *index; struct cache *cache; - int ret; + const struct cpumask *mask; + int ret, cpu; index = kobj_to_cache_index_dir(k); cache = index->cache; + if (has_big_cores) { + cpu = index_dir_to_cpu(index); + mask = get_big_core_shared_cpu_map(cpu, cache); + } else { + mask = &cache->shared_cpu_map; + } + ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n", - cpumask_pr_args(&cache->shared_cpu_map)); + cpumask_pr_args(mask)); buf[ret++] = '\n'; buf[ret] = '\0'; return ret; diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 458b928dbd84..c317080db771 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -147,8 +147,8 @@ __init_hvmode_206: rldicl. 
r0,r3,4,63 bnelr ld r5,CPU_SPEC_FEATURES(r4) - LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE) - xor r5,r5,r6 + LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST) + andc r5,r5,r6 std r5,CPU_SPEC_FEATURES(r4) blr diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index d10ad258d41a..bbdc4706c159 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -110,7 +110,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, vaddr = __va(paddr); csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); } else { - vaddr = __ioremap(paddr, PAGE_SIZE, 0); + vaddr = ioremap_cache(paddr, PAGE_SIZE); csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); iounmap(vaddr); } diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 88f3963ca30f..5fc335f4d9cd 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -11,7 +11,7 @@ * */ -#include <linux/dma-mapping.h> +#include <linux/dma-direct.h> #include <linux/memblock.h> #include <linux/pfn.h> #include <linux/of_platform.h> @@ -59,7 +59,7 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = { .sync_single_for_device = swiotlb_sync_single_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, - .mapping_error = swiotlb_dma_mapping_error, + .mapping_error = dma_direct_mapping_error, .get_required_mask = swiotlb_powerpc_get_required, }; diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 6ebba3e48b01..6cae6b56ffd6 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -169,6 +169,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) int n = 0, l = 0; char buffer[128]; + if (!pdn) { + pr_warn("EEH: Note: No error log for absent device.\n"); + return 0; + } + n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n", pdn->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); @@ -399,7 +404,7 @@ static int eeh_phb_check_failure(struct eeh_pe *pe) } /* Isolate the PHB and send event */ - eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); + eeh_pe_mark_isolated(phb_pe); eeh_serialize_unlock(flags); pr_err("EEH: PHB#%x failure detected, location: %s\n", @@ -558,7 +563,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) * with other functions on this device, and functions under * bridges. */ - eeh_pe_state_mark(pe, EEH_PE_ISOLATED); + eeh_pe_mark_isolated(pe); eeh_serialize_unlock(flags); /* Most EEH events are due to device driver bugs. 
Having @@ -676,7 +681,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) /* Check if the request is finished successfully */ if (active_flag) { - rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); + rc = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); if (rc < 0) return rc; @@ -825,7 +830,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat eeh_pe_state_clear(pe, EEH_PE_ISOLATED); break; case pcie_hot_reset: - eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED); + eeh_pe_mark_isolated(pe); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); if (!(pe->type & EEH_PE_VF)) @@ -833,7 +839,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat eeh_ops->reset(pe, EEH_RESET_HOT); break; case pcie_warm_reset: - eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED); + eeh_pe_mark_isolated(pe); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); if (!(pe->type & EEH_PE_VF)) @@ -913,16 +920,15 @@ int eeh_pe_reset_full(struct eeh_pe *pe) break; /* Wait until the PE is in a functioning state */ - state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); - if (eeh_state_active(state)) - break; - + state = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); if (state < 0) { pr_warn("%s: Unrecoverable slot failure on PHB#%x-PE#%x", __func__, pe->phb->global_number, pe->addr); ret = -ENOTRECOVERABLE; break; } + if (eeh_state_active(state)) + break; /* Set error in case this is our last attempt */ ret = -EIO; @@ -1036,6 +1042,11 @@ void eeh_probe_devices(void) pdn = hose->pci_data; traverse_pci_dn(pdn, eeh_ops->probe, NULL); } + if (eeh_enabled()) + pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); + else + pr_info("EEH: No capable adapters found\n"); + } /** @@ -1079,18 +1090,7 @@ static int eeh_init(void) eeh_dev_phb_init_dynamic(hose); /* Initialize EEH event */ - ret = eeh_event_init(); - if (ret) - return ret; - - eeh_probe_devices(); - - if (eeh_enabled()) - pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); - else if (!eeh_has_flag(EEH_POSTPONED_PROBE)) - pr_info("EEH: No capable adapters found\n"); - - return ret; + return eeh_event_init(); } core_initcall_sync(eeh_init); diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c index a34e6912c15e..d8c90f3284b5 100644 --- a/arch/powerpc/kernel/eeh_dev.c +++ b/arch/powerpc/kernel/eeh_dev.c @@ -60,8 +60,6 @@ struct eeh_dev *eeh_dev_init(struct pci_dn *pdn) /* Associate EEH device with OF node */ pdn->edev = edev; edev->pdn = pdn; - INIT_LIST_HEAD(&edev->list); - INIT_LIST_HEAD(&edev->rmv_list); return edev; } diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 67619b4b3f96..9446248eb6b8 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -35,8 +35,8 @@ #include <asm/rtas.h> struct eeh_rmv_data { - struct list_head edev_list; - int removed; + struct list_head removed_vf_list; + int removed_dev_count; }; static int eeh_result_priority(enum pci_ers_result result) @@ -281,6 +281,10 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn, struct pci_driver *driver; enum pci_ers_result new_result; + if (!edev->pdev) { + eeh_edev_info(edev, "no device"); + return; + } device_lock(&edev->pdev->dev); if (eeh_edev_actionable(edev)) { driver = eeh_pcid_get(edev->pdev); @@ -400,7 +404,7 @@ 
static void *eeh_dev_restore_state(struct eeh_dev *edev, void *userdata) * EEH device is created. */ if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) { - if (list_is_last(&edev->list, &edev->pe->edevs)) + if (list_is_last(&edev->entry, &edev->pe->edevs)) eeh_pe_restore_bars(edev->pe); return NULL; @@ -465,10 +469,9 @@ static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev, return rc; } -static void *eeh_add_virt_device(void *data, void *userdata) +static void *eeh_add_virt_device(struct eeh_dev *edev) { struct pci_driver *driver; - struct eeh_dev *edev = (struct eeh_dev *)data; struct pci_dev *dev = eeh_dev_to_pci_dev(edev); struct pci_dn *pdn = eeh_dev_to_pdn(edev); @@ -499,7 +502,6 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) struct pci_driver *driver; struct pci_dev *dev = eeh_dev_to_pci_dev(edev); struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata; - int *removed = rmv_data ? &rmv_data->removed : NULL; /* * Actually, we should remove the PCI bridges as well. @@ -521,7 +523,7 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) if (eeh_dev_removed(edev)) return NULL; - if (removed) { + if (rmv_data) { if (eeh_pe_passed(edev->pe)) return NULL; driver = eeh_pcid_get(dev); @@ -539,10 +541,9 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) /* Remove it from PCI subsystem */ pr_debug("EEH: Removing %s without EEH sensitive driver\n", pci_name(dev)); - edev->bus = dev->bus; edev->mode |= EEH_DEV_DISCONNECTED; - if (removed) - (*removed)++; + if (rmv_data) + rmv_data->removed_dev_count++; if (edev->physfn) { #ifdef CONFIG_PCI_IOV @@ -558,7 +559,7 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) pdn->pe_number = IODA_INVALID_PE; #endif if (rmv_data) - list_add(&edev->rmv_list, &rmv_data->edev_list); + list_add(&edev->rmv_entry, &rmv_data->removed_vf_list); } else { pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(dev); @@ -727,7 +728,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * the device up before the scripts have taken it down, * potentially weird things happen. */ - if (!driver_eeh_aware || rmv_data->removed) { + if (!driver_eeh_aware || rmv_data->removed_dev_count) { pr_info("EEH: Sleep 5s ahead of %s hotplug\n", (driver_eeh_aware ? "partial" : "complete")); ssleep(5); @@ -737,10 +738,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * PE. We should disconnect it so the binding can be * rebuilt when adding PCI devices. 
*/ - edev = list_first_entry(&pe->edevs, struct eeh_dev, list); + edev = list_first_entry(&pe->edevs, struct eeh_dev, entry); eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL); if (pe->type & EEH_PE_VF) { - eeh_add_virt_device(edev, NULL); + eeh_add_virt_device(edev); } else { if (!driver_eeh_aware) eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); @@ -789,7 +790,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe) struct eeh_pe *tmp_pe; int rc = 0; enum pci_ers_result result = PCI_ERS_RESULT_NONE; - struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0}; + struct eeh_rmv_data rmv_data = + {LIST_HEAD_INIT(rmv_data.removed_vf_list), 0}; bus = eeh_pe_bus_get(pe); if (!bus) { @@ -806,10 +808,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe) pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n", pe->phb->global_number, pe->addr, pe->freeze_count); - goto hard_fail; + result = PCI_ERS_RESULT_DISCONNECT; } - pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n", - pe->freeze_count, eeh_max_freezes); /* Walk the various device drivers attached to this slot through * a reset sequence, giving each an opportunity to do what it needs @@ -821,31 +821,39 @@ void eeh_handle_normal_event(struct eeh_pe *pe) * the error. Override the result if necessary to have partially * hotplug for this case. */ - pr_info("EEH: Notify device drivers to shutdown\n"); - eeh_set_channel_state(pe, pci_channel_io_frozen); - eeh_set_irq_state(pe, false); - eeh_pe_report("error_detected(IO frozen)", pe, eeh_report_error, - &result); - if ((pe->type & EEH_PE_PHB) && - result != PCI_ERS_RESULT_NONE && - result != PCI_ERS_RESULT_NEED_RESET) - result = PCI_ERS_RESULT_NEED_RESET; + if (result != PCI_ERS_RESULT_DISCONNECT) { + pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n", + pe->freeze_count, eeh_max_freezes); + pr_info("EEH: Notify device drivers to shutdown\n"); + eeh_set_channel_state(pe, pci_channel_io_frozen); + eeh_set_irq_state(pe, false); + eeh_pe_report("error_detected(IO frozen)", pe, + eeh_report_error, &result); + if ((pe->type & EEH_PE_PHB) && + result != PCI_ERS_RESULT_NONE && + result != PCI_ERS_RESULT_NEED_RESET) + result = PCI_ERS_RESULT_NEED_RESET; + } /* Get the current PCI slot state. This can take a long time, * sometimes over 300 seconds for certain systems. */ - rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000); - if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { - pr_warn("EEH: Permanent failure\n"); - goto hard_fail; + if (result != PCI_ERS_RESULT_DISCONNECT) { + rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000); + if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) { + pr_warn("EEH: Permanent failure\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } } /* Since rtas may enable MMIO when posting the error log, * don't post the error log until after all dev drivers * have been informed. 
*/ - pr_info("EEH: Collect temporary log\n"); - eeh_slot_error_detail(pe, EEH_LOG_TEMP); + if (result != PCI_ERS_RESULT_DISCONNECT) { + pr_info("EEH: Collect temporary log\n"); + eeh_slot_error_detail(pe, EEH_LOG_TEMP); + } /* If all device drivers were EEH-unaware, then shut * down all of the device drivers, and hope they @@ -857,7 +865,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe) if (rc) { pr_warn("%s: Unable to reset, err=%d\n", __func__, rc); - goto hard_fail; + result = PCI_ERS_RESULT_DISCONNECT; } } @@ -866,9 +874,9 @@ void eeh_handle_normal_event(struct eeh_pe *pe) pr_info("EEH: Enable I/O for affected devices\n"); rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); - if (rc < 0) - goto hard_fail; - if (rc) { + if (rc < 0) { + result = PCI_ERS_RESULT_DISCONNECT; + } else if (rc) { result = PCI_ERS_RESULT_NEED_RESET; } else { pr_info("EEH: Notify device drivers to resume I/O\n"); @@ -882,9 +890,9 @@ void eeh_handle_normal_event(struct eeh_pe *pe) pr_info("EEH: Enabled DMA for affected devices\n"); rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); - if (rc < 0) - goto hard_fail; - if (rc) { + if (rc < 0) { + result = PCI_ERS_RESULT_DISCONNECT; + } else if (rc) { result = PCI_ERS_RESULT_NEED_RESET; } else { /* @@ -897,12 +905,6 @@ void eeh_handle_normal_event(struct eeh_pe *pe) } } - /* If any device has a hard failure, then shut off everything. */ - if (result == PCI_ERS_RESULT_DISCONNECT) { - pr_warn("EEH: Device driver gave up\n"); - goto hard_fail; - } - /* If any device called out for a reset, then reset the slot */ if (result == PCI_ERS_RESULT_NEED_RESET) { pr_info("EEH: Reset without hotplug activity\n"); @@ -910,88 +912,81 @@ void eeh_handle_normal_event(struct eeh_pe *pe) if (rc) { pr_warn("%s: Cannot reset, err=%d\n", __func__, rc); - goto hard_fail; + result = PCI_ERS_RESULT_DISCONNECT; + } else { + result = PCI_ERS_RESULT_NONE; + eeh_set_channel_state(pe, pci_channel_io_normal); + eeh_set_irq_state(pe, true); + eeh_pe_report("slot_reset", pe, eeh_report_reset, + &result); } - - pr_info("EEH: Notify device drivers " - "the completion of reset\n"); - result = PCI_ERS_RESULT_NONE; - eeh_set_channel_state(pe, pci_channel_io_normal); - eeh_set_irq_state(pe, true); - eeh_pe_report("slot_reset", pe, eeh_report_reset, &result); - } - - /* All devices should claim they have recovered by now. */ - if ((result != PCI_ERS_RESULT_RECOVERED) && - (result != PCI_ERS_RESULT_NONE)) { - pr_warn("EEH: Not recovered\n"); - goto hard_fail; - } - - /* - * For those hot removed VFs, we should add back them after PF get - * recovered properly. - */ - list_for_each_entry_safe(edev, tmp, &rmv_data.edev_list, rmv_list) { - eeh_add_virt_device(edev, NULL); - list_del(&edev->rmv_list); } - /* Tell all device drivers that they can resume operations */ - pr_info("EEH: Notify device driver to resume\n"); - eeh_set_channel_state(pe, pci_channel_io_normal); - eeh_set_irq_state(pe, true); - eeh_pe_report("resume", pe, eeh_report_resume, NULL); - eeh_for_each_pe(pe, tmp_pe) { - eeh_pe_for_each_dev(tmp_pe, edev, tmp) { - edev->mode &= ~EEH_DEV_NO_HANDLER; - edev->in_error = false; + if ((result == PCI_ERS_RESULT_RECOVERED) || + (result == PCI_ERS_RESULT_NONE)) { + /* + * For those hot removed VFs, we should add back them after PF + * get recovered properly. 
+ */ + list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list, + rmv_entry) { + eeh_add_virt_device(edev); + list_del(&edev->rmv_entry); } - } - pr_info("EEH: Recovery successful.\n"); - goto final; + /* Tell all device drivers that they can resume operations */ + pr_info("EEH: Notify device driver to resume\n"); + eeh_set_channel_state(pe, pci_channel_io_normal); + eeh_set_irq_state(pe, true); + eeh_pe_report("resume", pe, eeh_report_resume, NULL); + eeh_for_each_pe(pe, tmp_pe) { + eeh_pe_for_each_dev(tmp_pe, edev, tmp) { + edev->mode &= ~EEH_DEV_NO_HANDLER; + edev->in_error = false; + } + } -hard_fail: - /* - * About 90% of all real-life EEH failures in the field - * are due to poorly seated PCI cards. Only 10% or so are - * due to actual, failed cards. - */ - pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n" - "Please try reseating or replacing it\n", - pe->phb->global_number, pe->addr); + pr_info("EEH: Recovery successful.\n"); + } else { + /* + * About 90% of all real-life EEH failures in the field + * are due to poorly seated PCI cards. Only 10% or so are + * due to actual, failed cards. + */ + pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n" + "Please try reseating or replacing it\n", + pe->phb->global_number, pe->addr); - eeh_slot_error_detail(pe, EEH_LOG_PERM); + eeh_slot_error_detail(pe, EEH_LOG_PERM); - /* Notify all devices that they're about to go down. */ - eeh_set_channel_state(pe, pci_channel_io_perm_failure); - eeh_set_irq_state(pe, false); - eeh_pe_report("error_detected(permanent failure)", pe, - eeh_report_failure, NULL); + /* Notify all devices that they're about to go down. */ + eeh_set_channel_state(pe, pci_channel_io_perm_failure); + eeh_set_irq_state(pe, false); + eeh_pe_report("error_detected(permanent failure)", pe, + eeh_report_failure, NULL); - /* Mark the PE to be removed permanently */ - eeh_pe_state_mark(pe, EEH_PE_REMOVED); + /* Mark the PE to be removed permanently */ + eeh_pe_state_mark(pe, EEH_PE_REMOVED); - /* - * Shut down the device drivers for good. We mark - * all removed devices correctly to avoid access - * the their PCI config any more. - */ - if (pe->type & EEH_PE_VF) { - eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); - eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); - } else { - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); - eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + /* + * Shut down the device drivers for good. We mark + * all removed devices correctly to avoid access + * the their PCI config any more. 
+ */ + if (pe->type & EEH_PE_VF) { + eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); + eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + } else { + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); + eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); - pci_lock_rescan_remove(); - pci_hp_remove_devices(bus); - pci_unlock_rescan_remove(); - /* The passed PE should no longer be used */ - return; + pci_lock_rescan_remove(); + pci_hp_remove_devices(bus); + pci_unlock_rescan_remove(); + /* The passed PE should no longer be used */ + return; + } } -final: eeh_pe_state_clear(pe, EEH_PE_RECOVERING); } @@ -1026,7 +1021,7 @@ void eeh_handle_special_event(void) phb_pe = eeh_phb_pe_get(hose); if (!phb_pe) continue; - eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); + eeh_pe_mark_isolated(phb_pe); } eeh_serialize_unlock(flags); @@ -1041,11 +1036,9 @@ void eeh_handle_special_event(void) /* Purge all events of the PHB */ eeh_remove_event(pe, true); - if (rc == EEH_NEXT_ERR_DEAD_PHB) - eeh_pe_state_mark(pe, EEH_PE_ISOLATED); - else - eeh_pe_state_mark(pe, - EEH_PE_ISOLATED | EEH_PE_RECOVERING); + if (rc != EEH_NEXT_ERR_DEAD_PHB) + eeh_pe_state_mark(pe, EEH_PE_RECOVERING); + eeh_pe_mark_isolated(pe); eeh_serialize_unlock(flags); diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 1b238ecc553e..6fa2032e0594 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -75,7 +75,6 @@ static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type) pe->type = type; pe->phb = phb; INIT_LIST_HEAD(&pe->child_list); - INIT_LIST_HEAD(&pe->child); INIT_LIST_HEAD(&pe->edevs); pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe), @@ -110,6 +109,57 @@ int eeh_phb_pe_create(struct pci_controller *phb) } /** + * eeh_wait_state - Wait for PE state + * @pe: EEH PE + * @max_wait: maximal period in millisecond + * + * Wait for the state of associated PE. It might take some time + * to retrieve the PE's state. + */ +int eeh_wait_state(struct eeh_pe *pe, int max_wait) +{ + int ret; + int mwait; + + /* + * According to PAPR, the state of PE might be temporarily + * unavailable. Under the circumstance, we have to wait + * for indicated time determined by firmware. The maximal + * wait time is 5 minutes, which is acquired from the original + * EEH implementation. Also, the original implementation + * also defined the minimal wait time as 1 second. 
+ */ +#define EEH_STATE_MIN_WAIT_TIME (1000) +#define EEH_STATE_MAX_WAIT_TIME (300 * 1000) + + while (1) { + ret = eeh_ops->get_state(pe, &mwait); + + if (ret != EEH_STATE_UNAVAILABLE) + return ret; + + if (max_wait <= 0) { + pr_warn("%s: Timeout when getting PE's state (%d)\n", + __func__, max_wait); + return EEH_STATE_NOT_SUPPORT; + } + + if (mwait < EEH_STATE_MIN_WAIT_TIME) { + pr_warn("%s: Firmware returned bad wait value %d\n", + __func__, mwait); + mwait = EEH_STATE_MIN_WAIT_TIME; + } else if (mwait > EEH_STATE_MAX_WAIT_TIME) { + pr_warn("%s: Firmware returned too long wait value %d\n", + __func__, mwait); + mwait = EEH_STATE_MAX_WAIT_TIME; + } + + msleep(min(mwait, max_wait)); + max_wait -= mwait; + } +} + +/** * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB * @phb: PCI controller * @@ -360,7 +410,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) edev->pe = pe; /* Put the edev to PE */ - list_add_tail(&edev->list, &pe->edevs); + list_add_tail(&edev->entry, &pe->edevs); pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n", pdn->phb->global_number, pdn->busno, @@ -369,7 +419,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) pe->addr); return 0; } else if (pe && (pe->type & EEH_PE_INVALID)) { - list_add_tail(&edev->list, &pe->edevs); + list_add_tail(&edev->entry, &pe->edevs); edev->pe = pe; /* * We're running to here because of PCI hotplug caused by @@ -379,7 +429,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) while (parent) { if (!(parent->type & EEH_PE_INVALID)) break; - parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP); + parent->type &= ~EEH_PE_INVALID; parent = parent->parent; } @@ -429,7 +479,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev) * link the EEH device accordingly. */ list_add_tail(&pe->child, &parent->child_list); - list_add_tail(&edev->list, &pe->edevs); + list_add_tail(&edev->entry, &pe->edevs); edev->pe = pe; pr_debug("EEH: Add %04x:%02x:%02x.%01x to " "Device PE#%x, Parent PE#%x\n", @@ -457,7 +507,8 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev) int cnt; struct pci_dn *pdn = eeh_dev_to_pdn(edev); - if (!edev->pe) { + pe = eeh_dev_to_pe(edev); + if (!pe) { pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n", __func__, pdn->phb->global_number, pdn->busno, @@ -467,9 +518,8 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev) } /* Remove the EEH device */ - pe = eeh_dev_to_pe(edev); edev->pe = NULL; - list_del(&edev->list); + list_del(&edev->entry); /* * Check if the parent PE includes any EEH devices. @@ -541,56 +591,50 @@ void eeh_pe_update_time_stamp(struct eeh_pe *pe) } /** - * __eeh_pe_state_mark - Mark the state for the PE - * @data: EEH PE - * @flag: state + * eeh_pe_state_mark - Mark specified state for PE and its associated device + * @pe: EEH PE * - * The function is used to mark the indicated state for the given - * PE. Also, the associated PCI devices will be put into IO frozen - * state as well. + * EEH error affects the current PE and its child PEs. The function + * is used to mark appropriate state for the affected PEs and the + * associated devices. 
*/ -static void *__eeh_pe_state_mark(struct eeh_pe *pe, void *flag) +void eeh_pe_state_mark(struct eeh_pe *root, int state) { - int state = *((int *)flag); - struct eeh_dev *edev, *tmp; - struct pci_dev *pdev; - - /* Keep the state of permanently removed PE intact */ - if (pe->state & EEH_PE_REMOVED) - return NULL; - - pe->state |= state; - - /* Offline PCI devices if applicable */ - if (!(state & EEH_PE_ISOLATED)) - return NULL; - - eeh_pe_for_each_dev(pe, edev, tmp) { - pdev = eeh_dev_to_pci_dev(edev); - if (pdev) - pdev->error_state = pci_channel_io_frozen; - } - - /* Block PCI config access if required */ - if (pe->state & EEH_PE_CFG_RESTRICTED) - pe->state |= EEH_PE_CFG_BLOCKED; + struct eeh_pe *pe; - return NULL; + eeh_for_each_pe(root, pe) + if (!(pe->state & EEH_PE_REMOVED)) + pe->state |= state; } +EXPORT_SYMBOL_GPL(eeh_pe_state_mark); /** - * eeh_pe_state_mark - Mark specified state for PE and its associated device + * eeh_pe_mark_isolated * @pe: EEH PE * - * EEH error affects the current PE and its child PEs. The function - * is used to mark appropriate state for the affected PEs and the - * associated devices. + * Record that a PE has been isolated by marking the PE and it's children as + * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices + * as pci_channel_io_frozen. */ -void eeh_pe_state_mark(struct eeh_pe *pe, int state) +void eeh_pe_mark_isolated(struct eeh_pe *root) { - eeh_pe_traverse(pe, __eeh_pe_state_mark, &state); + struct eeh_pe *pe; + struct eeh_dev *edev; + struct pci_dev *pdev; + + eeh_pe_state_mark(root, EEH_PE_ISOLATED); + eeh_for_each_pe(root, pe) { + list_for_each_entry(edev, &pe->edevs, entry) { + pdev = eeh_dev_to_pci_dev(edev); + if (pdev) + pdev->error_state = pci_channel_io_frozen; + } + /* Block PCI config access if required */ + if (pe->state & EEH_PE_CFG_RESTRICTED) + pe->state |= EEH_PE_CFG_BLOCKED; + } } -EXPORT_SYMBOL_GPL(eeh_pe_state_mark); +EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated); static void *__eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag) { @@ -671,28 +715,6 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state) eeh_pe_traverse(pe, __eeh_pe_state_clear, &state); } -/** - * eeh_pe_state_mark_with_cfg - Mark PE state with unblocked config space - * @pe: PE - * @state: PE state to be set - * - * Set specified flag to PE and its child PEs. The PCI config space - * of some PEs is blocked automatically when EEH_PE_ISOLATED is set, - * which isn't needed in some situations. The function allows to set - * the specified flag to indicated PEs without blocking their PCI - * config space. - */ -void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state) -{ - eeh_pe_traverse(pe, __eeh_pe_state_mark, &state); - if (!(state & EEH_PE_ISOLATED)) - return; - - /* Clear EEH_PE_CFG_BLOCKED, which might be set just now */ - state = EEH_PE_CFG_BLOCKED; - eeh_pe_traverse(pe, __eeh_pe_state_clear, &state); -} - /* * Some PCI bridges (e.g. 
PLX bridges) have primary/secondary * buses assigned explicitly by firmware, and we probably have @@ -945,7 +967,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe) return pe->bus; /* Retrieve the parent PCI bus of first (top) PCI device */ - edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, list); + edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry); pdev = eeh_dev_to_pci_dev(edev); if (pdev) return pdev->bus; diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index e58c3f467db5..77decded1175 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -794,7 +794,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601) lis r10,MSR_KERNEL@h ori r10,r10,MSR_KERNEL@l bl transfer_to_handler_full - .long nonrecoverable_exception + .long unrecoverable_exception .long ret_from_except #endif @@ -1297,7 +1297,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601) rlwinm r3,r3,0,0,30 stw r3,_TRAP(r1) 4: addi r3,r1,STACK_FRAME_OVERHEAD - bl nonrecoverable_exception + bl unrecoverable_exception /* shouldn't return */ b 4b diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 2206912ea4f0..7b1693adff2a 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -171,7 +171,7 @@ system_call: /* label this so stack traces look sane */ * based on caller's run-mode / personality. */ ld r11,SYS_CALL_TABLE@toc(2) - andi. r10,r10,_TIF_32BIT + andis. r10,r10,_TIF_32BIT@h beq 15f addi r11,r11,8 /* use 32-bit syscall entries */ clrldi r3,r3,32 @@ -386,10 +386,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 4: /* Anything else left to do? */ BEGIN_FTR_SECTION - lis r3,INIT_PPR@highest /* Set thread.ppr = 3 */ - ld r10,PACACURRENT(r13) + lis r3,DEFAULT_PPR@highest /* Set default PPR */ sldi r3,r3,32 /* bits 11-13 are used for ppr */ - std r3,TASKTHREADPPR(r10) + std r3,_PPR(r1) END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP) @@ -624,6 +623,10 @@ _GLOBAL(_switch) addi r6,r4,-THREAD /* Convert THREAD to 'current' */ std r6,PACACURRENT(r13) /* Set new 'current' */ +#if defined(CONFIG_STACKPROTECTOR) + ld r6, TASK_CANARY(r6) + std r6, PACA_CANARY(r13) +#endif ld r8,KSP(r4) /* new stack pointer */ #ifdef CONFIG_PPC_BOOK3S_64 @@ -672,7 +675,9 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) isync slbie r6 +BEGIN_FTR_SECTION slbie r6 /* Workaround POWER5 < DD2.1 issue */ +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) slbmte r7,r0 isync 2: @@ -936,12 +941,6 @@ fast_exception_return: andi. r0,r3,MSR_RI beq- .Lunrecov_restore - /* Load PPR from thread struct before we clear MSR:RI */ -BEGIN_FTR_SECTION - ld r2,PACACURRENT(r13) - ld r2,TASKTHREADPPR(r2) -END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) - /* * Clear RI before restoring r13. If we are returning to * userspace and we take an exception after restoring r13, @@ -962,7 +961,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) andi. r0,r3,MSR_PR beq 1f BEGIN_FTR_SECTION - mtspr SPRN_PPR,r2 /* Restore PPR */ + /* Restore PPR */ + ld r2,_PPR(r1) + mtspr SPRN_PPR,r2 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ACCOUNT_CPU_USER_EXIT(r13, r2, r4) REST_GPR(13, r1) @@ -1118,7 +1119,7 @@ _ASM_NOKPROBE_SYMBOL(fast_exception_return); _GLOBAL(enter_rtas) mflr r0 std r0,16(r1) - stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */ + stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */ /* Because RTAS is running in 32b mode, it clobbers the high order half * of all registers that it saves. 
We therefore save those registers @@ -1250,7 +1251,7 @@ rtas_restore_regs: ld r8,_DSISR(r1) mtdsisr r8 - addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */ + addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */ ld r0,16(r1) /* get return address */ mtlr r0 @@ -1261,7 +1262,7 @@ rtas_restore_regs: _GLOBAL(enter_prom) mflr r0 std r0,16(r1) - stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */ + stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */ /* Because PROM is running in 32b mode, it clobbers the high order half * of all registers that it saves. We therefore save those registers @@ -1318,8 +1319,8 @@ _GLOBAL(enter_prom) REST_10GPRS(22, r1) ld r4,_CCR(r1) mtcr r4 - - addi r1,r1,PROM_FRAME_SIZE + + addi r1,r1,SWITCH_FRAME_SIZE ld r0,16(r1) mtlr r0 blr diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 2d8fc8c9da7a..89d32bb79d5e 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -244,14 +244,13 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x100) SET_SCRATCH0(r13) /* save r13 */ EXCEPTION_PROLOG_0(PACA_EXMC) BEGIN_FTR_SECTION - b machine_check_powernv_early + b machine_check_common_early FTR_SECTION_ELSE b machine_check_pSeries_0 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) EXC_REAL_END(machine_check, 0x200, 0x100) EXC_VIRT_NONE(0x4200, 0x100) -TRAMP_REAL_BEGIN(machine_check_powernv_early) -BEGIN_FTR_SECTION +TRAMP_REAL_BEGIN(machine_check_common_early) EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200) /* * Register contents: @@ -305,7 +304,9 @@ BEGIN_FTR_SECTION /* Save r9 through r13 from EXMC save area to stack frame. */ EXCEPTION_PROLOG_COMMON_2(PACA_EXMC) mfmsr r11 /* get MSR value */ +BEGIN_FTR_SECTION ori r11,r11,MSR_ME /* turn on ME bit */ +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) ori r11,r11,MSR_RI /* turn on RI bit */ LOAD_HANDLER(r12, machine_check_handle_early) 1: mtspr SPRN_SRR0,r12 @@ -324,13 +325,15 @@ BEGIN_FTR_SECTION andc r11,r11,r10 /* Turn off MSR_ME */ b 1b b . /* prevent speculative execution */ -END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) TRAMP_REAL_BEGIN(machine_check_pSeries) .globl machine_check_fwnmi machine_check_fwnmi: SET_SCRATCH0(r13) /* save r13 */ EXCEPTION_PROLOG_0(PACA_EXMC) +BEGIN_FTR_SECTION + b machine_check_common_early +END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE) machine_check_pSeries_0: EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200) /* @@ -440,6 +443,9 @@ EXC_COMMON_BEGIN(machine_check_handle_early) bl machine_check_early std r3,RESULT(r1) /* Save result */ ld r12,_MSR(r1) +BEGIN_FTR_SECTION + b 4f +END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE) #ifdef CONFIG_PPC_P7_NAP /* @@ -463,11 +469,12 @@ EXC_COMMON_BEGIN(machine_check_handle_early) */ rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */ beq 5f - andi. r11,r12,MSR_PR /* See if coming from user. */ +4: andi. r11,r12,MSR_PR /* See if coming from user. */ bne 9f /* continue in V mode if we are. */ 5: #ifdef CONFIG_KVM_BOOK3S_64_HANDLER +BEGIN_FTR_SECTION /* * We are coming from kernel context. Check if we are coming from * guest. if yes, then we can continue. We will fall through @@ -476,6 +483,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) lbz r11,HSTATE_IN_GUEST(r13) cmpwi r11,0 /* Check if coming from guest */ bne 9f /* continue if we are. */ +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) #endif /* * At this point we are not sure about what context we come from. 
@@ -510,6 +518,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) cmpdi r3,0 /* see if we handled MCE successfully */ beq 1b /* if !handled then panic */ +BEGIN_FTR_SECTION /* * Return from MC interrupt. * Queue up the MCE event so that we can log it later, while @@ -518,10 +527,24 @@ EXC_COMMON_BEGIN(machine_check_handle_early) bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP RFI_TO_USER_OR_KERNEL +FTR_SECTION_ELSE + /* + * pSeries: Return from MC interrupt. Before that stay on emergency + * stack and call machine_check_exception to log the MCE event. + */ + LOAD_HANDLER(r10,mce_return) + mtspr SPRN_SRR0,r10 + ld r10,PACAKMSR(r13) + mtspr SPRN_SRR1,r10 + RFI_TO_KERNEL + b . +ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) 9: /* Deliver the machine check to host kernel in V mode. */ MACHINE_CHECK_HANDLER_WINDUP - b machine_check_pSeries + SET_SCRATCH0(r13) /* save r13 */ + EXCEPTION_PROLOG_0(PACA_EXMC) + b machine_check_pSeries_0 EXC_COMMON_BEGIN(unrecover_mce) /* Invoke machine_check_exception to print MCE event and panic. */ @@ -535,6 +558,13 @@ EXC_COMMON_BEGIN(unrecover_mce) bl unrecoverable_exception b 1b +EXC_COMMON_BEGIN(mce_return) + /* Invoke machine_check_exception to print MCE event and return. */ + addi r3,r1,STACK_FRAME_OVERHEAD + bl machine_check_exception + MACHINE_CHECK_HANDLER_WINDUP + RFI_TO_KERNEL + b . EXC_REAL(data_access, 0x300, 0x80) EXC_VIRT(data_access, 0x4300, 0x80, 0x300) @@ -566,28 +596,36 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) - SET_SCRATCH0(r13) - EXCEPTION_PROLOG_0(PACA_EXSLB) - EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380) - mr r12,r3 /* save r3 */ - mfspr r3,SPRN_DAR - mfspr r11,SPRN_SRR1 - crset 4*cr6+eq - BRANCH_TO_COMMON(r10, slb_miss_common) +EXCEPTION_PROLOG(PACA_EXSLB, data_access_slb_common, EXC_STD, KVMTEST_PR, 0x380); EXC_REAL_END(data_access_slb, 0x380, 0x80) EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) - SET_SCRATCH0(r13) - EXCEPTION_PROLOG_0(PACA_EXSLB) - EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380) - mr r12,r3 /* save r3 */ - mfspr r3,SPRN_DAR - mfspr r11,SPRN_SRR1 - crset 4*cr6+eq - BRANCH_TO_COMMON(r10, slb_miss_common) +EXCEPTION_RELON_PROLOG(PACA_EXSLB, data_access_slb_common, EXC_STD, NOTEST, 0x380); EXC_VIRT_END(data_access_slb, 0x4380, 0x80) + TRAMP_KVM_SKIP(PACA_EXSLB, 0x380) +EXC_COMMON_BEGIN(data_access_slb_common) + mfspr r10,SPRN_DAR + std r10,PACA_EXSLB+EX_DAR(r13) + EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB) + ld r4,PACA_EXSLB+EX_DAR(r13) + std r4,_DAR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD + bl do_slb_fault + cmpdi r3,0 + bne- 1f + b fast_exception_return +1: /* Error case */ + std r3,RESULT(r1) + bl save_nvgprs + RECONCILE_IRQ_STATE(r10, r11) + ld r4,_DAR(r1) + ld r5,RESULT(r1) + addi r3,r1,STACK_FRAME_OVERHEAD + bl do_bad_slb_fault + b ret_from_except + EXC_REAL(instruction_access, 0x400, 0x80) EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400) @@ -610,160 +648,34 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80) - SET_SCRATCH0(r13) - EXCEPTION_PROLOG_0(PACA_EXSLB) - EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480) - mr r12,r3 /* save r3 */ - mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ - mfspr r11,SPRN_SRR1 - crclr 4*cr6+eq - BRANCH_TO_COMMON(r10, slb_miss_common) +EXCEPTION_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, KVMTEST_PR, 0x480); EXC_REAL_END(instruction_access_slb, 0x480, 0x80) EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80) - SET_SCRATCH0(r13) - 
EXCEPTION_PROLOG_0(PACA_EXSLB) - EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480) - mr r12,r3 /* save r3 */ - mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ - mfspr r11,SPRN_SRR1 - crclr 4*cr6+eq - BRANCH_TO_COMMON(r10, slb_miss_common) +EXCEPTION_RELON_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, NOTEST, 0x480); EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80) -TRAMP_KVM(PACA_EXSLB, 0x480) - - -/* - * This handler is used by the 0x380 and 0x480 SLB miss interrupts, as well as - * the virtual mode 0x4380 and 0x4480 interrupts if AIL is enabled. - */ -EXC_COMMON_BEGIN(slb_miss_common) - /* - * r13 points to the PACA, r9 contains the saved CR, - * r12 contains the saved r3, - * r11 contain the saved SRR1, SRR0 is still ready for return - * r3 has the faulting address - * r9 - r13 are saved in paca->exslb. - * cr6.eq is set for a D-SLB miss, clear for a I-SLB miss - * We assume we aren't going to take any exceptions during this - * procedure. - */ - mflr r10 - stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ - std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ - - andi. r9,r11,MSR_PR // Check for exception from userspace - cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later - - /* - * Test MSR_RI before calling slb_allocate_realmode, because the - * MSR in r11 gets clobbered. However we still want to allocate - * SLB in case MSR_RI=0, to minimise the risk of getting stuck in - * recursive SLB faults. So use cr5 for this, which is preserved. - */ - andi. r11,r11,MSR_RI /* check for unrecoverable exception */ - cmpdi cr5,r11,MSR_RI - - crset 4*cr0+eq -#ifdef CONFIG_PPC_BOOK3S_64 -BEGIN_MMU_FTR_SECTION - bl slb_allocate -END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) -#endif - - ld r10,PACA_EXSLB+EX_LR(r13) - lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ - mtlr r10 - - /* - * Large address, check whether we have to allocate new contexts. - */ - beq- 8f - bne- cr5,2f /* if unrecoverable exception, oops */ - - /* All done -- return from exception. */ - - bne cr4,1f /* returning to kernel */ - - mtcrf 0x80,r9 - mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ - mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ - mtcrf 0x02,r9 /* I/D indication is in cr6 */ - mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ - - RESTORE_CTR(r9, PACA_EXSLB) - RESTORE_PPR_PACA(PACA_EXSLB, r9) - mr r3,r12 - ld r9,PACA_EXSLB+EX_R9(r13) - ld r10,PACA_EXSLB+EX_R10(r13) - ld r11,PACA_EXSLB+EX_R11(r13) - ld r12,PACA_EXSLB+EX_R12(r13) - ld r13,PACA_EXSLB+EX_R13(r13) - RFI_TO_USER - b . /* prevent speculative execution */ -1: - mtcrf 0x80,r9 - mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ - mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ - mtcrf 0x02,r9 /* I/D indication is in cr6 */ - mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ - - RESTORE_CTR(r9, PACA_EXSLB) - RESTORE_PPR_PACA(PACA_EXSLB, r9) - mr r3,r12 - ld r9,PACA_EXSLB+EX_R9(r13) - ld r10,PACA_EXSLB+EX_R10(r13) - ld r11,PACA_EXSLB+EX_R11(r13) - ld r12,PACA_EXSLB+EX_R12(r13) - ld r13,PACA_EXSLB+EX_R13(r13) - RFI_TO_KERNEL - b . /* prevent speculative execution */ - - -2: std r3,PACA_EXSLB+EX_DAR(r13) - mr r3,r12 - mfspr r11,SPRN_SRR0 - mfspr r12,SPRN_SRR1 - LOAD_HANDLER(r10,unrecov_slb) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - RFI_TO_KERNEL - b . - -8: std r3,PACA_EXSLB+EX_DAR(r13) - mr r3,r12 - mfspr r11,SPRN_SRR0 - mfspr r12,SPRN_SRR1 - LOAD_HANDLER(r10, large_addr_slb) - mtspr SPRN_SRR0,r10 - ld r10,PACAKMSR(r13) - mtspr SPRN_SRR1,r10 - RFI_TO_KERNEL - b . 
+TRAMP_KVM(PACA_EXSLB, 0x480) -EXC_COMMON_BEGIN(unrecov_slb) - EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) - RECONCILE_IRQ_STATE(r10, r11) +EXC_COMMON_BEGIN(instruction_access_slb_common) + EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB) + ld r4,_NIP(r1) + addi r3,r1,STACK_FRAME_OVERHEAD + bl do_slb_fault + cmpdi r3,0 + bne- 1f + b fast_exception_return +1: /* Error case */ + std r3,RESULT(r1) bl save_nvgprs -1: addi r3,r1,STACK_FRAME_OVERHEAD - bl unrecoverable_exception - b 1b - -EXC_COMMON_BEGIN(large_addr_slb) - EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB) RECONCILE_IRQ_STATE(r10, r11) - ld r3, PACA_EXSLB+EX_DAR(r13) - std r3, _DAR(r1) - beq cr6, 2f - li r10, 0x481 /* fix trap number for I-SLB miss */ - std r10, _TRAP(r1) -2: bl save_nvgprs - addi r3, r1, STACK_FRAME_OVERHEAD - bl slb_miss_large_addr + ld r4,_NIP(r1) + ld r5,RESULT(r1) + addi r3,r1,STACK_FRAME_OVERHEAD + bl do_bad_slb_fault b ret_from_except + EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100) .globl hardware_interrupt_hv; hardware_interrupt_hv: diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index a711d22339ea..761b28b1427d 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1444,8 +1444,8 @@ static ssize_t fadump_register_store(struct kobject *kobj, break; case 1: if (fw_dump.dump_registered == 1) { - ret = -EEXIST; - goto unlock_out; + /* Un-register Firmware-assisted dump */ + fadump_unregister_dump(&fdm); } /* Register Firmware-assisted dump */ ret = register_fadump(); diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 6582f824d620..134a573a9f2d 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -642,7 +642,7 @@ DTLBMissIMMR: mtspr SPRN_MD_TWC, r10 mfspr r10, SPRN_IMMR /* Get current IMMR */ rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */ - ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ + ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ _PAGE_PRESENT | _PAGE_NO_CACHE mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ @@ -660,7 +660,7 @@ DTLBMissLinear: li r11, MD_PS8MEG | MD_SVALID | M_APG2 mtspr SPRN_MD_TWC, r11 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ - ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ + ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ _PAGE_PRESENT mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ @@ -679,7 +679,7 @@ ITLBMissLinear: li r11, MI_PS8MEG | MI_SVALID | M_APG2 mtspr SPRN_MI_TWC, r11 rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */ - ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \ + ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ _PAGE_PRESENT mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index aa9f1b8261db..7e89d02a84e1 100644 --- a/arch/powerpc/kernel/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c @@ -153,10 +153,10 @@ static const struct ppc_pci_io iowa_pci_io = { #ifdef CONFIG_PPC_INDIRECT_MMIO static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, - unsigned long flags, void *caller) + pgprot_t prot, void *caller) { struct iowa_bus *bus; - void __iomem *res = __ioremap_caller(addr, size, flags, caller); + void __iomem *res = __ioremap_caller(addr, size, prot, caller); int busno; bus = iowa_pci_find(0, (unsigned long)addr); diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 
19b4c628f3be..f0dc680e659a 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -785,9 +785,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, vaddr = page_address(page) + offset; uaddr = (unsigned long)vaddr; - npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); if (tbl) { + npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); align = 0; if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0) diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 1df6c74aa731..fda3ae48480c 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -110,14 +110,14 @@ static void pci_process_ISA_OF_ranges(struct device_node *isa_node, size = 0x10000; __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, - size, pgprot_val(pgprot_noncached(__pgprot(0)))); + size, pgprot_noncached(PAGE_KERNEL)); return; inval_range: printk(KERN_ERR "no ISA IO ranges or unexpected isa range, " "mapping 64k\n"); __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE, - 0x10000, pgprot_val(pgprot_noncached(__pgprot(0)))); + 0x10000, pgprot_noncached(PAGE_KERNEL)); } @@ -253,7 +253,7 @@ void __init isa_bridge_init_non_pci(struct device_node *np) */ isa_io_base = ISA_IO_BASE; __ioremap_at(pbase, (void *)ISA_IO_BASE, - size, pgprot_val(pgprot_noncached(__pgprot(0)))); + size, pgprot_noncached(PAGE_KERNEL)); pr_debug("ISA: Non-PCI bridge is %pOF\n", np); } diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 35e240a0a408..59c578f865aa 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -24,6 +24,7 @@ #include <asm/processor.h> #include <asm/machdep.h> #include <asm/debug.h> +#include <asm/code-patching.h> #include <linux/slab.h> /* @@ -144,7 +145,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0) return 0; - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) + if (*(u32 *)regs->nip == BREAK_INSTR) regs->nip += BREAK_INSTR_SIZE; return 1; @@ -441,16 +442,42 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, return -1; } +int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) +{ + int err; + unsigned int instr; + unsigned int *addr = (unsigned int *)bpt->bpt_addr; + + err = probe_kernel_address(addr, instr); + if (err) + return err; + + err = patch_instruction(addr, BREAK_INSTR); + if (err) + return -EFAULT; + + *(unsigned int *)bpt->saved_instr = instr; + + return 0; +} + +int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) +{ + int err; + unsigned int instr = *(unsigned int *)bpt->saved_instr; + unsigned int *addr = (unsigned int *)bpt->bpt_addr; + + err = patch_instruction(addr, instr); + if (err) + return -EFAULT; + + return 0; +} + /* * Global data */ -struct kgdb_arch arch_kgdb_ops = { -#ifdef __LITTLE_ENDIAN__ - .gdb_bpt_instr = {0x08, 0x10, 0x82, 0x7d}, -#else - .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08}, -#endif -}; +struct kgdb_arch arch_kgdb_ops; static int kgdb_not_implemented(struct pt_regs *regs) { diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index efdd16a79075..bd933a75f0bc 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -488,10 +488,11 @@ long machine_check_early(struct pt_regs *regs) { long handled = 0; - __this_cpu_inc(irq_stat.mce_exceptions); - - if (cur_cpu_spec && cur_cpu_spec->machine_check_early) - handled = cur_cpu_spec->machine_check_early(regs); + /* + 
* See if platform is capable of handling machine check. + */ + if (ppc_md.machine_check_early) + handled = ppc_md.machine_check_early(regs); return handled; } diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index 3497c8329c1d..6b800eec31f2 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -60,7 +60,7 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr) /* flush SLBs and reload */ #ifdef CONFIG_PPC_BOOK3S_64 -static void flush_and_reload_slb(void) +void flush_and_reload_slb(void) { /* Invalidate all SLBs */ slb_flush_all_realmode(); @@ -89,6 +89,13 @@ static void flush_and_reload_slb(void) static void flush_erat(void) { +#ifdef CONFIG_PPC_BOOK3S_64 + if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) { + flush_and_reload_slb(); + return; + } +#endif + /* PPC_INVALIDATE_ERAT can only be used on ISA v3 and newer */ asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); } diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 77371c9ef3d8..2d861a36662e 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -74,6 +74,14 @@ int module_finalize(const Elf_Ehdr *hdr, (void *)sect->sh_addr + sect->sh_size); #endif /* CONFIG_PPC64 */ +#ifdef PPC64_ELF_ABI_v1 + sect = find_section(hdr, sechdrs, ".opd"); + if (sect != NULL) { + me->arch.start_opd = sect->sh_addr; + me->arch.end_opd = sect->sh_addr + sect->sh_size; + } +#endif /* PPC64_ELF_ABI_v1 */ + #ifdef CONFIG_PPC_BARRIER_NOSPEC sect = find_section(hdr, sechdrs, "__spec_barrier_fixup"); if (sect != NULL) diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index b8d61e019d06..8661eea78503 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -360,11 +360,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0) dedotify_versions((void *)hdr + sechdrs[i].sh_offset, sechdrs[i].sh_size); - else if (!strcmp(secstrings + sechdrs[i].sh_name, ".opd")) { - me->arch.start_opd = sechdrs[i].sh_addr; - me->arch.end_opd = sechdrs[i].sh_addr + - sechdrs[i].sh_size; - } /* We don't handle .init for the moment: rename to _init */ while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init"))) @@ -685,7 +680,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_PPC64_REL32: /* 32 bits relative (used by relative exception tables) */ - *(u32 *)location = value - (unsigned long)location; + /* Convert value to relative */ + value -= (unsigned long)location; + if (value + 0x80000000 > 0xffffffff) { + pr_err("%s: REL32 %li out of range!\n", + me->name, (long int)value); + return -ENOEXEC; + } + *(u32 *)location = value; break; case R_PPC64_TOCSAVE: diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index d63b488d34d7..4da8ed576229 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -17,7 +17,6 @@ #include <linux/of.h> #include <linux/slab.h> #include <linux/export.h> -#include <linux/syscalls.h> #include <asm/processor.h> #include <asm/io.h> diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index dff28f903512..9d8c10d55407 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -159,7 +159,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose) /* Establish the mapping */ if (__ioremap_at(phys_page, area->addr, size_page, - pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL) + pgprot_noncached(PAGE_KERNEL)) == NULL) 
return -ENOMEM; /* Fixup hose IO resource */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index bb6ac471a784..4d5322cfad25 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -43,6 +43,7 @@ #include <linux/uaccess.h> #include <linux/elf-randomize.h> #include <linux/pkeys.h> +#include <linux/seq_buf.h> #include <asm/pgtable.h> #include <asm/io.h> @@ -65,6 +66,7 @@ #include <asm/livepatch.h> #include <asm/cpu_has_feature.h> #include <asm/asm-prototypes.h> +#include <asm/stacktrace.h> #include <linux/kprobes.h> #include <linux/kdebug.h> @@ -102,24 +104,18 @@ static void check_if_tm_restore_required(struct task_struct *tsk) } } -static inline bool msr_tm_active(unsigned long msr) -{ - return MSR_TM_ACTIVE(msr); -} - static bool tm_active_with_fp(struct task_struct *tsk) { - return msr_tm_active(tsk->thread.regs->msr) && + return MSR_TM_ACTIVE(tsk->thread.regs->msr) && (tsk->thread.ckpt_regs.msr & MSR_FP); } static bool tm_active_with_altivec(struct task_struct *tsk) { - return msr_tm_active(tsk->thread.regs->msr) && + return MSR_TM_ACTIVE(tsk->thread.regs->msr) && (tsk->thread.ckpt_regs.msr & MSR_VEC); } #else -static inline bool msr_tm_active(unsigned long msr) { return false; } static inline void check_if_tm_restore_required(struct task_struct *tsk) { } static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; } static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; } @@ -247,7 +243,8 @@ void enable_kernel_fp(void) * giveup as this would save to the 'live' structure not the * checkpointed structure. */ - if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) + if (!MSR_TM_ACTIVE(cpumsr) && + MSR_TM_ACTIVE(current->thread.regs->msr)) return; __giveup_fpu(current); } @@ -311,7 +308,8 @@ void enable_kernel_altivec(void) * giveup as this would save to the 'live' structure not the * checkpointed structure. */ - if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) + if (!MSR_TM_ACTIVE(cpumsr) && + MSR_TM_ACTIVE(current->thread.regs->msr)) return; __giveup_altivec(current); } @@ -397,7 +395,8 @@ void enable_kernel_vsx(void) * giveup as this would save to the 'live' structure not the * checkpointed structure. 
*/ - if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr)) + if (!MSR_TM_ACTIVE(cpumsr) && + MSR_TM_ACTIVE(current->thread.regs->msr)) return; __giveup_vsx(current); } @@ -530,7 +529,7 @@ void restore_math(struct pt_regs *regs) { unsigned long msr; - if (!msr_tm_active(regs->msr) && + if (!MSR_TM_ACTIVE(regs->msr) && !current->thread.load_fp && !loadvec(current->thread)) return; @@ -620,8 +619,6 @@ void do_send_trap(struct pt_regs *regs, unsigned long address, void do_break (struct pt_regs *regs, unsigned long address, unsigned long error_code) { - siginfo_t info; - current->thread.trap_nr = TRAP_HWBKPT; if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 11, SIGSEGV) == NOTIFY_STOP) @@ -634,12 +631,7 @@ void do_break (struct pt_regs *regs, unsigned long address, hw_breakpoint_disable(); /* Deliver the signal to userspace */ - clear_siginfo(&info); - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_HWBKPT; - info.si_addr = (void __user *)address; - force_sig_info(SIGTRAP, &info, current); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address, current); } #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ @@ -1259,17 +1251,16 @@ struct task_struct *__switch_to(struct task_struct *prev, return last; } -static int instructions_to_print = 16; +#define NR_INSN_TO_PRINT 16 static void show_instructions(struct pt_regs *regs) { int i; - unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * - sizeof(int)); + unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int)); printk("Instruction dump:"); - for (i = 0; i < instructions_to_print; i++) { + for (i = 0; i < NR_INSN_TO_PRINT; i++) { int instr; if (!(i % 8)) @@ -1284,7 +1275,7 @@ static void show_instructions(struct pt_regs *regs) #endif if (!__kernel_text_address(pc) || - probe_kernel_address((unsigned int __user *)pc, instr)) { + probe_kernel_address((const void *)pc, instr)) { pr_cont("XXXXXXXX "); } else { if (regs->nip == pc) @@ -1302,43 +1293,43 @@ static void show_instructions(struct pt_regs *regs) void show_user_instructions(struct pt_regs *regs) { unsigned long pc; - int i; + int n = NR_INSN_TO_PRINT; + struct seq_buf s; + char buf[96]; /* enough for 8 times 9 + 2 chars */ - pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int)); + pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int)); /* * Make sure the NIP points at userspace, not kernel text/data or * elsewhere. */ - if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) { + if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) { pr_info("%s[%d]: Bad NIP, not dumping instructions.\n", current->comm, current->pid); return; } - pr_info("%s[%d]: code: ", current->comm, current->pid); + seq_buf_init(&s, buf, sizeof(buf)); - for (i = 0; i < instructions_to_print; i++) { - int instr; + while (n) { + int i; - if (!(i % 8) && (i > 0)) { - pr_cont("\n"); - pr_info("%s[%d]: code: ", current->comm, current->pid); - } + seq_buf_clear(&s); - if (probe_kernel_address((unsigned int __user *)pc, instr)) { - pr_cont("XXXXXXXX "); - } else { - if (regs->nip == pc) - pr_cont("<%08x> ", instr); - else - pr_cont("%08x ", instr); + for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) { + int instr; + + if (probe_kernel_address((const void *)pc, instr)) { + seq_buf_printf(&s, "XXXXXXXX "); + continue; + } + seq_buf_printf(&s, regs->nip == pc ? 
"<%08x> " : "%08x ", instr); } - pc += sizeof(int); + if (!seq_buf_has_overflowed(&s)) + pr_info("%s[%d]: code: %s\n", current->comm, + current->pid, s.buffer); } - - pr_cont("\n"); } struct regbit { @@ -1492,6 +1483,15 @@ void flush_thread(void) #endif /* CONFIG_HAVE_HW_BREAKPOINT */ } +#ifdef CONFIG_PPC_BOOK3S_64 +void arch_setup_new_exec(void) +{ + if (radix_enabled()) + return; + hash__setup_new_exec(); +} +#endif + int set_thread_uses_vas(void) { #ifdef CONFIG_PPC_BOOK3S_64 @@ -1712,7 +1712,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, p->thread.dscr = mfspr(SPRN_DSCR); } if (cpu_has_feature(CPU_FTR_HAS_PPR)) - p->thread.ppr = INIT_PPR; + childregs->ppr = DEFAULT_PPR; p->thread.tidr = 0; #endif @@ -1720,6 +1720,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, return 0; } +void preload_new_slb_context(unsigned long start, unsigned long sp); + /* * Set up a thread for executing a new program */ @@ -1727,6 +1729,10 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) { #ifdef CONFIG_PPC64 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ + +#ifdef CONFIG_PPC_BOOK3S_64 + preload_new_slb_context(start, sp); +#endif #endif /* @@ -1817,6 +1823,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) #ifdef CONFIG_VSX current->thread.used_vsr = 0; #endif + current->thread.load_slb = 0; current->thread.load_fp = 0; memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); current->thread.fp_save_area = NULL; diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 9b38a2e5dd35..f33ff4163a51 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -43,11 +43,13 @@ #include <asm/btext.h> #include <asm/sections.h> #include <asm/machdep.h> -#include <asm/opal.h> #include <asm/asm-prototypes.h> #include <linux/linux_logo.h> +/* All of prom_init bss lives here */ +#define __prombss __section(.bss.prominit) + /* * Eventually bump that one up */ @@ -87,7 +89,7 @@ #define OF_WORKAROUNDS 0 #else #define OF_WORKAROUNDS of_workarounds -int of_workarounds; +static int of_workarounds __prombss; #endif #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ @@ -148,29 +150,31 @@ extern void copy_and_flush(unsigned long dest, unsigned long src, unsigned long size, unsigned long offset); /* prom structure */ -static struct prom_t __initdata prom; +static struct prom_t __prombss prom; -static unsigned long prom_entry __initdata; +static unsigned long __prombss prom_entry; #define PROM_SCRATCH_SIZE 256 -static char __initdata of_stdout_device[256]; -static char __initdata prom_scratch[PROM_SCRATCH_SIZE]; +static char __prombss of_stdout_device[256]; +static char __prombss prom_scratch[PROM_SCRATCH_SIZE]; -static unsigned long __initdata dt_header_start; -static unsigned long __initdata dt_struct_start, dt_struct_end; -static unsigned long __initdata dt_string_start, dt_string_end; +static unsigned long __prombss dt_header_start; +static unsigned long __prombss dt_struct_start, dt_struct_end; +static unsigned long __prombss dt_string_start, dt_string_end; -static unsigned long __initdata prom_initrd_start, prom_initrd_end; +static unsigned long __prombss prom_initrd_start, prom_initrd_end; #ifdef CONFIG_PPC64 -static int __initdata prom_iommu_force_on; -static int __initdata prom_iommu_off; -static unsigned long __initdata prom_tce_alloc_start; -static unsigned long __initdata prom_tce_alloc_end; +static int __prombss 
prom_iommu_force_on; +static int __prombss prom_iommu_off; +static unsigned long __prombss prom_tce_alloc_start; +static unsigned long __prombss prom_tce_alloc_end; #endif -static bool prom_radix_disable __initdata = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); +#ifdef CONFIG_PPC_PSERIES +static bool __prombss prom_radix_disable; +#endif struct platform_support { bool hash_mmu; @@ -188,26 +192,25 @@ struct platform_support { #define PLATFORM_LPAR 0x0001 #define PLATFORM_POWERMAC 0x0400 #define PLATFORM_GENERIC 0x0500 -#define PLATFORM_OPAL 0x0600 -static int __initdata of_platform; +static int __prombss of_platform; -static char __initdata prom_cmd_line[COMMAND_LINE_SIZE]; +static char __prombss prom_cmd_line[COMMAND_LINE_SIZE]; -static unsigned long __initdata prom_memory_limit; +static unsigned long __prombss prom_memory_limit; -static unsigned long __initdata alloc_top; -static unsigned long __initdata alloc_top_high; -static unsigned long __initdata alloc_bottom; -static unsigned long __initdata rmo_top; -static unsigned long __initdata ram_top; +static unsigned long __prombss alloc_top; +static unsigned long __prombss alloc_top_high; +static unsigned long __prombss alloc_bottom; +static unsigned long __prombss rmo_top; +static unsigned long __prombss ram_top; -static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE]; -static int __initdata mem_reserve_cnt; +static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE]; +static int __prombss mem_reserve_cnt; -static cell_t __initdata regbuf[1024]; +static cell_t __prombss regbuf[1024]; -static bool rtas_has_query_cpu_stopped; +static bool __prombss rtas_has_query_cpu_stopped; /* @@ -522,8 +525,8 @@ static void add_string(char **str, const char *q) static char *tohex(unsigned int x) { - static char digits[] = "0123456789abcdef"; - static char result[9]; + static const char digits[] __initconst = "0123456789abcdef"; + static char result[9] __prombss; int i; result[8] = 0; @@ -664,6 +667,8 @@ static void __init early_cmdline_parse(void) #endif } +#ifdef CONFIG_PPC_PSERIES + prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT); opt = strstr(prom_cmd_line, "disable_radix"); if (opt) { opt += 13; @@ -679,9 +684,10 @@ static void __init early_cmdline_parse(void) } if (prom_radix_disable) prom_debug("Radix disabled from cmdline\n"); +#endif /* CONFIG_PPC_PSERIES */ } -#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) +#ifdef CONFIG_PPC_PSERIES /* * The architecture vector has an array of PVR mask/value pairs, * followed by # option vectors - 1, followed by the option vectors. 
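As a rough, standalone illustration of the PVR mask/value matching that the architecture-vector comment above describes (an array of PVR mask/value pairs followed by the option vectors): an entry applies when (pvr & mask) == value. The struct and helper names below are invented for this sketch and are not the kernel's own ibm_arch_vec types.

#include <stdint.h>
#include <stddef.h>

/* One PVR entry from an architecture-vector style table. */
struct pvr_entry {
	uint32_t mask;
	uint32_t value;
};

/* Return the index of the first entry matching @pvr, or -1 if none does. */
static int match_pvr(const struct pvr_entry *tbl, size_t n, uint32_t pvr)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if ((pvr & tbl[i].mask) == tbl[i].value)
			return (int)i;
	}
	return -1;
}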
@@ -782,7 +788,7 @@ struct ibm_arch_vec { struct option_vector6 vec6; } __packed; -struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { +static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = { .pvrs = { { .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */ @@ -920,9 +926,11 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { }, }; +static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned; + /* Old method - ELF header with PT_NOTE sections only works on BE */ #ifdef __BIG_ENDIAN__ -static struct fake_elf { +static const struct fake_elf { Elf32_Ehdr elfhdr; Elf32_Phdr phdr[2]; struct chrpnote { @@ -955,7 +963,7 @@ static struct fake_elf { u32 ignore_me; } rpadesc; } rpanote; -} fake_elf = { +} fake_elf __initconst = { .elfhdr = { .e_ident = { 0x7f, 'E', 'L', 'F', ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, @@ -1129,14 +1137,21 @@ static void __init prom_check_platform_support(void) }; int prop_len = prom_getproplen(prom.chosen, "ibm,arch-vec-5-platform-support"); + + /* First copy the architecture vec template */ + ibm_architecture_vec = ibm_architecture_vec_template; + if (prop_len > 1) { int i; - u8 vec[prop_len]; + u8 vec[8]; prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", prop_len); + if (prop_len > sizeof(vec)) + prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n", + prop_len); prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec)); - for (i = 0; i < prop_len; i += 2) { + for (i = 0; i < sizeof(vec); i += 2) { prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 , vec[i] , vec[i + 1]); @@ -1225,7 +1240,7 @@ static void __init prom_send_capabilities(void) } #endif /* __BIG_ENDIAN__ */ } -#endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ +#endif /* CONFIG_PPC_PSERIES */ /* * Memory allocation strategy... 
our layout is normally: @@ -1562,88 +1577,6 @@ static void __init prom_close_stdin(void) } } -#ifdef CONFIG_PPC_POWERNV - -#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL -static u64 __initdata prom_opal_base; -static u64 __initdata prom_opal_entry; -#endif - -/* - * Allocate room for and instantiate OPAL - */ -static void __init prom_instantiate_opal(void) -{ - phandle opal_node; - ihandle opal_inst; - u64 base, entry; - u64 size = 0, align = 0x10000; - __be64 val64; - u32 rets[2]; - - prom_debug("prom_instantiate_opal: start...\n"); - - opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal")); - prom_debug("opal_node: %x\n", opal_node); - if (!PHANDLE_VALID(opal_node)) - return; - - val64 = 0; - prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64)); - size = be64_to_cpu(val64); - if (size == 0) - return; - val64 = 0; - prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64)); - align = be64_to_cpu(val64); - - base = alloc_down(size, align, 0); - if (base == 0) { - prom_printf("OPAL allocation failed !\n"); - return; - } - - opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal")); - if (!IHANDLE_VALID(opal_inst)) { - prom_printf("opening opal package failed (%x)\n", opal_inst); - return; - } - - prom_printf("instantiating opal at 0x%llx...", base); - - if (call_prom_ret("call-method", 4, 3, rets, - ADDR("load-opal-runtime"), - opal_inst, - base >> 32, base & 0xffffffff) != 0 - || (rets[0] == 0 && rets[1] == 0)) { - prom_printf(" failed\n"); - return; - } - entry = (((u64)rets[0]) << 32) | rets[1]; - - prom_printf(" done\n"); - - reserve_mem(base, size); - - prom_debug("opal base = 0x%llx\n", base); - prom_debug("opal align = 0x%llx\n", align); - prom_debug("opal entry = 0x%llx\n", entry); - prom_debug("opal size = 0x%llx\n", size); - - prom_setprop(opal_node, "/ibm,opal", "opal-base-address", - &base, sizeof(base)); - prom_setprop(opal_node, "/ibm,opal", "opal-entry-address", - &entry, sizeof(entry)); - -#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL - prom_opal_base = base; - prom_opal_entry = entry; -#endif - prom_debug("prom_instantiate_opal: end...\n"); -} - -#endif /* CONFIG_PPC_POWERNV */ - /* * Allocate room for and instantiate RTAS */ @@ -2150,10 +2083,6 @@ static int __init prom_find_machine_type(void) } } #ifdef CONFIG_PPC64 - /* Try to detect OPAL */ - if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal")))) - return PLATFORM_OPAL; - /* Try to figure out if it's an IBM pSeries or any other * PAPR compliant platform. 
We assume it is if : * - /device_type is "chrp" (please, do NOT use that for future @@ -2202,7 +2131,7 @@ static void __init prom_check_displays(void) ihandle ih; int i; - static unsigned char default_colors[] = { + static const unsigned char default_colors[] __initconst = { 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0x00, 0xaa, 0x00, @@ -2398,7 +2327,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, char *namep, *prev_name, *sstart, *p, *ep, *lp, *path; unsigned long soff; unsigned char *valp; - static char pname[MAX_PROPERTY_NAME]; + static char pname[MAX_PROPERTY_NAME] __prombss; int l, room, has_phandle = 0; dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); @@ -2481,14 +2410,11 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, has_phandle = 1; } - /* Add a "linux,phandle" property if no "phandle" property already - * existed (can happen with OPAL) - */ + /* Add a "phandle" property if none already exist */ if (!has_phandle) { - soff = dt_find_string("linux,phandle"); + soff = dt_find_string("phandle"); if (soff == 0) - prom_printf("WARNING: Can't find string index for" - " <linux-phandle> node %s\n", path); + prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path); else { dt_push_token(OF_DT_PROP, mem_start, mem_end); dt_push_token(4, mem_start, mem_end); @@ -2548,9 +2474,9 @@ static void __init flatten_device_tree(void) dt_string_start = mem_start; mem_start += 4; /* hole */ - /* Add "linux,phandle" in there, we'll need it */ + /* Add "phandle" in there, we'll need it */ namep = make_room(&mem_start, &mem_end, 16, 1); - strcpy(namep, "linux,phandle"); + strcpy(namep, "phandle"); mem_start = (unsigned long)namep + strlen(namep) + 1; /* Build string array */ @@ -3172,7 +3098,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, */ early_cmdline_parse(); -#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) +#ifdef CONFIG_PPC_PSERIES /* * On pSeries, inform the firmware about our capabilities */ @@ -3216,15 +3142,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * On non-powermacs, try to instantiate RTAS. PowerMacs don't * have a usable RTAS implementation. */ - if (of_platform != PLATFORM_POWERMAC && - of_platform != PLATFORM_OPAL) + if (of_platform != PLATFORM_POWERMAC) prom_instantiate_rtas(); -#ifdef CONFIG_PPC_POWERNV - if (of_platform == PLATFORM_OPAL) - prom_instantiate_opal(); -#endif /* CONFIG_PPC_POWERNV */ - #ifdef CONFIG_PPC64 /* instantiate sml */ prom_instantiate_sml(); @@ -3237,8 +3157,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, * * (This must be done after instanciating RTAS) */ - if (of_platform != PLATFORM_POWERMAC && - of_platform != PLATFORM_OPAL) + if (of_platform != PLATFORM_POWERMAC) prom_hold_cpus(); /* @@ -3282,11 +3201,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, /* * in case stdin is USB and still active on IBM machines... * Unfortunately quiesce crashes on some powermacs if we have - * closed stdin already (in particular the powerbook 101). It - * appears that the OPAL version of OFW doesn't like it either. + * closed stdin already (in particular the powerbook 101). 
*/ - if (of_platform != PLATFORM_POWERMAC && - of_platform != PLATFORM_OPAL) + if (of_platform != PLATFORM_POWERMAC) prom_close_stdin(); /* @@ -3304,10 +3221,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, hdr = dt_header_start; /* Don't print anything after quiesce under OPAL, it crashes OFW */ - if (of_platform != PLATFORM_OPAL) { - prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); - prom_debug("->dt_header_start=0x%lx\n", hdr); - } + prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); + prom_debug("->dt_header_start=0x%lx\n", hdr); #ifdef CONFIG_PPC32 reloc_got2(-offset); @@ -3315,13 +3230,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unreloc_toc(); #endif -#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL - /* OPAL early debug gets the OPAL base & entry in r8 and r9 */ - __start(hdr, kbase, 0, 0, 0, - prom_opal_base, prom_opal_entry); -#else __start(hdr, kbase, 0, 0, 0, 0, 0); -#endif return 0; } diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index acb6b9226352..667df97d2595 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -28,6 +28,18 @@ OBJ="$2" ERROR=0 +function check_section() +{ + file=$1 + section=$2 + size=$(objdump -h -j $section $file 2>/dev/null | awk "\$2 == \"$section\" {print \$3}") + size=${size:-0} + if [ $size -ne 0 ]; then + ERROR=1 + echo "Error: Section $section not empty in prom_init.c" >&2 + fi +} + for UNDEF in $($NM -u $OBJ | awk '{print $2}') do # On 64-bit nm gives us the function descriptors, which have @@ -66,4 +78,8 @@ do fi done +check_section $OBJ .data +check_section $OBJ .bss +check_section $OBJ .init.data + exit $ERROR diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 9667666eb18e..afb819f4ca68 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -297,7 +297,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data) } #endif - if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) { + if (regno < (sizeof(struct user_pt_regs) / sizeof(unsigned long))) { *data = ((unsigned long *)task->thread.regs)[regno]; return 0; } @@ -360,10 +360,10 @@ static int gpr_get(struct task_struct *target, const struct user_regset *regset, ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.regs->orig_gpr3, offsetof(struct pt_regs, orig_gpr3), - sizeof(struct pt_regs)); + sizeof(struct user_pt_regs)); if (!ret) ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - sizeof(struct pt_regs), -1); + sizeof(struct user_pt_regs), -1); return ret; } @@ -853,10 +853,10 @@ static int tm_cgpr_get(struct task_struct *target, ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &target->thread.ckpt_regs.orig_gpr3, offsetof(struct pt_regs, orig_gpr3), - sizeof(struct pt_regs)); + sizeof(struct user_pt_regs)); if (!ret) ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - sizeof(struct pt_regs), -1); + sizeof(struct user_pt_regs), -1); return ret; } @@ -1609,7 +1609,7 @@ static int ppr_get(struct task_struct *target, void *kbuf, void __user *ubuf) { return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.ppr, 0, sizeof(u64)); + &target->thread.regs->ppr, 0, sizeof(u64)); } static int ppr_set(struct task_struct *target, @@ -1618,7 +1618,7 @@ static int ppr_set(struct task_struct *target, const void *kbuf, const void __user *ubuf) { return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - 
&target->thread.ppr, 0, sizeof(u64)); + &target->thread.regs->ppr, 0, sizeof(u64)); } static int dscr_get(struct task_struct *target, @@ -2508,6 +2508,7 @@ void ptrace_disable(struct task_struct *child) { /* make sure the single step bit is not set. */ user_disable_single_step(child); + clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); } #ifdef CONFIG_PPC_ADV_DEBUG_REGS @@ -3130,7 +3131,7 @@ long arch_ptrace(struct task_struct *child, long request, case PTRACE_GETREGS: /* Get all pt_regs from the child. */ return copy_regset_to_user(child, &user_ppc_native_view, REGSET_GPR, - 0, sizeof(struct pt_regs), + 0, sizeof(struct user_pt_regs), datavp); #ifdef CONFIG_PPC64 @@ -3139,7 +3140,7 @@ long arch_ptrace(struct task_struct *child, long request, case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, &user_ppc_native_view, REGSET_GPR, - 0, sizeof(struct pt_regs), + 0, sizeof(struct user_pt_regs), datavp); case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */ @@ -3264,6 +3265,16 @@ long do_syscall_trace_enter(struct pt_regs *regs) { user_exit(); + if (test_thread_flag(TIF_SYSCALL_EMU)) { + ptrace_report_syscall(regs); + /* + * Returning -1 will skip the syscall execution. We want to + * avoid clobbering any register also, thus, not 'gotoing' + * skip label. + */ + return -1; + } + /* * The tracer may decide to abort the syscall, if so tracehook * will return !0. Note that the tracer may also just change @@ -3324,3 +3335,42 @@ void do_syscall_trace_leave(struct pt_regs *regs) user_enter(); } + +void __init pt_regs_check(void) +{ + BUILD_BUG_ON(offsetof(struct pt_regs, gpr) != + offsetof(struct user_pt_regs, gpr)); + BUILD_BUG_ON(offsetof(struct pt_regs, nip) != + offsetof(struct user_pt_regs, nip)); + BUILD_BUG_ON(offsetof(struct pt_regs, msr) != + offsetof(struct user_pt_regs, msr)); + BUILD_BUG_ON(offsetof(struct pt_regs, msr) != + offsetof(struct user_pt_regs, msr)); + BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) != + offsetof(struct user_pt_regs, orig_gpr3)); + BUILD_BUG_ON(offsetof(struct pt_regs, ctr) != + offsetof(struct user_pt_regs, ctr)); + BUILD_BUG_ON(offsetof(struct pt_regs, link) != + offsetof(struct user_pt_regs, link)); + BUILD_BUG_ON(offsetof(struct pt_regs, xer) != + offsetof(struct user_pt_regs, xer)); + BUILD_BUG_ON(offsetof(struct pt_regs, ccr) != + offsetof(struct user_pt_regs, ccr)); +#ifdef __powerpc64__ + BUILD_BUG_ON(offsetof(struct pt_regs, softe) != + offsetof(struct user_pt_regs, softe)); +#else + BUILD_BUG_ON(offsetof(struct pt_regs, mq) != + offsetof(struct user_pt_regs, mq)); +#endif + BUILD_BUG_ON(offsetof(struct pt_regs, trap) != + offsetof(struct user_pt_regs, trap)); + BUILD_BUG_ON(offsetof(struct pt_regs, dar) != + offsetof(struct user_pt_regs, dar)); + BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) != + offsetof(struct user_pt_regs, dsisr)); + BUILD_BUG_ON(offsetof(struct pt_regs, result) != + offsetof(struct user_pt_regs, result)); + + BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs)); +} diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 8afd146bc9c7..de35bd8f047f 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -981,7 +981,15 @@ int rtas_ibm_suspend_me(u64 handle) goto out; } - stop_topology_update(); + cpu_hotplug_disable(); + + /* Check if we raced with a CPU-Offline Operation */ + if (unlikely(!cpumask_equal(cpu_present_mask, cpu_online_mask))) { + pr_err("%s: Raced against a concurrent CPU-Offline\n", + __func__); + 
atomic_set(&data.error, -EBUSY); + goto out_hotplug_enable; + } /* Call function on all CPUs. One of us will make the * rtas call @@ -994,7 +1002,8 @@ int rtas_ibm_suspend_me(u64 handle) if (atomic_read(&data.error) != 0) printk(KERN_ERR "Error doing global join\n"); - start_topology_update(); +out_hotplug_enable: + cpu_hotplug_enable(); /* Take down CPUs not online prior to suspend */ cpuret = rtas_offline_cpus_mask(offline_mask); diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 44d66c33d59d..38cadae4ca4f 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -91,6 +91,8 @@ static char *rtas_event_type(int type) return "Dump Notification Event"; case RTAS_TYPE_PRRN: return "Platform Resource Reassignment Event"; + case RTAS_TYPE_HOTPLUG: + return "Hotplug Event"; } return rtas_type[0]; @@ -150,8 +152,10 @@ static void printk_log_rtas(char *buf, int len) } else { struct rtas_error_log *errlog = (struct rtas_error_log *)buf; - printk(RTAS_DEBUG "event: %d, Type: %s, Severity: %d\n", - error_log_cnt, rtas_event_type(rtas_error_type(errlog)), + printk(RTAS_DEBUG "event: %d, Type: %s (%d), Severity: %d\n", + error_log_cnt, + rtas_event_type(rtas_error_type(errlog)), + rtas_error_type(errlog), rtas_error_severity(errlog)); } } @@ -274,27 +278,16 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) } #ifdef CONFIG_PPC_PSERIES -static s32 prrn_update_scope; - -static void prrn_work_fn(struct work_struct *work) +static void handle_prrn_event(s32 scope) { /* * For PRRN, we must pass the negative of the scope value in * the RTAS event. */ - pseries_devicetree_update(-prrn_update_scope); + pseries_devicetree_update(-scope); numa_update_cpu_topology(false); } -static DECLARE_WORK(prrn_work, prrn_work_fn); - -static void prrn_schedule_update(u32 scope) -{ - flush_work(&prrn_work); - prrn_update_scope = scope; - schedule_work(&prrn_work); -} - static void handle_rtas_event(const struct rtas_error_log *log) { if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled()) @@ -303,7 +296,7 @@ static void handle_rtas_event(const struct rtas_error_log *log) /* For PRRN Events the extended log length is used to denote * the scope for calling rtas update-nodes. */ - prrn_schedule_update(rtas_error_extended_log_length(log)); + handle_prrn_event(rtas_error_extended_log_length(log)); } #else diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 93fa0c99681e..9ca9db707bcb 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -33,6 +33,7 @@ #include <linux/serial_8250.h> #include <linux/percpu.h> #include <linux/memblock.h> +#include <linux/bootmem.h> #include <linux/of_platform.h> #include <linux/hugetlb.h> #include <asm/debugfs.h> @@ -966,6 +967,8 @@ void __init setup_arch(char **cmdline_p) initmem_init(); + early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); + #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6a501b25dd85..faf00222b324 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -243,13 +243,19 @@ static void cpu_ready_for_interrupts(void) } /* - * Fixup HFSCR:TM based on CPU features. The bit is set by our - * early asm init because at that point we haven't updated our - * CPU features from firmware and device-tree. Here we have, - * so let's do it. 
+ * Set HFSCR:TM based on CPU features: + * In the special case of TM no suspend (P9N DD2.1), Linux is + * told TM is off via the dt-ftrs but told to (partially) use + * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM] + * will be off from dt-ftrs but we need to turn it on for the + * no suspend case. */ - if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP)) - mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + if (cpu_has_feature(CPU_FTR_HVMODE)) { + if (cpu_has_feature(CPU_FTR_TM_COMP)) + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM); + else + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + } /* Set IR and DR in PACA MSR */ get_paca()->kernel_msr = MSR_KERNEL; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 61c1fadbc644..3f15edf25a0d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -34,6 +34,8 @@ #include <linux/topology.h> #include <linux/profile.h> #include <linux/processor.h> +#include <linux/random.h> +#include <linux/stackprotector.h> #include <asm/ptrace.h> #include <linux/atomic.h> @@ -74,14 +76,32 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 }; #endif struct thread_info *secondary_ti; +bool has_big_cores; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); +DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map); DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map); DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map); +EXPORT_SYMBOL_GPL(has_big_cores); + +#define MAX_THREAD_LIST_SIZE 8 +#define THREAD_GROUP_SHARE_L1 1 +struct thread_groups { + unsigned int property; + unsigned int nr_groups; + unsigned int threads_per_group; + unsigned int thread_list[MAX_THREAD_LIST_SIZE]; +}; + +/* + * On big-cores system, cpu_l1_cache_map for each CPU corresponds to + * the set its siblings that share the L1-cache. + */ +DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map); /* SMP operations for this machine */ struct smp_ops_t *smp_ops; @@ -674,6 +694,185 @@ static void set_cpus_unrelated(int i, int j, } #endif +/* + * parse_thread_groups: Parses the "ibm,thread-groups" device tree + * property for the CPU device node @dn and stores + * the parsed output in the thread_groups + * structure @tg if the ibm,thread-groups[0] + * matches @property. + * + * @dn: The device node of the CPU device. + * @tg: Pointer to a thread group structure into which the parsed + * output of "ibm,thread-groups" is stored. + * @property: The property of the thread-group that the caller is + * interested in. + * + * ibm,thread-groups[0..N-1] array defines which group of threads in + * the CPU-device node can be grouped together based on the property. + * + * ibm,thread-groups[0] tells us the property based on which the + * threads are being grouped together. If this value is 1, it implies + * that the threads in the same group share L1, translation cache. + * + * ibm,thread-groups[1] tells us how many such thread groups exist. + * + * ibm,thread-groups[2] tells us the number of threads in each such + * group. + * + * ibm,thread-groups[3..N-1] is the list of threads identified by + * "ibm,ppc-interrupt-server#s" arranged as per their membership in + * the grouping. + * + * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it + * implies that there are 2 groups of 4 threads each, where each group + * of threads share L1, translation cache. 
+ * + * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8} + * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10, + * 11, 12} structure + * + * Returns 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + */ +static int parse_thread_groups(struct device_node *dn, + struct thread_groups *tg, + unsigned int property) +{ + int i; + u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE]; + u32 *thread_list; + size_t total_threads; + int ret; + + ret = of_property_read_u32_array(dn, "ibm,thread-groups", + thread_group_array, 3); + if (ret) + return ret; + + tg->property = thread_group_array[0]; + tg->nr_groups = thread_group_array[1]; + tg->threads_per_group = thread_group_array[2]; + if (tg->property != property || + tg->nr_groups < 1 || + tg->threads_per_group < 1) + return -ENODATA; + + total_threads = tg->nr_groups * tg->threads_per_group; + + ret = of_property_read_u32_array(dn, "ibm,thread-groups", + thread_group_array, + 3 + total_threads); + if (ret) + return ret; + + thread_list = &thread_group_array[3]; + + for (i = 0 ; i < total_threads; i++) + tg->thread_list[i] = thread_list[i]; + + return 0; +} + +/* + * get_cpu_thread_group_start : Searches the thread group in tg->thread_list + * that @cpu belongs to. + * + * @cpu : The logical CPU whose thread group is being searched. + * @tg : The thread-group structure of the CPU node which @cpu belongs + * to. + * + * Returns the index to tg->thread_list that points to the the start + * of the thread_group that @cpu belongs to. + * + * Returns -1 if cpu doesn't belong to any of the groups pointed to by + * tg->thread_list. + */ +static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg) +{ + int hw_cpu_id = get_hard_smp_processor_id(cpu); + int i, j; + + for (i = 0; i < tg->nr_groups; i++) { + int group_start = i * tg->threads_per_group; + + for (j = 0; j < tg->threads_per_group; j++) { + int idx = group_start + j; + + if (tg->thread_list[idx] == hw_cpu_id) + return group_start; + } + } + + return -1; +} + +static int init_cpu_l1_cache_map(int cpu) + +{ + struct device_node *dn = of_get_cpu_node(cpu, NULL); + struct thread_groups tg = {.property = 0, + .nr_groups = 0, + .threads_per_group = 0}; + int first_thread = cpu_first_thread_sibling(cpu); + int i, cpu_group_start = -1, err = 0; + + if (!dn) + return -ENODATA; + + err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1); + if (err) + goto out; + + zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu), + GFP_KERNEL, + cpu_to_node(cpu)); + + cpu_group_start = get_cpu_thread_group_start(cpu, &tg); + + if (unlikely(cpu_group_start == -1)) { + WARN_ON_ONCE(1); + err = -ENODATA; + goto out; + } + + for (i = first_thread; i < first_thread + threads_per_core; i++) { + int i_group_start = get_cpu_thread_group_start(i, &tg); + + if (unlikely(i_group_start == -1)) { + WARN_ON_ONCE(1); + err = -ENODATA; + goto out; + } + + if (i_group_start == cpu_group_start) + cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu)); + } + +out: + of_node_put(dn); + return err; +} + +static int init_big_cores(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + int err = init_cpu_l1_cache_map(cpu); + + if (err) + return err; + + zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu), + GFP_KERNEL, + cpu_to_node(cpu)); + } + + has_big_cores = true; + return 0; +} + void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int cpu; @@ -712,6 +911,12 @@ void 
__init smp_prepare_cpus(unsigned int max_cpus) cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid)); cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); + init_big_cores(); + if (has_big_cores) { + cpumask_set_cpu(boot_cpuid, + cpu_smallcore_mask(boot_cpuid)); + } + if (smp_ops && smp_ops->probe) smp_ops->probe(); } @@ -995,10 +1200,28 @@ static void remove_cpu_from_masks(int cpu) set_cpus_unrelated(cpu, i, cpu_core_mask); set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); set_cpus_unrelated(cpu, i, cpu_sibling_mask); + if (has_big_cores) + set_cpus_unrelated(cpu, i, cpu_smallcore_mask); } } #endif +static inline void add_cpu_to_smallcore_masks(int cpu) +{ + struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu); + int i, first_thread = cpu_first_thread_sibling(cpu); + + if (!has_big_cores) + return; + + cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu)); + + for (i = first_thread; i < first_thread + threads_per_core; i++) { + if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map)) + set_cpus_related(i, cpu, cpu_smallcore_mask); + } +} + static void add_cpu_to_masks(int cpu) { int first_thread = cpu_first_thread_sibling(cpu); @@ -1015,6 +1238,7 @@ static void add_cpu_to_masks(int cpu) if (cpu_online(i)) set_cpus_related(i, cpu, cpu_sibling_mask); + add_cpu_to_smallcore_masks(cpu); /* * Copy the thread sibling mask into the cache sibling mask * and mark any CPUs that share an L2 with this CPU. @@ -1044,6 +1268,7 @@ static bool shared_caches; void start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); + struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask; mmgrab(&init_mm); current->active_mm = &init_mm; @@ -1069,11 +1294,13 @@ void start_secondary(void *unused) /* Update topology CPU masks */ add_cpu_to_masks(cpu); + if (has_big_cores) + sibling_mask = cpu_smallcore_mask; /* * Check for any shared caches. Note that this must be done on a * per-core basis because one core in the pair might be disabled. */ - if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu))) + if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu))) shared_caches = true; set_numa_node(numa_cpu_lookup_table[cpu]); @@ -1083,6 +1310,8 @@ void start_secondary(void *unused) notify_cpu_starting(cpu); set_cpu_online(cpu, true); + boot_init_stack_canary(); + local_irq_enable(); /* We can enable ftrace for secondary cpus now */ @@ -1140,6 +1369,13 @@ static const struct cpumask *shared_cache_mask(int cpu) return cpu_l2_cache_mask(cpu); } +#ifdef CONFIG_SCHED_SMT +static const struct cpumask *smallcore_smt_mask(int cpu) +{ + return cpu_smallcore_mask(cpu); +} +#endif + static struct sched_domain_topology_level power9_topology[] = { #ifdef CONFIG_SCHED_SMT { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, @@ -1167,6 +1403,13 @@ void __init smp_cpus_done(unsigned int max_cpus) shared_proc_topology_init(); dump_numa_cpu_topology(); +#ifdef CONFIG_SCHED_SMT + if (has_big_cores) { + pr_info("Using small cores at SMT level\n"); + power9_topology[0].mask = smallcore_smt_mask; + powerpc_topology[0].mask = smallcore_smt_mask; + } +#endif /* * If any CPU detects that it's sharing a cache with another CPU then * use the deeper topology that is aware of this sharing. 
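For readers unfamiliar with the device-tree property parsed above, here is a minimal standalone sketch (plain userspace C, not kernel code) showing how the example array from the comment, ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12], decodes into two small-core groups, and how the lookup mirrors get_cpu_thread_group_start(); the hw_cpu_id value stands in for a hypothetical "ibm,ppc-interrupt-server#s" entry.

/*
 * Standalone illustration (not kernel code) of the example
 * "ibm,thread-groups" array from the comment above:
 *   [1, 2, 4, 5,6,7,8, 9,10,11,12]
 *   property=1 (shared L1), 2 groups, 4 threads per group,
 *   group 0 = {5,6,7,8}, group 1 = {9,10,11,12}.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tg[] = { 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
	unsigned int property = tg[0];
	unsigned int nr_groups = tg[1];
	unsigned int threads_per_group = tg[2];
	unsigned int *thread_list = &tg[3];
	unsigned int hw_cpu_id = 10;	/* hypothetical interrupt server# */

	printf("property=%u groups=%u threads/group=%u\n",
	       property, nr_groups, threads_per_group);

	/* Same search that get_cpu_thread_group_start() performs */
	for (unsigned int i = 0; i < nr_groups; i++) {
		unsigned int start = i * threads_per_group;

		for (unsigned int j = 0; j < threads_per_group; j++) {
			if (thread_list[start + j] == hw_cpu_id) {
				printf("hw cpu %u is in group %u (start index %u)\n",
				       hw_cpu_id, i, start);
				return 0;
			}
		}
	}
	printf("hw cpu %u not found\n", hw_cpu_id);
	return 0;
}

Running this prints "hw cpu 10 is in group 1 (start index 4)", i.e. in the example thread 10 shares an L1 with threads 9, 11 and 12, which is exactly the relationship init_cpu_l1_cache_map() records in cpu_l1_cache_map.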
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S index f83bf6f72cb0..185216becb8b 100644 --- a/arch/powerpc/kernel/swsusp_asm64.S +++ b/arch/powerpc/kernel/swsusp_asm64.S @@ -262,7 +262,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR) addi r1,r1,-128 #ifdef CONFIG_PPC_BOOK3S_64 - bl slb_flush_and_rebolt + bl slb_flush_and_restore_bolted #endif bl do_after_copyback addi r1,r1,128 diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 70f145e02487..3646affae963 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -111,6 +111,7 @@ struct clock_event_device decrementer_clockevent = { .rating = 200, .irq = 0, .set_next_event = decrementer_set_next_event, + .set_state_oneshot_stopped = decrementer_shutdown, .set_state_shutdown = decrementer_shutdown, .tick_resume = decrementer_shutdown, .features = CLOCK_EVT_FEAT_ONESHOT | @@ -175,7 +176,7 @@ static void calc_cputime_factors(void) * Read the SPURR on systems that have it, otherwise the PURR, * or if that doesn't exist return the timebase value passed in. */ -static unsigned long read_spurr(unsigned long tb) +static inline unsigned long read_spurr(unsigned long tb) { if (cpu_has_feature(CPU_FTR_SPURR)) return mfspr(SPRN_SPURR); @@ -281,26 +282,17 @@ static inline u64 calculate_stolen_time(u64 stop_tb) * Account time for a transition between system, hard irq * or soft irq state. */ -static unsigned long vtime_delta(struct task_struct *tsk, - unsigned long *stime_scaled, - unsigned long *steal_time) +static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct, + unsigned long now, unsigned long stime) { - unsigned long now, nowscaled, deltascaled; - unsigned long stime; + unsigned long stime_scaled = 0; +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME + unsigned long nowscaled, deltascaled; unsigned long utime, utime_scaled; - struct cpu_accounting_data *acct = get_accounting(tsk); - WARN_ON_ONCE(!irqs_disabled()); - - now = mftb(); nowscaled = read_spurr(now); - stime = now - acct->starttime; - acct->starttime = now; deltascaled = nowscaled - acct->startspurr; acct->startspurr = nowscaled; - - *steal_time = calculate_stolen_time(now); - utime = acct->utime - acct->utime_sspurr; acct->utime_sspurr = acct->utime; @@ -314,17 +306,38 @@ static unsigned long vtime_delta(struct task_struct *tsk, * the user ticks get saved up in paca->user_time_scaled to be * used by account_process_tick. 
*/ - *stime_scaled = stime; + stime_scaled = stime; utime_scaled = utime; if (deltascaled != stime + utime) { if (utime) { - *stime_scaled = deltascaled * stime / (stime + utime); - utime_scaled = deltascaled - *stime_scaled; + stime_scaled = deltascaled * stime / (stime + utime); + utime_scaled = deltascaled - stime_scaled; } else { - *stime_scaled = deltascaled; + stime_scaled = deltascaled; } } acct->utime_scaled += utime_scaled; +#endif + + return stime_scaled; +} + +static unsigned long vtime_delta(struct task_struct *tsk, + unsigned long *stime_scaled, + unsigned long *steal_time) +{ + unsigned long now, stime; + struct cpu_accounting_data *acct = get_accounting(tsk); + + WARN_ON_ONCE(!irqs_disabled()); + + now = mftb(); + stime = now - acct->starttime; + acct->starttime = now; + + *stime_scaled = vtime_delta_scaled(acct, now, stime); + + *steal_time = calculate_stolen_time(now); return stime; } @@ -341,7 +354,9 @@ void vtime_account_system(struct task_struct *tsk) if ((tsk->flags & PF_VCPU) && !irq_count()) { acct->gtime += stime; +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME acct->utime_scaled += stime_scaled; +#endif } else { if (hardirq_count()) acct->hardirq_time += stime; @@ -350,7 +365,9 @@ void vtime_account_system(struct task_struct *tsk) else acct->stime += stime; +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME acct->stime_scaled += stime_scaled; +#endif } } EXPORT_SYMBOL_GPL(vtime_account_system); @@ -364,6 +381,21 @@ void vtime_account_idle(struct task_struct *tsk) acct->idle_time += stime + steal_time; } +static void vtime_flush_scaled(struct task_struct *tsk, + struct cpu_accounting_data *acct) +{ +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME + if (acct->utime_scaled) + tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled); + if (acct->stime_scaled) + tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled); + + acct->utime_scaled = 0; + acct->utime_sspurr = 0; + acct->stime_scaled = 0; +#endif +} + /* * Account the whole cputime accumulated in the paca * Must be called with interrupts disabled. 
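As a side note on the accounting refactor above, this minimal sketch (standalone C with made-up tick counts, not kernel code) shows the proration that vtime_delta_scaled() performs when CONFIG_ARCH_HAS_SCALED_CPUTIME is enabled: the SPURR delta for the window is split between system and user time in the same ratio as the raw timebase ticks.

/*
 * Standalone illustration of the SPURR-based proration in
 * vtime_delta_scaled().  All numbers are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned long stime = 300;	/* raw timebase ticks spent in system */
	unsigned long utime = 100;	/* raw timebase ticks spent in user */
	unsigned long deltascaled = 200;	/* SPURR ticks over the same window */
	unsigned long stime_scaled, utime_scaled;

	if (utime) {
		stime_scaled = deltascaled * stime / (stime + utime);
		utime_scaled = deltascaled - stime_scaled;
	} else {
		stime_scaled = deltascaled;
		utime_scaled = 0;
	}

	/* 300:100 ratio applied to 200 scaled ticks -> 150 system, 50 user */
	printf("stime_scaled=%lu utime_scaled=%lu\n", stime_scaled, utime_scaled);
	return 0;
}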
@@ -378,14 +410,13 @@ void vtime_flush(struct task_struct *tsk) if (acct->utime) account_user_time(tsk, cputime_to_nsecs(acct->utime)); - if (acct->utime_scaled) - tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled); - if (acct->gtime) account_guest_time(tsk, cputime_to_nsecs(acct->gtime)); - if (acct->steal_time) + if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) { account_steal_time(cputime_to_nsecs(acct->steal_time)); + acct->steal_time = 0; + } if (acct->idle_time) account_idle_time(cputime_to_nsecs(acct->idle_time)); @@ -393,8 +424,6 @@ void vtime_flush(struct task_struct *tsk) if (acct->stime) account_system_index_time(tsk, cputime_to_nsecs(acct->stime), CPUTIME_SYSTEM); - if (acct->stime_scaled) - tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled); if (acct->hardirq_time) account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time), @@ -403,14 +432,12 @@ void vtime_flush(struct task_struct *tsk) account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time), CPUTIME_SOFTIRQ); + vtime_flush_scaled(tsk, acct); + acct->utime = 0; - acct->utime_scaled = 0; - acct->utime_sspurr = 0; acct->gtime = 0; - acct->steal_time = 0; acct->idle_time = 0; acct->stime = 0; - acct->stime_scaled = 0; acct->hardirq_time = 0; acct->softirq_time = 0; } @@ -984,10 +1011,14 @@ static void register_decrementer_clockevent(int cpu) *dec = decrementer_clockevent; dec->cpumask = cpumask_of(cpu); + clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max); + printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n", dec->name, dec->mult, dec->shift, cpu); - clockevents_register_device(dec); + /* Set values for KVM, see kvm_emulate_dec() */ + decrementer_clockevent.mult = dec->mult; + decrementer_clockevent.shift = dec->shift; } static void enable_large_decrementer(void) @@ -1035,18 +1066,7 @@ static void __init set_decrementer_max(void) static void __init init_decrementer_clockevent(void) { - int cpu = smp_processor_id(); - - clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4); - - decrementer_clockevent.max_delta_ns = - clockevent_delta2ns(decrementer_max, &decrementer_clockevent); - decrementer_clockevent.max_delta_ticks = decrementer_max; - decrementer_clockevent.min_delta_ns = - clockevent_delta2ns(2, &decrementer_clockevent); - decrementer_clockevent.min_delta_ticks = 2; - - register_decrementer_clockevent(cpu); + register_decrementer_clockevent(smp_processor_id()); } void secondary_cpu_time_init(void) diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 7716374786bd..9fabdce255cd 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -92,13 +92,14 @@ _GLOBAL(tm_abort) blr EXPORT_SYMBOL_GPL(tm_abort); -/* void tm_reclaim(struct thread_struct *thread, +/* + * void tm_reclaim(struct thread_struct *thread, * uint8_t cause) * * - Performs a full reclaim. This destroys outstanding - * transactions and updates thread->regs.tm_ckpt_* with the - * original checkpointed state. Note that thread->regs is - * unchanged. + * transactions and updates thread.ckpt_regs, thread.ckfp_state and + * thread.ckvr_state with the original checkpointed state. Note that + * thread->regs is unchanged. * * Purpose is to both abort transactions of, and preserve the state of, * a transactions at a context switch. 
We preserve/restore both sets of process @@ -163,15 +164,16 @@ _GLOBAL(tm_reclaim) */ TRECLAIM(R4) /* Cause in r4 */ - /* ******************** GPRs ******************** */ - /* Stash the checkpointed r13 away in the scratch SPR and get the real - * paca + /* + * ******************** GPRs ******************** + * Stash the checkpointed r13 in the scratch SPR and get the real paca. */ SET_SCRATCH0(r13) GET_PACA(r13) - /* Stash the checkpointed r1 away in paca tm_scratch and get the real - * stack pointer back + /* + * Stash the checkpointed r1 away in paca->tm_scratch and get the real + * stack pointer back into r1. */ std r1, PACATMSCRATCH(r13) ld r1, PACAR1(r13) @@ -209,14 +211,15 @@ _GLOBAL(tm_reclaim) addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */ - /* Make r7 look like an exception frame so that we - * can use the neat GPRx(n) macros. r7 is NOT a pt_regs ptr! + /* + * Make r7 look like an exception frame so that we can use the neat + * GPRx(n) macros. r7 is NOT a pt_regs ptr! */ subi r7, r7, STACK_FRAME_OVERHEAD /* Sync the userland GPRs 2-12, 14-31 to thread->regs: */ SAVE_GPR(0, r7) /* user r0 */ - SAVE_GPR(2, r7) /* user r2 */ + SAVE_GPR(2, r7) /* user r2 */ SAVE_4GPRS(3, r7) /* user r3-r6 */ SAVE_GPR(8, r7) /* user r8 */ SAVE_GPR(9, r7) /* user r9 */ @@ -237,7 +240,8 @@ _GLOBAL(tm_reclaim) /* ******************** NIP ******************** */ mfspr r3, SPRN_TFHAR std r3, _NIP(r7) /* Returns to failhandler */ - /* The checkpointed NIP is ignored when rescheduling/rechkpting, + /* + * The checkpointed NIP is ignored when rescheduling/rechkpting, * but is used in signal return to 'wind back' to the abort handler. */ @@ -260,12 +264,13 @@ _GLOBAL(tm_reclaim) std r3, THREAD_TM_TAR(r12) std r4, THREAD_TM_DSCR(r12) - /* MSR and flags: We don't change CRs, and we don't need to alter - * MSR. + /* + * MSR and flags: We don't change CRs, and we don't need to alter MSR. */ - /* ******************** FPR/VR/VSRs ************ + /* + * ******************** FPR/VR/VSRs ************ * After reclaiming, capture the checkpointed FPRs/VRs. * * We enabled VEC/FP/VSX in the msr above, so we can execute these @@ -275,7 +280,7 @@ _GLOBAL(tm_reclaim) /* Altivec (VEC/VMX/VR)*/ addi r7, r3, THREAD_CKVRSTATE - SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */ + SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 ckvr_state */ mfvscr v0 li r6, VRSTATE_VSCR stvx v0, r7, r6 @@ -286,12 +291,13 @@ _GLOBAL(tm_reclaim) /* Floating Point (FP) */ addi r7, r3, THREAD_CKFPSTATE - SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 transact fp state */ + SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 ckfp_state */ mffs fr0 stfd fr0,FPSTATE_FPSCR(r7) - /* TM regs, incl TEXASR -- these live in thread_struct. Note they've + /* + * TM regs, incl TEXASR -- these live in thread_struct. Note they've * been updated by the treclaim, to explain to userland the failure * cause (aborted). */ @@ -327,7 +333,7 @@ _GLOBAL(tm_reclaim) blr - /* + /* * void __tm_recheckpoint(struct thread_struct *thread) * - Restore the checkpointed register state saved by tm_reclaim * when we switch_to a process. @@ -343,7 +349,8 @@ _GLOBAL(__tm_recheckpoint) std r2, STK_GOT(r1) stdu r1, -TM_FRAME_SIZE(r1) - /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. + /* + * We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. 
* This is used for backing up the NVGPRs: */ SAVE_NVGPRS(r1) @@ -352,8 +359,9 @@ _GLOBAL(__tm_recheckpoint) addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */ - /* Make r7 look like an exception frame so that we - * can use the neat GPRx(n) macros. r7 is now NOT a pt_regs ptr! + /* + * Make r7 look like an exception frame so that we can use the neat + * GPRx(n) macros. r7 is now NOT a pt_regs ptr! */ subi r7, r7, STACK_FRAME_OVERHEAD @@ -421,14 +429,15 @@ restore_gprs: REST_NVGPRS(r7) /* GPR14-31 */ - /* Load up PPR and DSCR here so we don't run with user values for long - */ + /* Load up PPR and DSCR here so we don't run with user values for long */ mtspr SPRN_DSCR, r5 mtspr SPRN_PPR, r6 - /* Do final sanity check on TEXASR to make sure FS is set. Do this + /* + * Do final sanity check on TEXASR to make sure FS is set. Do this * here before we load up the userspace r1 so any bugs we hit will get - * a call chain */ + * a call chain. + */ mfspr r5, SPRN_TEXASR srdi r5, r5, 16 li r6, (TEXASR_FS)@h @@ -436,8 +445,9 @@ restore_gprs: 1: tdeqi r6, 0 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0 - /* Do final sanity check on MSR to make sure we are not transactional - * or suspended + /* + * Do final sanity check on MSR to make sure we are not transactional + * or suspended. */ mfmsr r6 li r5, (MSR_TS_MASK)@higher @@ -453,8 +463,8 @@ restore_gprs: REST_GPR(6, r7) /* - * Store r1 and r5 on the stack so that we can access them - * after we clear MSR RI. + * Store r1 and r5 on the stack so that we can access them after we + * clear MSR RI. */ REST_GPR(5, r7) @@ -484,7 +494,8 @@ restore_gprs: HMT_MEDIUM - /* Our transactional state has now changed. + /* + * Our transactional state has now changed. * * Now just get out of here. Transactional (current) state will be * updated once restore is called on the return path in the _switch-ed diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile index d22d8bafb643..b1725ad3e13d 100644 --- a/arch/powerpc/kernel/trace/Makefile +++ b/arch/powerpc/kernel/trace/Makefile @@ -3,11 +3,9 @@ # Makefile for the powerpc trace subsystem # -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror - ifdef CONFIG_FUNCTION_TRACER # do not trace tracer code -CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) endif obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_32.o diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 4bfbb54dee51..4bf051d3e21e 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -30,6 +30,16 @@ #ifdef CONFIG_DYNAMIC_FTRACE + +/* + * We generally only have a single long_branch tramp and at most 2 or 3 plt + * tramps generated. But, we don't use the plt tramps currently. We also allot + * 2 tramps after .text and .init.text. So, we only end up with around 3 usable + * tramps in total. Set aside 8 just to be sure. 
+ */ +#define NUM_FTRACE_TRAMPS 8 +static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS]; + static unsigned int ftrace_call_replace(unsigned long ip, unsigned long addr, int link) { @@ -85,13 +95,16 @@ static int test_24bit_addr(unsigned long ip, unsigned long addr) return create_branch((unsigned int *)ip, addr, 0); } -#ifdef CONFIG_MODULES - static int is_bl_op(unsigned int op) { return (op & 0xfc000003) == 0x48000001; } +static int is_b_op(unsigned int op) +{ + return (op & 0xfc000003) == 0x48000000; +} + static unsigned long find_bl_target(unsigned long ip, unsigned int op) { static int offset; @@ -104,6 +117,7 @@ static unsigned long find_bl_target(unsigned long ip, unsigned int op) return ip + (long)offset; } +#ifdef CONFIG_MODULES #ifdef CONFIG_PPC64 static int __ftrace_make_nop(struct module *mod, @@ -270,6 +284,146 @@ __ftrace_make_nop(struct module *mod, #endif /* PPC64 */ #endif /* CONFIG_MODULES */ +static unsigned long find_ftrace_tramp(unsigned long ip) +{ + int i; + + /* + * We have the compiler generated long_branch tramps at the end + * and we prefer those + */ + for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--) + if (!ftrace_tramps[i]) + continue; + else if (create_branch((void *)ip, ftrace_tramps[i], 0)) + return ftrace_tramps[i]; + + return 0; +} + +static int add_ftrace_tramp(unsigned long tramp) +{ + int i; + + for (i = 0; i < NUM_FTRACE_TRAMPS; i++) + if (!ftrace_tramps[i]) { + ftrace_tramps[i] = tramp; + return 0; + } + + return -1; +} + +/* + * If this is a compiler generated long_branch trampoline (essentially, a + * trampoline that has a branch to _mcount()), we re-write the branch to + * instead go to ftrace_[regs_]caller() and note down the location of this + * trampoline. + */ +static int setup_mcount_compiler_tramp(unsigned long tramp) +{ + int i, op; + unsigned long ptr; + static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS]; + + /* Is this a known long jump tramp? */ + for (i = 0; i < NUM_FTRACE_TRAMPS; i++) + if (!ftrace_tramps[i]) + break; + else if (ftrace_tramps[i] == tramp) + return 0; + + /* Is this a known plt tramp? */ + for (i = 0; i < NUM_FTRACE_TRAMPS; i++) + if (!ftrace_plt_tramps[i]) + break; + else if (ftrace_plt_tramps[i] == tramp) + return -1; + + /* New trampoline -- read where this goes */ + if (probe_kernel_read(&op, (void *)tramp, sizeof(int))) { + pr_debug("Fetching opcode failed.\n"); + return -1; + } + + /* Is this a 24 bit branch? 
*/ + if (!is_b_op(op)) { + pr_debug("Trampoline is not a long branch tramp.\n"); + return -1; + } + + /* lets find where the pointer goes */ + ptr = find_bl_target(tramp, op); + + if (ptr != ppc_global_function_entry((void *)_mcount)) { + pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr); + return -1; + } + + /* Let's re-write the tramp to go to ftrace_[regs_]caller */ +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + ptr = ppc_global_function_entry((void *)ftrace_regs_caller); +#else + ptr = ppc_global_function_entry((void *)ftrace_caller); +#endif + if (!create_branch((void *)tramp, ptr, 0)) { + pr_debug("%ps is not reachable from existing mcount tramp\n", + (void *)ptr); + return -1; + } + + if (patch_branch((unsigned int *)tramp, ptr, 0)) { + pr_debug("REL24 out of range!\n"); + return -1; + } + + if (add_ftrace_tramp(tramp)) { + pr_debug("No tramp locations left\n"); + return -1; + } + + return 0; +} + +static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long tramp, ip = rec->ip; + unsigned int op; + + /* Read where this goes */ + if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { + pr_err("Fetching opcode failed.\n"); + return -EFAULT; + } + + /* Make sure that that this is still a 24bit jump */ + if (!is_bl_op(op)) { + pr_err("Not expected bl: opcode is %x\n", op); + return -EINVAL; + } + + /* Let's find where the pointer goes */ + tramp = find_bl_target(ip, op); + + pr_devel("ip:%lx jumps to %lx", ip, tramp); + + if (setup_mcount_compiler_tramp(tramp)) { + /* Are other trampolines reachable? */ + if (!find_ftrace_tramp(ip)) { + pr_err("No ftrace trampolines reachable from %ps\n", + (void *)ip); + return -EINVAL; + } + } + + if (patch_instruction((unsigned int *)ip, PPC_INST_NOP)) { + pr_err("Patching NOP failed.\n"); + return -EPERM; + } + + return 0; +} + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { @@ -286,7 +440,8 @@ int ftrace_make_nop(struct module *mod, old = ftrace_call_replace(ip, addr, 1); new = PPC_INST_NOP; return ftrace_modify_code(ip, old, new); - } + } else if (core_kernel_text(ip)) + return __ftrace_make_nop_kernel(rec, addr); #ifdef CONFIG_MODULES /* @@ -456,6 +611,53 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) #endif /* CONFIG_PPC64 */ #endif /* CONFIG_MODULES */ +static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int op; + void *ip = (void *)rec->ip; + unsigned long tramp, entry, ptr; + + /* Make sure we're being asked to patch branch to a known ftrace addr */ + entry = ppc_global_function_entry((void *)ftrace_caller); + ptr = ppc_global_function_entry((void *)addr); + + if (ptr != entry) { +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + entry = ppc_global_function_entry((void *)ftrace_regs_caller); + if (ptr != entry) { +#endif + pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr); + return -EINVAL; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + } +#endif + } + + /* Make sure we have a nop */ + if (probe_kernel_read(&op, ip, sizeof(op))) { + pr_err("Unable to read ftrace location %p\n", ip); + return -EFAULT; + } + + if (op != PPC_INST_NOP) { + pr_err("Unexpected call sequence at %p: %x\n", ip, op); + return -EINVAL; + } + + tramp = find_ftrace_tramp((unsigned long)ip); + if (!tramp) { + pr_err("No ftrace trampolines reachable from %ps\n", ip); + return -EINVAL; + } + + if (patch_branch(ip, tramp, BRANCH_SET_LINK)) { + pr_err("Error patching branch to ftrace tramp!\n"); + return -EINVAL; + } + + return 0; +} + 
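To make the opcode checks in the new kernel-text ftrace path easier to follow, here is a small standalone sketch (not kernel code) of what is_bl_op(), is_b_op() and find_bl_target() compute for a PowerPC I-form branch: mask off the opcode and link bits, then sign-extend the 24-bit word-aligned displacement. The instruction word and call-site address below are illustrative values.

#include <stdio.h>

int main(void)
{
	unsigned int op = 0x4bfffff1;			/* encodes "bl .-16" */
	unsigned long long ip = 0xc000000000010000ULL;	/* hypothetical call site */
	long long offset;

	int is_bl = (op & 0xfc000003) == 0x48000001;	/* opcode 18, AA=0, LK=1 */
	int is_b  = (op & 0xfc000003) == 0x48000000;	/* opcode 18, AA=0, LK=0 */

	offset = op & 0x03fffffc;	/* 24-bit word-aligned displacement */
	if (offset & 0x02000000)	/* sign-extend, as find_bl_target() does */
		offset -= 0x04000000;

	printf("bl=%d b=%d offset=%lld target=%#llx\n",
	       is_bl, is_b, offset, ip + offset);
	return 0;
}

This prints "bl=1 b=0 offset=-16 target=0xc00000000000fff0", the same branch-target recovery that setup_mcount_compiler_tramp() relies on when deciding whether a trampoline really branches to _mcount().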
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long ip = rec->ip; @@ -471,7 +673,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) old = PPC_INST_NOP; new = ftrace_call_replace(ip, addr, 1); return ftrace_modify_code(ip, old, new); - } + } else if (core_kernel_text(ip)) + return __ftrace_make_call_kernel(rec, addr); #ifdef CONFIG_MODULES /* @@ -603,6 +806,12 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, old = ftrace_call_replace(ip, old_addr, 1); new = ftrace_call_replace(ip, addr, 1); return ftrace_modify_code(ip, old, new); + } else if (core_kernel_text(ip)) { + /* + * We always patch out of range locations to go to the regs + * variant, so there is nothing to do here + */ + return 0; } #ifdef CONFIG_MODULES @@ -654,10 +863,54 @@ void arch_ftrace_update_code(int command) ftrace_modify_all_code(command); } +#ifdef CONFIG_PPC64 +#define PACATOC offsetof(struct paca_struct, kernel_toc) + +#define PPC_LO(v) ((v) & 0xffff) +#define PPC_HI(v) (((v) >> 16) & 0xffff) +#define PPC_HA(v) PPC_HI ((v) + 0x8000) + +extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[]; + +int __init ftrace_dyn_arch_init(void) +{ + int i; + unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init }; + u32 stub_insns[] = { + 0xe98d0000 | PACATOC, /* ld r12,PACATOC(r13) */ + 0x3d8c0000, /* addis r12,r12,<high> */ + 0x398c0000, /* addi r12,r12,<low> */ + 0x7d8903a6, /* mtctr r12 */ + 0x4e800420, /* bctr */ + }; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller); +#else + unsigned long addr = ppc_global_function_entry((void *)ftrace_caller); +#endif + long reladdr = addr - kernel_toc_addr(); + + if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { + pr_err("Address of %ps out of range of kernel_toc.\n", + (void *)addr); + return -1; + } + + for (i = 0; i < 2; i++) { + memcpy(tramp[i], stub_insns, sizeof(stub_insns)); + tramp[i][1] |= PPC_HA(reladdr); + tramp[i][2] |= PPC_LO(reladdr); + add_ftrace_tramp((unsigned long)tramp[i]); + } + + return 0; +} +#else int __init ftrace_dyn_arch_init(void) { return 0; } +#endif #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER diff --git a/arch/powerpc/kernel/trace/ftrace_64.S b/arch/powerpc/kernel/trace/ftrace_64.S index e25f77c10a72..1782af2d1496 100644 --- a/arch/powerpc/kernel/trace/ftrace_64.S +++ b/arch/powerpc/kernel/trace/ftrace_64.S @@ -14,6 +14,18 @@ #include <asm/ppc-opcode.h> #include <asm/export.h> +.pushsection ".tramp.ftrace.text","aw",@progbits; +.globl ftrace_tramp_text +ftrace_tramp_text: + .space 64 +.popsection + +.pushsection ".tramp.ftrace.init","aw",@progbits; +.globl ftrace_tramp_init +ftrace_tramp_init: + .space 64 +.popsection + _GLOBAL(mcount) _GLOBAL(_mcount) EXPORT_SYMBOL(_mcount) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index c85adb858271..9a86572db1ef 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -247,8 +247,6 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, mdelay(MSEC_PER_SEC); } - if (in_interrupt()) - panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); do_exit(signr); @@ -307,12 +305,9 @@ void die(const char *str, struct pt_regs *regs, long err) } NOKPROBE_SYMBOL(die); -void user_single_step_siginfo(struct task_struct *tsk, - struct pt_regs *regs, siginfo_t *info) +void user_single_step_report(struct pt_regs *regs) { - info->si_signo = SIGTRAP; - info->si_code = 
TRAP_TRACE; - info->si_addr = (void __user *)regs->nip; + force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip, current); } static void show_signal_msg(int signr, struct pt_regs *regs, int code, @@ -341,14 +336,12 @@ static void show_signal_msg(int signr, struct pt_regs *regs, int code, show_user_instructions(regs); } -void _exception_pkey(int signr, struct pt_regs *regs, int code, - unsigned long addr, int key) +static bool exception_common(int signr, struct pt_regs *regs, int code, + unsigned long addr) { - siginfo_t info; - if (!user_mode(regs)) { die("Exception in kernel mode", regs, signr); - return; + return false; } show_signal_msg(signr, regs, code, addr); @@ -364,18 +357,23 @@ void _exception_pkey(int signr, struct pt_regs *regs, int code, */ thread_pkey_regs_save(¤t->thread); - clear_siginfo(&info); - info.si_signo = signr; - info.si_code = code; - info.si_addr = (void __user *) addr; - info.si_pkey = key; + return true; +} - force_sig_info(signr, &info, current); +void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key) +{ + if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr)) + return; + + force_sig_pkuerr((void __user *) addr, key); } void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) { - _exception_pkey(signr, regs, code, addr, 0); + if (!exception_common(signr, regs, code, addr)) + return; + + force_sig_fault(signr, code, (void __user *)addr, current); } void system_reset_exception(struct pt_regs *regs) @@ -535,10 +533,10 @@ int machine_check_e500mc(struct pt_regs *regs) printk("Caused by (from MCSR=%lx): ", reason); if (reason & MCSR_MCP) - printk("Machine Check Signal\n"); + pr_cont("Machine Check Signal\n"); if (reason & MCSR_ICPERR) { - printk("Instruction Cache Parity Error\n"); + pr_cont("Instruction Cache Parity Error\n"); /* * This is recoverable by invalidating the i-cache. @@ -556,7 +554,7 @@ int machine_check_e500mc(struct pt_regs *regs) } if (reason & MCSR_DCPERR_MC) { - printk("Data Cache Parity Error\n"); + pr_cont("Data Cache Parity Error\n"); /* * In write shadow mode we auto-recover from the error, but it @@ -575,38 +573,38 @@ int machine_check_e500mc(struct pt_regs *regs) } if (reason & MCSR_L2MMU_MHIT) { - printk("Hit on multiple TLB entries\n"); + pr_cont("Hit on multiple TLB entries\n"); recoverable = 0; } if (reason & MCSR_NMI) - printk("Non-maskable interrupt\n"); + pr_cont("Non-maskable interrupt\n"); if (reason & MCSR_IF) { - printk("Instruction Fetch Error Report\n"); + pr_cont("Instruction Fetch Error Report\n"); recoverable = 0; } if (reason & MCSR_LD) { - printk("Load Error Report\n"); + pr_cont("Load Error Report\n"); recoverable = 0; } if (reason & MCSR_ST) { - printk("Store Error Report\n"); + pr_cont("Store Error Report\n"); recoverable = 0; } if (reason & MCSR_LDG) { - printk("Guarded Load Error Report\n"); + pr_cont("Guarded Load Error Report\n"); recoverable = 0; } if (reason & MCSR_TLBSYNC) - printk("Simultaneous tlbsync operations\n"); + pr_cont("Simultaneous tlbsync operations\n"); if (reason & MCSR_BSL2_ERR) { - printk("Level 2 Cache Error\n"); + pr_cont("Level 2 Cache Error\n"); recoverable = 0; } @@ -616,7 +614,7 @@ int machine_check_e500mc(struct pt_regs *regs) addr = mfspr(SPRN_MCAR); addr |= (u64)mfspr(SPRN_MCARU) << 32; - printk("Machine Check %s Address: %#llx\n", + pr_cont("Machine Check %s Address: %#llx\n", reason & MCSR_MEA ? 
"Effective" : "Physical", addr); } @@ -640,29 +638,29 @@ int machine_check_e500(struct pt_regs *regs) printk("Caused by (from MCSR=%lx): ", reason); if (reason & MCSR_MCP) - printk("Machine Check Signal\n"); + pr_cont("Machine Check Signal\n"); if (reason & MCSR_ICPERR) - printk("Instruction Cache Parity Error\n"); + pr_cont("Instruction Cache Parity Error\n"); if (reason & MCSR_DCP_PERR) - printk("Data Cache Push Parity Error\n"); + pr_cont("Data Cache Push Parity Error\n"); if (reason & MCSR_DCPERR) - printk("Data Cache Parity Error\n"); + pr_cont("Data Cache Parity Error\n"); if (reason & MCSR_BUS_IAERR) - printk("Bus - Instruction Address Error\n"); + pr_cont("Bus - Instruction Address Error\n"); if (reason & MCSR_BUS_RAERR) - printk("Bus - Read Address Error\n"); + pr_cont("Bus - Read Address Error\n"); if (reason & MCSR_BUS_WAERR) - printk("Bus - Write Address Error\n"); + pr_cont("Bus - Write Address Error\n"); if (reason & MCSR_BUS_IBERR) - printk("Bus - Instruction Data Error\n"); + pr_cont("Bus - Instruction Data Error\n"); if (reason & MCSR_BUS_RBERR) - printk("Bus - Read Data Bus Error\n"); + pr_cont("Bus - Read Data Bus Error\n"); if (reason & MCSR_BUS_WBERR) - printk("Bus - Write Data Bus Error\n"); + pr_cont("Bus - Write Data Bus Error\n"); if (reason & MCSR_BUS_IPERR) - printk("Bus - Instruction Parity Error\n"); + pr_cont("Bus - Instruction Parity Error\n"); if (reason & MCSR_BUS_RPERR) - printk("Bus - Read Parity Error\n"); + pr_cont("Bus - Read Parity Error\n"); return 0; } @@ -680,19 +678,19 @@ int machine_check_e200(struct pt_regs *regs) printk("Caused by (from MCSR=%lx): ", reason); if (reason & MCSR_MCP) - printk("Machine Check Signal\n"); + pr_cont("Machine Check Signal\n"); if (reason & MCSR_CP_PERR) - printk("Cache Push Parity Error\n"); + pr_cont("Cache Push Parity Error\n"); if (reason & MCSR_CPERR) - printk("Cache Parity Error\n"); + pr_cont("Cache Parity Error\n"); if (reason & MCSR_EXCP_ERR) - printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); + pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n"); if (reason & MCSR_BUS_IRERR) - printk("Bus - Read Bus Error on instruction fetch\n"); + pr_cont("Bus - Read Bus Error on instruction fetch\n"); if (reason & MCSR_BUS_DRERR) - printk("Bus - Read Bus Error on data load\n"); + pr_cont("Bus - Read Bus Error on data load\n"); if (reason & MCSR_BUS_WRERR) - printk("Bus - Write Bus Error on buffered store or cache line push\n"); + pr_cont("Bus - Write Bus Error on buffered store or cache line push\n"); return 0; } @@ -705,30 +703,30 @@ int machine_check_generic(struct pt_regs *regs) printk("Caused by (from SRR1=%lx): ", reason); switch (reason & 0x601F0000) { case 0x80000: - printk("Machine check signal\n"); + pr_cont("Machine check signal\n"); break; case 0: /* for 601 */ case 0x40000: case 0x140000: /* 7450 MSS error and TEA */ - printk("Transfer error ack signal\n"); + pr_cont("Transfer error ack signal\n"); break; case 0x20000: - printk("Data parity error signal\n"); + pr_cont("Data parity error signal\n"); break; case 0x10000: - printk("Address parity error signal\n"); + pr_cont("Address parity error signal\n"); break; case 0x20000000: - printk("L1 Data Cache error\n"); + pr_cont("L1 Data Cache error\n"); break; case 0x40000000: - printk("L1 Instruction Cache error\n"); + pr_cont("L1 Instruction Cache error\n"); break; case 0x00100000: - printk("L2 data cache parity error\n"); + pr_cont("L2 data cache parity error\n"); break; default: - 
printk("Unknown values in msr\n"); + pr_cont("Unknown values in msr\n"); } return 0; } @@ -741,9 +739,7 @@ void machine_check_exception(struct pt_regs *regs) if (!nested) nmi_enter(); - /* 64s accounts the mce in machine_check_early when in HVMODE */ - if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE)) - __this_cpu_inc(irq_stat.mce_exceptions); + __this_cpu_inc(irq_stat.mce_exceptions); add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); @@ -767,12 +763,17 @@ void machine_check_exception(struct pt_regs *regs) if (check_io_access(regs)) goto bail; - die("Machine check", regs, SIGBUS); - /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) nmi_panic(regs, "Unrecoverable Machine check"); + if (!nested) + nmi_exit(); + + die("Machine check", regs, SIGBUS); + + return; + bail: if (!nested) nmi_exit(); @@ -1433,7 +1434,7 @@ void program_check_exception(struct pt_regs *regs) goto bail; } else { printk(KERN_EMERG "Unexpected TM Bad Thing exception " - "at %lx (msr 0x%x)\n", regs->nip, reason); + "at %lx (msr 0x%lx)\n", regs->nip, regs->msr); die("Unrecoverable exception", regs, SIGABRT); } } @@ -1547,14 +1548,6 @@ void StackOverflow(struct pt_regs *regs) panic("kernel stack overflow"); } -void nonrecoverable_exception(struct pt_regs *regs) -{ - printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n", - regs->nip, regs->msr); - debugger(regs); - die("nonrecoverable exception", regs, SIGKILL); -} - void kernel_fp_unavailable_exception(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); @@ -1750,16 +1743,20 @@ void fp_unavailable_tm(struct pt_regs *regs) * checkpointed FP registers need to be loaded. */ tm_reclaim_current(TM_CAUSE_FAC_UNAV); - /* Reclaim didn't save out any FPRs to transact_fprs. */ + + /* + * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and + * then it was overwrite by the thr->fp_state by tm_reclaim_thread(). + * + * At this point, ck{fp,vr}_state contains the exact values we want to + * recheckpoint. + */ /* Enable FP for the task: */ current->thread.load_fp = 1; - /* This loads and recheckpoints the FP registers from - * thread.fpr[]. They will remain in registers after the - * checkpoint so we don't need to reload them after. - * If VMX is in use, the VRs now hold checkpointed values, - * so we don't want to load the VRs from the thread_struct. + /* + * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers. 
*/ tm_recheckpoint(¤t->thread); } @@ -2086,8 +2083,8 @@ void SPEFloatingPointRoundException(struct pt_regs *regs) */ void unrecoverable_exception(struct pt_regs *regs) { - printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n", - regs->trap, regs->nip); + pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n", + regs->trap, regs->nip, regs->msr); die("Unrecoverable exception", regs, SIGABRT); } NOKPROBE_SYMBOL(unrecoverable_exception); diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S index 3745113fcc65..2a7eb5452aba 100644 --- a/arch/powerpc/kernel/vdso32/datapage.S +++ b/arch/powerpc/kernel/vdso32/datapage.S @@ -37,6 +37,7 @@ data_page_branch: mtlr r0 addi r3, r3, __kernel_datapage_offset-data_page_branch lwz r0,0(r3) + .cfi_restore lr add r3,r0,r3 blr .cfi_endproc diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S index 769c2624e0a6..1e0bc5955a40 100644 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S @@ -139,6 +139,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) */ 99: li r0,__NR_clock_gettime + .cfi_restore lr sc blr .cfi_endproc diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S index abf17feffe40..bf9668691511 100644 --- a/arch/powerpc/kernel/vdso64/datapage.S +++ b/arch/powerpc/kernel/vdso64/datapage.S @@ -37,6 +37,7 @@ data_page_branch: mtlr r0 addi r3, r3, __kernel_datapage_offset-data_page_branch lwz r0,0(r3) + .cfi_restore lr add r3,r0,r3 blr .cfi_endproc diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index c002adcc694c..a4ed9edfd5f0 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -169,6 +169,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) */ 99: li r0,__NR_clock_gettime + .cfi_restore lr sc blr .cfi_endproc diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 07ae018e550e..434581bcd5b4 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -4,6 +4,9 @@ #else #define PROVIDE32(x) PROVIDE(x) #endif + +#define BSS_FIRST_SECTIONS *(.bss.prominit) + #include <asm/page.h> #include <asm-generic/vmlinux.lds.h> #include <asm/cache.h> @@ -99,6 +102,9 @@ SECTIONS #endif /* careful! __ftr_alt_* sections need to be close to .text */ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text); +#ifdef CONFIG_PPC64 + *(.tramp.ftrace.text); +#endif SCHED_TEXT CPUIDLE_TEXT LOCK_TEXT @@ -181,7 +187,15 @@ SECTIONS */ . = ALIGN(STRICT_ALIGN_SIZE); __init_begin = .; - INIT_TEXT_SECTION(PAGE_SIZE) :kernel + . = ALIGN(PAGE_SIZE); + .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { + _sinittext = .; + INIT_TEXT + _einittext = .; +#ifdef CONFIG_PPC64 + *(.tramp.ftrace.init); +#endif + } :kernel /* .exit.text is discarded at runtime, not link time, * to deal with references from __bug_table @@ -212,8 +226,6 @@ SECTIONS CON_INITCALL } - SECURITY_INIT - . 
= ALIGN(8); __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) { __start___ftr_fixup = .; diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index f872c04bb5b1..64f1135e7732 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -3,8 +3,6 @@ # Makefile for Kernel-based Virtual Machine module # -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror - ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm KVM := ../../../virt/kvm @@ -75,7 +73,8 @@ kvm-hv-y += \ book3s_hv.o \ book3s_hv_interrupts.o \ book3s_64_mmu_hv.o \ - book3s_64_mmu_radix.o + book3s_64_mmu_radix.o \ + book3s_hv_nested.o kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \ book3s_hv_tm.o diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 87348e498c89..fd9893bc7aa1 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) { if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { ulong pc = kvmppc_get_pc(vcpu); + ulong lr = kvmppc_get_lr(vcpu); if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); + if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS) + kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; } } @@ -150,7 +153,6 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec) case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; - case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break; case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; @@ -236,18 +238,35 @@ EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec); void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { - unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL; - - if (irq->irq == KVM_INTERRUPT_SET_LEVEL) - vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL; + /* + * This case (KVM_INTERRUPT_SET) should never actually arise for + * a pseries guest (because pseries guests expect their interrupt + * controllers to continue asserting an external interrupt request + * until it is acknowledged at the interrupt controller), but is + * included to avoid ABI breakage and potentially for other + * sorts of guest. + * + * There is a subtlety here: HV KVM does not test the + * external_oneshot flag in the code that synthesizes + * external interrupts for the guest just before entering + * the guest. That is OK even if userspace did do a + * KVM_INTERRUPT_SET on a pseries guest vcpu, because the + * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick() + * which ends up doing a smp_send_reschedule(), which will + * pull the guest all the way out to the host, meaning that + * we will call kvmppc_core_prepare_to_enter() before entering + * the guest again, and that will handle the external_oneshot + * flag correctly. 
+ */ + if (irq->irq == KVM_INTERRUPT_SET) + vcpu->arch.external_oneshot = 1; - kvmppc_book3s_queue_irqprio(vcpu, vec); + kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); } void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu) { kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); - kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL); } void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar, @@ -278,7 +297,6 @@ static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, vec = BOOK3S_INTERRUPT_DECREMENTER; break; case BOOK3S_IRQPRIO_EXTERNAL: - case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; vec = BOOK3S_INTERRUPT_EXTERNAL; break; @@ -352,8 +370,16 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) case BOOK3S_IRQPRIO_DECREMENTER: /* DEC interrupts get cleared by mtdec */ return false; - case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: - /* External interrupts get cleared by userspace */ + case BOOK3S_IRQPRIO_EXTERNAL: + /* + * External interrupts get cleared by userspace + * except when set by the KVM_INTERRUPT ioctl with + * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL). + */ + if (vcpu->arch.external_oneshot) { + vcpu->arch.external_oneshot = 0; + return true; + } return false; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 68e14afecac8..c615617e78ac 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -268,14 +268,13 @@ int kvmppc_mmu_hv_init(void) { unsigned long host_lpid, rsvd_lpid; - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return -EINVAL; - if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE)) return -EINVAL; /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */ - host_lpid = mfspr(SPRN_LPID); + host_lpid = 0; + if (cpu_has_feature(CPU_FTR_HVMODE)) + host_lpid = mfspr(SPRN_LPID); rsvd_lpid = LPID_RSVD; kvmppc_init_lpid(rsvd_lpid + 1); diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 998f8d089ac7..d68162ee159b 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -10,6 +10,9 @@ #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> +#include <linux/anon_inodes.h> +#include <linux/file.h> +#include <linux/debugfs.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> @@ -26,87 +29,74 @@ */ static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 }; -int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, - struct kvmppc_pte *gpte, bool data, bool iswrite) +int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, u64 root, + u64 *pte_ret_p) { struct kvm *kvm = vcpu->kvm; - u32 pid; int ret, level, ps; - __be64 prte, rpte; - unsigned long ptbl; - unsigned long root, pte, index; - unsigned long rts, bits, offset; - unsigned long gpa; - unsigned long proc_tbl_size; - - /* Work out effective PID */ - switch (eaddr >> 62) { - case 0: - pid = vcpu->arch.pid; - break; - case 3: - pid = 0; - break; - default: - return -EINVAL; - } - proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12); - if (pid * 16 >= proc_tbl_size) - return -EINVAL; - - /* Read partition table to find root of tree for effective PID */ - ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16); - ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte)); - if (ret) - return ret; + unsigned long rts, bits, offset, index; + u64 pte, base, gpa; + __be64 rpte; - root = 
be64_to_cpu(prte); rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) | ((root & RTS2_MASK) >> RTS2_SHIFT); bits = root & RPDS_MASK; - root = root & RPDB_MASK; + base = root & RPDB_MASK; offset = rts + 31; - /* current implementations only support 52-bit space */ + /* Current implementations only support 52-bit space */ if (offset != 52) return -EINVAL; + /* Walk each level of the radix tree */ for (level = 3; level >= 0; --level) { + u64 addr; + /* Check a valid size */ if (level && bits != p9_supported_radix_bits[level]) return -EINVAL; if (level == 0 && !(bits == 5 || bits == 9)) return -EINVAL; offset -= bits; index = (eaddr >> offset) & ((1UL << bits) - 1); - /* check that low bits of page table base are zero */ - if (root & ((1UL << (bits + 3)) - 1)) + /* Check that low bits of page table base are zero */ + if (base & ((1UL << (bits + 3)) - 1)) return -EINVAL; - ret = kvm_read_guest(kvm, root + index * 8, - &rpte, sizeof(rpte)); - if (ret) + /* Read the entry from guest memory */ + addr = base + (index * sizeof(rpte)); + ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte)); + if (ret) { + if (pte_ret_p) + *pte_ret_p = addr; return ret; + } pte = __be64_to_cpu(rpte); if (!(pte & _PAGE_PRESENT)) return -ENOENT; + /* Check if a leaf entry */ if (pte & _PAGE_PTE) break; - bits = pte & 0x1f; - root = pte & 0x0fffffffffffff00ul; + /* Get ready to walk the next level */ + base = pte & RPDB_MASK; + bits = pte & RPDS_MASK; } - /* need a leaf at lowest level; 512GB pages not supported */ + + /* Need a leaf at lowest level; 512GB pages not supported */ if (level < 0 || level == 3) return -EINVAL; - /* offset is now log base 2 of the page size */ + /* We found a valid leaf PTE */ + /* Offset is now log base 2 of the page size */ gpa = pte & 0x01fffffffffff000ul; if (gpa & ((1ul << offset) - 1)) return -EINVAL; - gpa += eaddr & ((1ul << offset) - 1); + gpa |= eaddr & ((1ul << offset) - 1); for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps) if (offset == mmu_psize_defs[ps].shift) break; gpte->page_size = ps; + gpte->page_shift = offset; gpte->eaddr = eaddr; gpte->raddr = gpa; @@ -115,6 +105,77 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, gpte->may_read = !!(pte & _PAGE_READ); gpte->may_write = !!(pte & _PAGE_WRITE); gpte->may_execute = !!(pte & _PAGE_EXEC); + + gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY); + + if (pte_ret_p) + *pte_ret_p = pte; + + return 0; +} + +/* + * Used to walk a partition or process table radix tree in guest memory + * Note: We exploit the fact that a partition table and a process + * table have the same layout, a partition-scoped page table and a + * process-scoped page table have the same layout, and the 2nd + * doubleword of a partition table entry has the same layout as + * the PTCR register. + */ +int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, u64 table, + int table_index, u64 *pte_ret_p) +{ + struct kvm *kvm = vcpu->kvm; + int ret; + unsigned long size, ptbl, root; + struct prtb_entry entry; + + if ((table & PRTS_MASK) > 24) + return -EINVAL; + size = 1ul << ((table & PRTS_MASK) + 12); + + /* Is the table big enough to contain this entry? 
*/ + if ((table_index * sizeof(entry)) >= size) + return -EINVAL; + + /* Read the table to find the root of the radix tree */ + ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry)); + ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry)); + if (ret) + return ret; + + /* Root is stored in the first double word */ + root = be64_to_cpu(entry.prtb0); + + return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p); +} + +int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *gpte, bool data, bool iswrite) +{ + u32 pid; + u64 pte; + int ret; + + /* Work out effective PID */ + switch (eaddr >> 62) { + case 0: + pid = vcpu->arch.pid; + break; + case 3: + pid = 0; + break; + default: + return -EINVAL; + } + + ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte, + vcpu->kvm->arch.process_table, pid, &pte); + if (ret) + return ret; + + /* Check privilege (applies only to process scoped translations) */ if (kvmppc_get_msr(vcpu) & MSR_PR) { if (pte & _PAGE_PRIVILEGED) { gpte->may_read = 0; @@ -137,20 +198,46 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, } static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, - unsigned int pshift) + unsigned int pshift, unsigned int lpid) { unsigned long psize = PAGE_SIZE; + int psi; + long rc; + unsigned long rb; if (pshift) psize = 1UL << pshift; + else + pshift = PAGE_SHIFT; addr &= ~(psize - 1); - radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize); + + if (!kvmhv_on_pseries()) { + radix__flush_tlb_lpid_page(lpid, addr, psize); + return; + } + + psi = shift_to_mmu_psize(pshift); + rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58)); + rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1), + lpid, rb); + if (rc) + pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc); } -static void kvmppc_radix_flush_pwc(struct kvm *kvm) +static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) { - radix__flush_pwc_lpid(kvm->arch.lpid); + long rc; + + if (!kvmhv_on_pseries()) { + radix__flush_pwc_lpid(lpid); + return; + } + + rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1), + lpid, TLBIEL_INVAL_SET_LPID); + if (rc) + pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc); } static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, @@ -195,23 +282,38 @@ static void kvmppc_pmd_free(pmd_t *pmdp) kmem_cache_free(kvm_pmd_cache, pmdp); } -static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, - unsigned long gpa, unsigned int shift) +/* Called with kvm->mmu_lock held */ +void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, + unsigned int shift, struct kvm_memory_slot *memslot, + unsigned int lpid) { - unsigned long page_size = 1ul << shift; unsigned long old; + unsigned long gfn = gpa >> PAGE_SHIFT; + unsigned long page_size = PAGE_SIZE; + unsigned long hpa; old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); - kvmppc_radix_tlbie_page(kvm, gpa, shift); - if (old & _PAGE_DIRTY) { - unsigned long gfn = gpa >> PAGE_SHIFT; - struct kvm_memory_slot *memslot; + kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); + + /* The following only applies to L1 entries */ + if (lpid != kvm->arch.lpid) + return; + if (!memslot) { memslot = gfn_to_memslot(kvm, gfn); - if (memslot && memslot->dirty_bitmap) - kvmppc_update_dirty_map(memslot, gfn, page_size); + if (!memslot) + return; } + if (shift) + page_size = 1ul << shift; + + gpa &= ~(page_size - 1); + hpa = old & PTE_RPN_MASK; + 
kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size); + + if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) + kvmppc_update_dirty_map(memslot, gfn, page_size); } /* @@ -224,7 +326,8 @@ static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, * and emit a warning if encountered, but there may already be data * corruption due to the unexpected mappings. */ -static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full) +static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, + unsigned int lpid) { if (full) { memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE); @@ -238,14 +341,15 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full) WARN_ON_ONCE(1); kvmppc_unmap_pte(kvm, p, pte_pfn(*p) << PAGE_SHIFT, - PAGE_SHIFT); + PAGE_SHIFT, NULL, lpid); } } kvmppc_pte_free(pte); } -static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full) +static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full, + unsigned int lpid) { unsigned long im; pmd_t *p = pmd; @@ -260,20 +364,21 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full) WARN_ON_ONCE(1); kvmppc_unmap_pte(kvm, (pte_t *)p, pte_pfn(*(pte_t *)p) << PAGE_SHIFT, - PMD_SHIFT); + PMD_SHIFT, NULL, lpid); } } else { pte_t *pte; pte = pte_offset_map(p, 0); - kvmppc_unmap_free_pte(kvm, pte, full); + kvmppc_unmap_free_pte(kvm, pte, full, lpid); pmd_clear(p); } } kvmppc_pmd_free(pmd); } -static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud) +static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, + unsigned int lpid) { unsigned long iu; pud_t *p = pud; @@ -287,36 +392,40 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud) pmd_t *pmd; pmd = pmd_offset(p, 0); - kvmppc_unmap_free_pmd(kvm, pmd, true); + kvmppc_unmap_free_pmd(kvm, pmd, true, lpid); pud_clear(p); } } pud_free(kvm->mm, pud); } -void kvmppc_free_radix(struct kvm *kvm) +void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) { unsigned long ig; - pgd_t *pgd; - if (!kvm->arch.pgtable) - return; - pgd = kvm->arch.pgtable; for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { pud_t *pud; if (!pgd_present(*pgd)) continue; pud = pud_offset(pgd, 0); - kvmppc_unmap_free_pud(kvm, pud); + kvmppc_unmap_free_pud(kvm, pud, lpid); pgd_clear(pgd); } - pgd_free(kvm->mm, kvm->arch.pgtable); - kvm->arch.pgtable = NULL; +} + +void kvmppc_free_radix(struct kvm *kvm) +{ + if (kvm->arch.pgtable) { + kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable, + kvm->arch.lpid); + pgd_free(kvm->mm, kvm->arch.pgtable); + kvm->arch.pgtable = NULL; + } } static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, - unsigned long gpa) + unsigned long gpa, unsigned int lpid) { pte_t *pte = pte_offset_kernel(pmd, 0); @@ -326,13 +435,13 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, * flushing the PWC again. */ pmd_clear(pmd); - kvmppc_radix_flush_pwc(kvm); + kvmppc_radix_flush_pwc(kvm, lpid); - kvmppc_unmap_free_pte(kvm, pte, false); + kvmppc_unmap_free_pte(kvm, pte, false, lpid); } static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, - unsigned long gpa) + unsigned long gpa, unsigned int lpid) { pmd_t *pmd = pmd_offset(pud, 0); @@ -342,9 +451,9 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, * so can be freed without flushing the PWC again. 
*/ pud_clear(pud); - kvmppc_radix_flush_pwc(kvm); + kvmppc_radix_flush_pwc(kvm, lpid); - kvmppc_unmap_free_pmd(kvm, pmd, false); + kvmppc_unmap_free_pmd(kvm, pmd, false, lpid); } /* @@ -356,8 +465,10 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, */ #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)) -static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, - unsigned int level, unsigned long mmu_seq) +int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, + unsigned long gpa, unsigned int level, + unsigned long mmu_seq, unsigned int lpid, + unsigned long *rmapp, struct rmap_nested **n_rmap) { pgd_t *pgd; pud_t *pud, *new_pud = NULL; @@ -366,7 +477,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, int ret; /* Traverse the guest's 2nd-level tree, allocate new levels needed */ - pgd = kvm->arch.pgtable + pgd_index(gpa); + pgd = pgtable + pgd_index(gpa); pud = NULL; if (pgd_present(*pgd)) pud = pud_offset(pgd, gpa); @@ -423,7 +534,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, goto out_unlock; } /* Valid 1GB page here already, remove it */ - kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT); + kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL, + lpid); } if (level == 2) { if (!pud_none(*pud)) { @@ -432,9 +544,11 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, * install a large page, so remove and free the page * table page. */ - kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa); + kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid); } kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte); + if (rmapp && n_rmap) + kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); ret = 0; goto out_unlock; } @@ -458,7 +572,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) & PTE_BITS_MUST_MATCH); kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), - 0, pte_val(pte), lgpa, PMD_SHIFT); + 0, pte_val(pte), lgpa, PMD_SHIFT); ret = 0; goto out_unlock; } @@ -472,7 +586,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, goto out_unlock; } /* Valid 2MB page here already, remove it */ - kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT); + kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL, + lpid); } if (level == 1) { if (!pmd_none(*pmd)) { @@ -481,9 +596,11 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, * install a large page, so remove and free the page * table page. 
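/*
 * kvmppc_create_pte() above now walks whichever radix tree it is handed
 * (the L1 partition-scoped table or a nested shadow table), indexing by
 * guest physical address at each level. A standalone sketch of that index
 * arithmetic; the 4 KiB page size and 9-bit levels are assumptions for
 * illustration, the kernel derives the real geometry from its radix config.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 12	/* assumed 4 KiB pages */
#define EX_LEVEL_BITS 9		/* assumed 512-entry tables per level */

static unsigned int level_index(unsigned long gpa, int level)
{
	/* level 0 = PTE, 1 = PMD, 2 = PUD, 3 = PGD */
	return (gpa >> (EX_PAGE_SHIFT + level * EX_LEVEL_BITS)) &
		((1u << EX_LEVEL_BITS) - 1);
}

int main(void)
{
	unsigned long gpa = 0x40201000UL;
	for (int level = 3; level >= 0; level--)
		printf("level %d index: %u\n", level, level_index(gpa, level));
	return 0;
}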
*/ - kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa); + kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid); } kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte); + if (rmapp && n_rmap) + kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); ret = 0; goto out_unlock; } @@ -508,6 +625,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, goto out_unlock; } kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte); + if (rmapp && n_rmap) + kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); ret = 0; out_unlock: @@ -521,95 +640,49 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa, return ret; } -int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned long ea, unsigned long dsisr) +bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing, + unsigned long gpa, unsigned int lpid) +{ + unsigned long pgflags; + unsigned int shift; + pte_t *ptep; + + /* + * Need to set an R or C bit in the 2nd-level tables; + * since we are just helping out the hardware here, + * it is sufficient to do what the hardware does. + */ + pgflags = _PAGE_ACCESSED; + if (writing) + pgflags |= _PAGE_DIRTY; + /* + * We are walking the secondary (partition-scoped) page table here. + * We can do this without disabling irq because the Linux MM + * subsystem doesn't do THP splits and collapses on this tree. + */ + ptep = __find_linux_pte(pgtable, gpa, NULL, &shift); + if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) { + kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift); + return true; + } + return false; +} + +int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, + unsigned long gpa, + struct kvm_memory_slot *memslot, + bool writing, bool kvm_ro, + pte_t *inserted_pte, unsigned int *levelp) { struct kvm *kvm = vcpu->kvm; - unsigned long mmu_seq; - unsigned long gpa, gfn, hva; - struct kvm_memory_slot *memslot; struct page *page = NULL; - long ret; - bool writing; + unsigned long mmu_seq; + unsigned long hva, gfn = gpa >> PAGE_SHIFT; bool upgrade_write = false; bool *upgrade_p = &upgrade_write; pte_t pte, *ptep; - unsigned long pgflags; unsigned int shift, level; - - /* Check for unusual errors */ - if (dsisr & DSISR_UNSUPP_MMU) { - pr_err("KVM: Got unsupported MMU fault\n"); - return -EFAULT; - } - if (dsisr & DSISR_BADACCESS) { - /* Reflect to the guest as DSI */ - pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr); - kvmppc_core_queue_data_storage(vcpu, ea, dsisr); - return RESUME_GUEST; - } - - /* Translate the logical address and get the page */ - gpa = vcpu->arch.fault_gpa & ~0xfffUL; - gpa &= ~0xF000000000000000ul; - gfn = gpa >> PAGE_SHIFT; - if (!(dsisr & DSISR_PRTABLE_FAULT)) - gpa |= ea & 0xfff; - memslot = gfn_to_memslot(kvm, gfn); - - /* No memslot means it's an emulated MMIO region */ - if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { - if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS | - DSISR_SET_RC)) { - /* - * Bad address in guest page table tree, or other - * unusual error - reflect it to the guest as DSI. 
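/*
 * kvmppc_hv_handle_set_rc() above mirrors what the MMU would do itself: set
 * the Referenced bit, and the Changed bit for a store, but only if a valid
 * (and, for a store, writable) PTE is already present; otherwise the caller
 * falls back to the full fault path. A sketch over a plain PTE image; the
 * flag positions are placeholders, not the kernel's _PAGE_* encodings.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_PTE_PRESENT	(1ull << 0)
#define EX_PTE_WRITE	(1ull << 1)
#define EX_PTE_ACCESSED	(1ull << 2)	/* "R" */
#define EX_PTE_DIRTY	(1ull << 3)	/* "C" */

/* Returns true if the R/C update could be done purely in software. */
static bool handle_set_rc(uint64_t *pte, bool writing)
{
	uint64_t flags = EX_PTE_ACCESSED | (writing ? EX_PTE_DIRTY : 0);

	if (!(*pte & EX_PTE_PRESENT))
		return false;		/* no translation: take the fault path */
	if (writing && !(*pte & EX_PTE_WRITE))
		return false;		/* store to read-only page: real fault */

	*pte |= flags;			/* do what the hardware would have done */
	return true;
}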
- */ - kvmppc_core_queue_data_storage(vcpu, ea, dsisr); - return RESUME_GUEST; - } - return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, - dsisr & DSISR_ISSTORE); - } - - writing = (dsisr & DSISR_ISSTORE) != 0; - if (memslot->flags & KVM_MEM_READONLY) { - if (writing) { - /* give the guest a DSI */ - dsisr = DSISR_ISSTORE | DSISR_PROTFAULT; - kvmppc_core_queue_data_storage(vcpu, ea, dsisr); - return RESUME_GUEST; - } - upgrade_p = NULL; - } - - if (dsisr & DSISR_SET_RC) { - /* - * Need to set an R or C bit in the 2nd-level tables; - * since we are just helping out the hardware here, - * it is sufficient to do what the hardware does. - */ - pgflags = _PAGE_ACCESSED; - if (writing) - pgflags |= _PAGE_DIRTY; - /* - * We are walking the secondary page table here. We can do this - * without disabling irq. - */ - spin_lock(&kvm->mmu_lock); - ptep = __find_linux_pte(kvm->arch.pgtable, - gpa, NULL, &shift); - if (ptep && pte_present(*ptep) && - (!writing || pte_write(*ptep))) { - kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, - gpa, shift); - dsisr &= ~DSISR_SET_RC; - } - spin_unlock(&kvm->mmu_lock); - if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE | - DSISR_PROTFAULT | DSISR_SET_RC))) - return RESUME_GUEST; - } + int ret; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_notifier_seq; @@ -622,7 +695,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, * is that the page is writable. */ hva = gfn_to_hva_memslot(memslot, gfn); - if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) { + if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) { upgrade_write = true; } else { unsigned long pfn; @@ -690,7 +763,12 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } /* Allocate space in the tree and write the PTE */ - ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq); + ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level, + mmu_seq, kvm->arch.lpid, NULL, NULL); + if (inserted_pte) + *inserted_pte = pte; + if (levelp) + *levelp = level; if (page) { if (!ret && (pte_val(pte) & _PAGE_WRITE)) @@ -698,6 +776,82 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, put_page(page); } + return ret; +} + +int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned long ea, unsigned long dsisr) +{ + struct kvm *kvm = vcpu->kvm; + unsigned long gpa, gfn; + struct kvm_memory_slot *memslot; + long ret; + bool writing = !!(dsisr & DSISR_ISSTORE); + bool kvm_ro = false; + + /* Check for unusual errors */ + if (dsisr & DSISR_UNSUPP_MMU) { + pr_err("KVM: Got unsupported MMU fault\n"); + return -EFAULT; + } + if (dsisr & DSISR_BADACCESS) { + /* Reflect to the guest as DSI */ + pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr); + kvmppc_core_queue_data_storage(vcpu, ea, dsisr); + return RESUME_GUEST; + } + + /* Translate the logical address */ + gpa = vcpu->arch.fault_gpa & ~0xfffUL; + gpa &= ~0xF000000000000000ul; + gfn = gpa >> PAGE_SHIFT; + if (!(dsisr & DSISR_PRTABLE_FAULT)) + gpa |= ea & 0xfff; + + /* Get the corresponding memslot */ + memslot = gfn_to_memslot(kvm, gfn); + + /* No memslot means it's an emulated MMIO region */ + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { + if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS | + DSISR_SET_RC)) { + /* + * Bad address in guest page table tree, or other + * unusual error - reflect it to the guest as DSI. 
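/*
 * The refactored fault path derives the guest physical address the same way
 * the old code did: page-align fault_gpa, strip the top nibble (the quadrant
 * bits), and splice the low 12 bits of the effective address back in unless
 * the fault was on the partition table itself. A standalone restatement of
 * that arithmetic, with DSISR_PRTABLE_FAULT reduced to a plain boolean.
 */
#include <stdint.h>
#include <stdbool.h>

struct fault_addr { uint64_t gpa; uint64_t gfn; };

static struct fault_addr decode_fault(uint64_t fault_gpa, uint64_t ea,
				      bool prtable_fault)
{
	struct fault_addr out;

	out.gpa = fault_gpa & ~0xfffULL;		/* page-align          */
	out.gpa &= ~0xF000000000000000ULL;		/* strip quadrant bits */
	out.gfn = out.gpa >> 12;			/* PAGE_SHIFT == 12 assumed */
	if (!prtable_fault)
		out.gpa |= ea & 0xfff;			/* restore byte offset */
	return out;
}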
+ */ + kvmppc_core_queue_data_storage(vcpu, ea, dsisr); + return RESUME_GUEST; + } + return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing); + } + + if (memslot->flags & KVM_MEM_READONLY) { + if (writing) { + /* give the guest a DSI */ + kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE | + DSISR_PROTFAULT); + return RESUME_GUEST; + } + kvm_ro = true; + } + + /* Failed to set the reference/change bits */ + if (dsisr & DSISR_SET_RC) { + spin_lock(&kvm->mmu_lock); + if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, + writing, gpa, kvm->arch.lpid)) + dsisr &= ~DSISR_SET_RC; + spin_unlock(&kvm->mmu_lock); + + if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE | + DSISR_PROTFAULT | DSISR_SET_RC))) + return RESUME_GUEST; + } + + /* Try to insert a pte */ + ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing, + kvm_ro, NULL, NULL); + if (ret == 0 || ret == -EAGAIN) ret = RESUME_GUEST; return ret; @@ -710,20 +864,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, pte_t *ptep; unsigned long gpa = gfn << PAGE_SHIFT; unsigned int shift; - unsigned long old; ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); - if (ptep && pte_present(*ptep)) { - old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0, - gpa, shift); - kvmppc_radix_tlbie_page(kvm, gpa, shift); - if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { - unsigned long psize = PAGE_SIZE; - if (shift) - psize = 1ul << shift; - kvmppc_update_dirty_map(memslot, gfn, psize); - } - } + if (ptep && pte_present(*ptep)) + kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, + kvm->arch.lpid); return 0; } @@ -778,7 +923,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm, ret = 1 << (shift - PAGE_SHIFT); kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0, gpa, shift); - kvmppc_radix_tlbie_page(kvm, gpa, shift); + kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid); } return ret; } @@ -863,6 +1008,215 @@ static void pmd_ctor(void *addr) memset(addr, 0, RADIX_PMD_TABLE_SIZE); } +struct debugfs_radix_state { + struct kvm *kvm; + struct mutex mutex; + unsigned long gpa; + int lpid; + int chars_left; + int buf_index; + char buf[128]; + u8 hdr; +}; + +static int debugfs_radix_open(struct inode *inode, struct file *file) +{ + struct kvm *kvm = inode->i_private; + struct debugfs_radix_state *p; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + + kvm_get_kvm(kvm); + p->kvm = kvm; + mutex_init(&p->mutex); + file->private_data = p; + + return nonseekable_open(inode, file); +} + +static int debugfs_radix_release(struct inode *inode, struct file *file) +{ + struct debugfs_radix_state *p = file->private_data; + + kvm_put_kvm(p->kvm); + kfree(p); + return 0; +} + +static ssize_t debugfs_radix_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct debugfs_radix_state *p = file->private_data; + ssize_t ret, r; + unsigned long n; + struct kvm *kvm; + unsigned long gpa; + pgd_t *pgt; + struct kvm_nested_guest *nested; + pgd_t pgd, *pgdp; + pud_t pud, *pudp; + pmd_t pmd, *pmdp; + pte_t *ptep; + int shift; + unsigned long pte; + + kvm = p->kvm; + if (!kvm_is_radix(kvm)) + return 0; + + ret = mutex_lock_interruptible(&p->mutex); + if (ret) + return ret; + + if (p->chars_left) { + n = p->chars_left; + if (n > len) + n = len; + r = copy_to_user(buf, p->buf + p->buf_index, n); + n -= r; + p->chars_left -= n; + p->buf_index += n; + buf += n; + len -= n; + ret = n; + if (r) { + if (!n) + ret = -EFAULT; + goto out; + } + } + + gpa = p->gpa; + nested = NULL; + pgt = NULL; + 
while (len != 0 && p->lpid >= 0) { + if (gpa >= RADIX_PGTABLE_RANGE) { + gpa = 0; + pgt = NULL; + if (nested) { + kvmhv_put_nested(nested); + nested = NULL; + } + p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid); + p->hdr = 0; + if (p->lpid < 0) + break; + } + if (!pgt) { + if (p->lpid == 0) { + pgt = kvm->arch.pgtable; + } else { + nested = kvmhv_get_nested(kvm, p->lpid, false); + if (!nested) { + gpa = RADIX_PGTABLE_RANGE; + continue; + } + pgt = nested->shadow_pgtable; + } + } + n = 0; + if (!p->hdr) { + if (p->lpid > 0) + n = scnprintf(p->buf, sizeof(p->buf), + "\nNested LPID %d: ", p->lpid); + n += scnprintf(p->buf + n, sizeof(p->buf) - n, + "pgdir: %lx\n", (unsigned long)pgt); + p->hdr = 1; + goto copy; + } + + pgdp = pgt + pgd_index(gpa); + pgd = READ_ONCE(*pgdp); + if (!(pgd_val(pgd) & _PAGE_PRESENT)) { + gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE; + continue; + } + + pudp = pud_offset(&pgd, gpa); + pud = READ_ONCE(*pudp); + if (!(pud_val(pud) & _PAGE_PRESENT)) { + gpa = (gpa & PUD_MASK) + PUD_SIZE; + continue; + } + if (pud_val(pud) & _PAGE_PTE) { + pte = pud_val(pud); + shift = PUD_SHIFT; + goto leaf; + } + + pmdp = pmd_offset(&pud, gpa); + pmd = READ_ONCE(*pmdp); + if (!(pmd_val(pmd) & _PAGE_PRESENT)) { + gpa = (gpa & PMD_MASK) + PMD_SIZE; + continue; + } + if (pmd_val(pmd) & _PAGE_PTE) { + pte = pmd_val(pmd); + shift = PMD_SHIFT; + goto leaf; + } + + ptep = pte_offset_kernel(&pmd, gpa); + pte = pte_val(READ_ONCE(*ptep)); + if (!(pte & _PAGE_PRESENT)) { + gpa += PAGE_SIZE; + continue; + } + shift = PAGE_SHIFT; + leaf: + n = scnprintf(p->buf, sizeof(p->buf), + " %lx: %lx %d\n", gpa, pte, shift); + gpa += 1ul << shift; + copy: + p->chars_left = n; + if (n > len) + n = len; + r = copy_to_user(buf, p->buf, n); + n -= r; + p->chars_left -= n; + p->buf_index = n; + buf += n; + len -= n; + ret += n; + if (r) { + if (!ret) + ret = -EFAULT; + break; + } + } + p->gpa = gpa; + if (nested) + kvmhv_put_nested(nested); + + out: + mutex_unlock(&p->mutex); + return ret; +} + +static ssize_t debugfs_radix_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + return -EACCES; +} + +static const struct file_operations debugfs_radix_fops = { + .owner = THIS_MODULE, + .open = debugfs_radix_open, + .release = debugfs_radix_release, + .read = debugfs_radix_read, + .write = debugfs_radix_write, + .llseek = generic_file_llseek, +}; + +void kvmhv_radix_debugfs_init(struct kvm *kvm) +{ + kvm->arch.radix_dentry = debugfs_create_file("radix", 0400, + kvm->arch.debugfs_dir, kvm, + &debugfs_radix_fops); +} + int kvmppc_radix_init(void) { unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 9a3f2646ecc7..62a8d03ba7e9 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -363,6 +363,40 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, return ret; } +static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, + unsigned long tce) +{ + unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE); + enum dma_data_direction dir = iommu_tce_direction(tce); + struct kvmppc_spapr_tce_iommu_table *stit; + unsigned long ua = 0; + + /* Allow userspace to poison TCE table */ + if (dir == DMA_NONE) + return H_SUCCESS; + + if (iommu_tce_check_gpa(stt->page_shift, gpa)) + return H_TOO_HARD; + + if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL)) + return H_TOO_HARD; + + list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { + unsigned long hpa = 0; + struct 
mm_iommu_table_group_mem_t *mem; + long shift = stit->tbl->it_page_shift; + + mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift); + if (!mem) + return H_TOO_HARD; + + if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) + return H_TOO_HARD; + } + + return H_SUCCESS; +} + static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry) { unsigned long hpa = 0; @@ -376,11 +410,10 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, { struct mm_iommu_table_group_mem_t *mem = NULL; const unsigned long pgsize = 1ULL << tbl->it_page_shift; - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); if (!pua) - /* it_userspace allocation might be delayed */ - return H_TOO_HARD; + return H_SUCCESS; mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize); if (!mem) @@ -401,7 +434,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm, long ret; if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir))) - return H_HARDWARE; + return H_TOO_HARD; if (dir == DMA_NONE) return H_SUCCESS; @@ -449,15 +482,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, return H_TOO_HARD; if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) - return H_HARDWARE; + return H_TOO_HARD; if (mm_iommu_mapped_inc(mem)) - return H_CLOSED; + return H_TOO_HARD; ret = iommu_tce_xchg(tbl, entry, &hpa, &dir); if (WARN_ON_ONCE(ret)) { mm_iommu_mapped_dec(mem); - return H_HARDWARE; + return H_TOO_HARD; } if (dir != DMA_NONE) @@ -517,8 +550,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, idx = srcu_read_lock(&vcpu->kvm->srcu); - if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) { + if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) { ret = H_PARAMETER; goto unlock_exit; } @@ -533,14 +565,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); - if (ret == H_SUCCESS) - continue; - - if (ret == H_TOO_HARD) + if (ret != H_SUCCESS) { + kvmppc_clear_tce(stit->tbl, entry); goto unlock_exit; - - WARN_ON_ONCE(1); - kvmppc_clear_tce(stit->tbl, entry); + } } kvmppc_tce_put(stt, entry, tce); @@ -583,7 +611,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, return ret; idx = srcu_read_lock(&vcpu->kvm->srcu); - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) { + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) { ret = H_TOO_HARD; goto unlock_exit; } @@ -599,10 +627,26 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, ret = kvmppc_tce_validate(stt, tce); if (ret != H_SUCCESS) goto unlock_exit; + } + + for (i = 0; i < npages; ++i) { + /* + * This looks unsafe, because we validate, then regrab + * the TCE from userspace which could have been changed by + * another thread. + * + * But it actually is safe, because the relevant checks will be + * re-executed in the following code. If userspace tries to + * change this dodgily it will result in a messier failure mode + * but won't threaten the host. 
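/*
 * The comment above explains why re-reading each TCE from userspace after
 * the validation pass is safe: every value is fully re-checked on the second
 * pass, so a racing writer can only make its own hcall fail. A schematic
 * two-pass shape of that pattern; the volatile pointer stands in for a
 * userspace buffer that may change under us, and the checks are toy ones.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

static bool validate(uint64_t tce) { return (tce & 0xfffULL) == 0; } /* toy check */
static bool map_one(uint64_t tce)  { return validate(tce); }	      /* re-checks */

static int put_tce_indirect(volatile const uint64_t *user_tces, size_t n)
{
	/* Pass 1: validate a snapshot of every entry; bail out on any failure. */
	for (size_t i = 0; i < n; i++)
		if (!validate(user_tces[i]))
			return -1;

	/*
	 * Pass 2: re-fetch each entry. The value may differ from pass 1, but
	 * map_one() repeats the relevant checks, so a racing update can only
	 * fail this hcall, never corrupt the host.
	 */
	for (size_t i = 0; i < n; i++)
		if (!map_one(user_tces[i]))
			return -1;
	return 0;
}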
+ */ + if (get_user(tce, tces + i)) { + ret = H_TOO_HARD; + goto unlock_exit; + } + tce = be64_to_cpu(tce); - if (kvmppc_gpa_to_ua(vcpu->kvm, - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), - &ua, NULL)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -610,14 +654,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, stit->tbl, entry + i, ua, iommu_tce_direction(tce)); - if (ret == H_SUCCESS) - continue; - - if (ret == H_TOO_HARD) + if (ret != H_SUCCESS) { + kvmppc_clear_tce(stit->tbl, entry); goto unlock_exit; - - WARN_ON_ONCE(1); - kvmppc_clear_tce(stit->tbl, entry); + } } kvmppc_tce_put(stt, entry + i, tce); diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 6821ead4b4eb..2206bc729b9a 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -87,6 +87,7 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, } EXPORT_SYMBOL_GPL(kvmppc_find_table); +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* * Validates TCE address. * At the moment flags and page mask are validated. @@ -94,14 +95,14 @@ EXPORT_SYMBOL_GPL(kvmppc_find_table); * to the table and user space is supposed to process them), we can skip * checking other things (such as TCE is a guest RAM address or the page * was actually allocated). - * - * WARNING: This will be called in real-mode on HV KVM and virtual - * mode on PR KVM */ -long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) +static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt, + unsigned long tce) { unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE); enum dma_data_direction dir = iommu_tce_direction(tce); + struct kvmppc_spapr_tce_iommu_table *stit; + unsigned long ua = 0; /* Allow userspace to poison TCE table */ if (dir == DMA_NONE) @@ -110,9 +111,25 @@ long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_PARAMETER; + if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL)) + return H_TOO_HARD; + + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { + unsigned long hpa = 0; + struct mm_iommu_table_group_mem_t *mem; + long shift = stit->tbl->it_page_shift; + + mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift); + if (!mem) + return H_TOO_HARD; + + if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa)) + return H_TOO_HARD; + } + return H_SUCCESS; } -EXPORT_SYMBOL_GPL(kvmppc_tce_validate); +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ /* Note on the use of page_address() in real mode, * @@ -164,10 +181,10 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, } EXPORT_SYMBOL_GPL(kvmppc_tce_put); -long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, +long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, unsigned long *ua, unsigned long **prmap) { - unsigned long gfn = gpa >> PAGE_SHIFT; + unsigned long gfn = tce >> PAGE_SHIFT; struct kvm_memory_slot *memslot; memslot = search_memslots(kvm_memslots(kvm), gfn); @@ -175,7 +192,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, return -EINVAL; *ua = __gfn_to_hva_memslot(memslot, gfn) | - (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); + (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE if (prmap) @@ -184,7 +201,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, return 0; } -EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua); 
+EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, @@ -197,7 +214,7 @@ static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, if (!ret && ((*direction == DMA_FROM_DEVICE) || (*direction == DMA_BIDIRECTIONAL))) { - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry); + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); /* * kvmppc_rm_tce_iommu_do_map() updates the UA cache after * calling this so we still get here a valid UA. @@ -223,7 +240,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, { struct mm_iommu_table_group_mem_t *mem = NULL; const unsigned long pgsize = 1ULL << tbl->it_page_shift; - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry); + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); if (!pua) /* it_userspace allocation might be delayed */ @@ -287,7 +304,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, { long ret; unsigned long hpa = 0; - __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry); + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry); struct mm_iommu_table_group_mem_t *mem; if (!pua) @@ -300,10 +317,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, &hpa))) - return H_HARDWARE; + return H_TOO_HARD; if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem))) - return H_CLOSED; + return H_TOO_HARD; ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); if (ret) { @@ -368,13 +385,12 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - ret = kvmppc_tce_validate(stt, tce); + ret = kvmppc_rm_tce_validate(stt, tce); if (ret != H_SUCCESS) return ret; dir = iommu_tce_direction(tce); - if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) + if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; entry = ioba >> stt->page_shift; @@ -387,14 +403,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, stit->tbl, entry, ua, dir); - if (ret == H_SUCCESS) - continue; - - if (ret == H_TOO_HARD) + if (ret != H_SUCCESS) { + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); return ret; - - WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); + } } kvmppc_tce_put(stt, entry, tce); @@ -480,7 +492,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, */ struct mm_iommu_table_group_mem_t *mem; - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) return H_TOO_HARD; mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); @@ -496,12 +508,12 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, * We do not require memory to be preregistered in this case * so lock rmap and do __find_linux_pte_or_hugepte(). 
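/*
 * kvmppc_tce_to_ua() above (renamed from kvmppc_gpa_to_ua()) treats the TCE
 * as a guest physical address plus low-order permission flags: the frame
 * number selects the memslot, and the in-page offset minus the flag bits is
 * carried over into the userspace address. A standalone sketch; the flag
 * values and the hva_of_gfn() callback are placeholders, not kernel APIs.
 */
#include <stdint.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_MASK	(~((1ULL << EX_PAGE_SHIFT) - 1))
#define EX_TCE_READ	0x1ULL	/* assumed stand-in for TCE_PCI_READ  */
#define EX_TCE_WRITE	0x2ULL	/* assumed stand-in for TCE_PCI_WRITE */

static uint64_t tce_to_ua(uint64_t tce, uint64_t (*hva_of_gfn)(uint64_t))
{
	uint64_t gfn = tce >> EX_PAGE_SHIFT;

	/* keep the in-page offset, strip the permission flag bits */
	return hva_of_gfn(gfn) | (tce & ~(EX_PAGE_MASK | EX_TCE_READ | EX_TCE_WRITE));
}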
*/ - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) return H_TOO_HARD; rmap = (void *) vmalloc_to_phys(rmap); if (WARN_ON_ONCE_RM(!rmap)) - return H_HARDWARE; + return H_TOO_HARD; /* * Synchronize with the MMU notifier callbacks in @@ -521,14 +533,16 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, for (i = 0; i < npages; ++i) { unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); - ret = kvmppc_tce_validate(stt, tce); + ret = kvmppc_rm_tce_validate(stt, tce); if (ret != H_SUCCESS) goto unlock_exit; + } + + for (i = 0; i < npages; ++i) { + unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); ua = 0; - if (kvmppc_gpa_to_ua(vcpu->kvm, - tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), - &ua, NULL)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -536,14 +550,11 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, stit->tbl, entry + i, ua, iommu_tce_direction(tce)); - if (ret == H_SUCCESS) - continue; - - if (ret == H_TOO_HARD) + if (ret != H_SUCCESS) { + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, + entry); goto unlock_exit; - - WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); + } } kvmppc_tce_put(stt, entry + i, tce); diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 36b11c5a0dbb..8c7e933e942e 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -36,7 +36,6 @@ #define OP_31_XOP_MTSR 210 #define OP_31_XOP_MTSRIN 242 #define OP_31_XOP_TLBIEL 274 -#define OP_31_XOP_TLBIE 306 /* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */ #define OP_31_XOP_FAKE_SC1 308 #define OP_31_XOP_SLBMTE 402 @@ -110,7 +109,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu) vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; vcpu->arch.tar_tm = vcpu->arch.tar; vcpu->arch.lr_tm = vcpu->arch.regs.link; - vcpu->arch.cr_tm = vcpu->arch.cr; + vcpu->arch.cr_tm = vcpu->arch.regs.ccr; vcpu->arch.xer_tm = vcpu->arch.regs.xer; vcpu->arch.vrsave_tm = vcpu->arch.vrsave; } @@ -129,7 +128,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu) vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; vcpu->arch.tar = vcpu->arch.tar_tm; vcpu->arch.regs.link = vcpu->arch.lr_tm; - vcpu->arch.cr = vcpu->arch.cr_tm; + vcpu->arch.regs.ccr = vcpu->arch.cr_tm; vcpu->arch.regs.xer = vcpu->arch.xer_tm; vcpu->arch.vrsave = vcpu->arch.vrsave_tm; } @@ -141,7 +140,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val) uint64_t texasr; /* CR0 = 0 | MSR[TS] | 0 */ - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) << CR0_SHIFT); @@ -220,7 +219,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) tm_abort(ra_val); /* CR0 = 0 | MSR[TS] | 0 */ - vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) | (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1)) << CR0_SHIFT); @@ -494,8 +493,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, if (!(kvmppc_get_msr(vcpu) & MSR_PR)) { preempt_disable(); - vcpu->arch.cr = (CR0_TBEGIN_FAILURE | - (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT))); + vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE | + (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT))); vcpu->arch.texasr = 
(TEXASR_FS | TEXASR_EXACT | (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3e3a71594e63..bf8def2159c3 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -50,6 +50,7 @@ #include <asm/reg.h> #include <asm/ppc-opcode.h> #include <asm/asm-prototypes.h> +#include <asm/archrandom.h> #include <asm/debug.h> #include <asm/disassemble.h> #include <asm/cputable.h> @@ -104,6 +105,10 @@ static bool indep_threads_mode = true; module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)"); +static bool one_vm_per_core; +module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)"); + #ifdef CONFIG_KVM_XICS static struct kernel_param_ops module_param_ops = { .set = param_set_int, @@ -117,6 +122,16 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644); MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core"); #endif +/* If set, guests are allowed to create and control nested guests */ +static bool nested = true; +module_param(nested, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)"); + +static inline bool nesting_enabled(struct kvm *kvm) +{ + return kvm->arch.nested_enable && kvm_is_radix(kvm); +} + /* If set, the threads on each CPU core have to be in the same MMU mode */ static bool no_mixing_hpt_and_radix; @@ -173,6 +188,10 @@ static bool kvmppc_ipi_thread(int cpu) { unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); + /* If we're a nested hypervisor, fall back to ordinary IPIs for now */ + if (kvmhv_on_pseries()) + return false; + /* On POWER9 we can use msgsnd to IPI any cpu */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { msg |= get_hard_smp_processor_id(cpu); @@ -410,8 +429,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); pr_err("sprg2 = %.16llx sprg3 = %.16llx\n", vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); - pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n", - vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); + pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n", + vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); pr_err("fault dar = %.16lx dsisr = %.8x\n", vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); @@ -730,8 +749,7 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) /* * Ensure that the read of vcore->dpdes comes after the read * of vcpu->doorbell_request. This barrier matches the - * lwsync in book3s_hv_rmhandlers.S just before the - * fast_guest_return label. + * smb_wmb() in kvmppc_guest_entry_inject(). 
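/*
 * The reworked comment above pairs the smp_rmb() in kvmppc_doorbell_pending()
 * with a write barrier on the producer side, so a reader that sees
 * doorbell_request cleared is guaranteed to also see the dpdes update. A
 * generic sketch of that store-store / load-load pairing using C11 fences;
 * illustrative only, the kernel code uses smp_wmb()/smp_rmb().
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int doorbell_request = 1;
static _Atomic unsigned long dpdes;

static void producer(void)
{
	atomic_store_explicit(&dpdes, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* smp_wmb() analogue */
	atomic_store_explicit(&doorbell_request, 0, memory_order_relaxed);
}

static bool consumer_sees_doorbell(void)
{
	if (atomic_load_explicit(&doorbell_request, memory_order_relaxed))
		return true;
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() analogue */
	/* doorbell_request was seen as 0, so the dpdes store is visible too */
	return atomic_load_explicit(&dpdes, memory_order_relaxed) != 0;
}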
*/ smp_rmb(); vc = vcpu->arch.vcore; @@ -912,6 +930,19 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) break; } return RESUME_HOST; + case H_SET_DABR: + ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4)); + break; + case H_SET_XDABR: + ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5)); + break; + case H_GET_TCE: + ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5)); + if (ret == H_TOO_HARD) + return RESUME_HOST; + break; case H_PUT_TCE: ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5), @@ -935,6 +966,32 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (ret == H_TOO_HARD) return RESUME_HOST; break; + case H_RANDOM: + if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4])) + ret = H_HARDWARE; + break; + + case H_SET_PARTITION_TABLE: + ret = H_FUNCTION; + if (nesting_enabled(vcpu->kvm)) + ret = kvmhv_set_partition_table(vcpu); + break; + case H_ENTER_NESTED: + ret = H_FUNCTION; + if (!nesting_enabled(vcpu->kvm)) + break; + ret = kvmhv_enter_nested_guest(vcpu); + if (ret == H_INTERRUPT) { + kvmppc_set_gpr(vcpu, 3, 0); + return -EINTR; + } + break; + case H_TLB_INVALIDATE: + ret = H_FUNCTION; + if (nesting_enabled(vcpu->kvm)) + ret = kvmhv_do_nested_tlbie(vcpu); + break; + default: return RESUME_HOST; } @@ -943,6 +1000,24 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* + * Handle H_CEDE in the nested virtualization case where we haven't + * called the real-mode hcall handlers in book3s_hv_rmhandlers.S. + * This has to be done early, not in kvmppc_pseries_do_hcall(), so + * that the cede logic in kvmppc_run_single_vcpu() works properly. + */ +static void kvmppc_nested_cede(struct kvm_vcpu *vcpu) +{ + vcpu->arch.shregs.msr |= MSR_EE; + vcpu->arch.ceded = 1; + smp_mb(); + if (vcpu->arch.prodded) { + vcpu->arch.prodded = 0; + smp_mb(); + vcpu->arch.ceded = 0; + } +} + static int kvmppc_hcall_impl_hv(unsigned long cmd) { switch (cmd) { @@ -1085,7 +1160,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -/* Called with vcpu->arch.vcore->lock held */ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, struct task_struct *tsk) { @@ -1190,7 +1264,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, break; case BOOK3S_INTERRUPT_H_INST_STORAGE: vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); - vcpu->arch.fault_dsisr = 0; + vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & + DSISR_SRR1_MATCH_64S; + if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) + vcpu->arch.fault_dsisr |= DSISR_ISSTORE; r = RESUME_PAGE_FAULT; break; /* @@ -1206,10 +1283,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, swab32(vcpu->arch.emul_inst) : vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { - /* Need vcore unlocked to call kvmppc_get_last_inst */ - spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_debug_inst(run, vcpu); - spin_lock(&vcpu->arch.vcore->lock); } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1225,12 +1299,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: r = EMULATE_FAIL; if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && - cpu_has_feature(CPU_FTR_ARCH_300)) { - /* Need vcore unlocked to call kvmppc_get_last_inst */ - spin_unlock(&vcpu->arch.vcore->lock); + cpu_has_feature(CPU_FTR_ARCH_300)) r = kvmppc_emulate_doorbell_instr(vcpu); - 
spin_lock(&vcpu->arch.vcore->lock); - } if (r == EMULATE_FAIL) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1265,6 +1335,104 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, return r; } +static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) +{ + int r; + int srcu_idx; + + vcpu->stat.sum_exits++; + + /* + * This can happen if an interrupt occurs in the last stages + * of guest entry or the first stages of guest exit (i.e. after + * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV + * and before setting it to KVM_GUEST_MODE_HOST_HV). + * That can happen due to a bug, or due to a machine check + * occurring at just the wrong time. + */ + if (vcpu->arch.shregs.msr & MSR_HV) { + pr_emerg("KVM trap in HV mode while nested!\n"); + pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n", + vcpu->arch.trap, kvmppc_get_pc(vcpu), + vcpu->arch.shregs.msr); + kvmppc_dump_regs(vcpu); + return RESUME_HOST; + } + switch (vcpu->arch.trap) { + /* We're good on these - the host merely wanted to get our attention */ + case BOOK3S_INTERRUPT_HV_DECREMENTER: + vcpu->stat.dec_exits++; + r = RESUME_GUEST; + break; + case BOOK3S_INTERRUPT_EXTERNAL: + vcpu->stat.ext_intr_exits++; + r = RESUME_HOST; + break; + case BOOK3S_INTERRUPT_H_DOORBELL: + case BOOK3S_INTERRUPT_H_VIRT: + vcpu->stat.ext_intr_exits++; + r = RESUME_GUEST; + break; + /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ + case BOOK3S_INTERRUPT_HMI: + case BOOK3S_INTERRUPT_PERFMON: + case BOOK3S_INTERRUPT_SYSTEM_RESET: + r = RESUME_GUEST; + break; + case BOOK3S_INTERRUPT_MACHINE_CHECK: + /* Pass the machine check to the L1 guest */ + r = RESUME_HOST; + /* Print the MCE event to host console. */ + machine_check_print_event_info(&vcpu->arch.mce_evt, false); + break; + /* + * We get these next two if the guest accesses a page which it thinks + * it has mapped but which is not actually present, either because + * it is for an emulated I/O device or because the corresonding + * host page has been paged out. + */ + case BOOK3S_INTERRUPT_H_DATA_STORAGE: + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + r = kvmhv_nested_page_fault(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); + break; + case BOOK3S_INTERRUPT_H_INST_STORAGE: + vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); + vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & + DSISR_SRR1_MATCH_64S; + if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) + vcpu->arch.fault_dsisr |= DSISR_ISSTORE; + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + r = kvmhv_nested_page_fault(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); + break; + +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case BOOK3S_INTERRUPT_HV_SOFTPATCH: + /* + * This occurs for various TM-related instructions that + * we need to emulate on POWER9 DD2.2. We have already + * handled the cases where the guest was in real-suspend + * mode and was transitioning to transactional state. 
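/*
 * For an instruction storage interrupt the fault details arrive in (H)SRR1
 * rather than DSISR, so both exit handlers above synthesize a DSISR-style
 * value: mask out the bits shared with DSISR and add the store bit when
 * HSRR1 flags a write. A standalone restatement; the three masks below are
 * placeholders, the real DSISR_SRR1_MATCH_64S / HSRR1_HISI_WRITE /
 * DSISR_ISSTORE values come from the powerpc register headers.
 */
#include <stdint.h>

#define EX_SRR1_MATCH_BITS	0x0000000000300000ULL
#define EX_HISI_WRITE_BIT	0x0000000000010000ULL
#define EX_DSISR_STORE_BIT	0x0000000002000000ULL

static uint64_t hisi_to_dsisr(uint64_t srr1)
{
	uint64_t dsisr = srr1 & EX_SRR1_MATCH_BITS;	/* bits shared with DSISR */

	if (srr1 & EX_HISI_WRITE_BIT)			/* fault was on a store  */
		dsisr |= EX_DSISR_STORE_BIT;
	return dsisr;
}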
+ */ + r = kvmhv_p9_tm_emulation(vcpu); + break; +#endif + + case BOOK3S_INTERRUPT_HV_RM_HARD: + vcpu->arch.trap = 0; + r = RESUME_GUEST; + if (!xive_enabled()) + kvmppc_xics_rm_complete(vcpu, 0); + break; + default: + r = RESUME_HOST; + break; + } + + return r; +} + static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { @@ -1555,6 +1723,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_ONLINE: *val = get_reg_val(id, vcpu->arch.online); break; + case KVM_REG_PPC_PTCR: + *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); + break; default: r = -EINVAL; break; @@ -1786,6 +1957,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, atomic_dec(&vcpu->arch.vcore->online_count); vcpu->arch.online = i; break; + case KVM_REG_PPC_PTCR: + vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); + break; default: r = -EINVAL; break; @@ -2019,15 +2193,18 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, * Set the default HFSCR for the guest from the host value. * This value is only used on POWER9. * On POWER9, we want to virtualize the doorbell facility, so we - * turn off the HFSCR bit, which causes those instructions to trap. + * don't set the HFSCR_MSGP bit, and that causes those instructions + * to trap and then we emulate them. */ - vcpu->arch.hfscr = mfspr(SPRN_HFSCR); - if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) + vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | + HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP; + if (cpu_has_feature(CPU_FTR_HVMODE)) { + vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); + if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) + vcpu->arch.hfscr |= HFSCR_TM; + } + if (cpu_has_feature(CPU_FTR_TM_COMP)) vcpu->arch.hfscr |= HFSCR_TM; - else if (!cpu_has_feature(CPU_FTR_TM_COMP)) - vcpu->arch.hfscr &= ~HFSCR_TM; - if (cpu_has_feature(CPU_FTR_ARCH_300)) - vcpu->arch.hfscr &= ~HFSCR_MSGP; kvmppc_mmu_book3s_hv_init(vcpu); @@ -2242,10 +2419,18 @@ static void kvmppc_release_hwthread(int cpu) static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) { + struct kvm_nested_guest *nested = vcpu->arch.nested; + cpumask_t *cpu_in_guest; int i; cpu = cpu_first_thread_sibling(cpu); - cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); + if (nested) { + cpumask_set_cpu(cpu, &nested->need_tlb_flush); + cpu_in_guest = &nested->cpu_in_guest; + } else { + cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); + cpu_in_guest = &kvm->arch.cpu_in_guest; + } /* * Make sure setting of bit in need_tlb_flush precedes * testing of cpu_in_guest bits. The matching barrier on @@ -2253,13 +2438,23 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) */ smp_mb(); for (i = 0; i < threads_per_core; ++i) - if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest)) + if (cpumask_test_cpu(cpu + i, cpu_in_guest)) smp_call_function_single(cpu + i, do_nothing, NULL, 1); } static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) { + struct kvm_nested_guest *nested = vcpu->arch.nested; struct kvm *kvm = vcpu->kvm; + int prev_cpu; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return; + + if (nested) + prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; + else + prev_cpu = vcpu->arch.prev_cpu; /* * With radix, the guest can do TLB invalidations itself, @@ -2273,12 +2468,46 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) * ran to flush the TLB. The TLB is shared between threads, * so we use a single bit in .need_tlb_flush for all 4 threads. 
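/*
 * The comment above notes that need_tlb_flush holds one bit per core, since
 * all threads of a core share the TLB, and kvmppc_radix_check_need_tlb_flush()
 * rounds the CPU id down to the first thread of its core before testing the
 * bit. A small sketch of that bookkeeping, assuming 4 threads per core and a
 * plain 64-bit word standing in for the cpumask.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_THREADS_PER_CORE 4

static uint64_t need_tlb_flush;		/* only core-first bits are ever set */

static int first_thread_of_core(int cpu)
{
	return cpu & ~(EX_THREADS_PER_CORE - 1);	/* 0,1,2,3 -> 0 */
}

static void mark_core_needs_flush(int cpu)
{
	need_tlb_flush |= 1ULL << first_thread_of_core(cpu);
}

static bool check_and_clear(int cpu)
{
	int bit = first_thread_of_core(cpu);

	if (!(need_tlb_flush & (1ULL << bit)))
		return false;
	/* flush the guest's TLB entries here, then clear the bit */
	need_tlb_flush &= ~(1ULL << bit);
	return true;
}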
*/ - if (vcpu->arch.prev_cpu != pcpu) { - if (vcpu->arch.prev_cpu >= 0 && - cpu_first_thread_sibling(vcpu->arch.prev_cpu) != + if (prev_cpu != pcpu) { + if (prev_cpu >= 0 && + cpu_first_thread_sibling(prev_cpu) != cpu_first_thread_sibling(pcpu)) - radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu); - vcpu->arch.prev_cpu = pcpu; + radix_flush_cpu(kvm, prev_cpu, vcpu); + if (nested) + nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; + else + vcpu->arch.prev_cpu = pcpu; + } +} + +static void kvmppc_radix_check_need_tlb_flush(struct kvm *kvm, int pcpu, + struct kvm_nested_guest *nested) +{ + cpumask_t *need_tlb_flush; + int lpid; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return; + + if (cpu_has_feature(CPU_FTR_ARCH_300)) + pcpu &= ~0x3UL; + + if (nested) { + lpid = nested->shadow_lpid; + need_tlb_flush = &nested->need_tlb_flush; + } else { + lpid = kvm->arch.lpid; + need_tlb_flush = &kvm->arch.need_tlb_flush; + } + + mtspr(SPRN_LPID, lpid); + isync(); + smp_mb(); + + if (cpumask_test_cpu(pcpu, need_tlb_flush)) { + radix__local_flush_tlb_lpid_guest(lpid); + /* Clear the bit after the TLB flush */ + cpumask_clear_cpu(pcpu, need_tlb_flush); } } @@ -2493,6 +2722,10 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return false; + /* In one_vm_per_core mode, require all vcores to be from the same vm */ + if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) + return false; + /* Some POWER9 chips require all threads to be in the same MMU mode */ if (no_mixing_hpt_and_radix && kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm)) @@ -2600,6 +2833,14 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) spin_lock(&vc->lock); now = get_tb(); for_each_runnable_thread(i, vcpu, vc) { + /* + * It's safe to unlock the vcore in the loop here, because + * for_each_runnable_thread() is safe against removal of + * the vcpu, and the vcore state is VCORE_EXITING here, + * so any vcpus becoming runnable will have their arch.trap + * set to zero and can't actually run in the guest. + */ + spin_unlock(&vc->lock); /* cancel pending dec exception if dec is positive */ if (now < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu)) @@ -2615,6 +2856,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) vcpu->arch.ret = ret; vcpu->arch.trap = 0; + spin_lock(&vc->lock); if (is_kvmppc_resume_guest(vcpu->arch.ret)) { if (vcpu->arch.pending_exceptions) kvmppc_core_prepare_to_enter(vcpu); @@ -2963,8 +3205,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) spin_unlock(&core_info.vc[sub]->lock); if (kvm_is_radix(vc->kvm)) { - int tmp = pcpu; - /* * Do we need to flush the process scoped TLB for the LPAR? * @@ -2975,17 +3215,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * * Hash must be flushed in realmode in order to use tlbiel. */ - mtspr(SPRN_LPID, vc->kvm->arch.lpid); - isync(); - - if (cpu_has_feature(CPU_FTR_ARCH_300)) - tmp &= ~0x3UL; - - if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) { - radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid); - /* Clear the bit after the TLB flush */ - cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush); - } + kvmppc_radix_check_need_tlb_flush(vc->kvm, pcpu, NULL); } /* @@ -3080,6 +3310,300 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) } /* + * Load up hypervisor-mode registers on P9. 
+ */ +static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + s64 hdec; + u64 tb, purr, spurr; + int trap; + unsigned long host_hfscr = mfspr(SPRN_HFSCR); + unsigned long host_ciabr = mfspr(SPRN_CIABR); + unsigned long host_dawr = mfspr(SPRN_DAWR); + unsigned long host_dawrx = mfspr(SPRN_DAWRX); + unsigned long host_psscr = mfspr(SPRN_PSSCR); + unsigned long host_pidr = mfspr(SPRN_PID); + + hdec = time_limit - mftb(); + if (hdec < 0) + return BOOK3S_INTERRUPT_HV_DECREMENTER; + mtspr(SPRN_HDEC, hdec); + + if (vc->tb_offset) { + u64 new_tb = mftb() + vc->tb_offset; + mtspr(SPRN_TBU40, new_tb); + tb = mftb(); + if ((tb & 0xffffff) < (new_tb & 0xffffff)) + mtspr(SPRN_TBU40, new_tb + 0x1000000); + vc->tb_offset_applied = vc->tb_offset; + } + + if (vc->pcr) + mtspr(SPRN_PCR, vc->pcr); + mtspr(SPRN_DPDES, vc->dpdes); + mtspr(SPRN_VTB, vc->vtb); + + local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR); + local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR); + mtspr(SPRN_PURR, vcpu->arch.purr); + mtspr(SPRN_SPURR, vcpu->arch.spurr); + + if (cpu_has_feature(CPU_FTR_DAWR)) { + mtspr(SPRN_DAWR, vcpu->arch.dawr); + mtspr(SPRN_DAWRX, vcpu->arch.dawrx); + } + mtspr(SPRN_CIABR, vcpu->arch.ciabr); + mtspr(SPRN_IC, vcpu->arch.ic); + mtspr(SPRN_PID, vcpu->arch.pid); + + mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC | + (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); + + mtspr(SPRN_HFSCR, vcpu->arch.hfscr); + + mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0); + mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1); + mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2); + mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3); + + mtspr(SPRN_AMOR, ~0UL); + + mtspr(SPRN_LPCR, lpcr); + isync(); + + kvmppc_xive_push_vcpu(vcpu); + + mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0); + mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1); + + trap = __kvmhv_vcpu_entry_p9(vcpu); + + /* Advance host PURR/SPURR by the amount used by guest */ + purr = mfspr(SPRN_PURR); + spurr = mfspr(SPRN_SPURR); + mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr + + purr - vcpu->arch.purr); + mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr + + spurr - vcpu->arch.spurr); + vcpu->arch.purr = purr; + vcpu->arch.spurr = spurr; + + vcpu->arch.ic = mfspr(SPRN_IC); + vcpu->arch.pid = mfspr(SPRN_PID); + vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS; + + vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0); + vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1); + vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2); + vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3); + + mtspr(SPRN_PSSCR, host_psscr); + mtspr(SPRN_HFSCR, host_hfscr); + mtspr(SPRN_CIABR, host_ciabr); + mtspr(SPRN_DAWR, host_dawr); + mtspr(SPRN_DAWRX, host_dawrx); + mtspr(SPRN_PID, host_pidr); + + /* + * Since this is radix, do a eieio; tlbsync; ptesync sequence in + * case we interrupted the guest between a tlbie and a ptesync. 
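/*
 * kvmhv_load_hv_regs_and_go() above applies the vcore's timebase offset by
 * writing TBU40, which only sets the upper 40 bits of the timebase; the low
 * 24 bits keep their old value. If those old low bits are behind the
 * target's, the result falls short of the intended timebase, so the code
 * re-reads it and bumps the upper bits by one unit of bit 24. A standalone
 * model of that check, with the timebase split into two software variables.
 */
#include <stdint.h>

static uint64_t tb_upper40, tb_low24;	/* low 24 bits keep counting on their own */

static uint64_t read_tb(void)           { return tb_upper40 | tb_low24; }
static void     write_tbu40(uint64_t v) { tb_upper40 = v & ~0xffffffULL; }

static void apply_tb_offset(int64_t offset)
{
	uint64_t new_tb = read_tb() + offset;

	write_tbu40(new_tb);
	/*
	 * If the untouched low 24 bits are behind the target's low bits, the
	 * timebase is now short of new_tb; compensate by moving the upper
	 * bits up by one unit of bit 24.
	 */
	if ((read_tb() & 0xffffff) < (new_tb & 0xffffff))
		write_tbu40(new_tb + 0x1000000);
}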
+ */ + asm volatile("eieio; tlbsync; ptesync"); + + mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */ + isync(); + + vc->dpdes = mfspr(SPRN_DPDES); + vc->vtb = mfspr(SPRN_VTB); + mtspr(SPRN_DPDES, 0); + if (vc->pcr) + mtspr(SPRN_PCR, 0); + + if (vc->tb_offset_applied) { + u64 new_tb = mftb() - vc->tb_offset_applied; + mtspr(SPRN_TBU40, new_tb); + tb = mftb(); + if ((tb & 0xffffff) < (new_tb & 0xffffff)) + mtspr(SPRN_TBU40, new_tb + 0x1000000); + vc->tb_offset_applied = 0; + } + + mtspr(SPRN_HDEC, 0x7fffffff); + mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr); + + return trap; +} + +/* + * Virtual-mode guest entry for POWER9 and later when the host and + * guest are both using the radix MMU. The LPIDR has already been set. + */ +int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + unsigned long host_dscr = mfspr(SPRN_DSCR); + unsigned long host_tidr = mfspr(SPRN_TIDR); + unsigned long host_iamr = mfspr(SPRN_IAMR); + s64 dec; + u64 tb; + int trap, save_pmu; + + dec = mfspr(SPRN_DEC); + tb = mftb(); + if (dec < 512) + return BOOK3S_INTERRUPT_HV_DECREMENTER; + local_paca->kvm_hstate.dec_expires = dec + tb; + if (local_paca->kvm_hstate.dec_expires < time_limit) + time_limit = local_paca->kvm_hstate.dec_expires; + + vcpu->arch.ceded = 0; + + kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */ + + kvmppc_subcore_enter_guest(); + + vc->entry_exit_map = 1; + vc->in_guest = 1; + + if (vcpu->arch.vpa.pinned_addr) { + struct lppaca *lp = vcpu->arch.vpa.pinned_addr; + u32 yield_count = be32_to_cpu(lp->yield_count) + 1; + lp->yield_count = cpu_to_be32(yield_count); + vcpu->arch.vpa.dirty = 1; + } + + if (cpu_has_feature(CPU_FTR_TM) || + cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) + kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true); + + kvmhv_load_guest_pmu(vcpu); + + msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX); + load_fp_state(&vcpu->arch.fp); +#ifdef CONFIG_ALTIVEC + load_vr_state(&vcpu->arch.vr); +#endif + + mtspr(SPRN_DSCR, vcpu->arch.dscr); + mtspr(SPRN_IAMR, vcpu->arch.iamr); + mtspr(SPRN_PSPB, vcpu->arch.pspb); + mtspr(SPRN_FSCR, vcpu->arch.fscr); + mtspr(SPRN_TAR, vcpu->arch.tar); + mtspr(SPRN_EBBHR, vcpu->arch.ebbhr); + mtspr(SPRN_EBBRR, vcpu->arch.ebbrr); + mtspr(SPRN_BESCR, vcpu->arch.bescr); + mtspr(SPRN_WORT, vcpu->arch.wort); + mtspr(SPRN_TIDR, vcpu->arch.tid); + mtspr(SPRN_DAR, vcpu->arch.shregs.dar); + mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); + mtspr(SPRN_AMR, vcpu->arch.amr); + mtspr(SPRN_UAMOR, vcpu->arch.uamor); + + if (!(vcpu->arch.ctrl & 1)) + mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1); + + mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); + + if (kvmhv_on_pseries()) { + /* call our hypervisor to load up HV regs and go */ + struct hv_guest_state hvregs; + + kvmhv_save_hv_regs(vcpu, &hvregs); + hvregs.lpcr = lpcr; + vcpu->arch.regs.msr = vcpu->arch.shregs.msr; + hvregs.version = HV_GUEST_STATE_VERSION; + if (vcpu->arch.nested) { + hvregs.lpid = vcpu->arch.nested->shadow_lpid; + hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; + } else { + hvregs.lpid = vcpu->kvm->arch.lpid; + hvregs.vcpu_token = vcpu->vcpu_id; + } + hvregs.hdec_expiry = time_limit; + trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs), + __pa(&vcpu->arch.regs)); + kvmhv_restore_hv_return_state(vcpu, &hvregs); + vcpu->arch.shregs.msr = vcpu->arch.regs.msr; + vcpu->arch.shregs.dar = mfspr(SPRN_DAR); + vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); + + /* H_CEDE has to be handled now, not later */ + if (trap == 
BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && + kvmppc_get_gpr(vcpu, 3) == H_CEDE) { + kvmppc_nested_cede(vcpu); + trap = 0; + } + } else { + trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr); + } + + vcpu->arch.slb_max = 0; + dec = mfspr(SPRN_DEC); + tb = mftb(); + vcpu->arch.dec_expires = dec + tb; + vcpu->cpu = -1; + vcpu->arch.thread_cpu = -1; + vcpu->arch.ctrl = mfspr(SPRN_CTRLF); + + vcpu->arch.iamr = mfspr(SPRN_IAMR); + vcpu->arch.pspb = mfspr(SPRN_PSPB); + vcpu->arch.fscr = mfspr(SPRN_FSCR); + vcpu->arch.tar = mfspr(SPRN_TAR); + vcpu->arch.ebbhr = mfspr(SPRN_EBBHR); + vcpu->arch.ebbrr = mfspr(SPRN_EBBRR); + vcpu->arch.bescr = mfspr(SPRN_BESCR); + vcpu->arch.wort = mfspr(SPRN_WORT); + vcpu->arch.tid = mfspr(SPRN_TIDR); + vcpu->arch.amr = mfspr(SPRN_AMR); + vcpu->arch.uamor = mfspr(SPRN_UAMOR); + vcpu->arch.dscr = mfspr(SPRN_DSCR); + + mtspr(SPRN_PSPB, 0); + mtspr(SPRN_WORT, 0); + mtspr(SPRN_AMR, 0); + mtspr(SPRN_UAMOR, 0); + mtspr(SPRN_DSCR, host_dscr); + mtspr(SPRN_TIDR, host_tidr); + mtspr(SPRN_IAMR, host_iamr); + mtspr(SPRN_PSPB, 0); + + msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX); + store_fp_state(&vcpu->arch.fp); +#ifdef CONFIG_ALTIVEC + store_vr_state(&vcpu->arch.vr); +#endif + + if (cpu_has_feature(CPU_FTR_TM) || + cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) + kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true); + + save_pmu = 1; + if (vcpu->arch.vpa.pinned_addr) { + struct lppaca *lp = vcpu->arch.vpa.pinned_addr; + u32 yield_count = be32_to_cpu(lp->yield_count) + 1; + lp->yield_count = cpu_to_be32(yield_count); + vcpu->arch.vpa.dirty = 1; + save_pmu = lp->pmcregs_in_use; + } + + kvmhv_save_guest_pmu(vcpu, save_pmu); + + vc->entry_exit_map = 0x101; + vc->in_guest = 0; + + mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb()); + + kvmhv_load_host_pmu(); + + kvmppc_subcore_exit_guest(); + + return trap; +} + +/* * Wait for some other vcpu thread to execute us, and * wake us up when we need to handle something in the host. */ @@ -3256,6 +3780,11 @@ out: trace_kvmppc_vcore_wakeup(do_sleep, block_ns); } +/* + * This never fails for a radix guest, as none of the operations it does + * for a radix guest can fail or have a way to report failure. + * kvmhv_run_single_vcpu() relies on this fact. 
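/*
 * In the kvmhv_on_pseries() path above, L1 cannot write the HV registers
 * itself, so it packs them into an hv_guest_state block plus the vcpu's
 * pt_regs and asks L0 to run the guest via H_ENTER_NESTED; the hcall returns
 * the trap number, and an H_CEDE from L1's own guest has to be folded in
 * immediately. A schematic of that call shape; the struct lists only the
 * fields visible in this hunk (not the full ABI layout) and the two constants
 * are placeholders for BOOK3S_INTERRUPT_SYSCALL and H_CEDE.
 */
#include <stdint.h>

struct guest_state_blob {
	uint64_t version;	/* HV_GUEST_STATE_VERSION                  */
	uint64_t lpid;		/* L2 (shadow) LPID                        */
	uint64_t vcpu_token;	/* which vcpu L0 should run                */
	uint64_t lpcr;		/* requested LPCR                          */
	uint64_t hdec_expiry;	/* hypervisor-decrementer time limit       */
};

#define EX_TRAP_SYSCALL	0xc00
#define EX_HCALL_CEDE	0xe0

/* enter_nested() stands in for the H_ENTER_NESTED hypercall. */
static long run_l2_once(long (*enter_nested)(struct guest_state_blob *, void *),
			struct guest_state_blob *hv, void *l2_regs,
			uint64_t gpr3, int vcpu_is_nested)
{
	long trap = enter_nested(hv, l2_regs);	/* returns the L2 trap number */

	/* cede from our own guest is handled now, not in the exit handler */
	if (trap == EX_TRAP_SYSCALL && !vcpu_is_nested && gpr3 == EX_HCALL_CEDE)
		trap = 0;
	return trap;
}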
+ */ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu) { int r = 0; @@ -3405,6 +3934,171 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) return vcpu->arch.ret; } +int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, + struct kvm_vcpu *vcpu, u64 time_limit, + unsigned long lpcr) +{ + int trap, r, pcpu; + int srcu_idx; + struct kvmppc_vcore *vc; + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *nested = vcpu->arch.nested; + + trace_kvmppc_run_vcpu_enter(vcpu); + + kvm_run->exit_reason = 0; + vcpu->arch.ret = RESUME_GUEST; + vcpu->arch.trap = 0; + + vc = vcpu->arch.vcore; + vcpu->arch.ceded = 0; + vcpu->arch.run_task = current; + vcpu->arch.kvm_run = kvm_run; + vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); + vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; + vcpu->arch.busy_preempt = TB_NIL; + vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; + vc->runnable_threads[0] = vcpu; + vc->n_runnable = 1; + vc->runner = vcpu; + + /* See if the MMU is ready to go */ + if (!kvm->arch.mmu_ready) + kvmhv_setup_mmu(vcpu); + + if (need_resched()) + cond_resched(); + + kvmppc_update_vpas(vcpu); + + init_vcore_to_run(vc); + vc->preempt_tb = TB_NIL; + + preempt_disable(); + pcpu = smp_processor_id(); + vc->pcpu = pcpu; + kvmppc_prepare_radix_vcpu(vcpu, pcpu); + + local_irq_disable(); + hard_irq_disable(); + if (signal_pending(current)) + goto sigpend; + if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready) + goto out; + + if (!nested) { + kvmppc_core_prepare_to_enter(vcpu); + if (vcpu->arch.doorbell_request) { + vc->dpdes = 1; + smp_wmb(); + vcpu->arch.doorbell_request = 0; + } + if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, + &vcpu->arch.pending_exceptions)) + lpcr |= LPCR_MER; + } else if (vcpu->arch.pending_exceptions || + vcpu->arch.doorbell_request || + xive_interrupt_pending(vcpu)) { + vcpu->arch.ret = RESUME_HOST; + goto out; + } + + kvmppc_clear_host_core(pcpu); + + local_paca->kvm_hstate.tid = 0; + local_paca->kvm_hstate.napping = 0; + local_paca->kvm_hstate.kvm_split_mode = NULL; + kvmppc_start_thread(vcpu, vc); + kvmppc_create_dtl_entry(vcpu, vc); + trace_kvm_guest_enter(vcpu); + + vc->vcore_state = VCORE_RUNNING; + trace_kvmppc_run_core(vc, 0); + + if (cpu_has_feature(CPU_FTR_HVMODE)) + kvmppc_radix_check_need_tlb_flush(kvm, pcpu, nested); + + trace_hardirqs_on(); + guest_enter_irqoff(); + + srcu_idx = srcu_read_lock(&kvm->srcu); + + this_cpu_disable_ftrace(); + + trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr); + vcpu->arch.trap = trap; + + this_cpu_enable_ftrace(); + + srcu_read_unlock(&kvm->srcu, srcu_idx); + + if (cpu_has_feature(CPU_FTR_HVMODE)) { + mtspr(SPRN_LPID, kvm->arch.host_lpid); + isync(); + } + + trace_hardirqs_off(); + set_irq_happened(trap); + + kvmppc_set_host_core(pcpu); + + local_irq_enable(); + guest_exit(); + + cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); + + preempt_enable(); + + /* cancel pending decrementer exception if DEC is now positive */ + if (get_tb() < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu)) + kvmppc_core_dequeue_dec(vcpu); + + trace_kvm_guest_exit(vcpu); + r = RESUME_GUEST; + if (trap) { + if (!nested) + r = kvmppc_handle_exit_hv(kvm_run, vcpu, current); + else + r = kvmppc_handle_nested_exit(vcpu); + } + vcpu->arch.ret = r; + + if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded && + !kvmppc_vcpu_woken(vcpu)) { + kvmppc_set_timer(vcpu); + while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) { + if (signal_pending(current)) { + vcpu->stat.signal_exits++; + kvm_run->exit_reason = KVM_EXIT_INTR; + 
vcpu->arch.ret = -EINTR; + break; + } + spin_lock(&vc->lock); + kvmppc_vcore_blocked(vc); + spin_unlock(&vc->lock); + } + } + vcpu->arch.ceded = 0; + + vc->vcore_state = VCORE_INACTIVE; + trace_kvmppc_run_core(vc, 1); + + done: + kvmppc_remove_runnable(vc, vcpu); + trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); + + return vcpu->arch.ret; + + sigpend: + vcpu->stat.signal_exits++; + kvm_run->exit_reason = KVM_EXIT_INTR; + vcpu->arch.ret = -EINTR; + out: + local_irq_enable(); + preempt_enable(); + goto done; +} + static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) { int r; @@ -3480,7 +4174,20 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; do { - r = kvmppc_run_vcpu(run, vcpu); + /* + * The early POWER9 chips that can't mix radix and HPT threads + * on the same core also need the workaround for the problem + * where the TLB would prefetch entries in the guest exit path + * for radix guests using the guest PIDR value and LPID 0. + * The workaround is in the old path (kvmppc_run_vcpu()) + * but not the new path (kvmhv_run_single_vcpu()). + */ + if (kvm->arch.threads_indep && kvm_is_radix(kvm) && + !no_mixing_hpt_and_radix) + r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0, + vcpu->arch.vcore->lpcr); + else + r = kvmppc_run_vcpu(run, vcpu); if (run->exit_reason == KVM_EXIT_PAPR_HCALL && !(vcpu->arch.shregs.msr & MSR_PR)) { @@ -3559,6 +4266,10 @@ static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01); kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L); + /* If running as a nested hypervisor, we don't support HPT guests */ + if (kvmhv_on_pseries()) + info->flags |= KVM_PPC_NO_HASH; + return 0; } @@ -3723,8 +4434,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm) __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; dw1 = PATB_GR | kvm->arch.process_table; } - - mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1); + kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); } /* @@ -3820,6 +4530,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) { + if (nesting_enabled(kvm)) + kvmhv_release_all_nested(kvm); kvmppc_free_radix(kvm); kvmppc_update_lpcr(kvm, LPCR_VPM1, LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); @@ -3841,6 +4553,7 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm) kvmppc_free_hpt(&kvm->arch.hpt); kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR, LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); + kvmppc_rmap_reset(kvm); kvm->arch.radix = 1; return 0; } @@ -3940,6 +4653,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) kvmppc_alloc_host_rm_ops(); + kvmhv_vm_nested_init(kvm); + /* * Since we don't flush the TLB when tearing down a VM, * and this lpid might have previously been used, @@ -3958,9 +4673,13 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); /* Init LPCR for virtual RMA mode */ - kvm->arch.host_lpid = mfspr(SPRN_LPID); - kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); - lpcr &= LPCR_PECE | LPCR_LPES; + if (cpu_has_feature(CPU_FTR_HVMODE)) { + kvm->arch.host_lpid = mfspr(SPRN_LPID); + kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); + lpcr &= LPCR_PECE | LPCR_LPES; + } else { + lpcr = 0; + } lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | LPCR_VPM0 | LPCR_VPM1; kvm->arch.vrma_slb_v = SLB_VSID_B_1T | @@ -4027,8 +4746,14 @@ static int 
kvmppc_core_init_vm_hv(struct kvm *kvm) * On POWER9, we only need to do this if the "indep_threads_mode" * module parameter has been set to N. */ - if (cpu_has_feature(CPU_FTR_ARCH_300)) - kvm->arch.threads_indep = indep_threads_mode; + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) { + pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n"); + kvm->arch.threads_indep = true; + } else { + kvm->arch.threads_indep = indep_threads_mode; + } + } if (!kvm->arch.threads_indep) kvm_hv_vm_activated(); @@ -4051,6 +4776,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) snprintf(buf, sizeof(buf), "vm%d", current->pid); kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); kvmppc_mmu_debugfs_init(kvm); + if (radix_enabled()) + kvmhv_radix_debugfs_init(kvm); return 0; } @@ -4073,13 +4800,21 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) kvmppc_free_vcores(kvm); - kvmppc_free_lpid(kvm->arch.lpid); if (kvm_is_radix(kvm)) kvmppc_free_radix(kvm); else kvmppc_free_hpt(&kvm->arch.hpt); + /* Perform global invalidation and return lpid to the pool */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (nesting_enabled(kvm)) + kvmhv_release_all_nested(kvm); + kvm->arch.process_table = 0; + kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); + } + kvmppc_free_lpid(kvm->arch.lpid); + kvmppc_free_pimap(kvm); } @@ -4104,11 +4839,15 @@ static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, static int kvmppc_core_check_processor_compat_hv(void) { - if (!cpu_has_feature(CPU_FTR_HVMODE) || - !cpu_has_feature(CPU_FTR_ARCH_206)) - return -EIO; + if (cpu_has_feature(CPU_FTR_HVMODE) && + cpu_has_feature(CPU_FTR_ARCH_206)) + return 0; - return 0; + /* POWER9 in radix mode is capable of being a nested hypervisor. */ + if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) + return 0; + + return -EIO; } #ifdef CONFIG_KVM_XICS @@ -4426,6 +5165,10 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) if (radix && !radix_enabled()) return -EINVAL; + /* If we're a nested hypervisor, we currently only support radix */ + if (kvmhv_on_pseries() && !radix) + return -EINVAL; + mutex_lock(&kvm->lock); if (radix != kvm_is_radix(kvm)) { if (kvm->arch.mmu_ready) { @@ -4458,6 +5201,19 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) return err; } +static int kvmhv_enable_nested(struct kvm *kvm) +{ + if (!nested) + return -EPERM; + if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix) + return -ENODEV; + + /* kvm == NULL means the caller is testing if the capability exists */ + if (kvm) + kvm->arch.nested_enable = true; + return 0; +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -4497,6 +5253,7 @@ static struct kvmppc_ops kvm_ops_hv = { .configure_mmu = kvmhv_configure_mmu, .get_rmmu_info = kvmhv_get_rmmu_info, .set_smt_mode = kvmhv_set_smt_mode, + .enable_nested = kvmhv_enable_nested, }; static int kvm_init_subcore_bitmap(void) @@ -4547,6 +5304,10 @@ static int kvmppc_book3s_init_hv(void) if (r < 0) return -ENODEV; + r = kvmhv_nested_init(); + if (r) + return r; + r = kvm_init_subcore_bitmap(); if (r) return r; @@ -4557,7 +5318,8 @@ static int kvmppc_book3s_init_hv(void) * indirectly, via OPAL. 
*/ #ifdef CONFIG_SMP - if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) { + if (!xive_enabled() && !kvmhv_on_pseries() && + !local_paca->kvm_hstate.xics_phys) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); @@ -4605,6 +5367,7 @@ static void kvmppc_book3s_exit_hv(void) if (kvmppc_radix_possible()) kvmppc_radix_exit(); kvmppc_hv_ops = NULL; + kvmhv_nested_exit(); } module_init(kvmppc_book3s_init_hv); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index fc6bb9630a9c..a71e2fc00a4e 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -231,6 +231,15 @@ void kvmhv_rm_send_ipi(int cpu) void __iomem *xics_phys; unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); + /* For a nested hypervisor, use the XICS via hcall */ + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu), + IPI_PRIORITY); + return; + } + /* On POWER9 we can use msgsnd for any destination cpu. */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { msg |= get_hard_smp_processor_id(cpu); @@ -460,12 +469,19 @@ static long kvmppc_read_one_intr(bool *again) return 1; /* Now read the interrupt from the ICP */ - xics_phys = local_paca->kvm_hstate.xics_phys; - rc = 0; - if (!xics_phys) - rc = opal_int_get_xirr(&xirr, false); - else - xirr = __raw_rm_readl(xics_phys + XICS_XIRR); + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF); + xirr = cpu_to_be32(retbuf[0]); + } else { + xics_phys = local_paca->kvm_hstate.xics_phys; + rc = 0; + if (!xics_phys) + rc = opal_int_get_xirr(&xirr, false); + else + xirr = __raw_rm_readl(xics_phys + XICS_XIRR); + } if (rc < 0) return 1; @@ -494,7 +510,13 @@ static long kvmppc_read_one_intr(bool *again) */ if (xisr == XICS_IPI) { rc = 0; - if (xics_phys) { + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + plpar_hcall_raw(H_IPI, retbuf, + hard_smp_processor_id(), 0xff); + plpar_hcall_raw(H_EOI, retbuf, h_xirr); + } else if (xics_phys) { __raw_rm_writeb(0xff, xics_phys + XICS_MFRR); __raw_rm_writel(xirr, xics_phys + XICS_XIRR); } else { @@ -520,7 +542,13 @@ static long kvmppc_read_one_intr(bool *again) /* We raced with the host, * we need to resend that IPI, bummer */ - if (xics_phys) + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + plpar_hcall_raw(H_IPI, retbuf, + hard_smp_processor_id(), + IPI_PRIORITY); + } else if (xics_phys) __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR); else @@ -729,3 +757,51 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip) smp_mb(); local_paca->kvm_hstate.kvm_split_mode = NULL; } + +/* + * Is there a PRIV_DOORBELL pending for the guest (on POWER9)? + * Can we inject a Decrementer or a External interrupt? 
+ */ +void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu) +{ + int ext; + unsigned long vec = 0; + unsigned long lpcr; + + /* Insert EXTERNAL bit into LPCR at the MER bit position */ + ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1; + lpcr = mfspr(SPRN_LPCR); + lpcr |= ext << LPCR_MER_SH; + mtspr(SPRN_LPCR, lpcr); + isync(); + + if (vcpu->arch.shregs.msr & MSR_EE) { + if (ext) { + vec = BOOK3S_INTERRUPT_EXTERNAL; + } else { + long int dec = mfspr(SPRN_DEC); + if (!(lpcr & LPCR_LD)) + dec = (int) dec; + if (dec < 0) + vec = BOOK3S_INTERRUPT_DECREMENTER; + } + } + if (vec) { + unsigned long msr, old_msr = vcpu->arch.shregs.msr; + + kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); + kvmppc_set_srr1(vcpu, old_msr); + kvmppc_set_pc(vcpu, vec); + msr = vcpu->arch.intr_msr; + if (MSR_TM_ACTIVE(old_msr)) + msr |= MSR_TS_S; + vcpu->arch.shregs.msr = msr; + } + + if (vcpu->arch.doorbell_request) { + mtspr(SPRN_DPDES, 1); + vcpu->arch.vcore->dpdes = 1; + smp_wmb(); + vcpu->arch.doorbell_request = 0; + } +} diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 666b91c79eb4..a6d10010d9e8 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -64,52 +64,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* Save host PMU registers */ -BEGIN_FTR_SECTION - /* Work around P8 PMAE bug */ - li r3, -1 - clrrdi r3, r3, 10 - mfspr r8, SPRN_MMCR2 - mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */ - isync -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - li r3, 1 - sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ - mfspr r7, SPRN_MMCR0 /* save MMCR0 */ - mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ - mfspr r6, SPRN_MMCRA - /* Clear MMCRA in order to disable SDAR updates */ - li r5, 0 - mtspr SPRN_MMCRA, r5 - isync - lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */ - cmpwi r5, 0 - beq 31f /* skip if not */ - mfspr r5, SPRN_MMCR1 - mfspr r9, SPRN_SIAR - mfspr r10, SPRN_SDAR - std r7, HSTATE_MMCR0(r13) - std r5, HSTATE_MMCR1(r13) - std r6, HSTATE_MMCRA(r13) - std r9, HSTATE_SIAR(r13) - std r10, HSTATE_SDAR(r13) -BEGIN_FTR_SECTION - mfspr r9, SPRN_SIER - std r8, HSTATE_MMCR2(r13) - std r9, HSTATE_SIER(r13) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - mfspr r3, SPRN_PMC1 - mfspr r5, SPRN_PMC2 - mfspr r6, SPRN_PMC3 - mfspr r7, SPRN_PMC4 - mfspr r8, SPRN_PMC5 - mfspr r9, SPRN_PMC6 - stw r3, HSTATE_PMC1(r13) - stw r5, HSTATE_PMC2(r13) - stw r6, HSTATE_PMC3(r13) - stw r7, HSTATE_PMC4(r13) - stw r8, HSTATE_PMC5(r13) - stw r9, HSTATE_PMC6(r13) -31: + bl kvmhv_save_host_pmu /* * Put whatever is in the decrementer into the @@ -161,3 +116,51 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr + +_GLOBAL(kvmhv_save_host_pmu) +BEGIN_FTR_SECTION + /* Work around P8 PMAE bug */ + li r3, -1 + clrrdi r3, r3, 10 + mfspr r8, SPRN_MMCR2 + mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */ + isync +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + li r3, 1 + sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ + mfspr r7, SPRN_MMCR0 /* save MMCR0 */ + mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ + mfspr r6, SPRN_MMCRA + /* Clear MMCRA in order to disable SDAR updates */ + li r5, 0 + mtspr SPRN_MMCRA, r5 + isync + lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? 
*/ + cmpwi r5, 0 + beq 31f /* skip if not */ + mfspr r5, SPRN_MMCR1 + mfspr r9, SPRN_SIAR + mfspr r10, SPRN_SDAR + std r7, HSTATE_MMCR0(r13) + std r5, HSTATE_MMCR1(r13) + std r6, HSTATE_MMCRA(r13) + std r9, HSTATE_SIAR(r13) + std r10, HSTATE_SDAR(r13) +BEGIN_FTR_SECTION + mfspr r9, SPRN_SIER + std r8, HSTATE_MMCR2(r13) + std r9, HSTATE_SIER(r13) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + mfspr r3, SPRN_PMC1 + mfspr r5, SPRN_PMC2 + mfspr r6, SPRN_PMC3 + mfspr r7, SPRN_PMC4 + mfspr r8, SPRN_PMC5 + mfspr r9, SPRN_PMC6 + stw r3, HSTATE_PMC1(r13) + stw r5, HSTATE_PMC2(r13) + stw r6, HSTATE_PMC3(r13) + stw r7, HSTATE_PMC4(r13) + stw r8, HSTATE_PMC5(r13) + stw r9, HSTATE_PMC6(r13) +31: blr diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c new file mode 100644 index 000000000000..401d2ecbebc5 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -0,0 +1,1291 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corporation, 2018 + * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com> + * Paul Mackerras <paulus@ozlabs.org> + * + * Description: KVM functions specific to running nested KVM-HV guests + * on Book3S processors (specifically POWER9 and later). + */ + +#include <linux/kernel.h> +#include <linux/kvm_host.h> +#include <linux/llist.h> + +#include <asm/kvm_ppc.h> +#include <asm/kvm_book3s.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/pgalloc.h> +#include <asm/pte-walk.h> +#include <asm/reg.h> + +static struct patb_entry *pseries_partition_tb; + +static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp); +static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free); + +void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + + hr->pcr = vc->pcr; + hr->dpdes = vc->dpdes; + hr->hfscr = vcpu->arch.hfscr; + hr->tb_offset = vc->tb_offset; + hr->dawr0 = vcpu->arch.dawr; + hr->dawrx0 = vcpu->arch.dawrx; + hr->ciabr = vcpu->arch.ciabr; + hr->purr = vcpu->arch.purr; + hr->spurr = vcpu->arch.spurr; + hr->ic = vcpu->arch.ic; + hr->vtb = vc->vtb; + hr->srr0 = vcpu->arch.shregs.srr0; + hr->srr1 = vcpu->arch.shregs.srr1; + hr->sprg[0] = vcpu->arch.shregs.sprg0; + hr->sprg[1] = vcpu->arch.shregs.sprg1; + hr->sprg[2] = vcpu->arch.shregs.sprg2; + hr->sprg[3] = vcpu->arch.shregs.sprg3; + hr->pidr = vcpu->arch.pid; + hr->cfar = vcpu->arch.cfar; + hr->ppr = vcpu->arch.ppr; +} + +static void byteswap_pt_regs(struct pt_regs *regs) +{ + unsigned long *addr = (unsigned long *) regs; + + for (; addr < ((unsigned long *) (regs + 1)); addr++) + *addr = swab64(*addr); +} + +static void byteswap_hv_regs(struct hv_guest_state *hr) +{ + hr->version = swab64(hr->version); + hr->lpid = swab32(hr->lpid); + hr->vcpu_token = swab32(hr->vcpu_token); + hr->lpcr = swab64(hr->lpcr); + hr->pcr = swab64(hr->pcr); + hr->amor = swab64(hr->amor); + hr->dpdes = swab64(hr->dpdes); + hr->hfscr = swab64(hr->hfscr); + hr->tb_offset = swab64(hr->tb_offset); + hr->dawr0 = swab64(hr->dawr0); + hr->dawrx0 = swab64(hr->dawrx0); + hr->ciabr = swab64(hr->ciabr); + hr->hdec_expiry = swab64(hr->hdec_expiry); + hr->purr = swab64(hr->purr); + hr->spurr = swab64(hr->spurr); + hr->ic = swab64(hr->ic); + hr->vtb = swab64(hr->vtb); + hr->hdar = swab64(hr->hdar); + hr->hdsisr = swab64(hr->hdsisr); + hr->heir = swab64(hr->heir); + hr->asdr = swab64(hr->asdr); + hr->srr0 = swab64(hr->srr0); + hr->srr1 = swab64(hr->srr1); + hr->sprg[0] = swab64(hr->sprg[0]); + hr->sprg[1] = swab64(hr->sprg[1]); + 
hr->sprg[2] = swab64(hr->sprg[2]); + hr->sprg[3] = swab64(hr->sprg[3]); + hr->pidr = swab64(hr->pidr); + hr->cfar = swab64(hr->cfar); + hr->ppr = swab64(hr->ppr); +} + +static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap, + struct hv_guest_state *hr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + + hr->dpdes = vc->dpdes; + hr->hfscr = vcpu->arch.hfscr; + hr->purr = vcpu->arch.purr; + hr->spurr = vcpu->arch.spurr; + hr->ic = vcpu->arch.ic; + hr->vtb = vc->vtb; + hr->srr0 = vcpu->arch.shregs.srr0; + hr->srr1 = vcpu->arch.shregs.srr1; + hr->sprg[0] = vcpu->arch.shregs.sprg0; + hr->sprg[1] = vcpu->arch.shregs.sprg1; + hr->sprg[2] = vcpu->arch.shregs.sprg2; + hr->sprg[3] = vcpu->arch.shregs.sprg3; + hr->pidr = vcpu->arch.pid; + hr->cfar = vcpu->arch.cfar; + hr->ppr = vcpu->arch.ppr; + switch (trap) { + case BOOK3S_INTERRUPT_H_DATA_STORAGE: + hr->hdar = vcpu->arch.fault_dar; + hr->hdsisr = vcpu->arch.fault_dsisr; + hr->asdr = vcpu->arch.fault_gpa; + break; + case BOOK3S_INTERRUPT_H_INST_STORAGE: + hr->asdr = vcpu->arch.fault_gpa; + break; + case BOOK3S_INTERRUPT_H_EMUL_ASSIST: + hr->heir = vcpu->arch.emul_inst; + break; + } +} + +static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) +{ + /* + * Don't let L1 enable features for L2 which we've disabled for L1, + * but preserve the interrupt cause field. + */ + hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr); + + /* Don't let data address watchpoint match in hypervisor state */ + hr->dawrx0 &= ~DAWRX_HYP; + + /* Don't let completed instruction address breakpt match in HV state */ + if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) + hr->ciabr &= ~CIABR_PRIV; +} + +static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + + vc->pcr = hr->pcr; + vc->dpdes = hr->dpdes; + vcpu->arch.hfscr = hr->hfscr; + vcpu->arch.dawr = hr->dawr0; + vcpu->arch.dawrx = hr->dawrx0; + vcpu->arch.ciabr = hr->ciabr; + vcpu->arch.purr = hr->purr; + vcpu->arch.spurr = hr->spurr; + vcpu->arch.ic = hr->ic; + vc->vtb = hr->vtb; + vcpu->arch.shregs.srr0 = hr->srr0; + vcpu->arch.shregs.srr1 = hr->srr1; + vcpu->arch.shregs.sprg0 = hr->sprg[0]; + vcpu->arch.shregs.sprg1 = hr->sprg[1]; + vcpu->arch.shregs.sprg2 = hr->sprg[2]; + vcpu->arch.shregs.sprg3 = hr->sprg[3]; + vcpu->arch.pid = hr->pidr; + vcpu->arch.cfar = hr->cfar; + vcpu->arch.ppr = hr->ppr; +} + +void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, + struct hv_guest_state *hr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + + vc->dpdes = hr->dpdes; + vcpu->arch.hfscr = hr->hfscr; + vcpu->arch.purr = hr->purr; + vcpu->arch.spurr = hr->spurr; + vcpu->arch.ic = hr->ic; + vc->vtb = hr->vtb; + vcpu->arch.fault_dar = hr->hdar; + vcpu->arch.fault_dsisr = hr->hdsisr; + vcpu->arch.fault_gpa = hr->asdr; + vcpu->arch.emul_inst = hr->heir; + vcpu->arch.shregs.srr0 = hr->srr0; + vcpu->arch.shregs.srr1 = hr->srr1; + vcpu->arch.shregs.sprg0 = hr->sprg[0]; + vcpu->arch.shregs.sprg1 = hr->sprg[1]; + vcpu->arch.shregs.sprg2 = hr->sprg[2]; + vcpu->arch.shregs.sprg3 = hr->sprg[3]; + vcpu->arch.pid = hr->pidr; + vcpu->arch.cfar = hr->cfar; + vcpu->arch.ppr = hr->ppr; +} + +long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) +{ + long int err, r; + struct kvm_nested_guest *l2; + struct pt_regs l2_regs, saved_l1_regs; + struct hv_guest_state l2_hv, saved_l1_hv; + struct kvmppc_vcore *vc = vcpu->arch.vcore; + u64 hv_ptr, regs_ptr; + u64 hdec_exp; + s64 delta_purr, delta_spurr, delta_ic, delta_vtb; + u64 mask; + unsigned 
long lpcr; + + if (vcpu->kvm->arch.l1_ptcr == 0) + return H_NOT_AVAILABLE; + + /* copy parameters in */ + hv_ptr = kvmppc_get_gpr(vcpu, 4); + err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv, + sizeof(struct hv_guest_state)); + if (err) + return H_PARAMETER; + if (kvmppc_need_byteswap(vcpu)) + byteswap_hv_regs(&l2_hv); + if (l2_hv.version != HV_GUEST_STATE_VERSION) + return H_P2; + + regs_ptr = kvmppc_get_gpr(vcpu, 5); + err = kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs, + sizeof(struct pt_regs)); + if (err) + return H_PARAMETER; + if (kvmppc_need_byteswap(vcpu)) + byteswap_pt_regs(&l2_regs); + if (l2_hv.vcpu_token >= NR_CPUS) + return H_PARAMETER; + + /* translate lpid */ + l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true); + if (!l2) + return H_PARAMETER; + if (!l2->l1_gr_to_hr) { + mutex_lock(&l2->tlb_lock); + kvmhv_update_ptbl_cache(l2); + mutex_unlock(&l2->tlb_lock); + } + + /* save l1 values of things */ + vcpu->arch.regs.msr = vcpu->arch.shregs.msr; + saved_l1_regs = vcpu->arch.regs; + kvmhv_save_hv_regs(vcpu, &saved_l1_hv); + + /* convert TB values/offsets to host (L0) values */ + hdec_exp = l2_hv.hdec_expiry - vc->tb_offset; + vc->tb_offset += l2_hv.tb_offset; + + /* set L1 state to L2 state */ + vcpu->arch.nested = l2; + vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token; + vcpu->arch.regs = l2_regs; + vcpu->arch.shregs.msr = vcpu->arch.regs.msr; + mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | + LPCR_LPES | LPCR_MER; + lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask); + sanitise_hv_regs(vcpu, &l2_hv); + restore_hv_regs(vcpu, &l2_hv); + + vcpu->arch.ret = RESUME_GUEST; + vcpu->arch.trap = 0; + do { + if (mftb() >= hdec_exp) { + vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; + r = RESUME_HOST; + break; + } + r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp, + lpcr); + } while (is_kvmppc_resume_guest(r)); + + /* save L2 state for return */ + l2_regs = vcpu->arch.regs; + l2_regs.msr = vcpu->arch.shregs.msr; + delta_purr = vcpu->arch.purr - l2_hv.purr; + delta_spurr = vcpu->arch.spurr - l2_hv.spurr; + delta_ic = vcpu->arch.ic - l2_hv.ic; + delta_vtb = vc->vtb - l2_hv.vtb; + save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv); + + /* restore L1 state */ + vcpu->arch.nested = NULL; + vcpu->arch.regs = saved_l1_regs; + vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK; + /* set L1 MSR TS field according to L2 transaction state */ + if (l2_regs.msr & MSR_TS_MASK) + vcpu->arch.shregs.msr |= MSR_TS_S; + vc->tb_offset = saved_l1_hv.tb_offset; + restore_hv_regs(vcpu, &saved_l1_hv); + vcpu->arch.purr += delta_purr; + vcpu->arch.spurr += delta_spurr; + vcpu->arch.ic += delta_ic; + vc->vtb += delta_vtb; + + kvmhv_put_nested(l2); + + /* copy l2_hv_state and regs back to guest */ + if (kvmppc_need_byteswap(vcpu)) { + byteswap_hv_regs(&l2_hv); + byteswap_pt_regs(&l2_regs); + } + err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv, + sizeof(struct hv_guest_state)); + if (err) + return H_AUTHORITY; + err = kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs, + sizeof(struct pt_regs)); + if (err) + return H_AUTHORITY; + + if (r == -EINTR) + return H_INTERRUPT; + + return vcpu->arch.trap; +} + +long kvmhv_nested_init(void) +{ + long int ptb_order; + unsigned long ptcr; + long rc; + + if (!kvmhv_on_pseries()) + return 0; + if (!radix_enabled()) + return -ENODEV; + + /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */ + ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1; + if (ptb_order < 8) + ptb_order = 8; + pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order, + 
GFP_KERNEL); + if (!pseries_partition_tb) { + pr_err("kvm-hv: failed to allocate nested partition table\n"); + return -ENOMEM; + } + + ptcr = __pa(pseries_partition_tb) | (ptb_order - 8); + rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr); + if (rc != H_SUCCESS) { + pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n", + rc); + kfree(pseries_partition_tb); + pseries_partition_tb = NULL; + return -ENODEV; + } + + return 0; +} + +void kvmhv_nested_exit(void) +{ + /* + * N.B. the kvmhv_on_pseries() test is there because it enables + * the compiler to remove the call to plpar_hcall_norets() + * when CONFIG_PPC_PSERIES=n. + */ + if (kvmhv_on_pseries() && pseries_partition_tb) { + plpar_hcall_norets(H_SET_PARTITION_TABLE, 0); + kfree(pseries_partition_tb); + pseries_partition_tb = NULL; + } +} + +static void kvmhv_flush_lpid(unsigned int lpid) +{ + long rc; + + if (!kvmhv_on_pseries()) { + radix__flush_tlb_lpid(lpid); + return; + } + + rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1), + lpid, TLBIEL_INVAL_SET_LPID); + if (rc) + pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc); +} + +void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1) +{ + if (!kvmhv_on_pseries()) { + mmu_partition_table_set_entry(lpid, dw0, dw1); + return; + } + + pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0); + pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1); + /* L0 will do the necessary barriers */ + kvmhv_flush_lpid(lpid); +} + +static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) +{ + unsigned long dw0; + + dw0 = PATB_HR | radix__get_tree_size() | + __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE; + kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table); +} + +void kvmhv_vm_nested_init(struct kvm *kvm) +{ + kvm->arch.max_nested_lpid = -1; +} + +/* + * Handle the H_SET_PARTITION_TABLE hcall. + * r4 = guest real address of partition table + log_2(size) - 12 + * (formatted as for the PTCR). + */ +long kvmhv_set_partition_table(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + unsigned long ptcr = kvmppc_get_gpr(vcpu, 4); + int srcu_idx; + long ret = H_SUCCESS; + + srcu_idx = srcu_read_lock(&kvm->srcu); + /* + * Limit the partition table to 4096 entries (because that's what + * hardware supports), and check the base address. + */ + if ((ptcr & PRTS_MASK) > 12 - 8 || + !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT)) + ret = H_PARAMETER; + srcu_read_unlock(&kvm->srcu, srcu_idx); + if (ret == H_SUCCESS) + kvm->arch.l1_ptcr = ptcr; + return ret; +} + +/* + * Reload the partition table entry for a guest. + * Caller must hold gp->tlb_lock.
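For readers checking the arithmetic above: each partition-table entry is two doublewords (patb0/patb1), so log2(table size in bytes) is ptb_order + 4, and the "log_2(size) - 12" value that H_SET_PARTITION_TABLE expects works out to ptb_order - 8, which is exactly what kvmhv_nested_init() ORs into the PTCR. A minimal stand-alone sketch of that relationship, with illustrative values only (not part of the patch):

    #include <assert.h>

    int main(void)
    {
    	unsigned long ptb_order = 8;		/* log2(number of entries); minimum used above */
    	unsigned long entry_bytes = 16;		/* struct patb_entry: patb0 + patb1 */
    	unsigned long table_bytes = entry_bytes << ptb_order;	/* 4096 bytes */
    	unsigned long prts = ptb_order - 8;	/* low bits ORed into the PTCR above */

    	/* H_SET_PARTITION_TABLE expects log_2(size) - 12 in those low bits */
    	assert(prts == (unsigned long)__builtin_ctzl(table_bytes) - 12);
    	return 0;
    }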
+ */ +static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp) +{ + int ret; + struct patb_entry ptbl_entry; + unsigned long ptbl_addr; + struct kvm *kvm = gp->l1_host; + + ret = -EFAULT; + ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4); + if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) + ret = kvm_read_guest(kvm, ptbl_addr, + &ptbl_entry, sizeof(ptbl_entry)); + if (ret) { + gp->l1_gr_to_hr = 0; + gp->process_table = 0; + } else { + gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0); + gp->process_table = be64_to_cpu(ptbl_entry.patb1); + } + kvmhv_set_nested_ptbl(gp); +} + +struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid) +{ + struct kvm_nested_guest *gp; + long shadow_lpid; + + gp = kzalloc(sizeof(*gp), GFP_KERNEL); + if (!gp) + return NULL; + gp->l1_host = kvm; + gp->l1_lpid = lpid; + mutex_init(&gp->tlb_lock); + gp->shadow_pgtable = pgd_alloc(kvm->mm); + if (!gp->shadow_pgtable) + goto out_free; + shadow_lpid = kvmppc_alloc_lpid(); + if (shadow_lpid < 0) + goto out_free2; + gp->shadow_lpid = shadow_lpid; + + memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu)); + + return gp; + + out_free2: + pgd_free(kvm->mm, gp->shadow_pgtable); + out_free: + kfree(gp); + return NULL; +} + +/* + * Free up any resources allocated for a nested guest. + */ +static void kvmhv_release_nested(struct kvm_nested_guest *gp) +{ + struct kvm *kvm = gp->l1_host; + + if (gp->shadow_pgtable) { + /* + * No vcpu is using this struct and no call to + * kvmhv_get_nested can find this struct, + * so we don't need to hold kvm->mmu_lock. + */ + kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, + gp->shadow_lpid); + pgd_free(kvm->mm, gp->shadow_pgtable); + } + kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0); + kvmppc_free_lpid(gp->shadow_lpid); + kfree(gp); +} + +static void kvmhv_remove_nested(struct kvm_nested_guest *gp) +{ + struct kvm *kvm = gp->l1_host; + int lpid = gp->l1_lpid; + long ref; + + spin_lock(&kvm->mmu_lock); + if (gp == kvm->arch.nested_guests[lpid]) { + kvm->arch.nested_guests[lpid] = NULL; + if (lpid == kvm->arch.max_nested_lpid) { + while (--lpid >= 0 && !kvm->arch.nested_guests[lpid]) + ; + kvm->arch.max_nested_lpid = lpid; + } + --gp->refcnt; + } + ref = gp->refcnt; + spin_unlock(&kvm->mmu_lock); + if (ref == 0) + kvmhv_release_nested(gp); +} + +/* + * Free up all nested resources allocated for this guest. + * This is called with no vcpus of the guest running, when + * switching the guest to HPT mode or when destroying the + * guest. 
+ */ +void kvmhv_release_all_nested(struct kvm *kvm) +{ + int i; + struct kvm_nested_guest *gp; + struct kvm_nested_guest *freelist = NULL; + struct kvm_memory_slot *memslot; + int srcu_idx; + + spin_lock(&kvm->mmu_lock); + for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { + gp = kvm->arch.nested_guests[i]; + if (!gp) + continue; + kvm->arch.nested_guests[i] = NULL; + if (--gp->refcnt == 0) { + gp->next = freelist; + freelist = gp; + } + } + kvm->arch.max_nested_lpid = -1; + spin_unlock(&kvm->mmu_lock); + while ((gp = freelist) != NULL) { + freelist = gp->next; + kvmhv_release_nested(gp); + } + + srcu_idx = srcu_read_lock(&kvm->srcu); + kvm_for_each_memslot(memslot, kvm_memslots(kvm)) + kvmhv_free_memslot_nest_rmap(memslot); + srcu_read_unlock(&kvm->srcu, srcu_idx); +} + +/* caller must hold gp->tlb_lock */ +static void kvmhv_flush_nested(struct kvm_nested_guest *gp) +{ + struct kvm *kvm = gp->l1_host; + + spin_lock(&kvm->mmu_lock); + kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); + spin_unlock(&kvm->mmu_lock); + kvmhv_flush_lpid(gp->shadow_lpid); + kvmhv_update_ptbl_cache(gp); + if (gp->l1_gr_to_hr == 0) + kvmhv_remove_nested(gp); +} + +struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, + bool create) +{ + struct kvm_nested_guest *gp, *newgp; + + if (l1_lpid >= KVM_MAX_NESTED_GUESTS || + l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) + return NULL; + + spin_lock(&kvm->mmu_lock); + gp = kvm->arch.nested_guests[l1_lpid]; + if (gp) + ++gp->refcnt; + spin_unlock(&kvm->mmu_lock); + + if (gp || !create) + return gp; + + newgp = kvmhv_alloc_nested(kvm, l1_lpid); + if (!newgp) + return NULL; + spin_lock(&kvm->mmu_lock); + if (kvm->arch.nested_guests[l1_lpid]) { + /* someone else beat us to it */ + gp = kvm->arch.nested_guests[l1_lpid]; + } else { + kvm->arch.nested_guests[l1_lpid] = newgp; + ++newgp->refcnt; + gp = newgp; + newgp = NULL; + if (l1_lpid > kvm->arch.max_nested_lpid) + kvm->arch.max_nested_lpid = l1_lpid; + } + ++gp->refcnt; + spin_unlock(&kvm->mmu_lock); + + if (newgp) + kvmhv_release_nested(newgp); + + return gp; +} + +void kvmhv_put_nested(struct kvm_nested_guest *gp) +{ + struct kvm *kvm = gp->l1_host; + long ref; + + spin_lock(&kvm->mmu_lock); + ref = --gp->refcnt; + spin_unlock(&kvm->mmu_lock); + if (ref == 0) + kvmhv_release_nested(gp); +} + +static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid) +{ + if (lpid > kvm->arch.max_nested_lpid) + return NULL; + return kvm->arch.nested_guests[lpid]; +} + +static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2) +{ + return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK | + RMAP_NESTED_GPA_MASK)); +} + +void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, + struct rmap_nested **n_rmap) +{ + struct llist_node *entry = ((struct llist_head *) rmapp)->first; + struct rmap_nested *cursor; + u64 rmap, new_rmap = (*n_rmap)->rmap; + + /* Are there any existing entries? */ + if (!(*rmapp)) { + /* No -> use the rmap as a single entry */ + *rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY; + return; + } + + /* Do any entries match what we're trying to insert? */ + for_each_nest_rmap_safe(cursor, entry, &rmap) { + if (kvmhv_n_rmap_is_equal(rmap, new_rmap)) + return; + } + + /* Do we need to create a list or just add the new entry? 
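The subtlety in kvmhv_insert_nest_rmap() is that one unsigned long per guest page doubles as either a single encoded entry (tagged with RMAP_NESTED_IS_SINGLE_ENTRY) or the head of an llist of struct rmap_nested; when the slot is promoted to a list, the old inline value is parked in the new node's next field so the walker still finds it. A stand-alone sketch of that pattern, with hypothetical names and a hypothetical tag bit rather than the kernel's llist machinery (assumes a 64-bit build):

    #include <stdint.h>
    #include <stdlib.h>

    #define SINGLE_ENTRY (1UL << 63)	/* stand-in for RMAP_NESTED_IS_SINGLE_ENTRY */

    struct nnode {
    	uintptr_t next;			/* another nnode, a tagged value, or 0 */
    	uint64_t value;
    };

    /* One word per page: empty, one tagged value, or a pointer to a list. */
    static int slot_insert(uintptr_t *slot, uint64_t value)
    {
    	struct nnode *n;

    	if (!*slot) {			/* first entry: store it inline */
    		*slot = value | SINGLE_ENTRY;
    		return 0;
    	}
    	n = malloc(sizeof(*n));
    	if (!n)
    		return -1;
    	n->value = value;
    	n->next = *slot;		/* park old head or old inline value */
    	*slot = (uintptr_t)n;		/* slot is now a real list */
    	return 0;
    }

    static void slot_for_each(uintptr_t slot, void (*fn)(uint64_t))
    {
    	while (slot) {
    		if (slot & SINGLE_ENTRY) {	/* tagged inline value ends the walk */
    			fn(slot & ~SINGLE_ENTRY);
    			return;
    		}
    		fn(((struct nnode *)slot)->value);
    		slot = ((struct nnode *)slot)->next;
    	}
    }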
*/ + rmap = *rmapp; + if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */ + *rmapp = 0UL; + llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp); + if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */ + (*n_rmap)->list.next = (struct llist_node *) rmap; + + /* Set NULL so not freed by caller */ + *n_rmap = NULL; +} + +static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap, + unsigned long hpa, unsigned long mask) +{ + struct kvm_nested_guest *gp; + unsigned long gpa; + unsigned int shift, lpid; + pte_t *ptep; + + gpa = n_rmap & RMAP_NESTED_GPA_MASK; + lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT; + gp = kvmhv_find_nested(kvm, lpid); + if (!gp) + return; + + /* Find and invalidate the pte */ + ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift); + /* Don't spuriously invalidate ptes if the pfn has changed */ + if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) + kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); +} + +static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp, + unsigned long hpa, unsigned long mask) +{ + struct llist_node *entry = llist_del_all((struct llist_head *) rmapp); + struct rmap_nested *cursor; + unsigned long rmap; + + for_each_nest_rmap_safe(cursor, entry, &rmap) { + kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask); + kfree(cursor); + } +} + +/* called with kvm->mmu_lock held */ +void kvmhv_remove_nest_rmap_range(struct kvm *kvm, + struct kvm_memory_slot *memslot, + unsigned long gpa, unsigned long hpa, + unsigned long nbytes) +{ + unsigned long gfn, end_gfn; + unsigned long addr_mask; + + if (!memslot) + return; + gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn; + end_gfn = gfn + (nbytes >> PAGE_SHIFT); + + addr_mask = PTE_RPN_MASK & ~(nbytes - 1); + hpa &= addr_mask; + + for (; gfn < end_gfn; gfn++) { + unsigned long *rmap = &memslot->arch.rmap[gfn]; + kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask); + } +} + +static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free) +{ + unsigned long page; + + for (page = 0; page < free->npages; page++) { + unsigned long rmap, *rmapp = &free->arch.rmap[page]; + struct rmap_nested *cursor; + struct llist_node *entry; + + entry = llist_del_all((struct llist_head *) rmapp); + for_each_nest_rmap_safe(cursor, entry, &rmap) + kfree(cursor); + } +} + +static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp, + long gpa, int *shift_ret) +{ + struct kvm *kvm = vcpu->kvm; + bool ret = false; + pte_t *ptep; + int shift; + + spin_lock(&kvm->mmu_lock); + ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift); + if (!shift) + shift = PAGE_SHIFT; + if (ptep && pte_present(*ptep)) { + kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); + ret = true; + } + spin_unlock(&kvm->mmu_lock); + + if (shift_ret) + *shift_ret = shift; + return ret; +} + +static inline int get_ric(unsigned int instr) +{ + return (instr >> 18) & 0x3; +} + +static inline int get_prs(unsigned int instr) +{ + return (instr >> 17) & 0x1; +} + +static inline int get_r(unsigned int instr) +{ + return (instr >> 16) & 0x1; +} + +static inline int get_lpid(unsigned long r_val) +{ + return r_val & 0xffffffff; +} + +static inline int get_is(unsigned long r_val) +{ + return (r_val >> 10) & 0x3; +} + +static inline int get_ap(unsigned long r_val) +{ + return (r_val >> 5) & 0x7; +} + +static inline long get_epn(unsigned long r_val) +{ + return r_val >> 12; +} + +static int 
kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid, + int ap, long epn) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *gp; + long npages; + int shift, shadow_shift; + unsigned long addr; + + shift = ap_to_shift(ap); + addr = epn << 12; + if (shift < 0) + /* Invalid ap encoding */ + return -EINVAL; + + addr &= ~((1UL << shift) - 1); + npages = 1UL << (shift - PAGE_SHIFT); + + gp = kvmhv_get_nested(kvm, lpid, false); + if (!gp) /* No such guest -> nothing to do */ + return 0; + mutex_lock(&gp->tlb_lock); + + /* There may be more than one host page backing this single guest pte */ + do { + kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift); + + npages -= 1UL << (shadow_shift - PAGE_SHIFT); + addr += 1UL << shadow_shift; + } while (npages > 0); + + mutex_unlock(&gp->tlb_lock); + kvmhv_put_nested(gp); + return 0; +} + +static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp, int ric) +{ + struct kvm *kvm = vcpu->kvm; + + mutex_lock(&gp->tlb_lock); + switch (ric) { + case 0: + /* Invalidate TLB */ + spin_lock(&kvm->mmu_lock); + kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, + gp->shadow_lpid); + kvmhv_flush_lpid(gp->shadow_lpid); + spin_unlock(&kvm->mmu_lock); + break; + case 1: + /* + * Invalidate PWC + * We don't cache this -> nothing to do + */ + break; + case 2: + /* Invalidate TLB, PWC and caching of partition table entries */ + kvmhv_flush_nested(gp); + break; + default: + break; + } + mutex_unlock(&gp->tlb_lock); +} + +static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *gp; + int i; + + spin_lock(&kvm->mmu_lock); + for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { + gp = kvm->arch.nested_guests[i]; + if (gp) { + spin_unlock(&kvm->mmu_lock); + kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); + spin_lock(&kvm->mmu_lock); + } + } + spin_unlock(&kvm->mmu_lock); +} + +static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr, + unsigned long rsval, unsigned long rbval) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_nested_guest *gp; + int r, ric, prs, is, ap; + int lpid; + long epn; + int ret = 0; + + ric = get_ric(instr); + prs = get_prs(instr); + r = get_r(instr); + lpid = get_lpid(rsval); + is = get_is(rbval); + + /* + * These cases are invalid and are not handled: + * r != 1 -> Only radix supported + * prs == 1 -> Not HV privileged + * ric == 3 -> No cluster bombs for radix + * is == 1 -> Partition scoped translations not associated with pid + * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA + */ + if ((!r) || (prs) || (ric == 3) || (is == 1) || + ((!is) && (ric == 1 || ric == 2))) + return -EINVAL; + + switch (is) { + case 0: + /* + * We know ric == 0 + * Invalidate TLB for a given target address + */ + epn = get_epn(rbval); + ap = get_ap(rbval); + ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn); + break; + case 2: + /* Invalidate matching LPID */ + gp = kvmhv_get_nested(kvm, lpid, false); + if (gp) { + kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); + kvmhv_put_nested(gp); + } + break; + case 3: + /* Invalidate ALL LPIDs */ + kvmhv_emulate_tlbie_all_lpid(vcpu, ric); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +/* + * This handles the H_TLB_INVALIDATE hcall. + * Parameters are (r4) tlbie instruction code, (r5) rS contents, + * (r6) rB contents. 
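As a cross-reference, the L1 side of this interface already appears in this patch: kvmhv_flush_lpid() issues plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1), lpid, TLBIEL_INVAL_SET_LPID), i.e. ric = 2, prs = 0, r = 1 in the instruction image, the target LPID in the rS argument, and an rB value (TLBIEL_INVAL_SET_LPID) whose "is" field selects whole-LPID invalidation, which the switch in kvmhv_emulate_priv_tlbie() above handles as is == 2. A small stand-alone decoder using the same bit positions as the get_*() helpers above, with hypothetical argument values (purely illustrative):

    #include <stdio.h>

    /* Same field positions as get_ric()/get_prs()/get_r()/get_is()/get_lpid() */
    static int dec_ric(unsigned int instr)	{ return (instr >> 18) & 0x3; }
    static int dec_prs(unsigned int instr)	{ return (instr >> 17) & 0x1; }
    static int dec_r(unsigned int instr)	{ return (instr >> 16) & 0x1; }
    static int dec_is(unsigned long rb)		{ return (rb >> 10) & 0x3; }
    static int dec_lpid(unsigned long rs)	{ return rs & 0xffffffff; }

    int main(void)
    {
    	unsigned int instr = (2u << 18) | (0u << 17) | (1u << 16);	/* ric=2 prs=0 r=1 */
    	unsigned long rs = 42;			/* L1's lpid for the nested guest */
    	unsigned long rb = 2UL << 10;		/* is = 2: invalidate the whole LPID */

    	printf("ric=%d prs=%d r=%d is=%d lpid=%d\n",
    	       dec_ric(instr), dec_prs(instr), dec_r(instr),
    	       dec_is(rb), dec_lpid(rs));
    	return 0;
    }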
+ */ +long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu) +{ + int ret; + + ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6)); + if (ret) + return H_PARAMETER; + return H_SUCCESS; +} + +/* Used to convert a nested guest real address to a L1 guest real address */ +static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp, + unsigned long n_gpa, unsigned long dsisr, + struct kvmppc_pte *gpte_p) +{ + u64 fault_addr, flags = dsisr & DSISR_ISSTORE; + int ret; + + ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr, + &fault_addr); + + if (ret) { + /* We didn't find a pte */ + if (ret == -EINVAL) { + /* Unsupported mmu config */ + flags |= DSISR_UNSUPP_MMU; + } else if (ret == -ENOENT) { + /* No translation found */ + flags |= DSISR_NOHPTE; + } else if (ret == -EFAULT) { + /* Couldn't access L1 real address */ + flags |= DSISR_PRTABLE_FAULT; + vcpu->arch.fault_gpa = fault_addr; + } else { + /* Unknown error */ + return ret; + } + goto forward_to_l1; + } else { + /* We found a pte -> check permissions */ + if (dsisr & DSISR_ISSTORE) { + /* Can we write? */ + if (!gpte_p->may_write) { + flags |= DSISR_PROTFAULT; + goto forward_to_l1; + } + } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { + /* Can we execute? */ + if (!gpte_p->may_execute) { + flags |= SRR1_ISI_N_OR_G; + goto forward_to_l1; + } + } else { + /* Can we read? */ + if (!gpte_p->may_read && !gpte_p->may_write) { + flags |= DSISR_PROTFAULT; + goto forward_to_l1; + } + } + } + + return 0; + +forward_to_l1: + vcpu->arch.fault_dsisr = flags; + if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { + vcpu->arch.shregs.msr &= ~0x783f0000ul; + vcpu->arch.shregs.msr |= flags; + } + return RESUME_HOST; +} + +static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp, + unsigned long n_gpa, + struct kvmppc_pte gpte, + unsigned long dsisr) +{ + struct kvm *kvm = vcpu->kvm; + bool writing = !!(dsisr & DSISR_ISSTORE); + u64 pgflags; + bool ret; + + /* Are the rc bits set in the L1 partition scoped pte? 
*/ + pgflags = _PAGE_ACCESSED; + if (writing) + pgflags |= _PAGE_DIRTY; + if (pgflags & ~gpte.rc) + return RESUME_HOST; + + spin_lock(&kvm->mmu_lock); + /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */ + ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing, + gpte.raddr, kvm->arch.lpid); + spin_unlock(&kvm->mmu_lock); + if (!ret) + return -EINVAL; + + /* Set the rc bit in the pte of the shadow_pgtable for the nest guest */ + ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa, + gp->shadow_lpid); + if (!ret) + return -EINVAL; + return 0; +} + +static inline int kvmppc_radix_level_to_shift(int level) +{ + switch (level) { + case 2: + return PUD_SHIFT; + case 1: + return PMD_SHIFT; + default: + return PAGE_SHIFT; + } +} + +static inline int kvmppc_radix_shift_to_level(int shift) +{ + if (shift == PUD_SHIFT) + return 2; + if (shift == PMD_SHIFT) + return 1; + if (shift == PAGE_SHIFT) + return 0; + WARN_ON_ONCE(1); + return 0; +} + +/* called with gp->tlb_lock held */ +static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, + struct kvm_nested_guest *gp) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *memslot; + struct rmap_nested *n_rmap; + struct kvmppc_pte gpte; + pte_t pte, *pte_p; + unsigned long mmu_seq; + unsigned long dsisr = vcpu->arch.fault_dsisr; + unsigned long ea = vcpu->arch.fault_dar; + unsigned long *rmapp; + unsigned long n_gpa, gpa, gfn, perm = 0UL; + unsigned int shift, l1_shift, level; + bool writing = !!(dsisr & DSISR_ISSTORE); + bool kvm_ro = false; + long int ret; + + if (!gp->l1_gr_to_hr) { + kvmhv_update_ptbl_cache(gp); + if (!gp->l1_gr_to_hr) + return RESUME_HOST; + } + + /* Convert the nested guest real address into a L1 guest real address */ + + n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL; + if (!(dsisr & DSISR_PRTABLE_FAULT)) + n_gpa |= ea & 0xFFF; + ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte); + + /* + * If the hardware found a translation but we don't now have a usable + * translation in the l1 partition-scoped tree, remove the shadow pte + * and let the guest retry. + */ + if (ret == RESUME_HOST && + (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G | + DSISR_BAD_COPYPASTE))) + goto inval; + if (ret) + return ret; + + /* Failed to set the reference/change bits */ + if (dsisr & DSISR_SET_RC) { + ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr); + if (ret == RESUME_HOST) + return ret; + if (ret) + goto inval; + dsisr &= ~DSISR_SET_RC; + if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE | + DSISR_PROTFAULT))) + return RESUME_GUEST; + } + + /* + * We took an HISI or HDSI while we were running a nested guest which + * means we have no partition scoped translation for that. This means + * we need to insert a pte for the mapping into our shadow_pgtable. + */ + + l1_shift = gpte.page_shift; + if (l1_shift < PAGE_SHIFT) { + /* We don't support l1 using a page size smaller than our own */ + pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n", + l1_shift, PAGE_SHIFT); + return -EINVAL; + } + gpa = gpte.raddr; + gfn = gpa >> PAGE_SHIFT; + + /* 1. Get the corresponding host memslot */ + + memslot = gfn_to_memslot(kvm, gfn); + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { + if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) { + /* unusual error -> reflect to the guest as a DSI */ + kvmppc_core_queue_data_storage(vcpu, ea, dsisr); + return RESUME_GUEST; + } + /* passthrough of emulated MMIO case... 
*/ + pr_err("emulated MMIO passthrough?\n"); + return -EINVAL; + } + if (memslot->flags & KVM_MEM_READONLY) { + if (writing) { + /* Give the guest a DSI */ + kvmppc_core_queue_data_storage(vcpu, ea, + DSISR_ISSTORE | DSISR_PROTFAULT); + return RESUME_GUEST; + } + kvm_ro = true; + } + + /* 2. Find the host pte for this L1 guest real address */ + + /* Used to check for invalidations in progress */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + /* See if can find translation in our partition scoped tables for L1 */ + pte = __pte(0); + spin_lock(&kvm->mmu_lock); + pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); + if (!shift) + shift = PAGE_SHIFT; + if (pte_p) + pte = *pte_p; + spin_unlock(&kvm->mmu_lock); + + if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) { + /* No suitable pte found -> try to insert a mapping */ + ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, + writing, kvm_ro, &pte, &level); + if (ret == -EAGAIN) + return RESUME_GUEST; + else if (ret) + return ret; + shift = kvmppc_radix_level_to_shift(level); + } + + /* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */ + + /* The permissions is the combination of the host and l1 guest ptes */ + perm |= gpte.may_read ? 0UL : _PAGE_READ; + perm |= gpte.may_write ? 0UL : _PAGE_WRITE; + perm |= gpte.may_execute ? 0UL : _PAGE_EXEC; + pte = __pte(pte_val(pte) & ~perm); + + /* What size pte can we insert? */ + if (shift > l1_shift) { + u64 mask; + unsigned int actual_shift = PAGE_SHIFT; + if (PMD_SHIFT < l1_shift) + actual_shift = PMD_SHIFT; + mask = (1UL << shift) - (1UL << actual_shift); + pte = __pte(pte_val(pte) | (gpa & mask)); + shift = actual_shift; + } + level = kvmppc_radix_shift_to_level(shift); + n_gpa &= ~((1UL << shift) - 1); + + /* 4. 
Insert the pte into our shadow_pgtable */ + + n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL); + if (!n_rmap) + return RESUME_GUEST; /* Let the guest try again */ + n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) | + (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT); + rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; + ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level, + mmu_seq, gp->shadow_lpid, rmapp, &n_rmap); + if (n_rmap) + kfree(n_rmap); + if (ret == -EAGAIN) + ret = RESUME_GUEST; /* Let the guest try again */ + + return ret; + + inval: + kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL); + return RESUME_GUEST; +} + +long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) +{ + struct kvm_nested_guest *gp = vcpu->arch.nested; + long int ret; + + mutex_lock(&gp->tlb_lock); + ret = __kvmhv_nested_page_fault(vcpu, gp); + mutex_unlock(&gp->tlb_lock); + return ret; +} + +int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid) +{ + int ret = -1; + + spin_lock(&kvm->mmu_lock); + while (++lpid <= kvm->arch.max_nested_lpid) { + if (kvm->arch.nested_guests[lpid]) { + ret = lpid; + break; + } + } + spin_unlock(&kvm->mmu_lock); + return ret; +} diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c index b11043b23c18..0787f12c1a1b 100644 --- a/arch/powerpc/kvm/book3s_hv_ras.c +++ b/arch/powerpc/kvm/book3s_hv_ras.c @@ -177,6 +177,7 @@ void kvmppc_subcore_enter_guest(void) local_paca->sibling_subcore_state->in_guest[subcore_id] = 1; } +EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest); void kvmppc_subcore_exit_guest(void) { @@ -187,6 +188,7 @@ void kvmppc_subcore_exit_guest(void) local_paca->sibling_subcore_state->in_guest[subcore_id] = 0; } +EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest); static bool kvmppc_tb_resync_required(void) { @@ -331,5 +333,13 @@ long kvmppc_realmode_hmi_handler(void) } else { wait_for_tb_resync(); } + + /* + * Reset tb_offset_applied so the guest exit code won't try + * to subtract the previous timebase offset from the timebase. + */ + if (local_paca->kvm_hstate.kvm_vcore) + local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0; + return 0; } diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index 758d1d23215e..b3f5786b20dc 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -136,7 +136,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, /* Mark the target VCPU as having an interrupt pending */ vcpu->stat.queue_intr++; - set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); + set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); /* Kick self ? Just set MER and return */ if (vcpu == this_vcpu) { @@ -170,8 +170,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu) { /* Note: Only called on self ! 
*/ - clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, - &vcpu->arch.pending_exceptions); + clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER); } @@ -768,6 +767,14 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again) void __iomem *xics_phys; int64_t rc; + if (kvmhv_on_pseries()) { + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + iosync(); + plpar_hcall_raw(H_EOI, retbuf, hwirq); + return; + } + rc = pnv_opal_pci_msi_eoi(c, hwirq); if (rc) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 1d14046124a0..9b8d50a7cbaf 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -28,6 +28,7 @@ #include <asm/exception-64s.h> #include <asm/kvm_book3s_asm.h> #include <asm/book3s/64/mmu-hash.h> +#include <asm/export.h> #include <asm/tm.h> #include <asm/opal.h> #include <asm/xive-regs.h> @@ -46,8 +47,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define NAPPING_NOVCPU 2 /* Stack frame offsets for kvmppc_hv_entry */ -#define SFS 160 +#define SFS 208 #define STACK_SLOT_TRAP (SFS-4) +#define STACK_SLOT_SHORT_PATH (SFS-8) #define STACK_SLOT_TID (SFS-16) #define STACK_SLOT_PSSCR (SFS-24) #define STACK_SLOT_PID (SFS-32) @@ -56,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define STACK_SLOT_DAWR (SFS-56) #define STACK_SLOT_DAWRX (SFS-64) #define STACK_SLOT_HFSCR (SFS-72) +/* the following is used by the P9 short path */ +#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ /* * Call kvmppc_hv_entry in real mode. @@ -113,45 +117,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) mtspr SPRN_SPRG_VDSO_WRITE,r3 /* Reload the host's PMU registers */ - lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ - cmpwi r4, 0 - beq 23f /* skip if not */ -BEGIN_FTR_SECTION - ld r3, HSTATE_MMCR0(r13) - andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO - cmpwi r4, MMCR0_PMAO - beql kvmppc_fix_pmao -END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) - lwz r3, HSTATE_PMC1(r13) - lwz r4, HSTATE_PMC2(r13) - lwz r5, HSTATE_PMC3(r13) - lwz r6, HSTATE_PMC4(r13) - lwz r8, HSTATE_PMC5(r13) - lwz r9, HSTATE_PMC6(r13) - mtspr SPRN_PMC1, r3 - mtspr SPRN_PMC2, r4 - mtspr SPRN_PMC3, r5 - mtspr SPRN_PMC4, r6 - mtspr SPRN_PMC5, r8 - mtspr SPRN_PMC6, r9 - ld r3, HSTATE_MMCR0(r13) - ld r4, HSTATE_MMCR1(r13) - ld r5, HSTATE_MMCRA(r13) - ld r6, HSTATE_SIAR(r13) - ld r7, HSTATE_SDAR(r13) - mtspr SPRN_MMCR1, r4 - mtspr SPRN_MMCRA, r5 - mtspr SPRN_SIAR, r6 - mtspr SPRN_SDAR, r7 -BEGIN_FTR_SECTION - ld r8, HSTATE_MMCR2(r13) - ld r9, HSTATE_SIER(r13) - mtspr SPRN_MMCR2, r8 - mtspr SPRN_SIER, r9 -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - mtspr SPRN_MMCR0, r3 - isync -23: + bl kvmhv_load_host_pmu /* * Reload DEC. HDEC interrupts were disabled when @@ -796,66 +762,23 @@ BEGIN_FTR_SECTION b 91f END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* - * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ mr r3, r4 ld r4, VCPU_MSR(r3) + li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_restore_tm_hv + nop ld r4, HSTATE_KVM_VCPU(r13) 91: #endif - /* Load guest PMU registers */ - /* R4 is live here (vcpu pointer) */ - li r3, 1 - sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ - mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ - isync -BEGIN_FTR_SECTION - ld r3, VCPU_MMCR(r4) - andi. 
r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO - cmpwi r5, MMCR0_PMAO - beql kvmppc_fix_pmao -END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) - lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ - lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ - lwz r6, VCPU_PMC + 8(r4) - lwz r7, VCPU_PMC + 12(r4) - lwz r8, VCPU_PMC + 16(r4) - lwz r9, VCPU_PMC + 20(r4) - mtspr SPRN_PMC1, r3 - mtspr SPRN_PMC2, r5 - mtspr SPRN_PMC3, r6 - mtspr SPRN_PMC4, r7 - mtspr SPRN_PMC5, r8 - mtspr SPRN_PMC6, r9 - ld r3, VCPU_MMCR(r4) - ld r5, VCPU_MMCR + 8(r4) - ld r6, VCPU_MMCR + 16(r4) - ld r7, VCPU_SIAR(r4) - ld r8, VCPU_SDAR(r4) - mtspr SPRN_MMCR1, r5 - mtspr SPRN_MMCRA, r6 - mtspr SPRN_SIAR, r7 - mtspr SPRN_SDAR, r8 -BEGIN_FTR_SECTION - ld r5, VCPU_MMCR + 24(r4) - ld r6, VCPU_SIER(r4) - mtspr SPRN_MMCR2, r5 - mtspr SPRN_SIER, r6 -BEGIN_FTR_SECTION_NESTED(96) - lwz r7, VCPU_PMC + 24(r4) - lwz r8, VCPU_PMC + 28(r4) - ld r9, VCPU_MMCR + 32(r4) - mtspr SPRN_SPMC1, r7 - mtspr SPRN_SPMC2, r8 - mtspr SPRN_MMCRS, r9 -END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - mtspr SPRN_MMCR0, r3 - isync + /* Load guest PMU registers; r4 = vcpu pointer here */ + mr r3, r4 + bl kvmhv_load_guest_pmu /* Load up FP, VMX and VSX registers */ + ld r4, HSTATE_KVM_VCPU(r13) bl kvmppc_load_fp ld r14, VCPU_GPR(R14)(r4) @@ -1100,73 +1023,40 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) no_xive: #endif /* CONFIG_KVM_XICS */ -deliver_guest_interrupt: - ld r6, VCPU_CTR(r4) - ld r7, VCPU_XER(r4) - - mtctr r6 - mtxer r7 + li r0, 0 + stw r0, STACK_SLOT_SHORT_PATH(r1) -kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ - ld r10, VCPU_PC(r4) - ld r11, VCPU_MSR(r4) +deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */ + /* Check if we can deliver an external or decrementer interrupt now */ + ld r0, VCPU_PENDING_EXC(r4) +BEGIN_FTR_SECTION + /* On POWER9, also check for emulated doorbell interrupt */ + lbz r3, VCPU_DBELL_REQ(r4) + or r0, r0, r3 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + cmpdi r0, 0 + beq 71f + mr r3, r4 + bl kvmppc_guest_entry_inject_int + ld r4, HSTATE_KVM_VCPU(r13) +71: ld r6, VCPU_SRR0(r4) ld r7, VCPU_SRR1(r4) mtspr SPRN_SRR0, r6 mtspr SPRN_SRR1, r7 +fast_guest_entry_c: + ld r10, VCPU_PC(r4) + ld r11, VCPU_MSR(r4) /* r11 = vcpu->arch.msr & ~MSR_HV */ rldicl r11, r11, 63 - MSR_HV_LG, 1 rotldi r11, r11, 1 + MSR_HV_LG ori r11, r11, MSR_ME - /* Check if we can deliver an external or decrementer interrupt now */ - ld r0, VCPU_PENDING_EXC(r4) - rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63 - cmpdi cr1, r0, 0 - andi. r8, r11, MSR_EE - mfspr r8, SPRN_LPCR - /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */ - rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH - mtspr SPRN_LPCR, r8 - isync - beq 5f - li r0, BOOK3S_INTERRUPT_EXTERNAL - bne cr1, 12f - mfspr r0, SPRN_DEC -BEGIN_FTR_SECTION - /* On POWER9 check whether the guest has large decrementer enabled */ - andis. 
r8, r8, LPCR_LD@h - bne 15f -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - extsw r0, r0 -15: cmpdi r0, 0 - li r0, BOOK3S_INTERRUPT_DECREMENTER - bge 5f - -12: mtspr SPRN_SRR0, r10 - mr r10,r0 - mtspr SPRN_SRR1, r11 - mr r9, r4 - bl kvmppc_msr_interrupt -5: -BEGIN_FTR_SECTION - b fast_guest_return -END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) - /* On POWER9, check for pending doorbell requests */ - lbz r0, VCPU_DBELL_REQ(r4) - cmpwi r0, 0 - beq fast_guest_return - ld r5, HSTATE_KVM_VCORE(r13) - /* Set DPDES register so the CPU will take a doorbell interrupt */ - li r0, 1 - mtspr SPRN_DPDES, r0 - std r0, VCORE_DPDES(r5) - /* Make sure other cpus see vcore->dpdes set before dbell req clear */ - lwsync - /* Clear the pending doorbell request */ - li r0, 0 - stb r0, VCPU_DBELL_REQ(r4) + ld r6, VCPU_CTR(r4) + ld r7, VCPU_XER(r4) + mtctr r6 + mtxer r7 /* * Required state: @@ -1202,7 +1092,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r5, VCPU_LR(r4) - lwz r6, VCPU_CR(r4) + ld r6, VCPU_CR(r4) mtlr r5 mtcr r6 @@ -1234,6 +1124,83 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) HRFI_TO_GUEST b . +/* + * Enter the guest on a P9 or later system where we have exactly + * one vcpu per vcore and we don't need to go to real mode + * (which implies that host and guest are both using radix MMU mode). + * r3 = vcpu pointer + * Most SPRs and all the VSRs have been loaded already. + */ +_GLOBAL(__kvmhv_vcpu_entry_p9) +EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + stdu r1, -SFS(r1) + + li r0, 1 + stw r0, STACK_SLOT_SHORT_PATH(r1) + + std r3, HSTATE_KVM_VCPU(r13) + mfcr r4 + stw r4, SFS+8(r1) + + std r1, HSTATE_HOST_R1(r13) + + reg = 14 + .rept 18 + std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) + reg = reg + 1 + .endr + + reg = 14 + .rept 18 + ld reg, __VCPU_GPR(reg)(r3) + reg = reg + 1 + .endr + + mfmsr r10 + std r10, HSTATE_HOST_MSR(r13) + + mr r4, r3 + b fast_guest_entry_c +guest_exit_short_path: + + li r0, KVM_GUEST_MODE_NONE + stb r0, HSTATE_IN_GUEST(r13) + + reg = 14 + .rept 18 + std reg, __VCPU_GPR(reg)(r9) + reg = reg + 1 + .endr + + reg = 14 + .rept 18 + ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1) + reg = reg + 1 + .endr + + lwz r4, SFS+8(r1) + mtcr r4 + + mr r3, r12 /* trap number */ + + addi r1, r1, SFS + ld r0, PPC_LR_STKOFF(r1) + mtlr r0 + + /* If we are in real mode, do a rfid to get back to the caller */ + mfmsr r4 + andi. r5, r4, MSR_IR + bnelr + rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */ + mtspr SPRN_SRR0, r0 + ld r10, HSTATE_HOST_MSR(r13) + rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG + mtspr SPRN_SRR1, r10 + RFI_TO_KERNEL + b . 
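For orientation, the register convention of __kvmhv_vcpu_entry_p9 above implies a C-level interface roughly like the following. This is a sketch of how a caller might use it (the wrapper name is hypothetical), not code from the patch:

    /* r3 carries the vcpu pointer in, and guest_exit_short_path copies the
     * trap number from r12 back into r3, so the C-level view is an int return. */
    struct kvm_vcpu;

    extern int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);

    /* Hypothetical caller: SPRs and VSRs must already be loaded, per the
     * comment at the top of the routine. */
    static int enter_p9_guest(struct kvm_vcpu *vcpu)
    {
    	int trap = __kvmhv_vcpu_entry_p9(vcpu);

    	return trap;	/* a BOOK3S_INTERRUPT_* value for the caller to handle */
    }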
+ secondary_too_late: li r12, 0 stw r12, STACK_SLOT_TRAP(r1) @@ -1313,7 +1280,7 @@ kvmppc_interrupt_hv: std r3, VCPU_GPR(R12)(r9) /* CR is in the high half of r12 */ srdi r4, r12, 32 - stw r4, VCPU_CR(r9) + std r4, VCPU_CR(r9) BEGIN_FTR_SECTION ld r3, HSTATE_CFAR(r13) std r3, VCPU_CFAR(r9) @@ -1387,18 +1354,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) std r3, VCPU_CTR(r9) std r4, VCPU_XER(r9) -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM - /* For softpatch interrupt, go off and do TM instruction emulation */ - cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH - beq kvmppc_tm_emul -#endif + /* Save more register state */ + mfdar r3 + mfdsisr r4 + std r3, VCPU_DAR(r9) + stw r4, VCPU_DSISR(r9) /* If this is a page table miss then see if it's theirs or ours */ cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE beq kvmppc_hdsi + std r3, VCPU_FAULT_DAR(r9) + stw r4, VCPU_FAULT_DSISR(r9) cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE beq kvmppc_hisi +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + /* For softpatch interrupt, go off and do TM instruction emulation */ + cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH + beq kvmppc_tm_emul +#endif + /* See if this is a leftover HDEC interrupt */ cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER bne 2f @@ -1418,10 +1393,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) BEGIN_FTR_SECTION PPC_MSGSYNC lwsync + /* always exit if we're running a nested guest */ + ld r0, VCPU_NESTED(r9) + cmpdi r0, 0 + bne guest_exit_cont END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) lbz r0, HSTATE_HOST_IPI(r13) cmpwi r0, 0 - beq 4f + beq maybe_reenter_guest b guest_exit_cont 3: /* If it's a hypervisor facility unavailable interrupt, save HFSCR */ @@ -1433,82 +1412,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 14: /* External interrupt ? */ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL - bne+ guest_exit_cont - - /* External interrupt, first check for host_ipi. If this is - * set, we know the host wants us out so let's do it now - */ - bl kvmppc_read_intr - - /* - * Restore the active volatile registers after returning from - * a C function. - */ - ld r9, HSTATE_KVM_VCPU(r13) - li r12, BOOK3S_INTERRUPT_EXTERNAL - - /* - * kvmppc_read_intr return codes: - * - * Exit to host (r3 > 0) - * 1 An interrupt is pending that needs to be handled by the host - * Exit guest and return to host by branching to guest_exit_cont - * - * 2 Passthrough that needs completion in the host - * Exit guest and return to host by branching to guest_exit_cont - * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD - * to indicate to the host to complete handling the interrupt - * - * Before returning to guest, we check if any CPU is heading out - * to the host and if so, we head out also. If no CPUs are heading - * check return values <= 0. - * - * Return to guest (r3 <= 0) - * 0 No external interrupt is pending - * -1 A guest wakeup IPI (which has now been cleared) - * In either case, we return to guest to deliver any pending - * guest interrupts. - * - * -2 A PCI passthrough external interrupt was handled - * (interrupt was delivered directly to guest) - * Return to guest to deliver any pending guest interrupts. 
- */ - - cmpdi r3, 1 - ble 1f - - /* Return code = 2 */ - li r12, BOOK3S_INTERRUPT_HV_RM_HARD - stw r12, VCPU_TRAP(r9) - b guest_exit_cont - -1: /* Return code <= 1 */ - cmpdi r3, 0 - bgt guest_exit_cont - - /* Return code <= 0 */ -4: ld r5, HSTATE_KVM_VCORE(r13) - lwz r0, VCORE_ENTRY_EXIT(r5) - cmpwi r0, 0x100 - mr r4, r9 - blt deliver_guest_interrupt - -guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ - /* Save more register state */ - mfdar r6 - mfdsisr r7 - std r6, VCPU_DAR(r9) - stw r7, VCPU_DSISR(r9) - /* don't overwrite fault_dar/fault_dsisr if HDSI */ - cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE - beq mc_cont - std r6, VCPU_FAULT_DAR(r9) - stw r7, VCPU_FAULT_DSISR(r9) - + beq kvmppc_guest_external /* See if it is a machine check */ cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK beq machine_check_realmode -mc_cont: + /* Or a hypervisor maintenance interrupt */ + cmpwi r12, BOOK3S_INTERRUPT_HMI + beq hmi_realmode + +guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ + #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING addi r3, r9, VCPU_TB_RMEXIT mr r4, r9 @@ -1552,6 +1465,11 @@ mc_cont: 1: #endif /* CONFIG_KVM_XICS */ + /* If we came in through the P9 short path, go back out to C now */ + lwz r0, STACK_SLOT_SHORT_PATH(r1) + cmpwi r0, 0 + bne guest_exit_short_path + /* For hash guest, read the guest SLB and save it away */ ld r5, VCPU_KVM(r9) lbz r0, KVM_RADIX(r5) @@ -1780,11 +1698,13 @@ BEGIN_FTR_SECTION b 91f END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* - * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ mr r3, r9 ld r4, VCPU_MSR(r3) + li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_save_tm_hv + nop ld r9, HSTATE_KVM_VCPU(r13) 91: #endif @@ -1802,83 +1722,12 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) 25: /* Save PMU registers if requested */ /* r8 and cr0.eq are live here */ -BEGIN_FTR_SECTION - /* - * POWER8 seems to have a hardware bug where setting - * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] - * when some counters are already negative doesn't seem - * to cause a performance monitor alert (and hence interrupt). - * The effect of this is that when saving the PMU state, - * if there is no PMU alert pending when we read MMCR0 - * before freezing the counters, but one becomes pending - * before we read the counters, we lose it. - * To work around this, we need a way to freeze the counters - * before reading MMCR0. Normally, freezing the counters - * is done by writing MMCR0 (to set MMCR0[FC]) which - * unavoidably writes MMCR0[PMA0] as well. On POWER8, - * we can also freeze the counters using MMCR2, by writing - * 1s to all the counter freeze condition bits (there are - * 9 bits each for 6 counters). - */ - li r3, -1 /* set all freeze bits */ - clrrdi r3, r3, 10 - mfspr r10, SPRN_MMCR2 - mtspr SPRN_MMCR2, r3 - isync -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - li r3, 1 - sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ - mfspr r4, SPRN_MMCR0 /* save MMCR0 */ - mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ - mfspr r6, SPRN_MMCRA - /* Clear MMCRA in order to disable SDAR updates */ - li r7, 0 - mtspr SPRN_MMCRA, r7 - isync + mr r3, r9 + li r4, 1 beq 21f /* if no VPA, save PMU stuff anyway */ - lbz r7, LPPACA_PMCINUSE(r8) - cmpwi r7, 0 /* did they ask for PMU stuff to be saved? 
*/ - bne 21f - std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ - b 22f -21: mfspr r5, SPRN_MMCR1 - mfspr r7, SPRN_SIAR - mfspr r8, SPRN_SDAR - std r4, VCPU_MMCR(r9) - std r5, VCPU_MMCR + 8(r9) - std r6, VCPU_MMCR + 16(r9) -BEGIN_FTR_SECTION - std r10, VCPU_MMCR + 24(r9) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - std r7, VCPU_SIAR(r9) - std r8, VCPU_SDAR(r9) - mfspr r3, SPRN_PMC1 - mfspr r4, SPRN_PMC2 - mfspr r5, SPRN_PMC3 - mfspr r6, SPRN_PMC4 - mfspr r7, SPRN_PMC5 - mfspr r8, SPRN_PMC6 - stw r3, VCPU_PMC(r9) - stw r4, VCPU_PMC + 4(r9) - stw r5, VCPU_PMC + 8(r9) - stw r6, VCPU_PMC + 12(r9) - stw r7, VCPU_PMC + 16(r9) - stw r8, VCPU_PMC + 20(r9) -BEGIN_FTR_SECTION - mfspr r5, SPRN_SIER - std r5, VCPU_SIER(r9) -BEGIN_FTR_SECTION_NESTED(96) - mfspr r6, SPRN_SPMC1 - mfspr r7, SPRN_SPMC2 - mfspr r8, SPRN_MMCRS - stw r6, VCPU_PMC + 24(r9) - stw r7, VCPU_PMC + 28(r9) - std r8, VCPU_MMCR + 32(r9) - lis r4, 0x8000 - mtspr SPRN_MMCRS, r4 -END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) -22: + lbz r4, LPPACA_PMCINUSE(r8) +21: bl kvmhv_save_guest_pmu + ld r9, HSTATE_KVM_VCPU(r13) /* Restore host values of some registers */ BEGIN_FTR_SECTION @@ -2010,24 +1859,6 @@ BEGIN_FTR_SECTION mtspr SPRN_DPDES, r8 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) - /* If HMI, call kvmppc_realmode_hmi_handler() */ - lwz r12, STACK_SLOT_TRAP(r1) - cmpwi r12, BOOK3S_INTERRUPT_HMI - bne 27f - bl kvmppc_realmode_hmi_handler - nop - cmpdi r3, 0 - /* - * At this point kvmppc_realmode_hmi_handler may have resync-ed - * the TB, and if it has, we must not subtract the guest timebase - * offset from the timebase. So, skip it. - * - * Also, do not call kvmppc_subcore_exit_guest() because it has - * been invoked as part of kvmppc_realmode_hmi_handler(). - */ - beq 30f - -27: /* Subtract timebase offset from timebase */ ld r8, VCORE_TB_OFFSET_APPL(r5) cmpdi r8,0 @@ -2045,7 +1876,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) addis r8,r8,0x100 /* if so, increment upper 40 bits */ mtspr SPRN_TBU40,r8 -17: bl kvmppc_subcore_exit_guest +17: + /* + * If this is an HMI, we called kvmppc_realmode_hmi_handler + * above, which may or may not have already called + * kvmppc_subcore_exit_guest. Fortunately, all that + * kvmppc_subcore_exit_guest does is clear a flag, so calling + * it again here is benign even if kvmppc_realmode_hmi_handler + * has already called it. + */ + bl kvmppc_subcore_exit_guest nop 30: ld r5,HSTATE_KVM_VCORE(r13) ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ @@ -2099,6 +1939,67 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) mtlr r0 blr +kvmppc_guest_external: + /* External interrupt, first check for host_ipi. If this is + * set, we know the host wants us out so let's do it now + */ + bl kvmppc_read_intr + + /* + * Restore the active volatile registers after returning from + * a C function. + */ + ld r9, HSTATE_KVM_VCPU(r13) + li r12, BOOK3S_INTERRUPT_EXTERNAL + + /* + * kvmppc_read_intr return codes: + * + * Exit to host (r3 > 0) + * 1 An interrupt is pending that needs to be handled by the host + * Exit guest and return to host by branching to guest_exit_cont + * + * 2 Passthrough that needs completion in the host + * Exit guest and return to host by branching to guest_exit_cont + * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD + * to indicate to the host to complete handling the interrupt + * + * Before returning to guest, we check if any CPU is heading out + * to the host and if so, we head out also. If no CPUs are heading + * check return values <= 0. 
+ * + * Return to guest (r3 <= 0) + * 0 No external interrupt is pending + * -1 A guest wakeup IPI (which has now been cleared) + * In either case, we return to guest to deliver any pending + * guest interrupts. + * + * -2 A PCI passthrough external interrupt was handled + * (interrupt was delivered directly to guest) + * Return to guest to deliver any pending guest interrupts. + */ + + cmpdi r3, 1 + ble 1f + + /* Return code = 2 */ + li r12, BOOK3S_INTERRUPT_HV_RM_HARD + stw r12, VCPU_TRAP(r9) + b guest_exit_cont + +1: /* Return code <= 1 */ + cmpdi r3, 0 + bgt guest_exit_cont + + /* Return code <= 0 */ +maybe_reenter_guest: + ld r5, HSTATE_KVM_VCORE(r13) + lwz r0, VCORE_ENTRY_EXIT(r5) + cmpwi r0, 0x100 + mr r4, r9 + blt deliver_guest_interrupt + b guest_exit_cont + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Softpatch interrupt for transactional memory emulation cases @@ -2302,6 +2203,10 @@ hcall_try_real_mode: andi. r0,r11,MSR_PR /* sc 1 from userspace - reflect to guest syscall */ bne sc_1_fast_return + /* sc 1 from nested guest - give it to L1 to handle */ + ld r0, VCPU_NESTED(r9) + cmpdi r0, 0 + bne guest_exit_cont clrrdi r3,r3,2 cmpldi r3,hcall_real_table_end - hcall_real_table bge guest_exit_cont @@ -2561,6 +2466,7 @@ hcall_real_table: hcall_real_table_end: _GLOBAL(kvmppc_h_set_xdabr) +EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) andi. r0, r5, DABRX_USER | DABRX_KERNEL beq 6f li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI @@ -2570,6 +2476,7 @@ _GLOBAL(kvmppc_h_set_xdabr) blr _GLOBAL(kvmppc_h_set_dabr) +EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) li r5, DABRX_USER | DABRX_KERNEL 3: BEGIN_FTR_SECTION @@ -2682,11 +2589,13 @@ BEGIN_FTR_SECTION b 91f END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* - * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ ld r3, HSTATE_KVM_VCPU(r13) ld r4, VCPU_MSR(r3) + li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_save_tm_hv + nop 91: #endif @@ -2802,11 +2711,13 @@ BEGIN_FTR_SECTION b 91f END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) /* - * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR + * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) */ mr r3, r4 ld r4, VCPU_MSR(r3) + li r5, 0 /* don't preserve non-vol regs */ bl kvmppc_restore_tm_hv + nop ld r4, HSTATE_KVM_VCPU(r13) 91: #endif @@ -2874,13 +2785,7 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) mr r9, r4 cmpdi r3, 0 bgt guest_exit_cont - - /* see if any other thread is already exiting */ - lwz r0,VCORE_ENTRY_EXIT(r5) - cmpwi r0,0x100 - bge guest_exit_cont - - b kvmppc_cede_reentry /* if not go back to guest */ + b maybe_reenter_guest /* cede when already previously prodded case */ kvm_cede_prodded: @@ -2947,12 +2852,12 @@ machine_check_realmode: */ ld r11, VCPU_MSR(r9) rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */ - bne mc_cont /* if so, exit to host */ + bne guest_exit_cont /* if so, exit to host */ /* Check if guest is capable of handling NMI exit */ ld r10, VCPU_KVM(r9) lbz r10, KVM_FWNMI(r10) cmpdi r10, 1 /* FWNMI capable? */ - beq mc_cont /* if so, exit with KVM_EXIT_NMI. */ + beq guest_exit_cont /* if so, exit with KVM_EXIT_NMI. */ /* if not, fall through for backward compatibility. */ andi. r10, r11, MSR_RI /* check for unrecoverable exception */ @@ -2966,6 +2871,21 @@ machine_check_realmode: 2: b fast_interrupt_c_return /* + * Call C code to handle a HMI in real mode. 
+ * Only the primary thread does the call, secondary threads are handled + * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. + * r9 points to the vcpu on entry + */ +hmi_realmode: + lbz r0, HSTATE_PTID(r13) + cmpwi r0, 0 + bne guest_exit_cont + bl kvmppc_realmode_hmi_handler + ld r9, HSTATE_KVM_VCPU(r13) + li r12, BOOK3S_INTERRUPT_HMI + b guest_exit_cont + +/* * Check the reason we woke from nap, and take appropriate action. * Returns (in r3): * 0 if nothing needs to be done @@ -3130,10 +3050,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) * Save transactional state and TM-related registers. * Called with r3 pointing to the vcpu struct and r4 containing * the guest MSR value. - * This can modify all checkpointed registers, but + * r5 is non-zero iff non-volatile register state needs to be maintained. + * If r5 == 0, this can modify all checkpointed registers, but * restores r1 and r2 before exit. */ -kvmppc_save_tm_hv: +_GLOBAL_TOC(kvmppc_save_tm_hv) +EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv) /* See if we need to handle fake suspend mode */ BEGIN_FTR_SECTION b __kvmppc_save_tm @@ -3161,12 +3083,6 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) nop - std r1, HSTATE_HOST_R1(r13) - - /* Clear the MSR RI since r1, r13 may be foobar. */ - li r5, 0 - mtmsrd r5, 1 - /* We have to treclaim here because that's the only way to do S->N */ li r3, TM_CAUSE_KVM_RESCHED TRECLAIM(R3) @@ -3175,22 +3091,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) * We were in fake suspend, so we are not going to save the * register state as the guest checkpointed state (since * we already have it), therefore we can now use any volatile GPR. + * In fact treclaim in fake suspend state doesn't modify + * any registers. */ - /* Reload PACA pointer, stack pointer and TOC. */ - GET_PACA(r13) - ld r1, HSTATE_HOST_R1(r13) - ld r2, PACATOC(r13) - /* Set MSR RI now we have r1 and r13 back. */ - li r5, MSR_RI - mtmsrd r5, 1 - - HMT_MEDIUM - ld r6, HSTATE_DSCR(r13) - mtspr SPRN_DSCR, r6 -BEGIN_FTR_SECTION_NESTED(96) +BEGIN_FTR_SECTION bl pnv_power9_force_smt4_release -END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) +END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) nop 4: @@ -3216,10 +3123,12 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96) * Restore transactional state and TM-related registers. * Called with r3 pointing to the vcpu struct * and r4 containing the guest MSR value. + * r5 is non-zero iff non-volatile register state needs to be maintained. * This potentially modifies all checkpointed registers. * It restores r1 and r2 from the PACA. */ -kvmppc_restore_tm_hv: +_GLOBAL_TOC(kvmppc_restore_tm_hv) +EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv) /* * If we are doing TM emulation for the guest on a POWER9 DD2, * then we don't actually do a trechkpt -- we either set up @@ -3424,6 +3333,194 @@ kvmppc_msr_interrupt: blr /* + * Load up guest PMU state. R3 points to the vcpu struct. + */ +_GLOBAL(kvmhv_load_guest_pmu) +EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu) + mr r4, r3 + mflr r0 + li r3, 1 + sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ + mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ + isync +BEGIN_FTR_SECTION + ld r3, VCPU_MMCR(r4) + andi. 
r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO + cmpwi r5, MMCR0_PMAO + beql kvmppc_fix_pmao +END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) + lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ + lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ + lwz r6, VCPU_PMC + 8(r4) + lwz r7, VCPU_PMC + 12(r4) + lwz r8, VCPU_PMC + 16(r4) + lwz r9, VCPU_PMC + 20(r4) + mtspr SPRN_PMC1, r3 + mtspr SPRN_PMC2, r5 + mtspr SPRN_PMC3, r6 + mtspr SPRN_PMC4, r7 + mtspr SPRN_PMC5, r8 + mtspr SPRN_PMC6, r9 + ld r3, VCPU_MMCR(r4) + ld r5, VCPU_MMCR + 8(r4) + ld r6, VCPU_MMCR + 16(r4) + ld r7, VCPU_SIAR(r4) + ld r8, VCPU_SDAR(r4) + mtspr SPRN_MMCR1, r5 + mtspr SPRN_MMCRA, r6 + mtspr SPRN_SIAR, r7 + mtspr SPRN_SDAR, r8 +BEGIN_FTR_SECTION + ld r5, VCPU_MMCR + 24(r4) + ld r6, VCPU_SIER(r4) + mtspr SPRN_MMCR2, r5 + mtspr SPRN_SIER, r6 +BEGIN_FTR_SECTION_NESTED(96) + lwz r7, VCPU_PMC + 24(r4) + lwz r8, VCPU_PMC + 28(r4) + ld r9, VCPU_MMCR + 32(r4) + mtspr SPRN_SPMC1, r7 + mtspr SPRN_SPMC2, r8 + mtspr SPRN_MMCRS, r9 +END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + mtspr SPRN_MMCR0, r3 + isync + mtlr r0 + blr + +/* + * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. + */ +_GLOBAL(kvmhv_load_host_pmu) +EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu) + mflr r0 + lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ + cmpwi r4, 0 + beq 23f /* skip if not */ +BEGIN_FTR_SECTION + ld r3, HSTATE_MMCR0(r13) + andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO + cmpwi r4, MMCR0_PMAO + beql kvmppc_fix_pmao +END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) + lwz r3, HSTATE_PMC1(r13) + lwz r4, HSTATE_PMC2(r13) + lwz r5, HSTATE_PMC3(r13) + lwz r6, HSTATE_PMC4(r13) + lwz r8, HSTATE_PMC5(r13) + lwz r9, HSTATE_PMC6(r13) + mtspr SPRN_PMC1, r3 + mtspr SPRN_PMC2, r4 + mtspr SPRN_PMC3, r5 + mtspr SPRN_PMC4, r6 + mtspr SPRN_PMC5, r8 + mtspr SPRN_PMC6, r9 + ld r3, HSTATE_MMCR0(r13) + ld r4, HSTATE_MMCR1(r13) + ld r5, HSTATE_MMCRA(r13) + ld r6, HSTATE_SIAR(r13) + ld r7, HSTATE_SDAR(r13) + mtspr SPRN_MMCR1, r4 + mtspr SPRN_MMCRA, r5 + mtspr SPRN_SIAR, r6 + mtspr SPRN_SDAR, r7 +BEGIN_FTR_SECTION + ld r8, HSTATE_MMCR2(r13) + ld r9, HSTATE_SIER(r13) + mtspr SPRN_MMCR2, r8 + mtspr SPRN_SIER, r9 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + mtspr SPRN_MMCR0, r3 + isync + mtlr r0 +23: blr + +/* + * Save guest PMU state into the vcpu struct. + * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA) + */ +_GLOBAL(kvmhv_save_guest_pmu) +EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu) + mr r9, r3 + mr r8, r4 +BEGIN_FTR_SECTION + /* + * POWER8 seems to have a hardware bug where setting + * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] + * when some counters are already negative doesn't seem + * to cause a performance monitor alert (and hence interrupt). + * The effect of this is that when saving the PMU state, + * if there is no PMU alert pending when we read MMCR0 + * before freezing the counters, but one becomes pending + * before we read the counters, we lose it. + * To work around this, we need a way to freeze the counters + * before reading MMCR0. Normally, freezing the counters + * is done by writing MMCR0 (to set MMCR0[FC]) which + * unavoidably writes MMCR0[PMA0] as well. On POWER8, + * we can also freeze the counters using MMCR2, by writing + * 1s to all the counter freeze condition bits (there are + * 9 bits each for 6 counters). 
+ */ + li r3, -1 /* set all freeze bits */ + clrrdi r3, r3, 10 + mfspr r10, SPRN_MMCR2 + mtspr SPRN_MMCR2, r3 + isync +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + li r3, 1 + sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ + mfspr r4, SPRN_MMCR0 /* save MMCR0 */ + mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ + mfspr r6, SPRN_MMCRA + /* Clear MMCRA in order to disable SDAR updates */ + li r7, 0 + mtspr SPRN_MMCRA, r7 + isync + cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */ + bne 21f + std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ + b 22f +21: mfspr r5, SPRN_MMCR1 + mfspr r7, SPRN_SIAR + mfspr r8, SPRN_SDAR + std r4, VCPU_MMCR(r9) + std r5, VCPU_MMCR + 8(r9) + std r6, VCPU_MMCR + 16(r9) +BEGIN_FTR_SECTION + std r10, VCPU_MMCR + 24(r9) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + std r7, VCPU_SIAR(r9) + std r8, VCPU_SDAR(r9) + mfspr r3, SPRN_PMC1 + mfspr r4, SPRN_PMC2 + mfspr r5, SPRN_PMC3 + mfspr r6, SPRN_PMC4 + mfspr r7, SPRN_PMC5 + mfspr r8, SPRN_PMC6 + stw r3, VCPU_PMC(r9) + stw r4, VCPU_PMC + 4(r9) + stw r5, VCPU_PMC + 8(r9) + stw r6, VCPU_PMC + 12(r9) + stw r7, VCPU_PMC + 16(r9) + stw r8, VCPU_PMC + 20(r9) +BEGIN_FTR_SECTION + mfspr r5, SPRN_SIER + std r5, VCPU_SIER(r9) +BEGIN_FTR_SECTION_NESTED(96) + mfspr r6, SPRN_SPMC1 + mfspr r7, SPRN_SPMC2 + mfspr r8, SPRN_MMCRS + stw r6, VCPU_PMC + 24(r9) + stw r7, VCPU_PMC + 28(r9) + std r8, VCPU_MMCR + 32(r9) + lis r4, 0x8000 + mtspr SPRN_MMCRS, r4 +END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) +22: blr + +/* * This works around a hardware bug on POWER8E processors, where * writing a 1 to the MMCR0[PMAO] bit doesn't generate a * performance monitor interrupt. Instead, when we need to have diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index 008285058f9b..888e2609e3f1 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c @@ -130,7 +130,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) return RESUME_GUEST; } /* Set CR0 to indicate previous transactional state */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); /* L=1 => tresume, L=0 => tsuspend */ if (instr & (1 << 21)) { @@ -174,7 +174,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) copy_from_checkpoint(vcpu); /* Set CR0 to indicate previous transactional state */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); vcpu->arch.shregs.msr &= ~MSR_TS_MASK; return RESUME_GUEST; @@ -204,7 +204,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) copy_to_checkpoint(vcpu); /* Set CR0 to indicate previous transactional state */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28); vcpu->arch.shregs.msr = msr | MSR_TS_S; return RESUME_GUEST; diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index b2c7c6fca4f9..3cf5863bc06e 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c @@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) if (instr & (1 << 21)) vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; /* Set CR0 to 0b0010 */ - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000; + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 
0x0fffffff) | + 0x20000000; return 1; } @@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ vcpu->arch.regs.nip = vcpu->arch.tfhar; copy_from_checkpoint(vcpu); - vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000; + vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000; } diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 614ebb4261f7..4efd65d9e828 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; - svcpu->cr = vcpu->arch.cr; + svcpu->cr = vcpu->arch.regs.ccr; svcpu->xer = vcpu->arch.regs.xer; svcpu->ctr = vcpu->arch.regs.ctr; svcpu->lr = vcpu->arch.regs.link; @@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; - vcpu->arch.cr = svcpu->cr; + vcpu->arch.regs.ccr = svcpu->cr; vcpu->arch.regs.xer = svcpu->xer; vcpu->arch.regs.ctr = svcpu->ctr; vcpu->arch.regs.link = svcpu->lr; @@ -1246,7 +1246,6 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL: - case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: case BOOK3S_INTERRUPT_EXTERNAL_HV: case BOOK3S_INTERRUPT_H_VIRT: vcpu->stat.ext_intr_exits++; diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index b8356cdc0c04..b0b2bfc2ff51 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -310,7 +310,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp, */ if (new.out_ee) { kvmppc_book3s_queue_irqprio(icp->vcpu, - BOOK3S_INTERRUPT_EXTERNAL_LEVEL); + BOOK3S_INTERRUPT_EXTERNAL); if (!change_self) kvmppc_fast_vcpu_kick(icp->vcpu); } @@ -593,8 +593,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu) u32 xirr; /* First, remove EE from the processor */ - kvmppc_book3s_dequeue_irqprio(icp->vcpu, - BOOK3S_INTERRUPT_EXTERNAL_LEVEL); + kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); /* * ICP State: Accept_Interrupt @@ -754,8 +753,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) * We can remove EE from the current processor, the update * transaction will set it again if needed */ - kvmppc_book3s_dequeue_irqprio(icp->vcpu, - BOOK3S_INTERRUPT_EXTERNAL_LEVEL); + kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); do { old_state = new_state = READ_ONCE(icp->state); @@ -1167,8 +1165,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval) * Deassert the CPU interrupt request. * icp_try_update will reassert it if necessary. 
*/ - kvmppc_book3s_dequeue_irqprio(icp->vcpu, - BOOK3S_INTERRUPT_EXTERNAL_LEVEL); + kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL); /* * Note that if we displace an interrupt from old_state.xisr, @@ -1393,7 +1390,8 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type) } #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - if (cpu_has_feature(CPU_FTR_ARCH_206)) { + if (cpu_has_feature(CPU_FTR_ARCH_206) && + cpu_has_feature(CPU_FTR_HVMODE)) { /* Enable real mode support */ xics->real_mode = ENABLE_REALMODE; xics->real_mode_dbg = DEBUG_REALMODE; diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 30c2eb766954..ad4a370703d3 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -62,6 +62,69 @@ #define XIVE_Q_GAP 2 /* + * Push a vcpu's context to the XIVE on guest entry. + * This assumes we are in virtual mode (MMU on) + */ +void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) +{ + void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; + u64 pq; + + if (!tima) + return; + eieio(); + __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS); + __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2); + vcpu->arch.xive_pushed = 1; + eieio(); + + /* + * We clear the irq_pending flag. There is a small chance of a + * race vs. the escalation interrupt happening on another + * processor setting it again, but the only consequence is to + * cause a spurious wakeup on the next H_CEDE, which is not an + * issue. + */ + vcpu->arch.irq_pending = 0; + + /* + * In single escalation mode, if the escalation interrupt is + * on, we mask it. + */ + if (vcpu->arch.xive_esc_on) { + pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + + XIVE_ESB_SET_PQ_01)); + mb(); + + /* + * We have a possible subtle race here: The escalation + * interrupt might have fired and be on its way to the + * host queue while we mask it, and if we unmask it + * early enough (re-cede right away), there is a + * theorical possibility that it fires again, thus + * landing in the target queue more than once which is + * a big no-no. + * + * Fortunately, solving this is rather easy. If the + * above load setting PQ to 01 returns a previous + * value where P is set, then we know the escalation + * interrupt is somewhere on its way to the host. In + * that case we simply don't clear the xive_esc_on + * flag below. It will be eventually cleared by the + * handler for the escalation interrupt. + * + * Then, when doing a cede, we check that flag again + * before re-enabling the escalation interrupt, and if + * set, we abort the cede. + */ + if (!(pq & XIVE_ESB_VAL_P)) + /* Now P is 0, we can clear the flag */ + vcpu->arch.xive_esc_on = 0; + } +} +EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu); + +/* * This is a simple trigger for a generic XIVE IRQ. This must * only be called for interrupts that support a trigger page */ diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 4171ede8722b..033363d6e764 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -280,14 +280,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu) /* First collect pending bits from HW */ GLUE(X_PFX,ack_pending)(xc); - /* - * Cleanup the old-style bits if needed (they may have been - * set by pull or an escalation interrupts). 
- */ - if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions)) - clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, - &vcpu->arch.pending_exceptions); - pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", xc->pending, xc->hw_cppr, xc->cppr); diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 81bd8a07aa51..051af7d97327 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -182,7 +182,7 @@ */ PPC_LL r4, PACACURRENT(r13) PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4) - stw r10, VCPU_CR(r4) + PPC_STL r10, VCPU_CR(r4) PPC_STL r11, VCPU_GPR(R4)(r4) PPC_STL r5, VCPU_GPR(R5)(r4) PPC_STL r6, VCPU_GPR(R6)(r4) @@ -292,7 +292,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, THREAD_NORMSAVE(0)(r10) PPC_STL r5, VCPU_GPR(R5)(r11) - stw r13, VCPU_CR(r11) + PPC_STL r13, VCPU_CR(r11) mfspr r5, \srr0 PPC_STL r3, VCPU_GPR(R10)(r11) PPC_LL r3, THREAD_NORMSAVE(2)(r10) @@ -319,7 +319,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1) PPC_STL r4, VCPU_GPR(R4)(r11) PPC_LL r4, GPR9(r8) PPC_STL r5, VCPU_GPR(R5)(r11) - stw r9, VCPU_CR(r11) + PPC_STL r9, VCPU_CR(r11) mfspr r5, \srr0 PPC_STL r3, VCPU_GPR(R8)(r11) PPC_LL r3, GPR10(r8) @@ -643,7 +643,7 @@ lightweight_exit: PPC_LL r3, VCPU_LR(r4) PPC_LL r5, VCPU_XER(r4) PPC_LL r6, VCPU_CTR(r4) - lwz r7, VCPU_CR(r4) + PPC_LL r7, VCPU_CR(r4) PPC_LL r8, VCPU_PC(r4) PPC_LD(r9, VCPU_SHARED_MSR, r11) PPC_LL r0, VCPU_GPR(R0)(r4) diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index 75dce1ef3bc8..f91b1309a0a8 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) emulated = EMULATE_FAIL; vcpu->arch.regs.msr = vcpu->arch.shared->msr; - vcpu->arch.regs.ccr = vcpu->arch.cr; if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { int type = op.type & INSTR_TYPE_MASK; int size = GETSIZE(op.type); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index eba5756d5b41..2869a299c4ed 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -594,7 +594,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = !!(hv_enabled && radix_enabled()); break; case KVM_CAP_PPC_MMU_HASH_V3: - r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300)); + r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) && + cpu_has_feature(CPU_FTR_HVMODE)); + break; + case KVM_CAP_PPC_NESTED_HV: + r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && + !kvmppc_hv_ops->enable_nested(NULL)); break; #endif case KVM_CAP_SYNC_MMU: @@ -2114,6 +2119,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); break; } + + case KVM_CAP_PPC_NESTED_HV: + r = -EINVAL; + if (!is_kvmppc_hv_enabled(kvm) || + !kvm->arch.kvm_ops->enable_nested) + break; + r = kvm->arch.kvm_ops->enable_nested(kvm); + break; #endif default: r = -EINVAL; diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S index 90e330f21356..0531a1492fdf 100644 --- a/arch/powerpc/kvm/tm.S +++ b/arch/powerpc/kvm/tm.S @@ -28,17 +28,25 @@ * Save transactional state and TM-related registers. * Called with: * - r3 pointing to the vcpu struct - * - r4 points to the MSR with current TS bits: + * - r4 containing the MSR with current TS bits: * (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR). - * This can modify all checkpointed registers, but - * restores r1, r2 before exit. 
+ * - r5 containing a flag indicating that non-volatile registers + * must be preserved. + * If r5 == 0, this can modify all checkpointed registers, but + * restores r1, r2 before exit. If r5 != 0, this restores the + * MSR TM/FP/VEC/VSX bits to their state on entry. */ _GLOBAL(__kvmppc_save_tm) mflr r0 std r0, PPC_LR_STKOFF(r1) + stdu r1, -SWITCH_FRAME_SIZE(r1) + + mr r9, r3 + cmpdi cr7, r5, 0 /* Turn on TM. */ mfmsr r8 + mr r10, r8 li r0, 1 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG ori r8, r8, MSR_FP @@ -51,6 +59,27 @@ _GLOBAL(__kvmppc_save_tm) std r1, HSTATE_SCRATCH2(r13) std r3, HSTATE_SCRATCH1(r13) + /* Save CR on the stack - even if r5 == 0 we need to get cr7 back. */ + mfcr r6 + SAVE_GPR(6, r1) + + /* Save DSCR so we can restore it to avoid running with user value */ + mfspr r7, SPRN_DSCR + SAVE_GPR(7, r1) + + /* + * We are going to do treclaim., which will modify all checkpointed + * registers. Save the non-volatile registers on the stack if + * preservation of non-volatile state has been requested. + */ + beq cr7, 3f + SAVE_NVGPRS(r1) + + /* MSR[TS] will be 0 (non-transactional) once we do treclaim. */ + li r0, 0 + rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG + SAVE_GPR(10, r1) /* final MSR value */ +3: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE BEGIN_FTR_SECTION /* Emulation of the treclaim instruction needs TEXASR before treclaim */ @@ -74,22 +103,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) std r9, PACATMSCRATCH(r13) ld r9, HSTATE_SCRATCH1(r13) - /* Get a few more GPRs free. */ - std r29, VCPU_GPRS_TM(29)(r9) - std r30, VCPU_GPRS_TM(30)(r9) - std r31, VCPU_GPRS_TM(31)(r9) - - /* Save away PPR and DSCR soon so don't run with user values. */ - mfspr r31, SPRN_PPR + /* Save away PPR soon so we don't run with user value. */ + std r0, VCPU_GPRS_TM(0)(r9) + mfspr r0, SPRN_PPR HMT_MEDIUM - mfspr r30, SPRN_DSCR -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - ld r29, HSTATE_DSCR(r13) - mtspr SPRN_DSCR, r29 -#endif - /* Save all but r9, r13 & r29-r31 */ - reg = 0 + /* Reload stack pointer. */ + std r1, VCPU_GPRS_TM(1)(r9) + ld r1, HSTATE_SCRATCH2(r13) + + /* Set MSR RI now we have r1 and r13 back. */ + std r2, VCPU_GPRS_TM(2)(r9) + li r2, MSR_RI + mtmsrd r2, 1 + + /* Reload TOC pointer. */ + ld r2, PACATOC(r13) + + /* Save all but r0-r2, r9 & r13 */ + reg = 3 .rept 29 .if (reg != 9) && (reg != 13) std reg, VCPU_GPRS_TM(reg)(r9) @@ -103,33 +135,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) ld r4, PACATMSCRATCH(r13) std r4, VCPU_GPRS_TM(9)(r9) - /* Reload stack pointer and TOC. */ - ld r1, HSTATE_SCRATCH2(r13) - ld r2, PACATOC(r13) - - /* Set MSR RI now we have r1 and r13 back. */ - li r5, MSR_RI - mtmsrd r5, 1 + /* Restore host DSCR and CR values, after saving guest values */ + mfcr r6 + mfspr r7, SPRN_DSCR + stw r6, VCPU_CR_TM(r9) + std r7, VCPU_DSCR_TM(r9) + REST_GPR(6, r1) + REST_GPR(7, r1) + mtcr r6 + mtspr SPRN_DSCR, r7 - /* Save away checkpinted SPRs. */ - std r31, VCPU_PPR_TM(r9) - std r30, VCPU_DSCR_TM(r9) + /* Save away checkpointed SPRs. */ + std r0, VCPU_PPR_TM(r9) mflr r5 - mfcr r6 mfctr r7 mfspr r8, SPRN_AMR mfspr r10, SPRN_TAR mfxer r11 std r5, VCPU_LR_TM(r9) - stw r6, VCPU_CR_TM(r9) std r7, VCPU_CTR_TM(r9) std r8, VCPU_AMR_TM(r9) std r10, VCPU_TAR_TM(r9) std r11, VCPU_XER_TM(r9) - /* Restore r12 as trap number. */ - lwz r12, VCPU_TRAP(r9) - /* Save FP/VSX. 
*/ addi r3, r9, VCPU_FPRS_TM bl store_fp_state @@ -137,6 +165,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) bl store_vr_state mfspr r6, SPRN_VRSAVE stw r6, VCPU_VRSAVE_TM(r9) + + /* Restore non-volatile registers if requested to */ + beq cr7, 1f + REST_NVGPRS(r1) + REST_GPR(10, r1) 1: /* * We need to save these SPRs after the treclaim so that the software @@ -146,12 +179,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) */ mfspr r7, SPRN_TEXASR std r7, VCPU_TEXASR(r9) -11: mfspr r5, SPRN_TFHAR mfspr r6, SPRN_TFIAR std r5, VCPU_TFHAR(r9) std r6, VCPU_TFIAR(r9) + /* Restore MSR state if requested */ + beq cr7, 2f + mtmsrd r10, 0 +2: + addi r1, r1, SWITCH_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr @@ -161,49 +198,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) * be invoked from C function by PR KVM only. */ _GLOBAL(_kvmppc_save_tm_pr) - mflr r5 - std r5, PPC_LR_STKOFF(r1) - stdu r1, -SWITCH_FRAME_SIZE(r1) - SAVE_NVGPRS(r1) - - /* save MSR since TM/math bits might be impacted - * by __kvmppc_save_tm(). - */ - mfmsr r5 - SAVE_GPR(5, r1) - - /* also save DSCR/CR/TAR so that it can be recovered later */ - mfspr r6, SPRN_DSCR - SAVE_GPR(6, r1) - - mfcr r7 - stw r7, _CCR(r1) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + stdu r1, -PPC_MIN_STKFRM(r1) mfspr r8, SPRN_TAR - SAVE_GPR(8, r1) + std r8, PPC_MIN_STKFRM-8(r1) + li r5, 1 /* preserve non-volatile registers */ bl __kvmppc_save_tm - REST_GPR(8, r1) + ld r8, PPC_MIN_STKFRM-8(r1) mtspr SPRN_TAR, r8 - ld r7, _CCR(r1) - mtcr r7 - - REST_GPR(6, r1) - mtspr SPRN_DSCR, r6 - - /* need preserve current MSR's MSR_TS bits */ - REST_GPR(5, r1) - mfmsr r6 - rldicl r6, r6, 64 - MSR_TS_S_LG, 62 - rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG - mtmsrd r5 - - REST_NVGPRS(r1) - addi r1, r1, SWITCH_FRAME_SIZE - ld r5, PPC_LR_STKOFF(r1) - mtlr r5 + addi r1, r1, PPC_MIN_STKFRM + ld r0, PPC_LR_STKOFF(r1) + mtlr r0 blr EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr); @@ -215,15 +225,21 @@ EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr); * - r4 is the guest MSR with desired TS bits: * For HV KVM, it is VCPU_MSR * For PR KVM, it is provided by caller - * This potentially modifies all checkpointed registers. - * It restores r1, r2 from the PACA. + * - r5 containing a flag indicating that non-volatile registers + * must be preserved. + * If r5 == 0, this potentially modifies all checkpointed registers, but + * restores r1, r2 from the PACA before exit. + * If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry. */ _GLOBAL(__kvmppc_restore_tm) mflr r0 std r0, PPC_LR_STKOFF(r1) + cmpdi cr7, r5, 0 + /* Turn on TM/FP/VSX/VMX so we can restore them. */ mfmsr r5 + mr r10, r5 li r6, MSR_TM >> 32 sldi r6, r6, 32 or r5, r5, r6 @@ -244,8 +260,7 @@ _GLOBAL(__kvmppc_restore_tm) mr r5, r4 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 - beqlr /* TM not active in guest */ - std r1, HSTATE_SCRATCH2(r13) + beq 9f /* TM not active in guest */ /* Make sure the failure summary is set, otherwise we'll program check * when we trechkpt. It's possible that this might have been not set @@ -256,6 +271,26 @@ _GLOBAL(__kvmppc_restore_tm) mtspr SPRN_TEXASR, r7 /* + * Make a stack frame and save non-volatile registers if requested. 
+ */ + stdu r1, -SWITCH_FRAME_SIZE(r1) + std r1, HSTATE_SCRATCH2(r13) + + mfcr r6 + mfspr r7, SPRN_DSCR + SAVE_GPR(2, r1) + SAVE_GPR(6, r1) + SAVE_GPR(7, r1) + + beq cr7, 4f + SAVE_NVGPRS(r1) + + /* MSR[TS] will be 1 (suspended) once we do trechkpt */ + li r0, 1 + rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG + SAVE_GPR(10, r1) /* final MSR value */ +4: + /* * We need to load up the checkpointed state for the guest. * We need to do this early as it will blow away any GPRs, VSRs and * some SPRs. @@ -291,8 +326,6 @@ _GLOBAL(__kvmppc_restore_tm) ld r29, VCPU_DSCR_TM(r3) ld r30, VCPU_PPR_TM(r3) - std r2, PACATMSCRATCH(r13) /* Save TOC */ - /* Clear the MSR RI since r1, r13 are all going to be foobar. */ li r5, 0 mtmsrd r5, 1 @@ -318,18 +351,31 @@ _GLOBAL(__kvmppc_restore_tm) /* Now let's get back the state we need. */ HMT_MEDIUM GET_PACA(r13) -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - ld r29, HSTATE_DSCR(r13) - mtspr SPRN_DSCR, r29 -#endif ld r1, HSTATE_SCRATCH2(r13) - ld r2, PACATMSCRATCH(r13) + REST_GPR(7, r1) + mtspr SPRN_DSCR, r7 /* Set the MSR RI since we have our registers back. */ li r5, MSR_RI mtmsrd r5, 1 + + /* Restore TOC pointer and CR */ + REST_GPR(2, r1) + REST_GPR(6, r1) + mtcr r6 + + /* Restore non-volatile registers if requested to. */ + beq cr7, 5f + REST_GPR(10, r1) + REST_NVGPRS(r1) + +5: addi r1, r1, SWITCH_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) mtlr r0 + +9: /* Restore MSR bits if requested */ + beqlr cr7 + mtmsrd r10, 0 blr /* @@ -337,47 +383,23 @@ _GLOBAL(__kvmppc_restore_tm) * can be invoked from C function by PR KVM only. */ _GLOBAL(_kvmppc_restore_tm_pr) - mflr r5 - std r5, PPC_LR_STKOFF(r1) - stdu r1, -SWITCH_FRAME_SIZE(r1) - SAVE_NVGPRS(r1) - - /* save MSR to avoid TM/math bits change */ - mfmsr r5 - SAVE_GPR(5, r1) - - /* also save DSCR/CR/TAR so that it can be recovered later */ - mfspr r6, SPRN_DSCR - SAVE_GPR(6, r1) - - mfcr r7 - stw r7, _CCR(r1) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + stdu r1, -PPC_MIN_STKFRM(r1) + /* save TAR so that it can be recovered later */ mfspr r8, SPRN_TAR - SAVE_GPR(8, r1) + std r8, PPC_MIN_STKFRM-8(r1) + li r5, 1 bl __kvmppc_restore_tm - REST_GPR(8, r1) + ld r8, PPC_MIN_STKFRM-8(r1) mtspr SPRN_TAR, r8 - ld r7, _CCR(r1) - mtcr r7 - - REST_GPR(6, r1) - mtspr SPRN_DSCR, r6 - - /* need preserve current MSR's MSR_TS bits */ - REST_GPR(5, r1) - mfmsr r6 - rldicl r6, r6, 64 - MSR_TS_S_LG, 62 - rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG - mtmsrd r5 - - REST_NVGPRS(r1) - addi r1, r1, SWITCH_FRAME_SIZE - ld r5, PPC_LR_STKOFF(r1) - mtlr r5 + addi r1, r1, PPC_MIN_STKFRM + ld r0, PPC_LR_STKOFF(r1) + mtlr r0 blr EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr); diff --git a/arch/powerpc/kvm/trace_book3s.h b/arch/powerpc/kvm/trace_book3s.h index f3b23759e017..372a82fa2de3 100644 --- a/arch/powerpc/kvm/trace_book3s.h +++ b/arch/powerpc/kvm/trace_book3s.h @@ -14,7 +14,6 @@ {0x400, "INST_STORAGE"}, \ {0x480, "INST_SEGMENT"}, \ {0x500, "EXTERNAL"}, \ - {0x501, "EXTERNAL_LEVEL"}, \ {0x502, "EXTERNAL_HV"}, \ {0x600, "ALIGNMENT"}, \ {0x700, "PROGRAM"}, \ diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 670286808928..3bf9fc6fd36c 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -3,8 +3,6 @@ # Makefile for ppc-specific library files.. 
# -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror - ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE) @@ -14,6 +12,8 @@ obj-y += string.o alloc.o code-patching.o feature-fixups.o obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o strlen_32.o +obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o + # See corresponding test in arch/powerpc/Makefile # 64-bit linker creates .sfpr on demand for final link (vmlinux), # so it is only needed for modules, and only for older linkers which diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 5ffee298745f..89502cbccb1b 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -98,8 +98,7 @@ static int map_patch_area(void *addr, unsigned long text_poke_addr) else pfn = __pa_symbol(addr) >> PAGE_SHIFT; - err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), - pgprot_val(PAGE_KERNEL)); + err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL); pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err); if (err) diff --git a/arch/powerpc/lib/error-inject.c b/arch/powerpc/lib/error-inject.c new file mode 100644 index 000000000000..407b992fb02f --- /dev/null +++ b/arch/powerpc/lib/error-inject.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/error-injection.h> +#include <linux/kprobes.h> +#include <linux/uaccess.h> + +void override_function_with_return(struct pt_regs *regs) +{ + /* + * Emulate 'blr'. 'regs' represents the state on entry of a predefined + * function in the kernel/module, captured on a kprobe. We don't need + * to worry about 32-bit userspace on a 64-bit kernel. + */ + regs->nip = regs->link; +} +NOKPROBE_SYMBOL(override_function_with_return); diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index ec531de99996..3c3be02f33b7 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S @@ -40,7 +40,7 @@ _GLOBAL(memset) .Lms: PPC_MTOCRF(1,r0) mr r6,r3 blt cr1,8f - beq+ 3f /* if already 8-byte aligned */ + beq 3f /* if already 8-byte aligned */ subf r5,r0,r5 bf 31,1f stb r4,0(r6) @@ -85,7 +85,7 @@ _GLOBAL(memset) addi r6,r6,8 8: cmpwi r5,0 PPC_MTOCRF(1,r5) - beqlr+ + beqlr bf 29,9f stw r4,0(r6) addi r6,r6,4 diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index cf77d755246d..36484a2ef915 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -67,7 +67,7 @@ void __init MMU_init_hw(void) /* PIN up to the 3 first 8Mb after IMMR in DTLB table */ #ifdef CONFIG_PIN_TLB_DATA unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000; - unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY; + unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY; #ifdef CONFIG_PIN_TLB_IMMR int i = 29; #else @@ -91,11 +91,10 @@ static void __init mmu_mapin_immr(void) { unsigned long p = PHYS_IMMR_BASE; unsigned long v = VIRT_IMMR_BASE; - unsigned long f = pgprot_val(PAGE_KERNEL_NCG); int offset; for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE) - map_kernel_page(v + offset, p + offset, f); + map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG); } /* Address of instructions to patch */ diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index cdf6a9960046..ca96e7be4d0e 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -3,10 +3,10 @@ # Makefile for the linux ppc-specific parts of the memory manager. 
# -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror - ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) +CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE) + obj-y := fault.o mem.o pgtable.o mmap.o \ init_$(BITS).o pgtable_$(BITS).o \ init-common.o mmu_context.o drmem.o @@ -15,7 +15,7 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o -obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb_low.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o +obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(BITS).o @@ -43,5 +43,12 @@ obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o obj-$(CONFIG_SPAPR_TCE_IOMMU) += mmu_context_iommu.o obj-$(CONFIG_PPC_PTDUMP) += dump_linuxpagetables.o +ifdef CONFIG_PPC_PTDUMP +obj-$(CONFIG_4xx) += dump_linuxpagetables-generic.o +obj-$(CONFIG_PPC_8xx) += dump_linuxpagetables-8xx.o +obj-$(CONFIG_PPC_BOOK3E_MMU) += dump_linuxpagetables-generic.o +obj-$(CONFIG_PPC_BOOK3S_32) += dump_linuxpagetables-generic.o +obj-$(CONFIG_PPC_BOOK3S_64) += dump_linuxpagetables-book3s64.o +endif obj-$(CONFIG_PPC_HTDUMP) += dump_hashpagetable.o obj-$(CONFIG_PPC_MEM_KEYS) += pkeys.o diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index 382528475433..b6e7b5952ab5 100644 --- a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c @@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t do { SetPageReserved(page); map_kernel_page(vaddr, page_to_phys(page), - pgprot_val(pgprot_noncached(PAGE_KERNEL))); + pgprot_noncached(PAGE_KERNEL)); page++; vaddr += PAGE_SIZE; } while (size -= PAGE_SIZE); diff --git a/arch/powerpc/mm/dump_linuxpagetables-8xx.c b/arch/powerpc/mm/dump_linuxpagetables-8xx.c new file mode 100644 index 000000000000..ab9e3f24db2f --- /dev/null +++ b/arch/powerpc/mm/dump_linuxpagetables-8xx.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * From split of dump_linuxpagetables.c + * Copyright 2016, Rashmica Gupta, IBM Corp. 
+ * + */ +#include <linux/kernel.h> +#include <asm/pgtable.h> + +#include "dump_linuxpagetables.h" + +static const struct flag_info flag_array[] = { + { + .mask = _PAGE_SH, + .val = 0, + .set = "user", + .clear = " ", + }, { + .mask = _PAGE_RO | _PAGE_NA, + .val = 0, + .set = "rw", + }, { + .mask = _PAGE_RO | _PAGE_NA, + .val = _PAGE_RO, + .set = "r ", + }, { + .mask = _PAGE_RO | _PAGE_NA, + .val = _PAGE_NA, + .set = " ", + }, { + .mask = _PAGE_EXEC, + .val = _PAGE_EXEC, + .set = " X ", + .clear = " ", + }, { + .mask = _PAGE_PRESENT, + .val = _PAGE_PRESENT, + .set = "present", + .clear = " ", + }, { + .mask = _PAGE_GUARDED, + .val = _PAGE_GUARDED, + .set = "guarded", + .clear = " ", + }, { + .mask = _PAGE_DIRTY, + .val = _PAGE_DIRTY, + .set = "dirty", + .clear = " ", + }, { + .mask = _PAGE_ACCESSED, + .val = _PAGE_ACCESSED, + .set = "accessed", + .clear = " ", + }, { + .mask = _PAGE_NO_CACHE, + .val = _PAGE_NO_CACHE, + .set = "no cache", + .clear = " ", + }, { + .mask = _PAGE_SPECIAL, + .val = _PAGE_SPECIAL, + .set = "special", + } +}; + +struct pgtable_level pg_level[5] = { + { + }, { /* pgd */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pud */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pmd */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pte */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, +}; diff --git a/arch/powerpc/mm/dump_linuxpagetables-book3s64.c b/arch/powerpc/mm/dump_linuxpagetables-book3s64.c new file mode 100644 index 000000000000..ed6fcf78256e --- /dev/null +++ b/arch/powerpc/mm/dump_linuxpagetables-book3s64.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * From split of dump_linuxpagetables.c + * Copyright 2016, Rashmica Gupta, IBM Corp. + * + */ +#include <linux/kernel.h> +#include <asm/pgtable.h> + +#include "dump_linuxpagetables.h" + +static const struct flag_info flag_array[] = { + { + .mask = _PAGE_PRIVILEGED, + .val = 0, + .set = "user", + .clear = " ", + }, { + .mask = _PAGE_READ, + .val = _PAGE_READ, + .set = "r", + .clear = " ", + }, { + .mask = _PAGE_WRITE, + .val = _PAGE_WRITE, + .set = "w", + .clear = " ", + }, { + .mask = _PAGE_EXEC, + .val = _PAGE_EXEC, + .set = " X ", + .clear = " ", + }, { + .mask = _PAGE_PTE, + .val = _PAGE_PTE, + .set = "pte", + .clear = " ", + }, { + .mask = _PAGE_PRESENT, + .val = _PAGE_PRESENT, + .set = "valid", + .clear = " ", + }, { + .mask = _PAGE_PRESENT | _PAGE_INVALID, + .val = 0, + .set = " ", + .clear = "present", + }, { + .mask = H_PAGE_HASHPTE, + .val = H_PAGE_HASHPTE, + .set = "hpte", + .clear = " ", + }, { + .mask = _PAGE_DIRTY, + .val = _PAGE_DIRTY, + .set = "dirty", + .clear = " ", + }, { + .mask = _PAGE_ACCESSED, + .val = _PAGE_ACCESSED, + .set = "accessed", + .clear = " ", + }, { + .mask = _PAGE_NON_IDEMPOTENT, + .val = _PAGE_NON_IDEMPOTENT, + .set = "non-idempotent", + .clear = " ", + }, { + .mask = _PAGE_TOLERANT, + .val = _PAGE_TOLERANT, + .set = "tolerant", + .clear = " ", + }, { + .mask = H_PAGE_BUSY, + .val = H_PAGE_BUSY, + .set = "busy", + }, { +#ifdef CONFIG_PPC_64K_PAGES + .mask = H_PAGE_COMBO, + .val = H_PAGE_COMBO, + .set = "combo", + }, { + .mask = H_PAGE_4K_PFN, + .val = H_PAGE_4K_PFN, + .set = "4K_pfn", + }, { +#else /* CONFIG_PPC_64K_PAGES */ + .mask = H_PAGE_F_GIX, + .val = H_PAGE_F_GIX, + .set = "f_gix", + .is_val = true, + .shift = H_PAGE_F_GIX_SHIFT, + }, { + .mask = H_PAGE_F_SECOND, + .val = H_PAGE_F_SECOND, + .set = "f_second", + }, { +#endif /* CONFIG_PPC_64K_PAGES */ + .mask = _PAGE_SPECIAL, + .val = 
_PAGE_SPECIAL, + .set = "special", + } +}; + +struct pgtable_level pg_level[5] = { + { + }, { /* pgd */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pud */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pmd */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pte */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, +}; diff --git a/arch/powerpc/mm/dump_linuxpagetables-generic.c b/arch/powerpc/mm/dump_linuxpagetables-generic.c new file mode 100644 index 000000000000..1e3829ec1348 --- /dev/null +++ b/arch/powerpc/mm/dump_linuxpagetables-generic.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * From split of dump_linuxpagetables.c + * Copyright 2016, Rashmica Gupta, IBM Corp. + * + */ +#include <linux/kernel.h> +#include <asm/pgtable.h> + +#include "dump_linuxpagetables.h" + +static const struct flag_info flag_array[] = { + { + .mask = _PAGE_USER, + .val = _PAGE_USER, + .set = "user", + .clear = " ", + }, { + .mask = _PAGE_RW, + .val = _PAGE_RW, + .set = "rw", + .clear = "r ", + }, { +#ifndef CONFIG_PPC_BOOK3S_32 + .mask = _PAGE_EXEC, + .val = _PAGE_EXEC, + .set = " X ", + .clear = " ", + }, { +#endif + .mask = _PAGE_PRESENT, + .val = _PAGE_PRESENT, + .set = "present", + .clear = " ", + }, { + .mask = _PAGE_GUARDED, + .val = _PAGE_GUARDED, + .set = "guarded", + .clear = " ", + }, { + .mask = _PAGE_DIRTY, + .val = _PAGE_DIRTY, + .set = "dirty", + .clear = " ", + }, { + .mask = _PAGE_ACCESSED, + .val = _PAGE_ACCESSED, + .set = "accessed", + .clear = " ", + }, { + .mask = _PAGE_WRITETHRU, + .val = _PAGE_WRITETHRU, + .set = "write through", + .clear = " ", + }, { + .mask = _PAGE_NO_CACHE, + .val = _PAGE_NO_CACHE, + .set = "no cache", + .clear = " ", + }, { + .mask = _PAGE_SPECIAL, + .val = _PAGE_SPECIAL, + .set = "special", + } +}; + +struct pgtable_level pg_level[5] = { + { + }, { /* pgd */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pud */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pmd */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, { /* pte */ + .flag = flag_array, + .num = ARRAY_SIZE(flag_array), + }, +}; diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c index 876e2a3c79f2..2b74f8adf4d0 100644 --- a/arch/powerpc/mm/dump_linuxpagetables.c +++ b/arch/powerpc/mm/dump_linuxpagetables.c @@ -27,6 +27,8 @@ #include <asm/page.h> #include <asm/pgalloc.h> +#include "dump_linuxpagetables.h" + #ifdef CONFIG_PPC32 #define KERN_VIRT_START 0 #endif @@ -101,159 +103,6 @@ static struct addr_marker address_markers[] = { { -1, NULL }, }; -struct flag_info { - u64 mask; - u64 val; - const char *set; - const char *clear; - bool is_val; - int shift; -}; - -static const struct flag_info flag_array[] = { - { - .mask = _PAGE_USER | _PAGE_PRIVILEGED, - .val = _PAGE_USER, - .set = "user", - .clear = " ", - }, { - .mask = _PAGE_RW | _PAGE_RO | _PAGE_NA, - .val = _PAGE_RW, - .set = "rw", - }, { - .mask = _PAGE_RW | _PAGE_RO | _PAGE_NA, - .val = _PAGE_RO, - .set = "ro", - }, { -#if _PAGE_NA != 0 - .mask = _PAGE_RW | _PAGE_RO | _PAGE_NA, - .val = _PAGE_RO, - .set = "na", - }, { -#endif - .mask = _PAGE_EXEC, - .val = _PAGE_EXEC, - .set = " X ", - .clear = " ", - }, { - .mask = _PAGE_PTE, - .val = _PAGE_PTE, - .set = "pte", - .clear = " ", - }, { - .mask = _PAGE_PRESENT, - .val = _PAGE_PRESENT, - .set = "present", - .clear = " ", - }, { -#ifdef CONFIG_PPC_BOOK3S_64 - .mask = H_PAGE_HASHPTE, - .val = H_PAGE_HASHPTE, -#else - .mask = _PAGE_HASHPTE, - 
.val = _PAGE_HASHPTE, -#endif - .set = "hpte", - .clear = " ", - }, { -#ifndef CONFIG_PPC_BOOK3S_64 - .mask = _PAGE_GUARDED, - .val = _PAGE_GUARDED, - .set = "guarded", - .clear = " ", - }, { -#endif - .mask = _PAGE_DIRTY, - .val = _PAGE_DIRTY, - .set = "dirty", - .clear = " ", - }, { - .mask = _PAGE_ACCESSED, - .val = _PAGE_ACCESSED, - .set = "accessed", - .clear = " ", - }, { -#ifndef CONFIG_PPC_BOOK3S_64 - .mask = _PAGE_WRITETHRU, - .val = _PAGE_WRITETHRU, - .set = "write through", - .clear = " ", - }, { -#endif -#ifndef CONFIG_PPC_BOOK3S_64 - .mask = _PAGE_NO_CACHE, - .val = _PAGE_NO_CACHE, - .set = "no cache", - .clear = " ", - }, { -#else - .mask = _PAGE_NON_IDEMPOTENT, - .val = _PAGE_NON_IDEMPOTENT, - .set = "non-idempotent", - .clear = " ", - }, { - .mask = _PAGE_TOLERANT, - .val = _PAGE_TOLERANT, - .set = "tolerant", - .clear = " ", - }, { -#endif -#ifdef CONFIG_PPC_BOOK3S_64 - .mask = H_PAGE_BUSY, - .val = H_PAGE_BUSY, - .set = "busy", - }, { -#ifdef CONFIG_PPC_64K_PAGES - .mask = H_PAGE_COMBO, - .val = H_PAGE_COMBO, - .set = "combo", - }, { - .mask = H_PAGE_4K_PFN, - .val = H_PAGE_4K_PFN, - .set = "4K_pfn", - }, { -#else /* CONFIG_PPC_64K_PAGES */ - .mask = H_PAGE_F_GIX, - .val = H_PAGE_F_GIX, - .set = "f_gix", - .is_val = true, - .shift = H_PAGE_F_GIX_SHIFT, - }, { - .mask = H_PAGE_F_SECOND, - .val = H_PAGE_F_SECOND, - .set = "f_second", - }, { -#endif /* CONFIG_PPC_64K_PAGES */ -#endif - .mask = _PAGE_SPECIAL, - .val = _PAGE_SPECIAL, - .set = "special", - } -}; - -struct pgtable_level { - const struct flag_info *flag; - size_t num; - u64 mask; -}; - -static struct pgtable_level pg_level[] = { - { - }, { /* pgd */ - .flag = flag_array, - .num = ARRAY_SIZE(flag_array), - }, { /* pud */ - .flag = flag_array, - .num = ARRAY_SIZE(flag_array), - }, { /* pmd */ - .flag = flag_array, - .num = ARRAY_SIZE(flag_array), - }, { /* pte */ - .flag = flag_array, - .num = ARRAY_SIZE(flag_array), - }, -}; - static void dump_flag_info(struct pg_state *st, const struct flag_info *flag, u64 pte, int num) { @@ -418,12 +267,13 @@ static void walk_pagetables(struct pg_state *st) unsigned int i; unsigned long addr; + addr = st->start_address; + /* * Traverse the linux pagetable structure and dump pages that are in * the hash pagetable. 
*/ - for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { - addr = KERN_VIRT_START + i * PGDIR_SIZE; + for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) { if (!pgd_none(*pgd) && !pgd_huge(*pgd)) /* pgd exists */ walk_pud(st, pgd, addr); @@ -472,9 +322,14 @@ static int ptdump_show(struct seq_file *m, void *v) { struct pg_state st = { .seq = m, - .start_address = KERN_VIRT_START, .marker = address_markers, }; + + if (radix_enabled()) + st.start_address = PAGE_OFFSET; + else + st.start_address = KERN_VIRT_START; + /* Traverse kernel page tables */ walk_pagetables(&st); note_page(&st, 0, 0, 0); diff --git a/arch/powerpc/mm/dump_linuxpagetables.h b/arch/powerpc/mm/dump_linuxpagetables.h new file mode 100644 index 000000000000..5d513636de73 --- /dev/null +++ b/arch/powerpc/mm/dump_linuxpagetables.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/types.h> + +struct flag_info { + u64 mask; + u64 val; + const char *set; + const char *clear; + bool is_val; + int shift; +}; + +struct pgtable_level { + const struct flag_info *flag; + size_t num; + u64 mask; +}; + +extern struct pgtable_level pg_level[5]; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index d51cf5f4e45e..1697e903bbf2 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -103,8 +103,7 @@ static bool store_updates_sp(unsigned int inst) */ static int -__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code, - int pkey) +__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code) { /* * If we are in kernel mode, bail out with a SEGV, this will @@ -114,18 +113,17 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code, if (!user_mode(regs)) return SIGSEGV; - _exception_pkey(SIGSEGV, regs, si_code, address, pkey); + _exception(SIGSEGV, regs, si_code, address); return 0; } static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address) { - return __bad_area_nosemaphore(regs, address, SEGV_MAPERR, 0); + return __bad_area_nosemaphore(regs, address, SEGV_MAPERR); } -static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code, - int pkey) +static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code) { struct mm_struct *mm = current->mm; @@ -135,54 +133,61 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code, */ up_read(&mm->mmap_sem); - return __bad_area_nosemaphore(regs, address, si_code, pkey); + return __bad_area_nosemaphore(regs, address, si_code); } static noinline int bad_area(struct pt_regs *regs, unsigned long address) { - return __bad_area(regs, address, SEGV_MAPERR, 0); + return __bad_area(regs, address, SEGV_MAPERR); } static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address, int pkey) { - return __bad_area_nosemaphore(regs, address, SEGV_PKUERR, pkey); + /* + * If we are in kernel mode, bail out with a SEGV, this will + * be caught by the assembly which will restore the non-volatile + * registers before calling bad_page_fault() + */ + if (!user_mode(regs)) + return SIGSEGV; + + _exception_pkey(regs, address, pkey); + + return 0; } static noinline int bad_access(struct pt_regs *regs, unsigned long address) { - return __bad_area(regs, address, SEGV_ACCERR, 0); + return __bad_area(regs, address, SEGV_ACCERR); } static int do_sigbus(struct pt_regs *regs, unsigned long address, vm_fault_t fault) { - siginfo_t info; - unsigned int lsb = 0; - if (!user_mode(regs)) return SIGBUS; current->thread.trap_nr 
= BUS_ADRERR; - clear_siginfo(&info); - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *)address; #ifdef CONFIG_MEMORY_FAILURE if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { + unsigned int lsb = 0; /* shutup gcc */ + pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", current->comm, current->pid, address); - info.si_code = BUS_MCEERR_AR; + + if (fault & VM_FAULT_HWPOISON_LARGE) + lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); + if (fault & VM_FAULT_HWPOISON) + lsb = PAGE_SHIFT; + + force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, + current); + return 0; } - if (fault & VM_FAULT_HWPOISON_LARGE) - lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); - if (fault & VM_FAULT_HWPOISON) - lsb = PAGE_SHIFT; #endif - info.si_addr_lsb = lsb; - force_sig_info(SIGBUS, &info, current); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current); return 0; } diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 729f02df8290..aaa28fd918fe 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -115,6 +115,8 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) tlbiel_hash_set_isa300(0, is, 0, 2, 1); asm volatile("ptesync": : :"memory"); + + asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); } void hash__tlbiel_all(unsigned int action) @@ -140,8 +142,6 @@ void hash__tlbiel_all(unsigned int action) tlbiel_all_isa206(POWER7_TLB_SETS, is); else WARN(1, "%s called on pre-POWER7 CPU\n", __func__); - - asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); } static inline unsigned long ___tlbie(unsigned long vpn, int psize, diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index f23a89d8e4ce..0cc7fbc3bd1c 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -1001,9 +1001,9 @@ void __init hash__early_init_mmu(void) * 4k use hugepd format, so for hash set then to * zero */ - __pmd_val_bits = 0; - __pud_val_bits = 0; - __pgd_val_bits = 0; + __pmd_val_bits = HASH_PMD_VAL_BITS; + __pud_val_bits = HASH_PUD_VAL_BITS; + __pgd_val_bits = HASH_PGD_VAL_BITS; __kernel_virt_start = H_KERN_VIRT_START; __kernel_virt_size = H_KERN_VIRT_SIZE; @@ -1125,7 +1125,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr) if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) { copy_mm_to_paca(mm); - slb_flush_and_rebolt(); + slb_flush_and_restore_bolted(); } } #endif /* CONFIG_PPC_64K_PAGES */ @@ -1197,7 +1197,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm, if (user_region) { if (psize != get_paca_psize(ea)) { copy_mm_to_paca(mm); - slb_flush_and_rebolt(); + slb_flush_and_restore_bolted(); } } else if (get_paca()->vmalloc_sllp != mmu_psize_defs[mmu_vmalloc_psize].sllp) { @@ -1482,7 +1482,7 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea) #endif void hash_preload(struct mm_struct *mm, unsigned long ea, - unsigned long access, unsigned long trap) + bool is_exec, unsigned long trap) { int hugepage_shift; unsigned long vsid; @@ -1490,6 +1490,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, pte_t *ptep; unsigned long flags; int rc, ssize, update_flags = 0; + unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? 
_PAGE_EXEC : 0); BUG_ON(REGION_ID(ea) != USER_REGION_ID); diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c index 01f213d2bcb9..dfbc3b32f09b 100644 --- a/arch/powerpc/mm/hugepage-hash64.c +++ b/arch/powerpc/mm/hugepage-hash64.c @@ -51,6 +51,12 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, new_pmd |= _PAGE_DIRTY; } while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd))); + /* + * Make sure this is thp or devmap entry + */ + if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))) + return 0; + rflags = htab_convert_pte_flags(new_pmd); #if 0 diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index b320f5097a06..2e6a8f9345d3 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c @@ -62,6 +62,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, new_pte |= _PAGE_DIRTY; } while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); + /* Make sure this is a hugetlb entry */ + if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)) + return 0; + rflags = htab_convert_pte_flags(new_pte); if (unlikely(mmu_psize == MMU_PAGE_16G)) offset = PTRS_PER_PUD; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index e87f9ef9115b..a7226ed9cae6 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -19,6 +19,7 @@ #include <linux/moduleparam.h> #include <linux/swap.h> #include <linux/swapops.h> +#include <linux/kmemleak.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/tlb.h> @@ -95,7 +96,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, break; else { #ifdef CONFIG_PPC_BOOK3S_64 - *hpdp = __hugepd(__pa(new) | + *hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS | (shift_to_mmu_psize(pshift) << 2)); #elif defined(CONFIG_PPC_8xx) *hpdp = __hugepd(__pa(new) | _PMD_USER | @@ -112,6 +113,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, for (i = i - 1 ; i >= 0; i--, hpdp--) *hpdp = __hugepd(0); kmem_cache_free(cachep, new); + } else { + kmemleak_ignore(new); } spin_unlock(ptl); return 0; @@ -837,8 +840,12 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, ret_pte = (pte_t *) pmdp; goto out; } - - if (pmd_huge(pmd)) { + /* + * pmd_large check below will handle the swap pmd pte + * we need to do both the check because they are config + * dependent. 
+ */ + if (pmd_huge(pmd) || pmd_large(pmd)) { ret_pte = (pte_t *) pmdp; goto out; } else if (is_hugepd(__hugepd(pmd_val(pmd)))) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 04ccb274a620..dd949d6649a2 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -309,11 +309,11 @@ void __init paging_init(void) unsigned long end = __fix_to_virt(FIX_HOLE); for (; v < end; v += PAGE_SIZE) - map_kernel_page(v, 0, 0); /* XXX gross */ + map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */ #endif #ifdef CONFIG_HIGHMEM - map_kernel_page(PKMAP_BASE, 0, 0); /* XXX gross */ + map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */ pkmap_page_table = virt_to_kpte(PKMAP_BASE); kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); @@ -509,7 +509,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, * We don't need to worry about _PAGE_PRESENT here because we are * called with either mm->page_table_lock held or ptl lock held */ - unsigned long access, trap; + unsigned long trap; + bool is_exec; if (radix_enabled()) { prefetch((void *)address); @@ -531,16 +532,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL; switch (trap) { case 0x300: - access = 0UL; + is_exec = false; break; case 0x400: - access = _PAGE_EXEC; + is_exec = true; break; default: return; } - hash_preload(vma->vm_mm, address, access, trap); + hash_preload(vma->vm_mm, address, is_exec, trap); #endif /* CONFIG_PPC_STD_MMU */ #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \ && defined(CONFIG_HUGETLB_PAGE) diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index dbd8f762140b..510f103d7813 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -53,6 +53,8 @@ int hash__alloc_context_id(void) } EXPORT_SYMBOL_GPL(hash__alloc_context_id); +void slb_setup_new_exec(void); + static int hash__init_new_context(struct mm_struct *mm) { int index; @@ -84,6 +86,13 @@ static int hash__init_new_context(struct mm_struct *mm) return index; } +void hash__setup_new_exec(void) +{ + slice_setup_new_exec(); + + slb_setup_new_exec(); +} + static int radix__init_new_context(struct mm_struct *mm) { unsigned long rts_field; diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index e5d779eed181..8574fbbc45e0 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -22,6 +22,7 @@ #include <asm/mmu.h> #ifdef CONFIG_PPC_MMU_NOHASH +#include <asm/trace.h> /* * On 40x and 8xx, we directly inline tlbia and tlbivax @@ -30,10 +31,12 @@ static inline void _tlbil_all(void) { asm volatile ("sync; tlbia; isync" : : : "memory"); + trace_tlbia(MMU_NO_CONTEXT); } static inline void _tlbil_pid(unsigned int pid) { asm volatile ("sync; tlbia; isync" : : : "memory"); + trace_tlbia(pid); } #define _tlbil_pid_noind(pid) _tlbil_pid(pid) @@ -55,6 +58,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind) { asm volatile ("tlbie %0; sync" : : "r" (address) : "memory"); + trace_tlbie(0, 0, address, pid, 0, 0, 0); } #elif defined(CONFIG_PPC_BOOK3E) extern void _tlbil_va(unsigned long address, unsigned int pid, @@ -82,7 +86,7 @@ static inline void _tlbivax_bcast(unsigned long address, unsigned int pid, #else /* CONFIG_PPC_MMU_NOHASH */ extern void hash_preload(struct mm_struct *mm, unsigned long ea, - unsigned long access, unsigned long trap); + bool 
is_exec, unsigned long trap); extern void _tlbie(unsigned long address); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 055b211b7126..693ae1c1acba 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1521,6 +1521,10 @@ int start_topology_update(void) } } + pr_info("Starting topology update%s%s\n", + (prrn_enabled ? " prrn_enabled" : ""), + (vphn_enabled ? " vphn_enabled" : "")); + return rc; } @@ -1542,6 +1546,8 @@ int stop_topology_update(void) rc = del_timer_sync(&topology_timer); } + pr_info("Stopping topology update\n"); + return rc; } diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/pgtable-book3e.c index a2298930f990..e0ccf36714b2 100644 --- a/arch/powerpc/mm/pgtable-book3e.c +++ b/arch/powerpc/mm/pgtable-book3e.c @@ -42,7 +42,7 @@ int __meminit vmemmap_create_mapping(unsigned long start, * thus must have the low bits clear */ for (i = 0; i < page_size; i += PAGE_SIZE) - BUG_ON(map_kernel_page(start + i, phys, flags)); + BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags))); return 0; } @@ -70,7 +70,7 @@ static __ref void *early_alloc_pgtable(unsigned long size) * map_kernel_page adds an entry to the ioremap page table * and adds an entry to the HPT, possibly bolting it */ -int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags) +int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) { pgd_t *pgdp; pud_t *pudp; @@ -89,8 +89,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags) ptep = pte_alloc_kernel(pmdp, ea); if (!ptep) return -ENOMEM; - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, - __pgprot(flags))); } else { pgdp = pgd_offset_k(ea); #ifndef __PAGETABLE_PUD_FOLDED @@ -113,9 +111,8 @@ int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags) pmd_populate_kernel(&init_mm, pmdp, ptep); } ptep = pte_offset_kernel(pmdp, ea); - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, - __pgprot(flags))); } + set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); smp_wmb(); return 0; diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index 01d7c0f7c4f0..9f93c9f985c5 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -69,9 +69,14 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { #ifdef CONFIG_DEBUG_VM - WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp))); + /* + * Make sure hardware valid bit is not set. We don't do + * tlb flush for this update. 
+ */ + + WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp))); assert_spin_locked(pmd_lockptr(mm, pmdp)); - WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd))); + WARN_ON(!(pmd_large(pmd) || pmd_devmap(pmd))); #endif trace_hugepage_set_pmd(addr, pmd_val(pmd)); return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); @@ -106,7 +111,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, { unsigned long old_pmd; - old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); + old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); /* * This ensures that generic code that rely on IRQ disabling diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c index 692bfc9e372c..c08d49046a96 100644 --- a/arch/powerpc/mm/pgtable-hash64.c +++ b/arch/powerpc/mm/pgtable-hash64.c @@ -142,7 +142,7 @@ void hash__vmemmap_remove_mapping(unsigned long start, * map_kernel_page adds an entry to the ioremap page table * and adds an entry to the HPT, possibly bolting it */ -int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags) +int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) { pgd_t *pgdp; pud_t *pudp; @@ -161,8 +161,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag ptep = pte_alloc_kernel(pmdp, ea); if (!ptep) return -ENOMEM; - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, - __pgprot(flags))); + set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot)); } else { /* * If the mm subsystem is not fully up, we cannot create a @@ -170,7 +169,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag * entry in the hardware page table. * */ - if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags, + if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot), mmu_io_psize, mmu_kernel_ssize)) { printk(KERN_ERR "Failed to do bolted mapping IO " "memory at %016lx !\n", pa); diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index c879979faa73..931156069a81 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -241,9 +241,8 @@ void radix__mark_initmem_nx(void) } #endif /* CONFIG_STRICT_KERNEL_RWX */ -static inline void __meminit print_mapping(unsigned long start, - unsigned long end, - unsigned long size) +static inline void __meminit +print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec) { char buf[10]; @@ -252,7 +251,17 @@ static inline void __meminit print_mapping(unsigned long start, string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf)); - pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf); + pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf, + exec ? 
" (exec)" : ""); +} + +static unsigned long next_boundary(unsigned long addr, unsigned long end) +{ +#ifdef CONFIG_STRICT_KERNEL_RWX + if (addr < __pa_symbol(__init_begin)) + return __pa_symbol(__init_begin); +#endif + return end; } static int __meminit create_physical_mapping(unsigned long start, @@ -260,13 +269,8 @@ static int __meminit create_physical_mapping(unsigned long start, int nid) { unsigned long vaddr, addr, mapping_size = 0; + bool prev_exec, exec = false; pgprot_t prot; - unsigned long max_mapping_size; -#ifdef CONFIG_STRICT_KERNEL_RWX - int split_text_mapping = 1; -#else - int split_text_mapping = 0; -#endif int psize; start = _ALIGN_UP(start, PAGE_SIZE); @@ -274,14 +278,12 @@ static int __meminit create_physical_mapping(unsigned long start, unsigned long gap, previous_size; int rc; - gap = end - addr; + gap = next_boundary(addr, end) - addr; previous_size = mapping_size; - max_mapping_size = PUD_SIZE; + prev_exec = exec; -retry: if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE && - mmu_psize_defs[MMU_PAGE_1G].shift && - PUD_SIZE <= max_mapping_size) { + mmu_psize_defs[MMU_PAGE_1G].shift) { mapping_size = PUD_SIZE; psize = MMU_PAGE_1G; } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE && @@ -293,32 +295,21 @@ retry: psize = mmu_virtual_psize; } - if (split_text_mapping && (mapping_size == PUD_SIZE) && - (addr <= __pa_symbol(__init_begin)) && - (addr + mapping_size) >= __pa_symbol(_stext)) { - max_mapping_size = PMD_SIZE; - goto retry; - } - - if (split_text_mapping && (mapping_size == PMD_SIZE) && - (addr <= __pa_symbol(__init_begin)) && - (addr + mapping_size) >= __pa_symbol(_stext)) { - mapping_size = PAGE_SIZE; - psize = mmu_virtual_psize; - } - - if (mapping_size != previous_size) { - print_mapping(start, addr, previous_size); - start = addr; - } - vaddr = (unsigned long)__va(addr); if (overlaps_kernel_text(vaddr, vaddr + mapping_size) || - overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) + overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) { prot = PAGE_KERNEL_X; - else + exec = true; + } else { prot = PAGE_KERNEL; + exec = false; + } + + if (mapping_size != previous_size || exec != prev_exec) { + print_mapping(start, addr, previous_size, prev_exec); + start = addr; + } rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end); if (rc) @@ -327,7 +318,7 @@ retry: update_page_count(psize, 1); } - print_mapping(start, addr, mapping_size); + print_mapping(start, addr, mapping_size, exec); return 0; } diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index d71c7777669c..010e1c616cb2 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -44,20 +44,13 @@ static inline int is_exec_fault(void) static inline int pte_looks_normal(pte_t pte) { -#if defined(CONFIG_PPC_BOOK3S_64) - if ((pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL)) == _PAGE_PRESENT) { + if (pte_present(pte) && !pte_special(pte)) { if (pte_ci(pte)) return 0; if (pte_user(pte)) return 1; } return 0; -#else - return (pte_val(pte) & - (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER | - _PAGE_PRIVILEGED)) == - (_PAGE_PRESENT | _PAGE_USER); -#endif } static struct page *maybe_pte_to_page(pte_t pte) @@ -73,7 +66,7 @@ static struct page *maybe_pte_to_page(pte_t pte) return page; } -#if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 +#ifdef CONFIG_PPC_BOOK3S /* Server-style MMU handles coherency when hashing if HW exec permission * is supposed per page (currently 64-bit only). 
If not, then, we always @@ -106,7 +99,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, return pte; } -#else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */ +#else /* CONFIG_PPC_BOOK3S */ /* Embedded type MMU with HW exec support. This is a bit more complicated * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so @@ -117,7 +110,7 @@ static pte_t set_pte_filter(pte_t pte) struct page *pg; /* No exec permission in the first place, move on */ - if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte)) + if (!pte_exec(pte) || !pte_looks_normal(pte)) return pte; /* If you set _PAGE_EXEC on weird pages you're on your own */ @@ -137,7 +130,7 @@ static pte_t set_pte_filter(pte_t pte) } /* Else, we filter out _PAGE_EXEC */ - return __pte(pte_val(pte) & ~_PAGE_EXEC); + return pte_exprotect(pte); } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, @@ -150,7 +143,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, * if necessary. Also if _PAGE_EXEC is already set, same deal, * we just bail out */ - if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault()) + if (dirty || pte_exec(pte) || !is_exec_fault()) return pte; #ifdef CONFIG_DEBUG_VM @@ -176,10 +169,10 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, set_bit(PG_arch_1, &pg->flags); bail: - return __pte(pte_val(pte) | _PAGE_EXEC); + return pte_mkexec(pte); } -#endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */ +#endif /* CONFIG_PPC_BOOK3S */ /* * set_pte stores a linux PTE into the linux page table. @@ -188,14 +181,13 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { /* - * When handling numa faults, we already have the pte marked - * _PAGE_PRESENT, but we can be sure that it is not in hpte. - * Hence we can use set_pte_at for them. + * Make sure hardware valid bit is not set. We don't do + * tlb flush for this update. 
*/ - VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep)); + VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep)); /* Add the pte bit when trying to set a pte */ - pte = __pte(pte_val(pte) | _PAGE_PTE); + pte = pte_mkpte(pte); /* Note: mm->context.id might not yet have been assigned as * this context might not have been activated yet when this diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 120a49bfb9c6..5877f5aa8f5d 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -76,56 +76,69 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) void __iomem * ioremap(phys_addr_t addr, unsigned long size) { - return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED, - __builtin_return_address(0)); + pgprot_t prot = pgprot_noncached(PAGE_KERNEL); + + return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap); void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size) { - return __ioremap_caller(addr, size, _PAGE_NO_CACHE, - __builtin_return_address(0)); + pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL); + + return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_wc); void __iomem * +ioremap_wt(phys_addr_t addr, unsigned long size) +{ + pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL); + + return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_wt); + +void __iomem * +ioremap_coherent(phys_addr_t addr, unsigned long size) +{ + pgprot_t prot = pgprot_cached(PAGE_KERNEL); + + return __ioremap_caller(addr, size, prot, __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_coherent); + +void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags) { + pte_t pte = __pte(flags); + /* writeable implies dirty for kernel addresses */ - if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO) - flags |= _PAGE_DIRTY | _PAGE_HWWRITE; + if (pte_write(pte)) + pte = pte_mkdirty(pte); /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ - flags &= ~(_PAGE_USER | _PAGE_EXEC); - flags |= _PAGE_PRIVILEGED; + pte = pte_exprotect(pte); + pte = pte_mkprivileged(pte); - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); + return __ioremap_caller(addr, size, pte_pgprot(pte), __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_prot); void __iomem * __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) { - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); + return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0)); } void __iomem * -__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, - void *caller) +__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller) { unsigned long v, i; phys_addr_t p; int err; - /* Make sure we have the base flags */ - if ((flags & _PAGE_PRESENT) == 0) - flags |= pgprot_val(PAGE_KERNEL); - - /* Non-cacheable page cannot be coherent */ - if (flags & _PAGE_NO_CACHE) - flags &= ~_PAGE_COHERENT; - /* * Choose an address to map it to. * Once the vmalloc system is running, we use it. 
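The pgtable_32.c hunks above convert the 32-bit ioremap family to take a pgprot_t built from PAGE_KERNEL and add ioremap_wt() and ioremap_coherent() alongside ioremap() and ioremap_wc(), while ioremap_prot() now sanitizes caller-supplied flags through pte accessors rather than open-coded bit masks. A minimal driver-style usage sketch of the resulting API follows; it is illustrative only, and the mydev_* names and the physical address are invented rather than taken from the patch:

#include <linux/io.h>
#include <linux/errno.h>

#define MYDEV_PHYS	0xf1000000UL	/* hypothetical MMIO base address */
#define MYDEV_SIZE	0x1000UL	/* hypothetical register block size */

static void __iomem *mydev_regs;

static int mydev_map_regs(void)
{
	/* Device registers want the default non-cached, guarded mapping. */
	mydev_regs = ioremap(MYDEV_PHYS, MYDEV_SIZE);
	if (!mydev_regs)
		return -ENOMEM;

	writel(0x1, mydev_regs + 0x10);	/* example register access */
	return 0;
}

static void mydev_unmap_regs(void)
{
	iounmap(mydev_regs);
	mydev_regs = NULL;
}

A buffer the CPU should cache while staying coherent with the device would use ioremap_coherent() instead, and a streaming region such as a framebuffer would typically use ioremap_wc(); after this patch all of these variants funnel into __ioremap_caller() with a pgprot_t derived from PAGE_KERNEL.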
@@ -183,7 +196,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, err = 0; for (i = 0; i < size && err == 0; i += PAGE_SIZE) - err = map_kernel_page(v+i, p+i, flags); + err = map_kernel_page(v + i, p + i, prot); if (err) { if (slab_is_available()) vunmap((void *)v); @@ -209,7 +222,7 @@ void iounmap(volatile void __iomem *addr) } EXPORT_SYMBOL(iounmap); -int map_kernel_page(unsigned long va, phys_addr_t pa, int flags) +int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot) { pmd_t *pd; pte_t *pg; @@ -224,10 +237,8 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags) /* The PTE should never be already set nor present in the * hash table */ - BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) && - flags); - set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, - __pgprot(flags))); + BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot)); + set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot)); } smp_wmb(); return err; @@ -238,7 +249,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags) */ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) { - unsigned long v, s, f; + unsigned long v, s; phys_addr_t p; int ktext; @@ -248,11 +259,10 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) for (; s < top; s += PAGE_SIZE) { ktext = ((char *)v >= _stext && (char *)v < etext) || ((char *)v >= _sinittext && (char *)v < _einittext); - f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL); - map_kernel_page(v, p, f); + map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL); #ifdef CONFIG_PPC_STD_MMU_32 if (ktext) - hash_preload(&init_mm, v, 0, 0x300); + hash_preload(&init_mm, v, false, 0x300); #endif v += PAGE_SIZE; p += PAGE_SIZE; diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 53e9eeecd5d4..fb1375c07e8c 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -113,17 +113,12 @@ unsigned long ioremap_bot = IOREMAP_BASE; * __ioremap_at - Low level function to establish the page tables * for an IO mapping */ -void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size, - unsigned long flags) +void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot) { unsigned long i; - /* Make sure we have the base flags */ - if ((flags & _PAGE_PRESENT) == 0) - flags |= pgprot_val(PAGE_KERNEL); - /* We don't support the 4K PFN hack with ioremap */ - if (flags & H_PAGE_4K_PFN) + if (pgprot_val(prot) & H_PAGE_4K_PFN) return NULL; WARN_ON(pa & ~PAGE_MASK); @@ -131,7 +126,7 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size, WARN_ON(size & ~PAGE_MASK); for (i = 0; i < size; i += PAGE_SIZE) - if (map_kernel_page((unsigned long)ea+i, pa+i, flags)) + if (map_kernel_page((unsigned long)ea + i, pa + i, prot)) return NULL; return (void __iomem *)ea; @@ -152,7 +147,7 @@ void __iounmap_at(void *ea, unsigned long size) } void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size, - unsigned long flags, void *caller) + pgprot_t prot, void *caller) { phys_addr_t paligned; void __iomem *ret; @@ -182,11 +177,11 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size, return NULL; area->phys_addr = paligned; - ret = __ioremap_at(paligned, area->addr, size, flags); + ret = __ioremap_at(paligned, area->addr, size, prot); if (!ret) vunmap(area->addr); } else { - ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags); 
+ ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot); if (ret) ioremap_bot += size; } @@ -199,49 +194,59 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size, void __iomem * __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) { - return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); + return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0)); } void __iomem * ioremap(phys_addr_t addr, unsigned long size) { - unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0))); + pgprot_t prot = pgprot_noncached(PAGE_KERNEL); void *caller = __builtin_return_address(0); if (ppc_md.ioremap) - return ppc_md.ioremap(addr, size, flags, caller); - return __ioremap_caller(addr, size, flags, caller); + return ppc_md.ioremap(addr, size, prot, caller); + return __ioremap_caller(addr, size, prot, caller); } void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size) { - unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0))); + pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL); + void *caller = __builtin_return_address(0); + + if (ppc_md.ioremap) + return ppc_md.ioremap(addr, size, prot, caller); + return __ioremap_caller(addr, size, prot, caller); +} + +void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size) +{ + pgprot_t prot = pgprot_cached(PAGE_KERNEL); void *caller = __builtin_return_address(0); if (ppc_md.ioremap) - return ppc_md.ioremap(addr, size, flags, caller); - return __ioremap_caller(addr, size, flags, caller); + return ppc_md.ioremap(addr, size, prot, caller); + return __ioremap_caller(addr, size, prot, caller); } void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags) { + pte_t pte = __pte(flags); void *caller = __builtin_return_address(0); /* writeable implies dirty for kernel addresses */ - if (flags & _PAGE_WRITE) - flags |= _PAGE_DIRTY; + if (pte_write(pte)) + pte = pte_mkdirty(pte); /* we don't want to let _PAGE_EXEC leak out */ - flags &= ~_PAGE_EXEC; + pte = pte_exprotect(pte); /* * Force kernel mapping. */ - flags &= ~_PAGE_USER; - flags |= _PAGE_PRIVILEGED; + pte = pte_mkprivileged(pte); if (ppc_md.ioremap) - return ppc_md.ioremap(addr, size, flags, caller); - return __ioremap_caller(addr, size, flags, caller); + return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller); + return __ioremap_caller(addr, size, pte_pgprot(pte), caller); } @@ -306,7 +311,7 @@ struct page *pud_page(pud_t pud) */ struct page *pmd_page(pmd_t pmd) { - if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd)) + if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd)) return pte_page(pmd_pte(pmd)); return virt_to_page(pmd_page_vaddr(pmd)); } diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index bea6c544e38f..38a793bfca37 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -163,7 +163,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys, * Preload a translation in the hash table */ void hash_preload(struct mm_struct *mm, unsigned long ea, - unsigned long access, unsigned long trap) + bool is_exec, unsigned long trap) { pmd_t *pmd; diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 9f574e59d178..c3fdf2969d9f 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -14,6 +14,7 @@ * 2 of the License, or (at your option) any later version. 
*/ +#include <asm/asm-prototypes.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/mmu_context.h> @@ -30,11 +31,10 @@ enum slb_index { LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */ - VMALLOC_INDEX = 1, /* Kernel virtual map (0xd000000000000000) */ - KSTACK_INDEX = 2, /* Kernel stack map */ + KSTACK_INDEX = 1, /* Kernel stack map */ }; -extern void slb_allocate(unsigned long ea); +static long slb_allocate_user(struct mm_struct *mm, unsigned long ea); #define slb_esid_mask(ssize) \ (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T) @@ -45,13 +45,43 @@ static inline unsigned long mk_esid_data(unsigned long ea, int ssize, return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index; } -static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, +static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize, unsigned long flags) { - return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags | + return (vsid << slb_vsid_shift(ssize)) | flags | ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT); } +static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, + unsigned long flags) +{ + return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); +} + +static void assert_slb_exists(unsigned long ea) +{ +#ifdef CONFIG_DEBUG_VM + unsigned long tmp; + + WARN_ON_ONCE(mfmsr() & MSR_EE); + + asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0"); + WARN_ON(tmp == 0); +#endif +} + +static void assert_slb_notexists(unsigned long ea) +{ +#ifdef CONFIG_DEBUG_VM + unsigned long tmp; + + WARN_ON_ONCE(mfmsr() & MSR_EE); + + asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0"); + WARN_ON(tmp != 0); +#endif +} + static inline void slb_shadow_update(unsigned long ea, int ssize, unsigned long flags, enum slb_index index) @@ -84,6 +114,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize, */ slb_shadow_update(ea, ssize, flags, index); + assert_slb_notexists(ea); asm volatile("slbmte %0,%1" : : "r" (mk_vsid_data(ea, ssize, flags)), "r" (mk_esid_data(ea, ssize, index)) @@ -105,17 +136,20 @@ void __slb_restore_bolted_realmode(void) : "r" (be64_to_cpu(p->save_area[index].vsid)), "r" (be64_to_cpu(p->save_area[index].esid))); } + + assert_slb_exists(local_paca->kstack); } /* * Insert the bolted entries into an empty SLB. - * This is not the same as rebolt because the bolted segments are not - * changed, just loaded from the shadow area. */ void slb_restore_bolted_realmode(void) { __slb_restore_bolted_realmode(); get_paca()->slb_cache_ptr = 0; + + get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; + get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; } /* @@ -123,113 +157,262 @@ void slb_restore_bolted_realmode(void) */ void slb_flush_all_realmode(void) { - /* - * This flushes all SLB entries including 0, so it must be realmode. - */ asm volatile("slbmte %0,%0; slbia" : : "r" (0)); } -static void __slb_flush_and_rebolt(void) +/* + * This flushes non-bolted entries, it can be run in virtual mode. Must + * be called with interrupts disabled. + */ +void slb_flush_and_restore_bolted(void) { - /* If you change this make sure you change SLB_NUM_BOLTED - * and PR KVM appropriately too. 
*/ - unsigned long linear_llp, vmalloc_llp, lflags, vflags; - unsigned long ksp_esid_data, ksp_vsid_data; + struct slb_shadow *p = get_slb_shadow(); - linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; - vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; - lflags = SLB_VSID_KERNEL | linear_llp; - vflags = SLB_VSID_KERNEL | vmalloc_llp; + BUILD_BUG_ON(SLB_NUM_BOLTED != 2); - ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX); - if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) { - ksp_esid_data &= ~SLB_ESID_V; - ksp_vsid_data = 0; - slb_shadow_clear(KSTACK_INDEX); - } else { - /* Update stack entry; others don't change */ - slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX); - ksp_vsid_data = - be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid); - } + WARN_ON(!irqs_disabled()); + + /* + * We can't take a PMU exception in the following code, so hard + * disable interrupts. + */ + hard_irq_disable(); - /* We need to do this all in asm, so we're sure we don't touch - * the stack between the slbia and rebolting it. */ asm volatile("isync\n" "slbia\n" - /* Slot 1 - first VMALLOC segment */ - "slbmte %0,%1\n" - /* Slot 2 - kernel stack */ - "slbmte %2,%3\n" - "isync" - :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)), - "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, VMALLOC_INDEX)), - "r"(ksp_vsid_data), - "r"(ksp_esid_data) + "slbmte %0, %1\n" + "isync\n" + :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)), + "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid)) : "memory"); + assert_slb_exists(get_paca()->kstack); + + get_paca()->slb_cache_ptr = 0; + + get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; + get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; } -void slb_flush_and_rebolt(void) +void slb_save_contents(struct slb_entry *slb_ptr) { + int i; + unsigned long e, v; - WARN_ON(!irqs_disabled()); + /* Save slb_cache_ptr value. */ + get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr; + + if (!slb_ptr) + return; + + for (i = 0; i < mmu_slb_size; i++) { + asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i)); + asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i)); + slb_ptr->esid = e; + slb_ptr->vsid = v; + slb_ptr++; + } +} + +void slb_dump_contents(struct slb_entry *slb_ptr) +{ + int i, n; + unsigned long e, v; + unsigned long llp; + + if (!slb_ptr) + return; + + pr_err("SLB contents of cpu 0x%x\n", smp_processor_id()); + pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr); + + for (i = 0; i < mmu_slb_size; i++) { + e = slb_ptr->esid; + v = slb_ptr->vsid; + slb_ptr++; + + if (!e && !v) + continue; + + pr_err("%02d %016lx %016lx\n", i, e, v); + + if (!(e & SLB_ESID_V)) { + pr_err("\n"); + continue; + } + llp = v & SLB_VSID_LLP; + if (v & SLB_VSID_B_1T) { + pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n", + GET_ESID_1T(e), + (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp); + } else { + pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n", + GET_ESID(e), + (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp); + } + } + pr_err("----------------------------------\n"); + + /* Dump slb cache entires as well. 
*/ + pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr); + pr_err("Valid SLB cache entries:\n"); + n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES); + for (i = 0; i < n; i++) + pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); + pr_err("Rest of SLB cache entries:\n"); + for (i = n; i < SLB_CACHE_ENTRIES; i++) + pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]); +} +void slb_vmalloc_update(void) +{ /* - * We can't take a PMU exception in the following code, so hard - * disable interrupts. + * vmalloc is not bolted, so just have to flush non-bolted. */ - hard_irq_disable(); + slb_flush_and_restore_bolted(); +} - __slb_flush_and_rebolt(); - get_paca()->slb_cache_ptr = 0; +static bool preload_hit(struct thread_info *ti, unsigned long esid) +{ + unsigned char i; + + for (i = 0; i < ti->slb_preload_nr; i++) { + unsigned char idx; + + idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR; + if (esid == ti->slb_preload_esid[idx]) + return true; + } + return false; } -void slb_vmalloc_update(void) +static bool preload_add(struct thread_info *ti, unsigned long ea) { - unsigned long vflags; + unsigned char idx; + unsigned long esid; + + if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) { + /* EAs are stored >> 28 so 256MB segments don't need clearing */ + if (ea & ESID_MASK_1T) + ea &= ESID_MASK_1T; + } + + esid = ea >> SID_SHIFT; - vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp; - slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX); - slb_flush_and_rebolt(); + if (preload_hit(ti, esid)) + return false; + + idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR; + ti->slb_preload_esid[idx] = esid; + if (ti->slb_preload_nr == SLB_PRELOAD_NR) + ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR; + else + ti->slb_preload_nr++; + + return true; } -/* Helper function to compare esids. There are four cases to handle. - * 1. The system is not 1T segment size capable. Use the GET_ESID compare. - * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare. - * 3. The system is 1T capable, only one of the two addresses is > 1T. This is not a match. - * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare. - */ -static inline int esids_match(unsigned long addr1, unsigned long addr2) +static void preload_age(struct thread_info *ti) { - int esid_1t_count; + if (!ti->slb_preload_nr) + return; + ti->slb_preload_nr--; + ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR; +} - /* System is not 1T segment size capable. */ - if (!mmu_has_feature(MMU_FTR_1T_SEGMENT)) - return (GET_ESID(addr1) == GET_ESID(addr2)); +void slb_setup_new_exec(void) +{ + struct thread_info *ti = current_thread_info(); + struct mm_struct *mm = current->mm; + unsigned long exec = 0x10000000; - esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) + - ((addr2 >> SID_SHIFT_1T) != 0)); + WARN_ON(irqs_disabled()); - /* both addresses are < 1T */ - if (esid_1t_count == 0) - return (GET_ESID(addr1) == GET_ESID(addr2)); + /* + * preload cache can only be used to determine whether a SLB + * entry exists if it does not start to overflow. + */ + if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR) + return; - /* One address < 1T, the other > 1T. Not a match */ - if (esid_1t_count == 1) - return 0; + hard_irq_disable(); - /* Both addresses are > 1T. 
*/ - return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2)); + /* + * We have no good place to clear the slb preload cache on exec, + * flush_thread is about the earliest arch hook but that happens + * after we switch to the mm and have aleady preloaded the SLBEs. + * + * For the most part that's probably okay to use entries from the + * previous exec, they will age out if unused. It may turn out to + * be an advantage to clear the cache before switching to it, + * however. + */ + + /* + * preload some userspace segments into the SLB. + * Almost all 32 and 64bit PowerPC executables are linked at + * 0x10000000 so it makes sense to preload this segment. + */ + if (!is_kernel_addr(exec)) { + if (preload_add(ti, exec)) + slb_allocate_user(mm, exec); + } + + /* Libraries and mmaps. */ + if (!is_kernel_addr(mm->mmap_base)) { + if (preload_add(ti, mm->mmap_base)) + slb_allocate_user(mm, mm->mmap_base); + } + + /* see switch_slb */ + asm volatile("isync" : : : "memory"); + + local_irq_enable(); } +void preload_new_slb_context(unsigned long start, unsigned long sp) +{ + struct thread_info *ti = current_thread_info(); + struct mm_struct *mm = current->mm; + unsigned long heap = mm->start_brk; + + WARN_ON(irqs_disabled()); + + /* see above */ + if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR) + return; + + hard_irq_disable(); + + /* Userspace entry address. */ + if (!is_kernel_addr(start)) { + if (preload_add(ti, start)) + slb_allocate_user(mm, start); + } + + /* Top of stack, grows down. */ + if (!is_kernel_addr(sp)) { + if (preload_add(ti, sp)) + slb_allocate_user(mm, sp); + } + + /* Bottom of heap, grows up. */ + if (heap && !is_kernel_addr(heap)) { + if (preload_add(ti, heap)) + slb_allocate_user(mm, heap); + } + + /* see switch_slb */ + asm volatile("isync" : : : "memory"); + + local_irq_enable(); +} + + /* Flush all user entries from the segment table of the current processor. */ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) { - unsigned long offset; - unsigned long slbie_data = 0; - unsigned long pc = KSTK_EIP(tsk); - unsigned long stack = KSTK_ESP(tsk); - unsigned long exec_base; + struct thread_info *ti = task_thread_info(tsk); + unsigned char i; /* * We need interrupts hard-disabled here, not just soft-disabled, @@ -238,91 +421,107 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) * which would update the slb_cache/slb_cache_ptr fields in the PACA. */ hard_irq_disable(); - offset = get_paca()->slb_cache_ptr; - if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) && - offset <= SLB_CACHE_ENTRIES) { - int i; - asm volatile("isync" : : : "memory"); - for (i = 0; i < offset; i++) { - slbie_data = (unsigned long)get_paca()->slb_cache[i] - << SID_SHIFT; /* EA */ - slbie_data |= user_segment_size(slbie_data) - << SLBIE_SSIZE_SHIFT; - slbie_data |= SLBIE_C; /* C set for user addresses */ - asm volatile("slbie %0" : : "r" (slbie_data)); - } - asm volatile("isync" : : : "memory"); + asm volatile("isync" : : : "memory"); + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + /* + * SLBIA IH=3 invalidates all Class=1 SLBEs and their + * associated lookaside structures, which matches what + * switch_slb wants. So ARCH_300 does not use the slb + * cache. 
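The SLB preload cache introduced above is a small per-thread FIFO: slb_preload_esid[] holds up to SLB_PRELOAD_NR recently used ESIDs, slb_preload_tail indexes the oldest entry, and preload_hit()/preload_add()/preload_age() walk and advance the indices modulo the array size. A standalone model of that bookkeeping, with invented names and without the kernel's 1T-segment masking, may make the index arithmetic easier to follow:

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 16			/* stand-in for SLB_PRELOAD_NR */

struct preload_model {
	unsigned char tail;		/* index of the oldest entry */
	unsigned char nr;		/* number of valid entries */
	unsigned long esid[NR_SLOTS];
};

static bool model_hit(struct preload_model *p, unsigned long esid)
{
	for (unsigned char i = 0; i < p->nr; i++)
		if (p->esid[(p->tail + i) % NR_SLOTS] == esid)
			return true;
	return false;
}

static bool model_add(struct preload_model *p, unsigned long esid)
{
	if (model_hit(p, esid))
		return false;				/* already cached */
	p->esid[(p->tail + p->nr) % NR_SLOTS] = esid;
	if (p->nr == NR_SLOTS)
		p->tail = (p->tail + 1) % NR_SLOTS;	/* overwrote the oldest */
	else
		p->nr++;
	return true;
}

static void model_age(struct preload_model *p)
{
	if (!p->nr)
		return;
	p->nr--;
	p->tail = (p->tail + 1) % NR_SLOTS;		/* drop the oldest entry */
}

int main(void)
{
	struct preload_model p = { 0 };

	for (unsigned long e = 1; e <= 20; e++)
		model_add(&p, e);
	model_age(&p);
	printf("entries=%u oldest=%lu\n", p.nr, p.esid[p.tail]);
	return 0;
}

In the kernel the stored values are ESIDs (the effective address shifted right by SID_SHIFT), and switch_slb() ages one entry out roughly every 256 context switches when the thread.load_slb counter wraps, so stale preloads eventually disappear on their own.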
+ */ + asm volatile(PPC_SLBIA(3)); } else { - __slb_flush_and_rebolt(); - } + unsigned long offset = get_paca()->slb_cache_ptr; + + if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) && + offset <= SLB_CACHE_ENTRIES) { + unsigned long slbie_data = 0; + + for (i = 0; i < offset; i++) { + unsigned long ea; + + ea = (unsigned long) + get_paca()->slb_cache[i] << SID_SHIFT; + /* + * Could assert_slb_exists here, but hypervisor + * or machine check could have come in and + * removed the entry at this point. + */ + + slbie_data = ea; + slbie_data |= user_segment_size(slbie_data) + << SLBIE_SSIZE_SHIFT; + slbie_data |= SLBIE_C; /* user slbs have C=1 */ + asm volatile("slbie %0" : : "r" (slbie_data)); + } + + /* Workaround POWER5 < DD2.1 issue */ + if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1) + asm volatile("slbie %0" : : "r" (slbie_data)); + + } else { + struct slb_shadow *p = get_slb_shadow(); + unsigned long ksp_esid_data = + be64_to_cpu(p->save_area[KSTACK_INDEX].esid); + unsigned long ksp_vsid_data = + be64_to_cpu(p->save_area[KSTACK_INDEX].vsid); + + asm volatile(PPC_SLBIA(1) "\n" + "slbmte %0,%1\n" + "isync" + :: "r"(ksp_vsid_data), + "r"(ksp_esid_data)); + + get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; + } - /* Workaround POWER5 < DD2.1 issue */ - if (offset == 1 || offset > SLB_CACHE_ENTRIES) - asm volatile("slbie %0" : : "r" (slbie_data)); + get_paca()->slb_cache_ptr = 0; + } + get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; - get_paca()->slb_cache_ptr = 0; copy_mm_to_paca(mm); /* - * preload some userspace segments into the SLB. - * Almost all 32 and 64bit PowerPC executables are linked at - * 0x10000000 so it makes sense to preload this segment. + * We gradually age out SLBs after a number of context switches to + * reduce reload overhead of unused entries (like we do with FP/VEC + * reload). Each time we wrap 256 switches, take an entry out of the + * SLB preload cache. */ - exec_base = 0x10000000; - - if (is_kernel_addr(pc) || is_kernel_addr(stack) || - is_kernel_addr(exec_base)) - return; + tsk->thread.load_slb++; + if (!tsk->thread.load_slb) { + unsigned long pc = KSTK_EIP(tsk); - slb_allocate(pc); + preload_age(ti); + preload_add(ti, pc); + } - if (!esids_match(pc, stack)) - slb_allocate(stack); + for (i = 0; i < ti->slb_preload_nr; i++) { + unsigned char idx; + unsigned long ea; - if (!esids_match(pc, exec_base) && - !esids_match(stack, exec_base)) - slb_allocate(exec_base); -} + idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR; + ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT; -static inline void patch_slb_encoding(unsigned int *insn_addr, - unsigned int immed) -{ + slb_allocate_user(mm, ea); + } /* - * This function patches either an li or a cmpldi instruction with - * a new immediate value. This relies on the fact that both li - * (which is actually addi) and cmpldi both take a 16-bit immediate - * value, and it is situated in the same location in the instruction, - * ie. bits 16-31 (Big endian bit order) or the lower 16 bits. - * The signedness of the immediate operand differs between the two - * instructions however this code is only ever patching a small value, - * much less than 1 << 15, so we can get away with it. - * To patch the value we read the existing instruction, clear the - * immediate value, and or in our new value, then write the instruction - * back. + * Synchronize slbmte preloads with possible subsequent user memory + * address accesses by the kernel (user mode won't happen until + * rfid, which is safe). 
*/ - unsigned int insn = (*insn_addr & 0xffff0000) | immed; - patch_instruction(insn_addr, insn); + asm volatile("isync" : : : "memory"); } -extern u32 slb_miss_kernel_load_linear[]; -extern u32 slb_miss_kernel_load_io[]; -extern u32 slb_compare_rr_to_size[]; -extern u32 slb_miss_kernel_load_vmemmap[]; - void slb_set_size(u16 size) { - if (mmu_slb_size == size) - return; - mmu_slb_size = size; - patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size); } void slb_initialize(void) { unsigned long linear_llp, vmalloc_llp, io_llp; - unsigned long lflags, vflags; + unsigned long lflags; static int slb_encoding_inited; #ifdef CONFIG_SPARSEMEM_VMEMMAP unsigned long vmemmap_llp; @@ -338,34 +537,24 @@ void slb_initialize(void) #endif if (!slb_encoding_inited) { slb_encoding_inited = 1; - patch_slb_encoding(slb_miss_kernel_load_linear, - SLB_VSID_KERNEL | linear_llp); - patch_slb_encoding(slb_miss_kernel_load_io, - SLB_VSID_KERNEL | io_llp); - patch_slb_encoding(slb_compare_rr_to_size, - mmu_slb_size); - pr_devel("SLB: linear LLP = %04lx\n", linear_llp); pr_devel("SLB: io LLP = %04lx\n", io_llp); - #ifdef CONFIG_SPARSEMEM_VMEMMAP - patch_slb_encoding(slb_miss_kernel_load_vmemmap, - SLB_VSID_KERNEL | vmemmap_llp); pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp); #endif } - get_paca()->stab_rr = SLB_NUM_BOLTED; + get_paca()->stab_rr = SLB_NUM_BOLTED - 1; + get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1; + get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap; lflags = SLB_VSID_KERNEL | linear_llp; - vflags = SLB_VSID_KERNEL | vmalloc_llp; /* Invalidate the entire SLB (even entry 0) & all the ERATS */ asm volatile("isync":::"memory"); asm volatile("slbmte %0,%0"::"r" (0) : "memory"); asm volatile("isync; slbia; isync":::"memory"); create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX); - create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX); /* For the boot cpu, we're running on the stack in init_thread_union, * which is in the first segment of the linear mapping, and also @@ -381,122 +570,259 @@ void slb_initialize(void) asm volatile("isync":::"memory"); } -static void insert_slb_entry(unsigned long vsid, unsigned long ea, - int bpsize, int ssize) +static void slb_cache_update(unsigned long esid_data) { - unsigned long flags, vsid_data, esid_data; - enum slb_index index; int slb_cache_index; - /* - * We are irq disabled, hence should be safe to access PACA. - */ - VM_WARN_ON(!irqs_disabled()); - - /* - * We can't take a PMU exception in the following code, so hard - * disable interrupts. - */ - hard_irq_disable(); - - index = get_paca()->stab_rr; - - /* - * simple round-robin replacement of slb starting at SLB_NUM_BOLTED. - */ - if (index < (mmu_slb_size - 1)) - index++; - else - index = SLB_NUM_BOLTED; - - get_paca()->stab_rr = index; - - flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp; - vsid_data = (vsid << slb_vsid_shift(ssize)) | flags | - ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT); - esid_data = mk_esid_data(ea, ssize, index); - - /* - * No need for an isync before or after this slbmte. The exception - * we enter with and the rfid we exit with are context synchronizing. - * Also we only handle user segments here. 
- */ - asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data) - : "memory"); + if (cpu_has_feature(CPU_FTR_ARCH_300)) + return; /* ISAv3.0B and later does not use slb_cache */ /* * Now update slb cache entries */ - slb_cache_index = get_paca()->slb_cache_ptr; + slb_cache_index = local_paca->slb_cache_ptr; if (slb_cache_index < SLB_CACHE_ENTRIES) { /* * We have space in slb cache for optimized switch_slb(). * Top 36 bits from esid_data as per ISA */ - get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28; - get_paca()->slb_cache_ptr++; + local_paca->slb_cache[slb_cache_index++] = esid_data >> 28; + local_paca->slb_cache_ptr++; } else { /* * Our cache is full and the current cache content strictly * doesn't indicate the active SLB conents. Bump the ptr * so that switch_slb() will ignore the cache. */ - get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1; + local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1; } } -static void handle_multi_context_slb_miss(int context_id, unsigned long ea) +static enum slb_index alloc_slb_index(bool kernel) { - struct mm_struct *mm = current->mm; - unsigned long vsid; - int bpsize; + enum slb_index index; /* - * We are always above 1TB, hence use high user segment size. + * The allocation bitmaps can become out of synch with the SLB + * when the _switch code does slbie when bolting a new stack + * segment and it must not be anywhere else in the SLB. This leaves + * a kernel allocated entry that is unused in the SLB. With very + * large systems or small segment sizes, the bitmaps could slowly + * fill with these entries. They will eventually be cleared out + * by the round robin allocator in that case, so it's probably not + * worth accounting for. */ - vsid = get_vsid(context_id, ea, mmu_highuser_ssize); - bpsize = get_slice_psize(mm, ea); - insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize); + + /* + * SLBs beyond 32 entries are allocated with stab_rr only + * POWER7/8/9 have 32 SLB entries, this could be expanded if a + * future CPU has more. + */ + if (local_paca->slb_used_bitmap != U32_MAX) { + index = ffz(local_paca->slb_used_bitmap); + local_paca->slb_used_bitmap |= 1U << index; + if (kernel) + local_paca->slb_kern_bitmap |= 1U << index; + } else { + /* round-robin replacement of slb starting at SLB_NUM_BOLTED. */ + index = local_paca->stab_rr; + if (index < (mmu_slb_size - 1)) + index++; + else + index = SLB_NUM_BOLTED; + local_paca->stab_rr = index; + if (index < 32) { + if (kernel) + local_paca->slb_kern_bitmap |= 1U << index; + else + local_paca->slb_kern_bitmap &= ~(1U << index); + } + } + BUG_ON(index < SLB_NUM_BOLTED); + + return index; } -void slb_miss_large_addr(struct pt_regs *regs) +static long slb_insert_entry(unsigned long ea, unsigned long context, + unsigned long flags, int ssize, bool kernel) { - enum ctx_state prev_state = exception_enter(); - unsigned long ea = regs->dar; - int context; + unsigned long vsid; + unsigned long vsid_data, esid_data; + enum slb_index index; - if (REGION_ID(ea) != USER_REGION_ID) - goto slb_bad_addr; + vsid = get_vsid(context, ea, ssize); + if (!vsid) + return -EFAULT; /* - * Are we beyound what the page table layout supports ? + * There must not be a kernel SLB fault in alloc_slb_index or before + * slbmte here or the allocation bitmaps could get out of whack with + * the SLB. + * + * User SLB faults or preloads take this path which might get inlined + * into the caller, so add compiler barriers here to ensure unsafe + * memory accesses do not come between. 
*/ - if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE) - goto slb_bad_addr; + barrier(); - /* Lower address should have been handled by asm code */ - if (ea < (1UL << MAX_EA_BITS_PER_CONTEXT)) - goto slb_bad_addr; + index = alloc_slb_index(kernel); + + vsid_data = __mk_vsid_data(vsid, ssize, flags); + esid_data = mk_esid_data(ea, ssize, index); + + /* + * No need for an isync before or after this slbmte. The exception + * we enter with and the rfid we exit with are context synchronizing. + * User preloads should add isync afterwards in case the kernel + * accesses user memory before it returns to userspace with rfid. + */ + assert_slb_notexists(ea); + asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)); + + barrier(); + + if (!kernel) + slb_cache_update(esid_data); + + return 0; +} + +static long slb_allocate_kernel(unsigned long ea, unsigned long id) +{ + unsigned long context; + unsigned long flags; + int ssize; + + if (id == KERNEL_REGION_ID) { + + /* We only support upto MAX_PHYSMEM_BITS */ + if ((ea & ~REGION_MASK) > (1UL << MAX_PHYSMEM_BITS)) + return -EFAULT; + + flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp; + +#ifdef CONFIG_SPARSEMEM_VMEMMAP + } else if (id == VMEMMAP_REGION_ID) { + + if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT)) + return -EFAULT; + + flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp; +#endif + } else if (id == VMALLOC_REGION_ID) { + + if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT)) + return -EFAULT; + + if (ea < H_VMALLOC_END) + flags = get_paca()->vmalloc_sllp; + else + flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp; + } else { + return -EFAULT; + } + + ssize = MMU_SEGSIZE_1T; + if (!mmu_has_feature(MMU_FTR_1T_SEGMENT)) + ssize = MMU_SEGSIZE_256M; + + context = get_kernel_context(ea); + return slb_insert_entry(ea, context, flags, ssize, true); +} + +static long slb_allocate_user(struct mm_struct *mm, unsigned long ea) +{ + unsigned long context; + unsigned long flags; + int bpsize; + int ssize; /* * consider this as bad access if we take a SLB miss * on an address above addr limit. */ - if (ea >= current->mm->context.slb_addr_limit) - goto slb_bad_addr; + if (ea >= mm->context.slb_addr_limit) + return -EFAULT; - context = get_ea_context(¤t->mm->context, ea); + context = get_user_context(&mm->context, ea); if (!context) - goto slb_bad_addr; + return -EFAULT; + + if (unlikely(ea >= H_PGTABLE_RANGE)) { + WARN_ON(1); + return -EFAULT; + } - handle_multi_context_slb_miss(context, ea); - exception_exit(prev_state); - return; + ssize = user_segment_size(ea); -slb_bad_addr: - if (user_mode(regs)) - _exception(SIGSEGV, regs, SEGV_BNDERR, ea); - else - bad_page_fault(regs, ea, SIGSEGV); - exception_exit(prev_state); + bpsize = get_slice_psize(mm, ea); + flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp; + + return slb_insert_entry(ea, context, flags, ssize, false); +} + +long do_slb_fault(struct pt_regs *regs, unsigned long ea) +{ + unsigned long id = REGION_ID(ea); + + /* IRQs are not reconciled here, so can't check irqs_disabled */ + VM_WARN_ON(mfmsr() & MSR_EE); + + if (unlikely(!(regs->msr & MSR_RI))) + return -EINVAL; + + /* + * SLB kernel faults must be very careful not to touch anything + * that is not bolted. E.g., PACA and global variables are okay, + * mm->context stuff is not. + * + * SLB user faults can access all of kernel memory, but must be + * careful not to touch things like IRQ state because it is not + * "reconciled" here. 
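slb_allocate_kernel() and do_slb_fault() above dispatch purely on the top nibble of the effective address; the deleted slb_low.S further down does the same with srdi r9,r3,60 and cmpldi against 0xc/0xf. A hedged stand-alone rendition of that dispatch, using the region numbers those compares rely on:

#include <stdint.h>
#include <stdio.h>

/* hash-MMU region numbers, as used by the cmpldi tests in the old asm */
#define USER_REGION_ID    0x0
#define KERNEL_REGION_ID  0xc   /* linear mapping */
#define VMALLOC_REGION_ID 0xd   /* vmalloc, and ioremap above H_VMALLOC_END */
#define VMEMMAP_REGION_ID 0xf

static unsigned long region_id(uint64_t ea)
{
        return ea >> 60;
}

static const char *classify(uint64_t ea)
{
        /* mirrors the shape of do_slb_fault(): ids >= KERNEL_REGION_ID take the
         * kernel path (no non-bolted accesses allowed), everything else is user */
        switch (region_id(ea)) {
        case KERNEL_REGION_ID:  return "kernel linear";
        case VMALLOC_REGION_ID: return "vmalloc/ioremap";
        case VMEMMAP_REGION_ID: return "vmemmap";
        case USER_REGION_ID:    return "user";
        default:                return "bad address";
        }
}

int main(void)
{
        printf("%s\n", classify(0xc000000001234000ULL));
        printf("%s\n", classify(0x00007fffdeadb000ULL));
        return 0;
}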
The difficulty is that we must use + * fast_exception_return to return from kernel SLB faults without + * looking at possible non-bolted memory. We could test user vs + * kernel faults in the interrupt handler asm and do a full fault, + * reconcile, ret_from_except for user faults which would make them + * first class kernel code. But for performance it's probably nicer + * if they go via fast_exception_return too. + */ + if (id >= KERNEL_REGION_ID) { + long err; +#ifdef CONFIG_DEBUG_VM + /* Catch recursive kernel SLB faults. */ + BUG_ON(local_paca->in_kernel_slb_handler); + local_paca->in_kernel_slb_handler = 1; +#endif + err = slb_allocate_kernel(ea, id); +#ifdef CONFIG_DEBUG_VM + local_paca->in_kernel_slb_handler = 0; +#endif + return err; + } else { + struct mm_struct *mm = current->mm; + long err; + + if (unlikely(!mm)) + return -EFAULT; + + err = slb_allocate_user(mm, ea); + if (!err) + preload_add(current_thread_info(), ea); + + return err; + } +} + +void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err) +{ + if (err == -EFAULT) { + if (user_mode(regs)) + _exception(SIGSEGV, regs, SEGV_BNDERR, ea); + else + bad_page_fault(regs, ea, SIGSEGV); + } else if (err == -EINVAL) { + unrecoverable_exception(regs); + } else { + BUG(); + } } diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S deleted file mode 100644 index 4ac5057ad439..000000000000 --- a/arch/powerpc/mm/slb_low.S +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Low-level SLB routines - * - * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM - * - * Based on earlier C version: - * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com - * Copyright (c) 2001 Dave Engebretsen - * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <asm/processor.h> -#include <asm/ppc_asm.h> -#include <asm/asm-offsets.h> -#include <asm/cputable.h> -#include <asm/page.h> -#include <asm/mmu.h> -#include <asm/pgtable.h> -#include <asm/firmware.h> -#include <asm/feature-fixups.h> - -/* - * This macro generates asm code to compute the VSID scramble - * function. Used in slb_allocate() and do_stab_bolted. The function - * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS - * - * rt = register containing the proto-VSID and into which the - * VSID will be stored - * rx = scratch register (clobbered) - * rf = flags - * - * - rt and rx must be different registers - * - The answer will end up in the low VSID_BITS bits of rt. The higher - * bits may contain other garbage, so you may need to mask the - * result. 
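The ASM_VSID_SCRAMBLE macro whose comment ends here (its body follows below) computes (protovsid * VSID_MULTIPLIER) % VSID_MODULUS, where VSID_MODULUS is 2^VSID_BITS - 1, by folding the high bits back into the low bits instead of dividing. A stand-alone C version of the same reduction; the width and multiplier are illustrative stand-ins for the per-segment-size constants:

#include <stdint.h>
#include <assert.h>

#define VSID_BITS       37              /* illustrative width */
#define VSID_MULTIPLIER 12538073ULL     /* illustrative multiplier */
#define VSID_MODULUS    ((1ULL << VSID_BITS) - 1)

/* x mod (2^n - 1): add the high part to the low part, then fold the possible
 * carry back in -- exactly what the srdi/clrldi/add sequence in the macro does */
static uint64_t vsid_scramble(uint64_t protovsid)
{
        uint64_t x = protovsid * VSID_MULTIPLIER;
        uint64_t t = (x >> VSID_BITS) + (x & VSID_MODULUS);

        t += (t + 1) >> VSID_BITS;      /* handles t >= VSID_MODULUS */
        return t & VSID_MODULUS;
}

int main(void)
{
        for (uint64_t p = 1; p < 1000000; p += 7)
                assert(vsid_scramble(p) == (p * VSID_MULTIPLIER) % VSID_MODULUS);
        return 0;
}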
- */ -#define ASM_VSID_SCRAMBLE(rt, rx, rf, size) \ - lis rx,VSID_MULTIPLIER_##size@h; \ - ori rx,rx,VSID_MULTIPLIER_##size@l; \ - mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \ -/* \ - * powermac get slb fault before feature fixup, so make 65 bit part \ - * the default part of feature fixup \ - */ \ -BEGIN_MMU_FTR_SECTION \ - srdi rx,rt,VSID_BITS_65_##size; \ - clrldi rt,rt,(64-VSID_BITS_65_##size); \ - add rt,rt,rx; \ - addi rx,rt,1; \ - srdi rx,rx,VSID_BITS_65_##size; \ - add rt,rt,rx; \ - rldimi rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_65_##size)); \ -MMU_FTR_SECTION_ELSE \ - srdi rx,rt,VSID_BITS_##size; \ - clrldi rt,rt,(64-VSID_BITS_##size); \ - add rt,rt,rx; /* add high and low bits */ \ - addi rx,rt,1; \ - srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \ - add rt,rt,rx; \ - rldimi rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_##size)); \ -ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA) - - -/* void slb_allocate(unsigned long ea); - * - * Create an SLB entry for the given EA (user or kernel). - * r3 = faulting address, r13 = PACA - * r9, r10, r11 are clobbered by this function - * r3 is preserved. - * No other registers are examined or changed. - */ -_GLOBAL(slb_allocate) - /* - * Check if the address falls within the range of the first context, or - * if we may need to handle multi context. For the first context we - * allocate the slb entry via the fast path below. For large address we - * branch out to C-code and see if additional contexts have been - * allocated. - * The test here is: - * (ea & ~REGION_MASK) >= (1ull << MAX_EA_BITS_PER_CONTEXT) - */ - rldicr. r9,r3,4,(63 - MAX_EA_BITS_PER_CONTEXT - 4) - bne- 8f - - srdi r9,r3,60 /* get region */ - srdi r10,r3,SID_SHIFT /* get esid */ - cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ - - /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ - blt cr7,0f /* user or kernel? */ - - /* Check if hitting the linear mapping or some other kernel space - */ - bne cr7,1f - - /* Linear mapping encoding bits, the "li" instruction below will - * be patched by the kernel at boot - */ -.globl slb_miss_kernel_load_linear -slb_miss_kernel_load_linear: - li r11,0 - /* - * context = (ea >> 60) - (0xc - 1) - * r9 = region id. - */ - subi r9,r9,KERNEL_REGION_CONTEXT_OFFSET - -BEGIN_FTR_SECTION - b .Lslb_finish_load -END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) - b .Lslb_finish_load_1T - -1: -#ifdef CONFIG_SPARSEMEM_VMEMMAP - cmpldi cr0,r9,0xf - bne 1f -/* Check virtual memmap region. To be patched at kernel boot */ -.globl slb_miss_kernel_load_vmemmap -slb_miss_kernel_load_vmemmap: - li r11,0 - b 6f -1: -#endif /* CONFIG_SPARSEMEM_VMEMMAP */ - - /* - * r10 contains the ESID, which is the original faulting EA shifted - * right by 28 bits. We need to compare that with (H_VMALLOC_END >> 28) - * which is 0xd00038000. That can't be used as an immediate, even if we - * ignored the 0xd, so we have to load it into a register, and we only - * have one register free. So we must load all of (H_VMALLOC_END >> 28) - * into a register and compare ESID against that. - */ - lis r11,(H_VMALLOC_END >> 32)@h // r11 = 0xffffffffd0000000 - ori r11,r11,(H_VMALLOC_END >> 32)@l // r11 = 0xffffffffd0003800 - // Rotate left 4, then mask with 0xffffffff0 - rldic r11,r11,4,28 // r11 = 0xd00038000 - cmpld r10,r11 // if r10 >= r11 - bge 5f // goto io_mapping - - /* - * vmalloc mapping gets the encoding from the PACA as the mapping - * can be demoted from 64K -> 4K dynamically on some machines. 
- */ - lhz r11,PACAVMALLOCSLLP(r13) - b 6f -5: - /* IO mapping */ -.globl slb_miss_kernel_load_io -slb_miss_kernel_load_io: - li r11,0 -6: - /* - * context = (ea >> 60) - (0xc - 1) - * r9 = region id. - */ - subi r9,r9,KERNEL_REGION_CONTEXT_OFFSET - -BEGIN_FTR_SECTION - b .Lslb_finish_load -END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) - b .Lslb_finish_load_1T - -0: /* - * For userspace addresses, make sure this is region 0. - */ - cmpdi r9, 0 - bne- 8f - /* - * user space make sure we are within the allowed limit - */ - ld r11,PACA_SLB_ADDR_LIMIT(r13) - cmpld r3,r11 - bge- 8f - - /* when using slices, we extract the psize off the slice bitmaps - * and then we need to get the sllp encoding off the mmu_psize_defs - * array. - * - * XXX This is a bit inefficient especially for the normal case, - * so we should try to implement a fast path for the standard page - * size using the old sllp value so we avoid the array. We cannot - * really do dynamic patching unfortunately as processes might flip - * between 4k and 64k standard page size - */ -#ifdef CONFIG_PPC_MM_SLICES - /* r10 have esid */ - cmpldi r10,16 - /* below SLICE_LOW_TOP */ - blt 5f - /* - * Handle hpsizes, - * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index - */ - srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */ - addi r9,r11,PACAHIGHSLICEPSIZE - lbzx r9,r13,r9 /* r9 is hpsizes[r11] */ - /* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */ - rldicl r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63 - b 6f - -5: - /* - * Handle lpsizes - * r9 is get_paca()->context.low_slices_psize[index], r11 is mask_index - */ - srdi r11,r10,1 /* index */ - addi r9,r11,PACALOWSLICESPSIZE - lbzx r9,r13,r9 /* r9 is lpsizes[r11] */ - rldicl r11,r10,0,63 /* r11 = r10 & 0x1 */ -6: - sldi r11,r11,2 /* index * 4 */ - /* Extract the psize and multiply to get an array offset */ - srd r9,r9,r11 - andi. r9,r9,0xf - mulli r9,r9,MMUPSIZEDEFSIZE - - /* Now get to the array and obtain the sllp - */ - ld r11,PACATOC(r13) - ld r11,mmu_psize_defs@got(r11) - add r11,r11,r9 - ld r11,MMUPSIZESLLP(r11) - ori r11,r11,SLB_VSID_USER -#else - /* paca context sllp already contains the SLB_VSID_USER bits */ - lhz r11,PACACONTEXTSLLP(r13) -#endif /* CONFIG_PPC_MM_SLICES */ - - ld r9,PACACONTEXTID(r13) -BEGIN_FTR_SECTION - cmpldi r10,0x1000 - bge .Lslb_finish_load_1T -END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) - b .Lslb_finish_load - -8: /* invalid EA - return an error indication */ - crset 4*cr0+eq /* indicate failure */ - blr - -/* - * Finish loading of an SLB entry and return - * - * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET - */ -.Lslb_finish_load: - rldimi r10,r9,ESID_BITS,0 - ASM_VSID_SCRAMBLE(r10,r9,r11,256M) - /* r3 = EA, r11 = VSID data */ - /* - * Find a slot, round robin. Previously we tried to find a - * free slot first but that took too long. Unfortunately we - * dont have any LRU information to help us choose a slot. - */ - - mr r9,r3 - - /* slb_finish_load_1T continues here. r9=EA with non-ESID bits clear */ -7: ld r10,PACASTABRR(r13) - addi r10,r10,1 - /* This gets soft patched on boot. */ -.globl slb_compare_rr_to_size -slb_compare_rr_to_size: - cmpldi r10,0 - - blt+ 4f - li r10,SLB_NUM_BOLTED - -4: - std r10,PACASTABRR(r13) - -3: - rldimi r9,r10,0,36 /* r9 = EA[0:35] | entry */ - oris r10,r9,SLB_ESID_V@h /* r10 = r9 | SLB_ESID_V */ - - /* r9 = ESID data, r11 = VSID data */ - - /* - * No need for an isync before or after this slbmte. 
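The user-space path of the deleted slb_allocate above recovers the page-size index for a 256MB slice from a packed array: one 4-bit psize per slice, two per byte (the ESID is halved to pick the byte and its low bit picks the nibble), and the result then indexes mmu_psize_defs[] for the SLLP bits. A hedged C rendition of that decode, with the packing order taken from the srdi/rldicl/srd sequence:

#include <stdint.h>
#include <stdio.h>

#define SLICE_LOW_SHIFT 28              /* 256MB low slices, as in the asm */

/* two 4-bit page-size indexes packed per byte, low nibble = even slice */
static unsigned int low_slice_psize(const uint8_t *lpsizes, uint64_t ea)
{
        unsigned int index = ea >> SLICE_LOW_SHIFT;     /* ESID for 256MB segments */
        uint8_t byte = lpsizes[index >> 1];

        return (byte >> ((index & 1) * 4)) & 0xf;
}

int main(void)
{
        /* slices 0..3 carrying psizes 3,3,5,3, packed purely as an illustration */
        uint8_t lpsizes[2] = { 0x33, 0x35 };

        printf("psize of slice 2 = %u\n",
               low_slice_psize(lpsizes, 2ULL << SLICE_LOW_SHIFT));      /* -> 5 */
        return 0;
}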
The exception - * we enter with and the rfid we exit with are context synchronizing. - */ - slbmte r11,r10 - - /* we're done for kernel addresses */ - crclr 4*cr0+eq /* set result to "success" */ - bgelr cr7 - - /* Update the slb cache */ - lhz r9,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */ - cmpldi r9,SLB_CACHE_ENTRIES - bge 1f - - /* still room in the slb cache */ - sldi r11,r9,2 /* r11 = offset * sizeof(u32) */ - srdi r10,r10,28 /* get the 36 bits of the ESID */ - add r11,r11,r13 /* r11 = (u32 *)paca + offset */ - stw r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */ - addi r9,r9,1 /* offset++ */ - b 2f -1: /* offset >= SLB_CACHE_ENTRIES */ - li r9,SLB_CACHE_ENTRIES+1 -2: - sth r9,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */ - crclr 4*cr0+eq /* set result to "success" */ - blr - -/* - * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. - * - * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 - */ -.Lslb_finish_load_1T: - srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ - rldimi r10,r9,ESID_BITS_1T,0 - ASM_VSID_SCRAMBLE(r10,r9,r11,1T) - - li r10,MMU_SEGSIZE_1T - rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */ - - /* r3 = EA, r11 = VSID data */ - clrrdi r9,r3,SID_SHIFT_1T /* clear out non-ESID bits */ - b 7b - - -_ASM_NOKPROBE_SYMBOL(slb_allocate) -_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear) -_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io) -_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size) -#ifdef CONFIG_SPARSEMEM_VMEMMAP -_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_vmemmap) -#endif diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 205fe557ca10..06898c13901d 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -31,6 +31,7 @@ #include <linux/spinlock.h> #include <linux/export.h> #include <linux/hugetlb.h> +#include <linux/sched/mm.h> #include <asm/mman.h> #include <asm/mmu.h> #include <asm/copro.h> @@ -61,6 +62,13 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) { #endif +static inline bool slice_addr_is_low(unsigned long addr) +{ + u64 tmp = (u64)addr; + + return tmp < SLICE_LOW_TOP; +} + static void slice_range_to_mask(unsigned long start, unsigned long len, struct slice_mask *ret) { @@ -70,7 +78,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, if (SLICE_NUM_HIGH) bitmap_zero(ret->high_slices, SLICE_NUM_HIGH); - if (start < SLICE_LOW_TOP) { + if (slice_addr_is_low(start)) { unsigned long mend = min(end, (unsigned long)(SLICE_LOW_TOP - 1)); @@ -78,7 +86,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len, - (1u << GET_LOW_SLICE_INDEX(start)); } - if ((start + len) > SLICE_LOW_TOP) { + if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { unsigned long start_index = GET_HIGH_SLICE_INDEX(start); unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; @@ -133,7 +141,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret, if (!slice_low_has_vma(mm, i)) ret->low_slices |= 1u << i; - if (high_limit <= SLICE_LOW_TOP) + if (slice_addr_is_low(high_limit - 1)) return; for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) @@ -182,7 +190,7 @@ static bool slice_check_range_fits(struct mm_struct *mm, unsigned long end = start + len - 1; u64 low_slices = 0; - if (start < SLICE_LOW_TOP) { + if (slice_addr_is_low(start)) { unsigned long mend = min(end, (unsigned long)(SLICE_LOW_TOP - 1)); 
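slice_range_to_mask() above sets the low-slice bits for a range with a subtraction of two powers of two rather than a loop: bits start..end become (1 << (end_index + 1)) - (1 << start_index). A small worked example, assuming the usual 256MB low slices:

#include <stdint.h>
#include <stdio.h>

#define SLICE_LOW_SHIFT 28
#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)

/* bits start_idx..end_idx set: (1 << (end_idx + 1)) - (1 << start_idx) */
static uint64_t low_slices_mask(uint64_t start, uint64_t len)
{
        uint64_t end = start + len - 1;

        return (1ULL << (GET_LOW_SLICE_INDEX(end) + 1))
             - (1ULL << GET_LOW_SLICE_INDEX(start));
}

int main(void)
{
        /* 0x10000000..0x5fffffff covers slices 1..5 -> 0b111110 */
        printf("mask = %#llx\n",
               (unsigned long long)low_slices_mask(0x10000000ULL, 0x50000000ULL));
        return 0;
}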
@@ -192,7 +200,7 @@ static bool slice_check_range_fits(struct mm_struct *mm, if ((low_slices & available->low_slices) != low_slices) return false; - if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) { + if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) { unsigned long start_index = GET_HIGH_SLICE_INDEX(start); unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT)); unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index; @@ -219,7 +227,7 @@ static void slice_flush_segments(void *parm) copy_mm_to_paca(current->active_mm); local_irq_save(flags); - slb_flush_and_rebolt(); + slb_flush_and_restore_bolted(); local_irq_restore(flags); #endif } @@ -303,7 +311,7 @@ static bool slice_scan_available(unsigned long addr, int end, unsigned long *boundary_addr) { unsigned long slice; - if (addr < SLICE_LOW_TOP) { + if (slice_addr_is_low(addr)) { slice = GET_LOW_SLICE_INDEX(addr); *boundary_addr = (slice + end) << SLICE_LOW_SHIFT; return !!(available->low_slices & (1u << slice)); @@ -706,7 +714,7 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) VM_BUG_ON(radix_enabled()); - if (addr < SLICE_LOW_TOP) { + if (slice_addr_is_low(addr)) { psizes = mm->context.low_slices_psize; index = GET_LOW_SLICE_INDEX(addr); } else { @@ -757,6 +765,20 @@ void slice_init_new_context_exec(struct mm_struct *mm) bitmap_fill(mask->high_slices, SLICE_NUM_HIGH); } +#ifdef CONFIG_PPC_BOOK3S_64 +void slice_setup_new_exec(void) +{ + struct mm_struct *mm = current->mm; + + slice_dbg("slice_setup_new_exec(mm=%p)\n", mm); + + if (!is_32bit_task()) + return; + + mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW; +} +#endif + void slice_set_range_psize(struct mm_struct *mm, unsigned long start, unsigned long len, unsigned int psize) { diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index fef3e1eb3a19..6a23b9ebd2a1 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -366,6 +366,7 @@ static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric) __tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB); asm volatile("ptesync": : :"memory"); + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); } @@ -833,6 +834,15 @@ EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid); /* * Flush partition scoped translations from LPID (=LPIDR) */ +void radix__flush_tlb_lpid(unsigned int lpid) +{ + _tlbie_lpid(lpid, RIC_FLUSH_ALL); +} +EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid); + +/* + * Flush partition scoped translations from LPID (=LPIDR) + */ void radix__local_flush_tlb_lpid(unsigned int lpid) { _tlbiel_lpid(lpid, RIC_FLUSH_ALL); @@ -1007,7 +1017,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr) goto local; } _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); - goto local; } else { local: _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true); diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 15fe5f0c8665..ae5d568e267f 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -503,6 +503,9 @@ static void setup_page_sizes(void) for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { struct mmu_psize_def *def = &mmu_psize_defs[psize]; + if (!def->shift) + continue; + if (tlb1ps & (1U << (def->shift - 10))) { def->flags |= MMU_PAGE_SIZE_DIRECT; diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile index 7a7834c39f64..8d26d7416481 100644 --- a/arch/powerpc/oprofile/Makefile +++ b/arch/powerpc/oprofile/Makefile @@ -1,5 +1,4 @@ # 
SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c index ad054dd0d666..5df6290d1ccc 100644 --- a/arch/powerpc/oprofile/backtrace.c +++ b/arch/powerpc/oprofile/backtrace.c @@ -7,7 +7,7 @@ * 2 of the License, or (at your option) any later version. **/ -#include <linux/compat_time.h> +#include <linux/time.h> #include <linux/oprofile.h> #include <linux/sched.h> #include <asm/processor.h> diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile index 82986d2acd9b..ab26df5bacb9 100644 --- a/arch/powerpc/perf/Makefile +++ b/arch/powerpc/perf/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror obj-$(CONFIG_PERF_EVENTS) += callchain.o perf_regs.o diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 1fafc32b12a0..6954636b16d1 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -1392,7 +1392,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id if (ret) goto err_free_cpuhp_mem; - pr_info("%s performance monitor hardware support registered\n", + pr_debug("%s performance monitor hardware support registered\n", pmu_ptr->pmu.name); return 0; diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index 7963658dbc22..6dbae9884ec4 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c @@ -238,6 +238,7 @@ static int power7_marked_instr_event(u64 event) case 6: if (psel == 0x64) return pmc >= 3; + break; case 8: return unit == 0xd; } diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index 60254a321a91..2a9d66254ffc 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig @@ -2,7 +2,6 @@ config ACADIA bool "Acadia" depends on 40x - default n select PPC40x_SIMPLE select 405EZ help @@ -11,7 +10,6 @@ config ACADIA config EP405 bool "EP405/EP405PC" depends on 40x - default n select 405GP select PCI help @@ -20,7 +18,6 @@ config EP405 config HOTFOOT bool "Hotfoot" depends on 40x - default n select PPC40x_SIMPLE select PCI help @@ -29,7 +26,6 @@ config HOTFOOT config KILAUEA bool "Kilauea" depends on 40x - default n select 405EX select PPC40x_SIMPLE select PPC4xx_PCI_EXPRESS @@ -41,7 +37,6 @@ config KILAUEA config MAKALU bool "Makalu" depends on 40x - default n select 405EX select PCI select PPC4xx_PCI_EXPRESS @@ -62,7 +57,6 @@ config WALNUT config XILINX_VIRTEX_GENERIC_BOARD bool "Generic Xilinx Virtex board" depends on 40x - default n select XILINX_VIRTEX_II_PRO select XILINX_VIRTEX_4_FX select XILINX_INTC @@ -80,7 +74,6 @@ config XILINX_VIRTEX_GENERIC_BOARD config OBS600 bool "OpenBlockS 600" depends on 40x - default n select 405EX select PPC40x_SIMPLE help @@ -90,7 +83,6 @@ config OBS600 config PPC40x_SIMPLE bool "Simple PowerPC 40x board support" depends on 40x - default n help This option enables the simple PowerPC 40x platform support. 
@@ -156,7 +148,6 @@ config IBM405_ERR51 config APM8018X bool "APM8018X" depends on 40x - default n select PPC40x_SIMPLE help This option enables support for the AppliedMicro APM8018X evaluation diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index a6011422b861..f024efd5a4c2 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -2,7 +2,6 @@ config PPC_47x bool "Support for 47x variant" depends on 44x - default n select MPIC help This option enables support for the 47x family of processors and is @@ -11,7 +10,6 @@ config PPC_47x config BAMBOO bool "Bamboo" depends on 44x - default n select PPC44x_SIMPLE select 440EP select PCI @@ -21,7 +19,6 @@ config BAMBOO config BLUESTONE bool "Bluestone" depends on 44x - default n select PPC44x_SIMPLE select APM821xx select PCI_MSI @@ -44,7 +41,6 @@ config EBONY config SAM440EP bool "Sam440ep" depends on 44x - default n select 440EP select PCI help @@ -53,7 +49,6 @@ config SAM440EP config SEQUOIA bool "Sequoia" depends on 44x - default n select PPC44x_SIMPLE select 440EPX help @@ -62,7 +57,6 @@ config SEQUOIA config TAISHAN bool "Taishan" depends on 44x - default n select PPC44x_SIMPLE select 440GX select PCI @@ -73,7 +67,6 @@ config TAISHAN config KATMAI bool "Katmai" depends on 44x - default n select PPC44x_SIMPLE select 440SPe select PCI @@ -86,7 +79,6 @@ config KATMAI config RAINIER bool "Rainier" depends on 44x - default n select PPC44x_SIMPLE select 440GRX select PCI @@ -96,7 +88,6 @@ config RAINIER config WARP bool "PIKA Warp" depends on 44x - default n select 440EP help This option enables support for the PIKA Warp(tm) Appliance. The Warp @@ -109,7 +100,6 @@ config WARP config ARCHES bool "Arches" depends on 44x - default n select PPC44x_SIMPLE select 460EX # Odd since it uses 460GT but the effects are the same select PCI @@ -120,7 +110,6 @@ config ARCHES config CANYONLANDS bool "Canyonlands" depends on 44x - default n select 460EX select PCI select PPC4xx_PCI_EXPRESS @@ -134,7 +123,6 @@ config CANYONLANDS config GLACIER bool "Glacier" depends on 44x - default n select PPC44x_SIMPLE select 460EX # Odd since it uses 460GT but the effects are the same select PCI @@ -147,7 +135,6 @@ config GLACIER config REDWOOD bool "Redwood" depends on 44x - default n select PPC44x_SIMPLE select 460SX select PCI @@ -160,7 +147,6 @@ config REDWOOD config EIGER bool "Eiger" depends on 44x - default n select PPC44x_SIMPLE select 460SX select PCI @@ -172,7 +158,6 @@ config EIGER config YOSEMITE bool "Yosemite" depends on 44x - default n select PPC44x_SIMPLE select 440EP select PCI @@ -182,7 +167,6 @@ config YOSEMITE config ISS4xx bool "ISS 4xx Simulator" depends on (44x || 40x) - default n select 405GP if 40x select 440GP if 44x && !PPC_47x select PPC_FPU @@ -193,7 +177,6 @@ config ISS4xx config CURRITUCK bool "IBM Currituck (476fpe) Support" depends on PPC_47x - default n select SWIOTLB select 476FPE select PPC4xx_PCI_EXPRESS @@ -203,7 +186,6 @@ config CURRITUCK config FSP2 bool "IBM FSP2 (476fpe) Support" depends on PPC_47x - default n select 476FPE select IBM_EMAC_EMAC4 if IBM_EMAC select IBM_EMAC_RGMII if IBM_EMAC @@ -215,7 +197,6 @@ config FSP2 config AKEBONO bool "IBM Akebono (476gtr) Support" depends on PPC_47x - default n select SWIOTLB select 476FPE select PPC4xx_PCI_EXPRESS @@ -241,7 +222,6 @@ config AKEBONO config ICON bool "Icon" depends on 44x - default n select PPC44x_SIMPLE select 440SPe select PCI @@ -252,7 +232,6 @@ config ICON config XILINX_VIRTEX440_GENERIC_BOARD bool 
"Generic Xilinx Virtex 5 FXT board support" depends on 44x - default n select XILINX_VIRTEX_5_FXT select XILINX_INTC help @@ -280,7 +259,6 @@ config XILINX_ML510 config PPC44x_SIMPLE bool "Simple PowerPC 44x board support" depends on 44x - default n help This option enables the simple PowerPC 44x platform support. diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c index 04f0c73a9b4f..7a507f775308 100644 --- a/arch/powerpc/platforms/44x/fsp2.c +++ b/arch/powerpc/platforms/44x/fsp2.c @@ -210,15 +210,15 @@ static void node_irq_request(const char *compat, irq_handler_t errirq_handler) for_each_compatible_node(np, NULL, compat) { irq = irq_of_parse_and_map(np, 0); if (irq == NO_IRQ) { - pr_err("device tree node %s is missing a interrupt", - np->name); + pr_err("device tree node %pOFn is missing a interrupt", + np); return; } rc = request_irq(irq, errirq_handler, 0, np->name, np); if (rc) { - pr_err("fsp_of_probe: request_irq failed: np=%s rc=%d", - np->full_name, rc); + pr_err("fsp_of_probe: request_irq failed: np=%pOF rc=%d", + np, rc); return; } } diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c index 69d9f60d9fe5..f5bbd4563342 100644 --- a/arch/powerpc/platforms/4xx/ocm.c +++ b/arch/powerpc/platforms/4xx/ocm.c @@ -113,7 +113,6 @@ static void __init ocm_init_node(int count, struct device_node *node) int len; struct resource rsrc; - int ioflags; ocm = ocm_get_node(count); @@ -179,9 +178,8 @@ static void __init ocm_init_node(int count, struct device_node *node) /* ioremap the non-cached region */ if (ocm->nc.memtotal) { - ioflags = _PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_EXEC; ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal, - ioflags); + _PAGE_EXEC | PAGE_KERNEL_NCG); if (!ocm->nc.virt) { printk(KERN_ERR @@ -195,9 +193,8 @@ static void __init ocm_init_node(int count, struct device_node *node) /* ioremap the cached region */ if (ocm->c.memtotal) { - ioflags = _PAGE_EXEC; ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal, - ioflags); + _PAGE_EXEC | PAGE_KERNEL); if (!ocm->c.virt) { printk(KERN_ERR diff --git a/arch/powerpc/platforms/4xx/soc.c b/arch/powerpc/platforms/4xx/soc.c index 5e36508b2a70..1844bf502fcf 100644 --- a/arch/powerpc/platforms/4xx/soc.c +++ b/arch/powerpc/platforms/4xx/soc.c @@ -200,7 +200,7 @@ void ppc4xx_reset_system(char *cmd) u32 reset_type = DBCR0_RST_SYSTEM; const u32 *prop; - np = of_find_node_by_type(NULL, "cpu"); + np = of_get_cpu_node(0, NULL); if (np) { prop = of_get_property(np, "reset-type", NULL); diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig index 6e04099361b9..1947a88bc69f 100644 --- a/arch/powerpc/platforms/82xx/Kconfig +++ b/arch/powerpc/platforms/82xx/Kconfig @@ -51,7 +51,6 @@ endif config PQ2ADS bool - default n config 8260 bool diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 7e966f4cf19a..fff72425727a 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -216,8 +216,8 @@ static int smp_85xx_start_cpu(int cpu) /* Map the spin table */ if (ioremappable) - spin_table = ioremap_prot(*cpu_rel_addr, - sizeof(struct epapr_spin_table), _PAGE_COHERENT); + spin_table = ioremap_coherent(*cpu_rel_addr, + sizeof(struct epapr_spin_table)); else spin_table = phys_to_virt(*cpu_rel_addr); diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 027c42d8966c..f1c805c8adbc 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ 
b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -66,7 +66,7 @@ static int __init get_freq(char *name, unsigned long *val) int found = 0; /* The cpu node should have timebase and clock frequency properties */ - cpu = of_find_node_by_type(NULL, "cpu"); + cpu = of_get_cpu_node(0, NULL); if (cpu) { fp = of_get_property(cpu, name, NULL); @@ -147,8 +147,9 @@ void __init mpc8xx_calibrate_decr(void) * we have to enable the timebase). The decrementer interrupt * is wired into the vector table, nothing to do here for that. */ - cpu = of_find_node_by_type(NULL, "cpu"); + cpu = of_get_cpu_node(0, NULL); virq= irq_of_parse_and_map(cpu, 0); + of_node_put(cpu); irq = virq_to_hw(virq); sys_tmr2 = immr_map(im_sit); diff --git a/arch/powerpc/platforms/8xx/machine_check.c b/arch/powerpc/platforms/8xx/machine_check.c index 402016705a39..9944fc303df0 100644 --- a/arch/powerpc/platforms/8xx/machine_check.c +++ b/arch/powerpc/platforms/8xx/machine_check.c @@ -18,9 +18,9 @@ int machine_check_8xx(struct pt_regs *regs) pr_err("Machine check in kernel mode.\n"); pr_err("Caused by (from SRR1=%lx): ", reason); if (reason & 0x40000000) - pr_err("Fetch error at address %lx\n", regs->nip); + pr_cont("Fetch error at address %lx\n", regs->nip); else - pr_err("Data access error at address %lx\n", regs->dar); + pr_cont("Data access error at address %lx\n", regs->dar); #ifdef CONFIG_PCI /* the qspan pci read routines can cause machine checks -- Cort diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 14ef17e10ec9..260a56b7602d 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -23,7 +23,6 @@ source "arch/powerpc/platforms/amigaone/Kconfig" config KVM_GUEST bool "KVM Guest support" - default n select EPAPR_PARAVIRT ---help--- This option enables various optimizations for running under the KVM @@ -34,7 +33,6 @@ config KVM_GUEST config EPAPR_PARAVIRT bool "ePAPR para-virtualization support" - default n help Enables ePAPR para-virtualization support for guests. @@ -74,7 +72,6 @@ config PPC_DT_CPU_FTRS config UDBG_RTAS_CONSOLE bool "RTAS based debug console" depends on PPC_RTAS - default n config PPC_SMP_MUXED_IPI bool @@ -86,16 +83,13 @@ config PPC_SMP_MUXED_IPI config IPIC bool - default n config MPIC bool - default n config MPIC_TIMER bool "MPIC Global Timer" depends on MPIC && FSL_SOC - default n help The MPIC global timer is a hardware timer inside the Freescale PIC complying with OpenPIC standard. When the @@ -107,7 +101,6 @@ config MPIC_TIMER config FSL_MPIC_TIMER_WAKEUP tristate "Freescale MPIC global timer wakeup driver" depends on FSL_SOC && MPIC_TIMER && PM - default n help The driver provides a way to wake up the system by MPIC timer. @@ -115,43 +108,35 @@ config FSL_MPIC_TIMER_WAKEUP config PPC_EPAPR_HV_PIC bool - default n select EPAPR_PARAVIRT config MPIC_WEIRD bool - default n config MPIC_MSGR bool "MPIC message register support" depends on MPIC - default n help Enables support for the MPIC message registers. These registers are used for inter-processor communication. 
config PPC_I8259 bool - default n config U3_DART bool depends on PPC64 - default n config PPC_RTAS bool - default n config RTAS_ERROR_LOGGING bool depends on PPC_RTAS - default n config PPC_RTAS_DAEMON bool depends on PPC_RTAS - default n config RTAS_PROC bool "Proc interface to RTAS" @@ -164,11 +149,9 @@ config RTAS_FLASH config MMIO_NVRAM bool - default n config MPIC_U3_HT_IRQS bool - default n config MPIC_BROKEN_REGREAD bool @@ -187,15 +170,12 @@ config EEH config PPC_MPC106 bool - default n config PPC_970_NAP bool - default n config PPC_P7_NAP bool - default n config PPC_INDIRECT_PIO bool @@ -295,7 +275,6 @@ config CPM2 config FSL_ULI1575 bool - default n select GENERIC_ISA_DMA help Supports for the ULI1575 PCIe south bridge that exists on some diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 6c6a7c72cae4..f4e2c5729374 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 config PPC64 bool "64-bit kernel" - default n select ZLIB_DEFLATE help This option selects whether a 32-bit or a 64-bit kernel @@ -72,6 +71,7 @@ config PPC_BOOK3S_64 select PPC_HAVE_PMU_SUPPORT select SYS_SUPPORTS_HUGETLBFS select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_SUPPORTS_NUMA_BALANCING select IRQ_WORK @@ -368,7 +368,6 @@ config PPC_MM_SLICES bool default y if PPC_BOOK3S_64 default y if PPC_8xx && HUGETLB_PAGE - default n config PPC_HAVE_PMU_SUPPORT bool @@ -382,7 +381,6 @@ config PPC_PERF_CTRS config FORCE_SMP # Allow platforms to force SMP=y by selecting this bool - default n select SMP config SMP @@ -423,7 +421,6 @@ config CHECK_CACHE_COHERENCY config PPC_DOORBELL bool - default n endmenu diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index e46bb7ea710f..143d4417f6cc 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile @@ -1,7 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror - obj-$(CONFIG_FSL_ULI1575) += fsl_uli1575.o obj-$(CONFIG_PPC_PMAC) += powermac/ diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 9f5958f16923..4b2f114f3116 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 config PPC_CELL bool - default n config PPC_CELL_COMMON bool @@ -22,7 +21,6 @@ config PPC_CELL_NATIVE select IBM_EMAC_RGMII if IBM_EMAC select IBM_EMAC_ZMII if IBM_EMAC #test only select IBM_EMAC_TAH if IBM_EMAC #test only - default n config PPC_IBM_CELL_BLADE bool "IBM Cell Blade" @@ -54,7 +52,6 @@ config SPU_FS config SPU_BASE bool - default n select PPC_COPRO_BASE config CBE_RAS diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c index 882944c36ef5..5d8e8b6bb1cc 100644 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c @@ -49,7 +49,7 @@ static int calc_freq(struct spu_gov_info_struct *info) cpu = info->policy->cpu; busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus); - CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1); + info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1); pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n", cpu, busy_spus, info->busy_spus); diff --git a/arch/powerpc/platforms/cell/spu_base.c 
b/arch/powerpc/platforms/cell/spu_base.c index 0c45cdbac4cf..7f12c7b78c0f 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -50,11 +50,11 @@ struct cbe_spu_info cbe_spu_info[MAX_NUMNODES]; EXPORT_SYMBOL_GPL(cbe_spu_info); /* - * The spufs fault-handling code needs to call force_sig_info to raise signals + * The spufs fault-handling code needs to call force_sig_fault to raise signals * on DMA errors. Export it here to avoid general kernel-wide access to this * function */ -EXPORT_SYMBOL_GPL(force_sig_info); +EXPORT_SYMBOL_GPL(force_sig_fault); /* * Protects cbe_spu_info and spu->number. diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c index 5c409c98cca8..f7e36373f6e0 100644 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ b/arch/powerpc/platforms/cell/spu_manage.c @@ -180,35 +180,22 @@ out: static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) { - struct of_phandle_args oirq; - int ret; int i; for (i=0; i < 3; i++) { - ret = of_irq_parse_one(np, i, &oirq); - if (ret) { - pr_debug("spu_new: failed to get irq %d\n", i); - goto err; - } - ret = -EINVAL; - pr_debug(" irq %d no 0x%x on %pOF\n", i, oirq.args[0], - oirq.np); - spu->irqs[i] = irq_create_of_mapping(&oirq); - if (!spu->irqs[i]) { - pr_debug("spu_new: failed to map it !\n"); + spu->irqs[i] = irq_of_parse_and_map(np, i); + if (!spu->irqs[i]) goto err; - } } return 0; err: - pr_debug("failed to map irq %x for spu %s\n", *oirq.args, - spu->name); + pr_debug("failed to map irq %x for spu %s\n", i, spu->name); for (; i >= 0; i--) { if (spu->irqs[i]) irq_dispose_mapping(spu->irqs[i]); } - return ret; + return -EINVAL; } static int spu_map_resource(struct spu *spu, int nr, @@ -295,8 +282,8 @@ static int __init of_enumerate_spus(int (*fn)(void *data)) for_each_node_by_type(node, "spe") { ret = fn(node); if (ret) { - printk(KERN_WARNING "%s: Error initializing %s\n", - __func__, node->name); + printk(KERN_WARNING "%s: Error initializing %pOFn\n", + __func__, node); of_node_put(node); break; } diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c index 83cf58daaa79..971ac43b5d60 100644 --- a/arch/powerpc/platforms/cell/spufs/fault.c +++ b/arch/powerpc/platforms/cell/spufs/fault.c @@ -36,42 +36,32 @@ static void spufs_handle_event(struct spu_context *ctx, unsigned long ea, int type) { - siginfo_t info; - if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) { ctx->event_return |= type; wake_up_all(&ctx->stop_wq); return; } - clear_siginfo(&info); - switch (type) { case SPE_EVENT_INVALID_DMA: - info.si_signo = SIGBUS; - info.si_code = BUS_OBJERR; + force_sig_fault(SIGBUS, BUS_OBJERR, NULL, current); break; case SPE_EVENT_SPE_DATA_STORAGE: - info.si_signo = SIGSEGV; - info.si_addr = (void __user *)ea; - info.si_code = SEGV_ACCERR; ctx->ops->restart_dma(ctx); + force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea, + current); break; case SPE_EVENT_DMA_ALIGNMENT: - info.si_signo = SIGBUS; /* DAR isn't set for an alignment fault :( */ - info.si_code = BUS_ADRALN; + force_sig_fault(SIGBUS, BUS_ADRALN, NULL, current); break; case SPE_EVENT_SPE_ERROR: - info.si_signo = SIGILL; - info.si_addr = (void __user *)(unsigned long) - ctx->ops->npc_read(ctx) - 4; - info.si_code = ILL_ILLOPC; + force_sig_fault( + SIGILL, ILL_ILLOPC, + (void __user *)(unsigned long) + ctx->ops->npc_read(ctx) - 4, current); break; } - - if (info.si_signo) - force_sig_info(info.si_signo, &info, current); } int 
spufs_handle_class0(struct spu_context *ctx) diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index c9ef3c532169..9fcccb4490b9 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -987,9 +987,9 @@ static void spu_calc_load(void) unsigned long active_tasks; /* fixed-point */ active_tasks = count_active_contexts() * FIXED_1; - CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks); - CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks); - CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks); + spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks); + spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks); + spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks); } static void spusched_wake(struct timer_list *unused) @@ -1071,9 +1071,6 @@ void spuctx_switch_state(struct spu_context *ctx, } } -#define LOAD_INT(x) ((x) >> FSHIFT) -#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) - static int show_spu_loadavg(struct seq_file *s, void *private) { int a, b, c; diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 403523c061ba..ecf703ee3a76 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -112,7 +112,7 @@ static void __iomem *wii_ioremap_hw_regs(char *name, char *compatible) } error = of_address_to_resource(np, 0, &res); if (error) { - pr_err("no valid reg found for %s\n", np->name); + pr_err("no valid reg found for %pOFn\n", np); goto out_put; } diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig index 376d0be36b66..2601fac50354 100644 --- a/arch/powerpc/platforms/maple/Kconfig +++ b/arch/powerpc/platforms/maple/Kconfig @@ -13,7 +13,6 @@ config PPC_MAPLE select PPC_RTAS select MMIO_NVRAM select ATA_NONSTANDARD if ATA - default n help This option enables support for the Maple 970FX Evaluation Board. 
For more information, refer to <http://www.970eval.com> diff --git a/arch/powerpc/platforms/pasemi/Kconfig b/arch/powerpc/platforms/pasemi/Kconfig index d458a791d35b..98e3bc22bebc 100644 --- a/arch/powerpc/platforms/pasemi/Kconfig +++ b/arch/powerpc/platforms/pasemi/Kconfig @@ -2,7 +2,6 @@ config PPC_PASEMI depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN bool "PA Semi SoC-based platforms" - default n select MPIC select PCI select PPC_UDBG_16550 diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index c80f72c370ae..53384eb42a76 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -576,7 +576,7 @@ int pasemi_dma_init(void) res.start = 0xfd800000; res.end = res.start + 0x1000; } - dma_status = __ioremap(res.start, resource_size(&res), 0); + dma_status = ioremap_cache(res.start, resource_size(&res)); pci_dev_put(iob_pdev); for (i = 0; i < MAX_TXCH; i++) diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index f2839eed0f89..923bfb340433 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile @@ -1,9 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 CFLAGS_bootx_init.o += -fPIC +CFLAGS_bootx_init.o += $(call cc-option, -fno-stack-protector) ifdef CONFIG_FUNCTION_TRACER # Do not trace early boot code -CFLAGS_REMOVE_bootx_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_bootx_init.o = $(CC_FLAGS_FTRACE) endif obj-y += pic.o setup.o time.o feature.o pci.o \ diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index 4eb8cb38fc69..ed2f54b3f173 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -1049,7 +1049,6 @@ core99_reset_cpu(struct device_node *node, long param, long value) unsigned long flags; struct macio_chip *macio; struct device_node *np; - struct device_node *cpus; const int dflt_reset_lines[] = { KL_GPIO_RESET_CPU0, KL_GPIO_RESET_CPU1, KL_GPIO_RESET_CPU2, @@ -1059,10 +1058,7 @@ core99_reset_cpu(struct device_node *node, long param, long value) if (macio->type != macio_keylargo) return -ENODEV; - cpus = of_find_node_by_path("/cpus"); - if (cpus == NULL) - return -ENODEV; - for (np = cpus->child; np != NULL; np = np->sibling) { + for_each_of_cpu_node(np) { const u32 *num = of_get_property(np, "reg", NULL); const u32 *rst = of_get_property(np, "soft-reset", NULL); if (num == NULL || rst == NULL) @@ -1072,7 +1068,6 @@ core99_reset_cpu(struct device_node *node, long param, long value) break; } } - of_node_put(cpus); if (np == NULL || reset_io == 0) reset_io = dflt_reset_lines[param]; @@ -1504,16 +1499,12 @@ static long g5_reset_cpu(struct device_node *node, long param, long value) unsigned long flags; struct macio_chip *macio; struct device_node *np; - struct device_node *cpus; macio = &macio_chips[0]; if (macio->type != macio_keylargo2 && macio->type != macio_shasta) return -ENODEV; - cpus = of_find_node_by_path("/cpus"); - if (cpus == NULL) - return -ENODEV; - for (np = cpus->child; np != NULL; np = np->sibling) { + for_each_of_cpu_node(np) { const u32 *num = of_get_property(np, "reg", NULL); const u32 *rst = of_get_property(np, "soft-reset", NULL); if (num == NULL || rst == NULL) @@ -1523,7 +1514,6 @@ static long g5_reset_cpu(struct device_node *node, long param, long value) break; } } - of_node_put(cpus); if (np == NULL || reset_io == 0) return -ENODEV; @@ -2515,31 +2505,26 @@ found: * supposed to be set 
when not supported, but I'm not very confident * that all Apple OF revs did it properly, I do it the paranoid way. */ - while (uninorth_base && uninorth_rev > 3) { - struct device_node *cpus = of_find_node_by_path("/cpus"); + if (uninorth_base && uninorth_rev > 3) { struct device_node *np; - if (!cpus || !cpus->child) { - printk(KERN_WARNING "Can't find CPU(s) in device tree !\n"); - of_node_put(cpus); - break; - } - np = cpus->child; - /* Nap mode not supported on SMP */ - if (np->sibling) { - of_node_put(cpus); - break; - } - /* Nap mode not supported if flush-on-lock property is present */ - if (of_get_property(np, "flush-on-lock", NULL)) { - of_node_put(cpus); - break; + for_each_of_cpu_node(np) { + int cpu_count = 1; + + /* Nap mode not supported on SMP */ + if (of_get_property(np, "flush-on-lock", NULL) || + (cpu_count > 1)) { + powersave_nap = 0; + of_node_put(np); + break; + } + + cpu_count++; + powersave_nap = 1; } - of_node_put(cpus); - powersave_nap = 1; - printk(KERN_DEBUG "Processor NAP mode on idle enabled.\n"); - break; } + if (powersave_nap) + printk(KERN_DEBUG "Processor NAP mode on idle enabled.\n"); /* On CPUs that support it (750FX), lowspeed by default during * NAP mode diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 3a529fcdae97..2f00e3daafb0 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -243,10 +243,9 @@ static void __init l2cr_init(void) { /* Checks "l2cr-value" property in the registry */ if (cpu_has_feature(CPU_FTR_L2CR)) { - struct device_node *np = of_find_node_by_name(NULL, "cpus"); - if (!np) - np = of_find_node_by_type(NULL, "cpu"); - if (np) { + struct device_node *np; + + for_each_of_cpu_node(np) { const unsigned int *l2cr = of_get_property(np, "l2cr-value", NULL); if (l2cr) { @@ -256,6 +255,7 @@ static void __init l2cr_init(void) _set_L2CR(ppc_override_l2cr_value); } of_node_put(np); + break; } } @@ -279,8 +279,8 @@ static void __init pmac_setup_arch(void) /* Set loops_per_jiffy to a half-way reasonable value, for use until calibrate_delay gets called. */ loops_per_jiffy = 50000000 / HZ; - cpu = of_find_node_by_type(NULL, "cpu"); - if (cpu != NULL) { + + for_each_of_cpu_node(cpu) { fp = of_get_property(cpu, "clock-frequency", NULL); if (fp != NULL) { if (pvr >= 0x30 && pvr < 0x80) @@ -292,8 +292,9 @@ static void __init pmac_setup_arch(void) else /* 601, 603, etc. */ loops_per_jiffy = *fp / (2 * HZ); + of_node_put(cpu); + break; } - of_node_put(cpu); } /* See if newworld or oldworld */ diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index f92c1918fb56..f157e3d071f2 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -45,13 +45,6 @@ #endif /* - * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU - * times wrap in 2040. If we need to handle later times, the read_time functions - * need to be changed to interpret wrapped times as post-2040. - */ -#define RTC_OFFSET 2082844800 - -/* * Calibrate the decrementer frequency with the VIA timer 1. 
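The RTC_OFFSET constant removed above is just the distance from the Mac epoch (1904-01-01) to the Unix epoch (1970-01-01): 66 years, 17 of them leap years. A one-line check of the arithmetic:

#include <stdio.h>

int main(void)
{
        /* 66 years from 1904 to 1970; leap years 1904, 1908, ..., 1968 add 17 days */
        long days = 66L * 365 + 17;

        printf("%ld\n", days * 86400L);  /* prints 2082844800, the old RTC_OFFSET */
        return 0;
}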
*/ #define VIA_TIMER_FREQ_6 4700000 /* time 1 frequency * 6 */ @@ -90,98 +83,6 @@ long __init pmac_time_init(void) return delta; } -#ifdef CONFIG_ADB_CUDA -static time64_t cuda_get_time(void) -{ - struct adb_request req; - time64_t now; - - if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0) - return 0; - while (!req.complete) - cuda_poll(); - if (req.reply_len != 7) - printk(KERN_ERR "cuda_get_time: got %d byte reply\n", - req.reply_len); - now = (u32)((req.reply[3] << 24) + (req.reply[4] << 16) + - (req.reply[5] << 8) + req.reply[6]); - /* it's either after year 2040, or the RTC has gone backwards */ - WARN_ON(now < RTC_OFFSET); - - return now - RTC_OFFSET; -} - -#define cuda_get_rtc_time(tm) rtc_time64_to_tm(cuda_get_time(), (tm)) - -static int cuda_set_rtc_time(struct rtc_time *tm) -{ - u32 nowtime; - struct adb_request req; - - nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET); - if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME, - nowtime >> 24, nowtime >> 16, nowtime >> 8, - nowtime) < 0) - return -ENXIO; - while (!req.complete) - cuda_poll(); - if ((req.reply_len != 3) && (req.reply_len != 7)) - printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n", - req.reply_len); - return 0; -} - -#else -#define cuda_get_time() 0 -#define cuda_get_rtc_time(tm) -#define cuda_set_rtc_time(tm) 0 -#endif - -#ifdef CONFIG_ADB_PMU -static time64_t pmu_get_time(void) -{ - struct adb_request req; - time64_t now; - - if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) - return 0; - pmu_wait_complete(&req); - if (req.reply_len != 4) - printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n", - req.reply_len); - now = (u32)((req.reply[0] << 24) + (req.reply[1] << 16) + - (req.reply[2] << 8) + req.reply[3]); - - /* it's either after year 2040, or the RTC has gone backwards */ - WARN_ON(now < RTC_OFFSET); - - return now - RTC_OFFSET; -} - -#define pmu_get_rtc_time(tm) rtc_time64_to_tm(pmu_get_time(), (tm)) - -static int pmu_set_rtc_time(struct rtc_time *tm) -{ - u32 nowtime; - struct adb_request req; - - nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET); - if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24, - nowtime >> 16, nowtime >> 8, nowtime) < 0) - return -ENXIO; - pmu_wait_complete(&req); - if (req.reply_len != 0) - printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n", - req.reply_len); - return 0; -} - -#else -#define pmu_get_time() 0 -#define pmu_get_rtc_time(tm) -#define pmu_set_rtc_time(tm) 0 -#endif - #ifdef CONFIG_PMAC_SMU static time64_t smu_get_time(void) { @@ -191,11 +92,6 @@ static time64_t smu_get_time(void) return 0; return rtc_tm_to_time64(&tm); } - -#else -#define smu_get_time() 0 -#define smu_get_rtc_time(tm, spin) -#define smu_set_rtc_time(tm, spin) 0 #endif /* Can't be __init, it's called when suspending and resuming */ @@ -203,12 +99,18 @@ time64_t pmac_get_boot_time(void) { /* Get the time from the RTC, used only at boot time */ switch (sys_ctrler) { +#ifdef CONFIG_ADB_CUDA case SYS_CTRLER_CUDA: return cuda_get_time(); +#endif +#ifdef CONFIG_ADB_PMU case SYS_CTRLER_PMU: return pmu_get_time(); +#endif +#ifdef CONFIG_PMAC_SMU case SYS_CTRLER_SMU: return smu_get_time(); +#endif default: return 0; } @@ -218,15 +120,21 @@ void pmac_get_rtc_time(struct rtc_time *tm) { /* Get the time from the RTC, used only at boot time */ switch (sys_ctrler) { +#ifdef CONFIG_ADB_CUDA case SYS_CTRLER_CUDA: - cuda_get_rtc_time(tm); + rtc_time64_to_tm(cuda_get_time(), tm); break; +#endif +#ifdef CONFIG_ADB_PMU case SYS_CTRLER_PMU: - 
pmu_get_rtc_time(tm); + rtc_time64_to_tm(pmu_get_time(), tm); break; +#endif +#ifdef CONFIG_PMAC_SMU case SYS_CTRLER_SMU: smu_get_rtc_time(tm, 1); break; +#endif default: ; } @@ -235,12 +143,18 @@ void pmac_get_rtc_time(struct rtc_time *tm) int pmac_set_rtc_time(struct rtc_time *tm) { switch (sys_ctrler) { +#ifdef CONFIG_ADB_CUDA case SYS_CTRLER_CUDA: return cuda_set_rtc_time(tm); +#endif +#ifdef CONFIG_ADB_PMU case SYS_CTRLER_PMU: return pmu_set_rtc_time(tm); +#endif +#ifdef CONFIG_PMAC_SMU case SYS_CTRLER_SMU: return smu_set_rtc_time(tm, 1); +#endif default: return -ENODEV; } diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index f8dc98d3dc01..99083fe992d5 100644 --- a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -15,11 +15,6 @@ config PPC_POWERNV select PPC_SCOM select ARCH_RANDOM select CPU_FREQ - select CPU_FREQ_GOV_PERFORMANCE - select CPU_FREQ_GOV_POWERSAVE - select CPU_FREQ_GOV_USERSPACE - select CPU_FREQ_GOV_ONDEMAND - select CPU_FREQ_GOV_CONSERVATIVE select PPC_DOORBELL select MMU_NOTIFIER select FORCE_SMP @@ -35,7 +30,6 @@ config OPAL_PRD config PPC_MEMTRACE bool "Enable removal of RAM from kernel mappings for tracing" depends on PPC_POWERNV && MEMORY_HOTREMOVE - default n help Enabling this option allows for the removal of memory (RAM) from the kernel mappings to be used for hardware tracing. diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 3c1beae29f2d..abc0be7507c8 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -223,14 +223,6 @@ int pnv_eeh_post_init(void) eeh_probe_devices(); eeh_addr_cache_build(); - if (eeh_has_flag(EEH_POSTPONED_PROBE)) { - eeh_clear_flag(EEH_POSTPONED_PROBE); - if (eeh_enabled()) - pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n"); - else - pr_info("EEH: No capable adapters found\n"); - } - /* Register OPAL event notifier */ eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR)); if (eeh_event_irq < 0) { @@ -391,12 +383,6 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data) if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA) return NULL; - /* Skip if we haven't probed yet */ - if (phb->ioda.pe_rmap[config_addr] == IODA_INVALID_PE) { - eeh_add_flag(EEH_POSTPONED_PROBE); - return NULL; - } - /* Initialize eeh device */ edev->class_code = pdn->class_code; edev->mode &= 0xFFFFFF00; @@ -604,7 +590,7 @@ static int pnv_eeh_get_phb_state(struct eeh_pe *pe) EEH_STATE_MMIO_ENABLED | EEH_STATE_DMA_ENABLED); } else if (!(pe->state & EEH_PE_ISOLATED)) { - eeh_pe_state_mark(pe, EEH_PE_ISOLATED); + eeh_pe_mark_isolated(pe); pnv_eeh_get_phb_diag(pe); if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) @@ -706,7 +692,7 @@ static int pnv_eeh_get_pe_state(struct eeh_pe *pe) if (phb->freeze_pe) phb->freeze_pe(phb, pe->addr); - eeh_pe_state_mark(pe, EEH_PE_ISOLATED); + eeh_pe_mark_isolated(pe); pnv_eeh_get_phb_diag(pe); if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) @@ -1054,7 +1040,7 @@ static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option) int ret; /* The VF PE should have only one child device */ - edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, list); + edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry); pdn = eeh_dev_to_pdn(edev); if (!pdn) return -ENXIO; @@ -1148,43 +1134,6 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option) } /** - * pnv_eeh_wait_state - Wait for PE state - * @pe: EEH PE - * @max_wait: maximal 
period in millisecond - * - * Wait for the state of associated PE. It might take some time - * to retrieve the PE's state. - */ -static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait) -{ - int ret; - int mwait; - - while (1) { - ret = pnv_eeh_get_state(pe, &mwait); - - /* - * If the PE's state is temporarily unavailable, - * we have to wait for the specified time. Otherwise, - * the PE's state will be returned immediately. - */ - if (ret != EEH_STATE_UNAVAILABLE) - return ret; - - if (max_wait <= 0) { - pr_warn("%s: Timeout getting PE#%x's state (%d)\n", - __func__, pe->addr, max_wait); - return EEH_STATE_NOT_SUPPORT; - } - - max_wait -= mwait; - msleep(mwait); - } - - return EEH_STATE_NOT_SUPPORT; -} - -/** * pnv_eeh_get_log - Retrieve error log * @pe: EEH PE * @severity: temporary or permanent error log @@ -1611,7 +1560,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) if ((ret == EEH_NEXT_ERR_FROZEN_PE || ret == EEH_NEXT_ERR_FENCED_PHB) && !((*pe)->state & EEH_PE_ISOLATED)) { - eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); + eeh_pe_mark_isolated(*pe); pnv_eeh_get_phb_diag(*pe); if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) @@ -1640,7 +1589,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) } /* We possibly migrate to another PE */ - eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); + eeh_pe_mark_isolated(*pe); } /* @@ -1702,7 +1651,6 @@ static struct eeh_ops pnv_eeh_ops = { .get_pe_addr = pnv_eeh_get_pe_addr, .get_state = pnv_eeh_get_state, .reset = pnv_eeh_reset, - .wait_state = pnv_eeh_wait_state, .get_log = pnv_eeh_get_log, .configure_bridge = pnv_eeh_configure_bridge, .err_inject = pnv_eeh_err_inject, diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 51dc398ae3f7..a29fdf8a2e56 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -90,17 +90,15 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, change_memblock_state); - lock_device_hotplug(); - remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); - unlock_device_hotplug(); return true; } static u64 memtrace_alloc_node(u32 nid, u64 size) { - u64 start_pfn, end_pfn, nr_pages; + u64 start_pfn, end_pfn, nr_pages, pfn; u64 base_pfn; + u64 bytes = memory_block_size_bytes(); if (!node_spanned_pages(nid)) return 0; @@ -113,8 +111,21 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) end_pfn = round_down(end_pfn - nr_pages, nr_pages); for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) { - if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) + if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) { + /* + * Remove memory in memory block size chunks so that + * iomem resources are always split to the same size and + * we never try to remove memory that spans two iomem + * resources. 
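The memtrace change continuing below replaces the single remove_memory() call with one call per memory block, so a removal never straddles an iomem resource. To get a feel for the stepping, a small sketch with a block size and page size chosen only as an example (the real values come from memory_block_size_bytes() and the kernel configuration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 16   /* assume 64K pages for this example */

int main(void)
{
        uint64_t block_bytes = 256ULL << 20;            /* assume 256MB memory blocks */
        uint64_t base_pfn = 0x100000, nr_pages = 0x4000;        /* a 1GB range */
        uint64_t step = block_bytes >> PAGE_SHIFT;      /* pfns per block = 4096 */

        for (uint64_t pfn = base_pfn; pfn < base_pfn + nr_pages; pfn += step)
                printf("remove_memory(nid, %#llx, %#llx)\n",
                       (unsigned long long)(pfn << PAGE_SHIFT),
                       (unsigned long long)block_bytes);
        return 0;
}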
+ */ + lock_device_hotplug(); + end_pfn = base_pfn + nr_pages; + for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) { + remove_memory(nid, pfn << PAGE_SHIFT, bytes); + } + unlock_device_hotplug(); return base_pfn << PAGE_SHIFT; + } } return 0; diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 8006c54a91e3..6f60e0931922 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -17,7 +17,7 @@ #include <linux/pci.h> #include <linux/memblock.h> #include <linux/iommu.h> -#include <linux/debugfs.h> +#include <linux/sizes.h> #include <asm/debugfs.h> #include <asm/tlb.h> @@ -42,14 +42,6 @@ static DEFINE_SPINLOCK(npu_context_lock); /* - * When an address shootdown range exceeds this threshold we invalidate the - * entire TLB on the GPU for the given PID rather than each specific address in - * the range. - */ -static uint64_t atsd_threshold = 2 * 1024 * 1024; -static struct dentry *atsd_threshold_dentry; - -/* * Other types of TCE cache invalidation are not functional in the * hardware. */ @@ -454,79 +446,73 @@ static void put_mmio_atsd_reg(struct npu *npu, int reg) } /* MMIO ATSD register offsets */ -#define XTS_ATSD_AVA 1 -#define XTS_ATSD_STAT 2 - -static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg, - unsigned long launch, unsigned long va) -{ - struct npu *npu = mmio_atsd_reg->npu; - int reg = mmio_atsd_reg->reg; - - __raw_writeq_be(va, npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA); - eieio(); - __raw_writeq_be(launch, npu->mmio_atsd_regs[reg]); -} +#define XTS_ATSD_LAUNCH 0 +#define XTS_ATSD_AVA 1 +#define XTS_ATSD_STAT 2 -static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], - unsigned long pid, bool flush) +static unsigned long get_atsd_launch_val(unsigned long pid, unsigned long psize) { - int i; - unsigned long launch; - - for (i = 0; i <= max_npu2_index; i++) { - if (mmio_atsd_reg[i].reg < 0) - continue; + unsigned long launch = 0; - /* IS set to invalidate matching PID */ - launch = PPC_BIT(12); - - /* PRS set to process-scoped */ - launch |= PPC_BIT(13); + if (psize == MMU_PAGE_COUNT) { + /* IS set to invalidate entire matching PID */ + launch |= PPC_BIT(12); + } else { + /* AP set to invalidate region of psize */ + launch |= (u64)mmu_get_ap(psize) << PPC_BITLSHIFT(17); + } - /* AP */ - launch |= (u64) - mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); + /* PRS set to process-scoped */ + launch |= PPC_BIT(13); - /* PID */ - launch |= pid << PPC_BITLSHIFT(38); + /* PID */ + launch |= pid << PPC_BITLSHIFT(38); - /* No flush */ - launch |= !flush << PPC_BITLSHIFT(39); + /* Leave "No flush" (bit 39) 0 so every ATSD performs a flush */ - /* Invalidating the entire process doesn't use a va */ - mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0); - } + return launch; } -static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], - unsigned long va, unsigned long pid, bool flush) +static void mmio_atsd_regs_write(struct mmio_atsd_reg + mmio_atsd_reg[NV_MAX_NPUS], unsigned long offset, + unsigned long val) { - int i; - unsigned long launch; + struct npu *npu; + int i, reg; for (i = 0; i <= max_npu2_index; i++) { - if (mmio_atsd_reg[i].reg < 0) + reg = mmio_atsd_reg[i].reg; + if (reg < 0) continue; - /* IS set to invalidate target VA */ - launch = 0; + npu = mmio_atsd_reg[i].npu; + __raw_writeq_be(val, npu->mmio_atsd_regs[reg] + offset); + } +} + +static void mmio_invalidate_pid(struct mmio_atsd_reg 
mmio_atsd_reg[NV_MAX_NPUS], + unsigned long pid) +{ + unsigned long launch = get_atsd_launch_val(pid, MMU_PAGE_COUNT); - /* PRS set to process scoped */ - launch |= PPC_BIT(13); + /* Invalidating the entire process doesn't use a va */ + mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch); +} - /* AP */ - launch |= (u64) - mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17); +static void mmio_invalidate_range(struct mmio_atsd_reg + mmio_atsd_reg[NV_MAX_NPUS], unsigned long pid, + unsigned long start, unsigned long psize) +{ + unsigned long launch = get_atsd_launch_val(pid, psize); - /* PID */ - launch |= pid << PPC_BITLSHIFT(38); + /* Write all VAs first */ + mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_AVA, start); - /* No flush */ - launch |= !flush << PPC_BITLSHIFT(39); + /* Issue one barrier for all address writes */ + eieio(); - mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va); - } + /* Launch */ + mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch); } #define mn_to_npu_context(x) container_of(x, struct npu_context, mn) @@ -612,14 +598,36 @@ static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]) } /* - * Invalidate either a single address or an entire PID depending on - * the value of va. + * Invalidate a virtual address range */ -static void mmio_invalidate(struct npu_context *npu_context, int va, - unsigned long address, bool flush) +static void mmio_invalidate(struct npu_context *npu_context, + unsigned long start, unsigned long size) { struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; unsigned long pid = npu_context->mm->context.id; + unsigned long atsd_start = 0; + unsigned long end = start + size - 1; + int atsd_psize = MMU_PAGE_COUNT; + + /* + * Convert the input range into one of the supported sizes. If the range + * doesn't fit, use the next larger supported size. Invalidation latency + * is high, so over-invalidation is preferred to issuing multiple + * invalidates. + * + * A 4K page size isn't supported by NPU/GPU ATS, so that case is + * ignored. + */ + if (size == SZ_64K) { + atsd_start = start; + atsd_psize = MMU_PAGE_64K; + } else if (ALIGN_DOWN(start, SZ_2M) == ALIGN_DOWN(end, SZ_2M)) { + atsd_start = ALIGN_DOWN(start, SZ_2M); + atsd_psize = MMU_PAGE_2M; + } else if (ALIGN_DOWN(start, SZ_1G) == ALIGN_DOWN(end, SZ_1G)) { + atsd_start = ALIGN_DOWN(start, SZ_1G); + atsd_psize = MMU_PAGE_1G; + } if (npu_context->nmmu_flush) /* @@ -634,23 +642,25 @@ static void mmio_invalidate(struct npu_context *npu_context, int va, * an invalidate. */ acquire_atsd_reg(npu_context, mmio_atsd_reg); - if (va) - mmio_invalidate_va(mmio_atsd_reg, address, pid, flush); + + if (atsd_psize == MMU_PAGE_COUNT) + mmio_invalidate_pid(mmio_atsd_reg, pid); else - mmio_invalidate_pid(mmio_atsd_reg, pid, flush); + mmio_invalidate_range(mmio_atsd_reg, pid, atsd_start, + atsd_psize); mmio_invalidate_wait(mmio_atsd_reg); - if (flush) { - /* - * The GPU requires two flush ATSDs to ensure all entries have - * been flushed. We use PID 0 as it will never be used for a - * process on the GPU. - */ - mmio_invalidate_pid(mmio_atsd_reg, 0, true); - mmio_invalidate_wait(mmio_atsd_reg); - mmio_invalidate_pid(mmio_atsd_reg, 0, true); - mmio_invalidate_wait(mmio_atsd_reg); - } + + /* + * The GPU requires two flush ATSDs to ensure all entries have been + * flushed. We use PID 0 as it will never be used for a process on the + * GPU. 
+ */ + mmio_invalidate_pid(mmio_atsd_reg, 0); + mmio_invalidate_wait(mmio_atsd_reg); + mmio_invalidate_pid(mmio_atsd_reg, 0); + mmio_invalidate_wait(mmio_atsd_reg); + release_atsd_reg(mmio_atsd_reg); } @@ -667,7 +677,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn, * There should be no more translation requests for this PID, but we * need to ensure any entries for it are removed from the TLB. */ - mmio_invalidate(npu_context, 0, 0, true); + mmio_invalidate(npu_context, 0, ~0UL); } static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, @@ -676,8 +686,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, pte_t pte) { struct npu_context *npu_context = mn_to_npu_context(mn); - - mmio_invalidate(npu_context, 1, address, true); + mmio_invalidate(npu_context, address, PAGE_SIZE); } static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, @@ -685,21 +694,7 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, unsigned long start, unsigned long end) { struct npu_context *npu_context = mn_to_npu_context(mn); - unsigned long address; - - if (end - start > atsd_threshold) { - /* - * Just invalidate the entire PID if the address range is too - * large. - */ - mmio_invalidate(npu_context, 0, 0, true); - } else { - for (address = start; address < end; address += PAGE_SIZE) - mmio_invalidate(npu_context, 1, address, false); - - /* Do the flush only on the final addess == end */ - mmio_invalidate(npu_context, 1, address, true); - } + mmio_invalidate(npu_context, start, end - start); } static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { @@ -962,11 +957,6 @@ int pnv_npu2_init(struct pnv_phb *phb) static int npu_index; uint64_t rc = 0; - if (!atsd_threshold_dentry) { - atsd_threshold_dentry = debugfs_create_x64("atsd_threshold", - 0600, powerpc_debugfs_root, &atsd_threshold); - } - phb->npu.nmmu_flush = of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush"); for_each_child_of_node(phb->hose->dn, dn) { diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c index badb29bde93f..d90ee4fc2c6a 100644 --- a/arch/powerpc/platforms/powernv/opal-powercap.c +++ b/arch/powerpc/platforms/powernv/opal-powercap.c @@ -199,7 +199,7 @@ void __init opal_powercap_init(void) } j = 0; - pcaps[i].pg.name = node->name; + pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node); if (has_min) { powercap_add_attr(min, "powercap-min", &pcaps[i].pattrs[j]); @@ -237,6 +237,7 @@ out_pcaps_pattrs: while (--i >= 0) { kfree(pcaps[i].pattrs); kfree(pcaps[i].pg.attrs); + kfree(pcaps[i].pg.name); } kobject_put(powercap_kobj); out_pcaps: diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c index f7d04b6a2d7a..179609220e6f 100644 --- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c +++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c @@ -214,9 +214,9 @@ void __init opal_sensor_groups_init(void) } if (!of_property_read_u32(node, "ibm,chip-id", &chipid)) - sprintf(sgs[i].name, "%s%d", node->name, chipid); + sprintf(sgs[i].name, "%pOFn%d", node, chipid); else - sprintf(sgs[i].name, "%s", node->name); + sprintf(sgs[i].name, "%pOFn", node); sgs[i].sg.name = sgs[i].name; if (add_attr_group(ops, len, &sgs[i], sgid)) { diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c index 9aa87df114fd..916a4b7b1bb5 100644 --- a/arch/powerpc/platforms/powernv/opal-sysparam.c +++ 
b/arch/powerpc/platforms/powernv/opal-sysparam.c @@ -194,7 +194,7 @@ void __init opal_sys_param_init(void) count = of_property_count_strings(sysparam, "param-name"); if (count < 0) { pr_err("SYSPARAM: No string found of property param-name in " - "the node %s\n", sysparam->name); + "the node %pOFn\n", sysparam); goto out_param_buf; } diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 38fe4087484a..a4641515956f 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -535,7 +535,7 @@ static int opal_recover_mce(struct pt_regs *regs, return recovered; } -void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg) +void __noreturn pnv_platform_error_reboot(struct pt_regs *regs, const char *msg) { panic_flush_kmsg_start(); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index adddde023622..14befee4b3f1 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -219,17 +219,41 @@ static void pnv_prepare_going_down(void) static void __noreturn pnv_restart(char *cmd) { - long rc = OPAL_BUSY; + long rc; pnv_prepare_going_down(); - while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { - rc = opal_cec_reboot(); - if (rc == OPAL_BUSY_EVENT) - opal_poll_events(NULL); + do { + if (!cmd) + rc = opal_cec_reboot(); + else if (strcmp(cmd, "full") == 0) + rc = opal_cec_reboot2(OPAL_REBOOT_FULL_IPL, NULL); else + rc = OPAL_UNSUPPORTED; + + if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { + /* Opal is busy wait for some time and retry */ + opal_poll_events(NULL); mdelay(10); - } + + } else if (cmd && rc) { + /* Unknown error while issuing reboot */ + if (rc == OPAL_UNSUPPORTED) + pr_err("Unsupported '%s' reboot.\n", cmd); + else + pr_err("Unable to issue '%s' reboot. Err=%ld\n", + cmd, rc); + pr_info("Forcing a cec-reboot\n"); + cmd = NULL; + rc = OPAL_BUSY; + + } else if (rc != OPAL_SUCCESS) { + /* Unknown error while issuing cec-reboot */ + pr_err("Unable to reboot. Err=%ld\n", rc); + } + + } while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT); + for (;;) opal_poll_events(NULL); } @@ -437,6 +461,16 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu) return ret_freq; } +static long pnv_machine_check_early(struct pt_regs *regs) +{ + long handled = 0; + + if (cur_cpu_spec && cur_cpu_spec->machine_check_early) + handled = cur_cpu_spec->machine_check_early(regs); + + return handled; +} + define_machine(powernv) { .name = "PowerNV", .probe = pnv_probe, @@ -448,6 +482,7 @@ define_machine(powernv) { .machine_shutdown = pnv_shutdown, .power_save = NULL, .calibrate_decr = generic_calibrate_decr, + .machine_check_early = pnv_machine_check_early, #ifdef CONFIG_KEXEC_CORE .kexec_cpu_down = pnv_kexec_cpu_down, #endif diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig index 6f7525555b19..24864b8aaf5d 100644 --- a/arch/powerpc/platforms/ps3/Kconfig +++ b/arch/powerpc/platforms/ps3/Kconfig @@ -49,7 +49,6 @@ config PS3_HTAB_SIZE config PS3_DYNAMIC_DMA depends on PPC_PS3 bool "PS3 Platform dynamic DMA page table management" - default n help This option will enable kernel support to take advantage of the per device dynamic DMA page table management provided by the Cell @@ -89,7 +88,6 @@ config PS3_SYS_MANAGER config PS3_REPOSITORY_WRITE bool "PS3 Repository write support" if PS3_ADVANCED depends on PPC_PS3 - default n help Enables support for writing to the PS3 System Repository. 
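The pnv_restart() rework above retries whenever firmware reports a busy status and downgrades to a plain cec-reboot if the requested extended reboot type is rejected. A minimal stand-alone sketch of that retry/fallback control flow follows; the opal_* helpers are stubs and the return codes are placeholders, not the real OPAL interface.

#include <stdio.h>
#include <string.h>

/* Placeholder return codes for the sketch (not the opal-api.h values). */
enum { OPAL_OK = 0, OPAL_IS_BUSY = -2, OPAL_BUSY_EVT = -12, OPAL_UNSUPP = -7 };

/* Hypothetical stubs standing in for the firmware calls. */
static long opal_cec_reboot_stub(void) { return OPAL_OK; }
static long opal_cec_reboot2_stub(const char *type) { (void)type; return OPAL_UNSUPP; }
static void opal_poll_events_stub(void) { }

static void restart(const char *cmd)
{
	long rc;

	do {
		if (!cmd)
			rc = opal_cec_reboot_stub();
		else if (strcmp(cmd, "full") == 0)
			rc = opal_cec_reboot2_stub("full");
		else
			rc = OPAL_UNSUPP;

		if (rc == OPAL_IS_BUSY || rc == OPAL_BUSY_EVT) {
			/* Firmware is busy: poll events and retry. */
			opal_poll_events_stub();
		} else if (cmd && rc != OPAL_OK) {
			/* Extended reboot failed: fall back to a plain cec-reboot. */
			printf("'%s' reboot failed (%ld), forcing cec-reboot\n", cmd, rc);
			cmd = NULL;
			rc = OPAL_IS_BUSY;	/* force one more pass through the loop */
		}
	} while (rc == OPAL_IS_BUSY || rc == OPAL_BUSY_EVT);
}

int main(void) { restart("full"); return 0; }

The key point is that an unsupported extended reboot is not fatal: clearing cmd and forcing another loop iteration turns it into the default reboot path.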
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index cdbfc5cfd6f3..f5387ad82279 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -664,7 +664,7 @@ static int update_flash_db(void) db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff); count = os_area_flash_write(db, sizeof(struct os_area_db), pos); - if (count < sizeof(struct os_area_db)) { + if (count < 0 || count < sizeof(struct os_area_db)) { pr_debug("%s: os_area_flash_write failed %zd\n", __func__, count); error = count < 0 ? count : -EIO; diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index b54850845466..7746c2a3c509 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c @@ -215,8 +215,7 @@ static int __init setup_areas(struct spu *spu) goto fail_ioremap; } - spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys, - LS_SIZE, pgprot_val(pgprot_noncached_wc(__pgprot(0)))); + spu->local_store = (__force void *)ioremap_wc(spu->local_store_phys, LS_SIZE); if (!spu->local_store) { pr_debug("%s:%d: ioremap local_store failed\n", diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 0c698fd6d491..2e4bd32154b5 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -28,7 +28,6 @@ config PPC_PSERIES config PPC_SPLPAR depends on PPC_PSERIES bool "Support for shared-processor logical partitions" - default n help Enabling this option will make the kernel run more efficiently on logically-partitioned pSeries systems which use shared @@ -99,7 +98,6 @@ config PPC_SMLPAR bool "Support for shared-memory logical partitions" depends on PPC_PSERIES select LPARCFG - default n help Select this option to enable shared memory partition support. With this option a system running in an LPAR can be given more @@ -140,3 +138,10 @@ config IBMEBUS bool "Support for GX bus based adapters" help Bus device driver for GX bus based adapters. + +config PAPR_SCM + depends on PPC_PSERIES && MEMORY_HOTPLUG + select LIBNVDIMM + tristate "Support for the PAPR Storage Class Memory interface" + help + Enable access to hypervisor provided storage class memory. 
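The update_flash_db() fix above adds an explicit negative check before comparing the returned byte count against sizeof(struct os_area_db). The reason is the usual arithmetic conversions: a negative ssize_t error code compared against an unsigned sizeof value is converted to a very large unsigned number, so the short-write test silently passes. A small stand-alone demonstration of the pitfall (the struct size and error value here are arbitrary stand-ins, not the PS3 code):

#include <stdio.h>
#include <sys/types.h>	/* ssize_t */

struct os_area_db_like { char data[96]; };	/* arbitrary stand-in size */

int main(void)
{
	ssize_t count = -5;	/* e.g. a write helper returning an -errno value */

	/* Broken: -5 is converted to a huge size_t, so the test is false. */
	if (count < sizeof(struct os_area_db_like))
		printf("short write detected\n");
	else
		printf("error missed: count=%zd compared as %zu\n",
		       count, (size_t)count);

	/* Fixed: reject error returns before the unsigned comparison. */
	if (count < 0 || (size_t)count < sizeof(struct os_area_db_like))
		printf("error or short write detected\n");

	return 0;
}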
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 7e89d5c47068..a43ec843c8e2 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_KEXEC_CORE) += kexec.o obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o -obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o +obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o pmem.o obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o obj-$(CONFIG_HVCS) += hvcserver.o @@ -24,6 +24,7 @@ obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o obj-$(CONFIG_LPARCFG) += lparcfg.o obj-$(CONFIG_IBMVIO) += vio.o obj-$(CONFIG_IBMEBUS) += ibmebus.o +obj-$(CONFIG_PAPR_SCM) += papr_scm.o ifdef CONFIG_PPC_PSERIES obj-$(CONFIG_SUSPEND) += suspend.o diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index a0b20c03f078..7625546caefd 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -32,8 +32,6 @@ static struct workqueue_struct *pseries_hp_wq; struct pseries_hp_work { struct work_struct work; struct pseries_hp_errorlog *errlog; - struct completion *hp_completion; - int *rc; }; struct cc_workarea { @@ -329,7 +327,7 @@ int dlpar_release_drc(u32 drc_index) return 0; } -static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) +int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) { int rc; @@ -357,6 +355,10 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) case PSERIES_HP_ELOG_RESOURCE_CPU: rc = dlpar_cpu(hp_elog); break; + case PSERIES_HP_ELOG_RESOURCE_PMEM: + rc = dlpar_hp_pmem(hp_elog); + break; + default: pr_warn_ratelimited("Invalid resource (%d) specified\n", hp_elog->resource); @@ -371,20 +373,13 @@ static void pseries_hp_work_fn(struct work_struct *work) struct pseries_hp_work *hp_work = container_of(work, struct pseries_hp_work, work); - if (hp_work->rc) - *(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog); - else - handle_dlpar_errorlog(hp_work->errlog); - - if (hp_work->hp_completion) - complete(hp_work->hp_completion); + handle_dlpar_errorlog(hp_work->errlog); kfree(hp_work->errlog); kfree((void *)work); } -void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog, - struct completion *hotplug_done, int *rc) +void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog) { struct pseries_hp_work *work; struct pseries_hp_errorlog *hp_errlog_copy; @@ -397,13 +392,9 @@ void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog, if (work) { INIT_WORK((struct work_struct *)work, pseries_hp_work_fn); work->errlog = hp_errlog_copy; - work->hp_completion = hotplug_done; - work->rc = rc; queue_work(pseries_hp_wq, (struct work_struct *)work); } else { - *rc = -ENOMEM; kfree(hp_errlog_copy); - complete(hotplug_done); } } @@ -521,18 +512,15 @@ static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog) static ssize_t dlpar_store(struct class *class, struct class_attribute *attr, const char *buf, size_t count) { - struct pseries_hp_errorlog *hp_elog; - struct completion hotplug_done; + struct pseries_hp_errorlog hp_elog; char *argbuf; char *args; int rc; args = argbuf = kstrdup(buf, GFP_KERNEL); - hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL); - if (!hp_elog || !argbuf) { + if (!argbuf) { pr_info("Could not allocate resources for DLPAR operation\n"); kfree(argbuf); - kfree(hp_elog); return -ENOMEM; } @@ -540,25 +528,22 @@ static ssize_t dlpar_store(struct class *class, 
struct class_attribute *attr, * Parse out the request from the user, this will be in the form: * <resource> <action> <id_type> <id> */ - rc = dlpar_parse_resource(&args, hp_elog); + rc = dlpar_parse_resource(&args, &hp_elog); if (rc) goto dlpar_store_out; - rc = dlpar_parse_action(&args, hp_elog); + rc = dlpar_parse_action(&args, &hp_elog); if (rc) goto dlpar_store_out; - rc = dlpar_parse_id_type(&args, hp_elog); + rc = dlpar_parse_id_type(&args, &hp_elog); if (rc) goto dlpar_store_out; - init_completion(&hotplug_done); - queue_hotplug_event(hp_elog, &hotplug_done, &rc); - wait_for_completion(&hotplug_done); + rc = handle_dlpar_errorlog(&hp_elog); dlpar_store_out: kfree(argbuf); - kfree(hp_elog); if (rc) pr_err("Could not handle DLPAR request \"%s\"\n", buf); diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 18014cdeb590..ef6595153642 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -149,7 +149,7 @@ static int dtl_start(struct dtl *dtl) /* Register our dtl buffer with the hypervisor. The HV expects the * buffer size to be passed in the second word of the buffer */ - ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES; + ((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES); hwcpu = get_hard_smp_processor_id(dtl->cpu); addr = __pa(dtl->buf); @@ -184,7 +184,7 @@ static void dtl_stop(struct dtl *dtl) static u64 dtl_current_index(struct dtl *dtl) { - return lppaca_of(dtl->cpu).dtl_idx; + return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 823cb27efa8b..c9e5ca4afb26 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -438,7 +438,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) /** * pseries_eeh_get_state - Retrieve PE state * @pe: EEH PE - * @state: return value + * @delay: suggested time to wait if state is unavailable * * Retrieve the state of the specified PE. On RTAS compliant * pseries platform, there already has one dedicated RTAS function @@ -448,7 +448,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe) * RTAS calls for the purpose, we need to try the new one and back * to the old one if the new one couldn't work properly. */ -static int pseries_eeh_get_state(struct eeh_pe *pe, int *state) +static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay) { int config_addr; int ret; @@ -499,7 +499,8 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state) break; case 5: if (rets[2]) { - if (state) *state = rets[2]; + if (delay) + *delay = rets[2]; result = EEH_STATE_UNAVAILABLE; } else { result = EEH_STATE_NOT_SUPPORT; @@ -554,64 +555,6 @@ static int pseries_eeh_reset(struct eeh_pe *pe, int option) } /** - * pseries_eeh_wait_state - Wait for PE state - * @pe: EEH PE - * @max_wait: maximal period in millisecond - * - * Wait for the state of associated PE. It might take some time - * to retrieve the PE's state. - */ -static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait) -{ - int ret; - int mwait; - - /* - * According to PAPR, the state of PE might be temporarily - * unavailable. Under the circumstance, we have to wait - * for indicated time determined by firmware. The maximal - * wait time is 5 minutes, which is acquired from the original - * EEH implementation. Also, the original implementation - * also defined the minimal wait time as 1 second. 
- */ -#define EEH_STATE_MIN_WAIT_TIME (1000) -#define EEH_STATE_MAX_WAIT_TIME (300 * 1000) - - while (1) { - ret = pseries_eeh_get_state(pe, &mwait); - - /* - * If the PE's state is temporarily unavailable, - * we have to wait for the specified time. Otherwise, - * the PE's state will be returned immediately. - */ - if (ret != EEH_STATE_UNAVAILABLE) - return ret; - - if (max_wait <= 0) { - pr_warn("%s: Timeout when getting PE's state (%d)\n", - __func__, max_wait); - return EEH_STATE_NOT_SUPPORT; - } - - if (mwait <= 0) { - pr_warn("%s: Firmware returned bad wait value %d\n", - __func__, mwait); - mwait = EEH_STATE_MIN_WAIT_TIME; - } else if (mwait > EEH_STATE_MAX_WAIT_TIME) { - pr_warn("%s: Firmware returned too long wait value %d\n", - __func__, mwait); - mwait = EEH_STATE_MAX_WAIT_TIME; - } - - max_wait -= mwait; - msleep(mwait); - } - - return EEH_STATE_NOT_SUPPORT; -} - -/** * pseries_eeh_get_log - Retrieve error log * @pe: EEH PE * @severity: temporary or permanent error log @@ -849,7 +792,6 @@ static struct eeh_ops pseries_eeh_ops = { .get_pe_addr = pseries_eeh_get_pe_addr, .get_state = pseries_eeh_get_state, .reset = pseries_eeh_reset, - .wait_state = pseries_eeh_wait_state, .get_log = pseries_eeh_get_log, .configure_bridge = pseries_eeh_configure_bridge, .err_inject = NULL, diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c index 6eeb0d4bab61..446ef104fb3a 100644 --- a/arch/powerpc/platforms/pseries/event_sources.c +++ b/arch/powerpc/platforms/pseries/event_sources.c @@ -16,7 +16,8 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <asm/prom.h> +#include <linux/interrupt.h> +#include <linux/of_irq.h> #include "pseries.h" @@ -24,34 +25,19 @@ void request_event_sources_irqs(struct device_node *np, irq_handler_t handler, const char *name) { - int i, index, count = 0; - struct of_phandle_args oirq; - unsigned int virqs[16]; + int i, virq, rc; - /* First try to do a proper OF tree parsing */ - for (index = 0; of_irq_parse_one(np, index, &oirq) == 0; - index++) { - if (count > 15) - break; - virqs[count] = irq_create_of_mapping(&oirq); - if (!virqs[count]) { - pr_err("event-sources: Unable to allocate " - "interrupt number for %pOF\n", - np); - WARN_ON(1); - } else { - count++; - } - } + for (i = 0; i < 16; i++) { + virq = of_irq_get(np, i); + if (virq < 0) + return; + if (WARN(!virq, "event-sources: Unable to allocate " + "interrupt number for %pOF\n", np)) + continue; - /* Now request them */ - for (i = 0; i < count; i++) { - if (request_irq(virqs[i], handler, 0, name, NULL)) { - pr_err("event-sources: Unable to request interrupt " - "%d for %pOF\n", virqs[i], np); - WARN_ON(1); + rc = request_irq(virq, handler, 0, name, NULL); + if (WARN(rc, "event-sources: Unable to request interrupt %d for %pOF\n", + virq, np)) return; - } } } - diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c index a3bbeb43689e..608ecad0178f 100644 --- a/arch/powerpc/platforms/pseries/firmware.c +++ b/arch/powerpc/platforms/pseries/firmware.c @@ -65,6 +65,8 @@ hypertas_fw_features_table[] = { {FW_FEATURE_SET_MODE, "hcall-set-mode"}, {FW_FEATURE_BEST_ENERGY, "hcall-best-energy-1*"}, {FW_FEATURE_HPT_RESIZE, "hcall-hpt-resize"}, + {FW_FEATURE_BLOCK_REMOVE, "hcall-block-remove"}, + {FW_FEATURE_PAPR_SCM, "hcall-scm"}, }; /* Build up the firmware features bitmask using the contents of diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c 
b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 6ef77caf7bcf..2f8e62163602 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -287,7 +287,7 @@ static int pseries_add_processor(struct device_node *np) if (cpumask_empty(tmp)) { printk(KERN_ERR "Unable to find space in cpu_present_mask for" - " processor %s with %d thread(s)\n", np->name, + " processor %pOFn with %d thread(s)\n", np, nthreads); goto out_unlock; } @@ -481,8 +481,8 @@ static ssize_t dlpar_cpu_add(u32 drc_index) if (rc) { saved_rc = rc; - pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n", - dn->name, rc, drc_index); + pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n", + dn, rc, drc_index); rc = dlpar_release_drc(drc_index); if (!rc) @@ -494,8 +494,8 @@ static ssize_t dlpar_cpu_add(u32 drc_index) rc = dlpar_online_cpu(dn); if (rc) { saved_rc = rc; - pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n", - dn->name, rc, drc_index); + pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n", + dn, rc, drc_index); rc = dlpar_detach_node(dn); if (!rc) @@ -504,7 +504,7 @@ static ssize_t dlpar_cpu_add(u32 drc_index) return saved_rc; } - pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name, + pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn, drc_index); return rc; } @@ -570,19 +570,19 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index) { int rc; - pr_debug("Attempting to remove CPU %s, drc index: %x\n", - dn->name, drc_index); + pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n", + dn, drc_index); rc = dlpar_offline_cpu(dn); if (rc) { - pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc); + pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc); return -EINVAL; } rc = dlpar_release_drc(drc_index); if (rc) { - pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n", - drc_index, dn->name, rc); + pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n", + drc_index, dn, rc); dlpar_online_cpu(dn); return rc; } @@ -591,7 +591,7 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index) if (rc) { int saved_rc = rc; - pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc); + pr_warn("Failed to detach CPU %pOFn, rc: %d", dn, rc); rc = dlpar_acquire_drc(drc_index); if (!rc) @@ -662,8 +662,8 @@ static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove) rc = of_property_read_u32(dn, "ibm,my-drc-index", &cpu_drcs[cpus_found - 1]); if (rc) { - pr_warn("Error occurred getting drc-index for %s\n", - dn->name); + pr_warn("Error occurred getting drc-index for %pOFn\n", + dn); of_node_put(dn); return -1; } diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index c1578f54c626..2b796da822c2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -101,11 +101,12 @@ static struct property *dlpar_clone_property(struct property *prop, return new_prop; } -static u32 find_aa_index(struct device_node *dr_node, - struct property *ala_prop, const u32 *lmb_assoc) +static bool find_aa_index(struct device_node *dr_node, + struct property *ala_prop, + const u32 *lmb_assoc, u32 *aa_index) { - u32 *assoc_arrays; - u32 aa_index; + u32 *assoc_arrays, new_prop_size; + struct property *new_prop; int aa_arrays, aa_array_entries, aa_array_sz; int i, index; @@ -121,54 +122,48 @@ static u32 find_aa_index(struct device_node *dr_node, aa_array_entries = 
be32_to_cpu(assoc_arrays[1]); aa_array_sz = aa_array_entries * sizeof(u32); - aa_index = -1; for (i = 0; i < aa_arrays; i++) { index = (i * aa_array_entries) + 2; if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz)) continue; - aa_index = i; - break; + *aa_index = i; + return true; } - if (aa_index == -1) { - struct property *new_prop; - u32 new_prop_size; - - new_prop_size = ala_prop->length + aa_array_sz; - new_prop = dlpar_clone_property(ala_prop, new_prop_size); - if (!new_prop) - return -1; - - assoc_arrays = new_prop->value; + new_prop_size = ala_prop->length + aa_array_sz; + new_prop = dlpar_clone_property(ala_prop, new_prop_size); + if (!new_prop) + return false; - /* increment the number of entries in the lookup array */ - assoc_arrays[0] = cpu_to_be32(aa_arrays + 1); + assoc_arrays = new_prop->value; - /* copy the new associativity into the lookup array */ - index = aa_arrays * aa_array_entries + 2; - memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz); + /* increment the number of entries in the lookup array */ + assoc_arrays[0] = cpu_to_be32(aa_arrays + 1); - of_update_property(dr_node, new_prop); + /* copy the new associativity into the lookup array */ + index = aa_arrays * aa_array_entries + 2; + memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz); - /* - * The associativity lookup array index for this lmb is - * number of entries - 1 since we added its associativity - * to the end of the lookup array. - */ - aa_index = be32_to_cpu(assoc_arrays[0]) - 1; - } + of_update_property(dr_node, new_prop); - return aa_index; + /* + * The associativity lookup array index for this lmb is + * number of entries - 1 since we added its associativity + * to the end of the lookup array. + */ + *aa_index = be32_to_cpu(assoc_arrays[0]) - 1; + return true; } -static u32 lookup_lmb_associativity_index(struct drmem_lmb *lmb) +static int update_lmb_associativity_index(struct drmem_lmb *lmb) { struct device_node *parent, *lmb_node, *dr_node; struct property *ala_prop; const u32 *lmb_assoc; u32 aa_index; + bool found; parent = of_find_node_by_path("/"); if (!parent) @@ -200,46 +195,17 @@ static u32 lookup_lmb_associativity_index(struct drmem_lmb *lmb) return -ENODEV; } - aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc); + found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index); dlpar_free_cc_nodes(lmb_node); - return aa_index; -} -static int dlpar_add_device_tree_lmb(struct drmem_lmb *lmb) -{ - int rc, aa_index; - - lmb->flags |= DRCONF_MEM_ASSIGNED; - - aa_index = lookup_lmb_associativity_index(lmb); - if (aa_index < 0) { - pr_err("Couldn't find associativity index for drc index %x\n", - lmb->drc_index); - return aa_index; + if (!found) { + pr_err("Could not find LMB associativity\n"); + return -1; } lmb->aa_index = aa_index; - - rtas_hp_event = true; - rc = drmem_update_dt(); - rtas_hp_event = false; - - return rc; -} - -static int dlpar_remove_device_tree_lmb(struct drmem_lmb *lmb) -{ - int rc; - - lmb->flags &= ~DRCONF_MEM_ASSIGNED; - lmb->aa_index = 0xffffffff; - - rtas_hp_event = true; - rc = drmem_update_dt(); - rtas_hp_event = false; - - return rc; + return 0; } static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb) @@ -428,7 +394,9 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb) /* Update memory regions for memory remove */ memblock_remove(lmb->base_addr, block_sz); - dlpar_remove_device_tree_lmb(lmb); + invalidate_lmb_associativity_index(lmb); + lmb->flags &= ~DRCONF_MEM_ASSIGNED; + return 0; } @@ -688,10 +656,8 @@ static int 
dlpar_add_lmb(struct drmem_lmb *lmb) if (lmb->flags & DRCONF_MEM_ASSIGNED) return -EINVAL; - rc = dlpar_add_device_tree_lmb(lmb); + rc = update_lmb_associativity_index(lmb); if (rc) { - pr_err("Couldn't update device tree for drc index %x\n", - lmb->drc_index); dlpar_release_drc(lmb->drc_index); return rc; } @@ -704,14 +670,14 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb) /* Add the memory */ rc = add_memory(nid, lmb->base_addr, block_sz); if (rc) { - dlpar_remove_device_tree_lmb(lmb); + invalidate_lmb_associativity_index(lmb); return rc; } rc = dlpar_online_lmb(lmb); if (rc) { remove_memory(nid, lmb->base_addr, block_sz); - dlpar_remove_device_tree_lmb(lmb); + invalidate_lmb_associativity_index(lmb); } else { lmb->flags |= DRCONF_MEM_ASSIGNED; } @@ -958,6 +924,12 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) break; } + if (!rc) { + rtas_hp_event = true; + rc = drmem_update_dt(); + rtas_hp_event = false; + } + unlock_device_hotplug(); return rc; } diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index c7c1140c13b6..5b4a56131904 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -404,7 +404,7 @@ static ssize_t name_show(struct device *dev, struct platform_device *ofdev; ofdev = to_platform_device(dev); - return sprintf(buf, "%s\n", ofdev->dev.of_node->name); + return sprintf(buf, "%pOFn\n", ofdev->dev.of_node); } static DEVICE_ATTR_RO(name); diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index d3992ced0782..32d4452973e7 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -48,6 +48,7 @@ #include <asm/kexec.h> #include <asm/fadump.h> #include <asm/asm-prototypes.h> +#include <asm/debugfs.h> #include "pseries.h" @@ -417,6 +418,79 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, BUG_ON(lpar_rc != H_SUCCESS); } + +/* + * As defined in the PAPR's section 14.5.4.1.8 + * The control mask doesn't include the returned reference and change bit from + * the processed PTE. + */ +#define HBLKR_AVPN 0x0100000000000000UL +#define HBLKR_CTRL_MASK 0xf800000000000000UL +#define HBLKR_CTRL_SUCCESS 0x8000000000000000UL +#define HBLKR_CTRL_ERRNOTFOUND 0x8800000000000000UL +#define HBLKR_CTRL_ERRBUSY 0xa000000000000000UL + +/** + * H_BLOCK_REMOVE caller. + * @idx should point to the latest @param entry set with a PTEX. + * If PTE cannot be processed because another CPUs has already locked that + * group, those entries are put back in @param starting at index 1. + * If entries has to be retried and @retry_busy is set to true, these entries + * are retried until success. If @retry_busy is set to false, the returned + * is the number of entries yet to process. 
+ */ +static unsigned long call_block_remove(unsigned long idx, unsigned long *param, + bool retry_busy) +{ + unsigned long i, rc, new_idx; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + + if (idx < 2) { + pr_warn("Unexpected empty call to H_BLOCK_REMOVE"); + return 0; + } +again: + new_idx = 0; + if (idx > PLPAR_HCALL9_BUFSIZE) { + pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx); + idx = PLPAR_HCALL9_BUFSIZE; + } else if (idx < PLPAR_HCALL9_BUFSIZE) + param[idx] = HBR_END; + + rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf, + param[0], /* AVA */ + param[1], param[2], param[3], param[4], /* TS0-7 */ + param[5], param[6], param[7], param[8]); + if (rc == H_SUCCESS) + return 0; + + BUG_ON(rc != H_PARTIAL); + + /* Check that the unprocessed entries were 'not found' or 'busy' */ + for (i = 0; i < idx-1; i++) { + unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK; + + if (ctrl == HBLKR_CTRL_ERRBUSY) { + param[++new_idx] = param[i+1]; + continue; + } + + BUG_ON(ctrl != HBLKR_CTRL_SUCCESS + && ctrl != HBLKR_CTRL_ERRNOTFOUND); + } + + /* + * If there were entries found busy, retry these entries if requested, + * of if all the entries have to be retried. + */ + if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) { + idx = new_idx + 1; + goto again; + } + + return new_idx; +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need @@ -424,17 +498,57 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, */ #define PPC64_HUGE_HPTE_BATCH 12 -static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, - unsigned long *vpn, int count, - int psize, int ssize) +static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn, + int count, int psize, int ssize) { unsigned long param[PLPAR_HCALL9_BUFSIZE]; - int i = 0, pix = 0, rc; - unsigned long flags = 0; - int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); + unsigned long shift, current_vpgb, vpgb; + int i, pix = 0; - if (lock_tlbie) - spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); + shift = mmu_psize_defs[psize].shift; + + for (i = 0; i < count; i++) { + /* + * Shifting 3 bits more on the right to get a + * 8 pages aligned virtual addresse. + */ + vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3)); + if (!pix || vpgb != current_vpgb) { + /* + * Need to start a new 8 pages block, flush + * the current one if needed. + */ + if (pix) + (void)call_block_remove(pix, param, true); + current_vpgb = vpgb; + param[0] = hpte_encode_avpn(vpn[i], psize, ssize); + pix = 1; + } + + param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i]; + if (pix == PLPAR_HCALL9_BUFSIZE) { + pix = call_block_remove(pix, param, false); + /* + * pix = 0 means that all the entries were + * removed, we can start a new block. + * Otherwise, this means that there are entries + * to retry, and pix points to latest one, so + * we should increment it and try to continue + * the same block. 
+ */ + if (pix) + pix++; + } + } + if (pix) + (void)call_block_remove(pix, param, true); +} + +static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn, + int count, int psize, int ssize) +{ + unsigned long param[PLPAR_HCALL9_BUFSIZE]; + int i = 0, pix = 0, rc; for (i = 0; i < count; i++) { @@ -462,6 +576,23 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, param[6], param[7]); BUG_ON(rc != H_SUCCESS); } +} + +static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, + unsigned long *vpn, + int count, int psize, + int ssize) +{ + unsigned long flags = 0; + int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); + + if (lock_tlbie) + spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); + + if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) + hugepage_block_invalidate(slot, vpn, count, psize, ssize); + else + hugepage_bulk_invalidate(slot, vpn, count, psize, ssize); if (lock_tlbie) spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); @@ -546,6 +677,86 @@ static int pSeries_lpar_hpte_removebolted(unsigned long ea, return 0; } + +static inline unsigned long compute_slot(real_pte_t pte, + unsigned long vpn, + unsigned long index, + unsigned long shift, + int ssize) +{ + unsigned long slot, hash, hidx; + + hash = hpt_hash(vpn, shift, ssize); + hidx = __rpte_to_hidx(pte, index); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hidx & _PTEIDX_GROUP_IX; + return slot; +} + +/** + * The hcall H_BLOCK_REMOVE implies that the virtual pages to processed are + * "all within the same naturally aligned 8 page virtual address block". + */ +static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch, + unsigned long *param) +{ + unsigned long vpn; + unsigned long i, pix = 0; + unsigned long index, shift, slot, current_vpgb, vpgb; + real_pte_t pte; + int psize, ssize; + + psize = batch->psize; + ssize = batch->ssize; + + for (i = 0; i < number; i++) { + vpn = batch->vpn[i]; + pte = batch->pte[i]; + pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { + /* + * Shifting 3 bits more on the right to get a + * 8 pages aligned virtual addresse. + */ + vpgb = (vpn >> (shift - VPN_SHIFT + 3)); + if (!pix || vpgb != current_vpgb) { + /* + * Need to start a new 8 pages block, flush + * the current one if needed. + */ + if (pix) + (void)call_block_remove(pix, param, + true); + current_vpgb = vpgb; + param[0] = hpte_encode_avpn(vpn, psize, + ssize); + pix = 1; + } + + slot = compute_slot(pte, vpn, index, shift, ssize); + param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot; + + if (pix == PLPAR_HCALL9_BUFSIZE) { + pix = call_block_remove(pix, param, false); + /* + * pix = 0 means that all the entries were + * removed, we can start a new block. + * Otherwise, this means that there are entries + * to retry, and pix points to latest one, so + * we should increment it and try to continue + * the same block. + */ + if (pix) + pix++; + } + } pte_iterate_hashed_end(); + } + + if (pix) + (void)call_block_remove(pix, param, true); +} + /* * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie * lock. 
@@ -558,13 +769,18 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); unsigned long param[PLPAR_HCALL9_BUFSIZE]; - unsigned long hash, index, shift, hidx, slot; + unsigned long index, shift, slot; real_pte_t pte; int psize, ssize; if (lock_tlbie) spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); + if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) { + do_block_remove(number, batch, param); + goto out; + } + psize = batch->psize; ssize = batch->ssize; pix = 0; @@ -572,12 +788,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) vpn = batch->vpn[i]; pte = batch->pte[i]; pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { - hash = hpt_hash(vpn, shift, ssize); - hidx = __rpte_to_hidx(pte, index); - if (hidx & _PTEIDX_SECONDARY) - hash = ~hash; - slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; - slot += hidx & _PTEIDX_GROUP_IX; + slot = compute_slot(pte, vpn, index, shift, ssize); if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { /* * lpar doesn't use the passed actual page size @@ -608,6 +819,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) BUG_ON(rc != H_SUCCESS); } +out: if (lock_tlbie) spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); } @@ -1028,3 +1240,56 @@ static int __init reserve_vrma_context_id(void) return 0; } machine_device_initcall(pseries, reserve_vrma_context_id); + +#ifdef CONFIG_DEBUG_FS +/* debugfs file interface for vpa data */ +static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len, + loff_t *pos) +{ + int cpu = (long)filp->private_data; + struct lppaca *lppaca = &lppaca_of(cpu); + + return simple_read_from_buffer(buf, len, pos, lppaca, + sizeof(struct lppaca)); +} + +static const struct file_operations vpa_fops = { + .open = simple_open, + .read = vpa_file_read, + .llseek = default_llseek, +}; + +static int __init vpa_debugfs_init(void) +{ + char name[16]; + long i; + static struct dentry *vpa_dir; + + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + return 0; + + vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root); + if (!vpa_dir) { + pr_warn("%s: can't create vpa root dir\n", __func__); + return -ENOMEM; + } + + /* set up the per-cpu vpa file*/ + for_each_possible_cpu(i) { + struct dentry *d; + + sprintf(name, "cpu-%ld", i); + + d = debugfs_create_file(name, 0400, vpa_dir, (void *)i, + &vpa_fops); + if (!d) { + pr_warn("%s: can't create per-cpu vpa file\n", + __func__); + return -ENOMEM; + } + } + + return 0; +} +machine_arch_initcall(pseries, vpa_debugfs_init); +#endif /* CONFIG_DEBUG_FS */ diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index 7c872dc01bdb..8bd590af488a 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -585,8 +585,7 @@ static ssize_t update_mpp(u64 *entitlement, u8 *weight) static ssize_t lparcfg_write(struct file *file, const char __user * buf, size_t count, loff_t * off) { - int kbuf_sz = 64; - char kbuf[kbuf_sz]; + char kbuf[64]; char *tmp; u64 new_entitled, *new_entitled_ptr = &new_entitled; u8 new_weight, *new_weight_ptr = &new_weight; @@ -595,7 +594,7 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf, if (!firmware_has_feature(FW_FEATURE_SPLPAR)) return -EINVAL; - if (count > kbuf_sz) + if (count > sizeof(kbuf)) return -EINVAL; if (copy_from_user(kbuf, buf, count)) 
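do_block_remove() above relies on the PAPR requirement that every page passed to a single H_BLOCK_REMOVE lies within the same naturally aligned 8-page virtual block: it derives a block number from each VPN (vpn >> (shift - VPN_SHIFT + 3) in the kernel code), flushes the pending batch whenever that number changes, and also flushes when the nine-cell hcall parameter buffer fills. The sketch below reproduces only that grouping logic, with a stubbed flush, plain page numbers grouped by >> 3, and a simplified full-buffer case (the real code may still have to retry busy entries).

#include <stdio.h>

#define BUF_CELLS 9	/* stand-in for PLPAR_HCALL9_BUFSIZE */

/* Stub standing in for call_block_remove(): just report the batch. */
static void flush_batch(unsigned long block, int entries)
{
	printf("flush block %#lx with %d page(s)\n", block, entries);
}

int main(void)
{
	/* Hypothetical virtual page numbers to invalidate. */
	unsigned long vpns[] = { 0x1000, 0x1001, 0x1002, 0x1009, 0x100a, 0x2000 };
	unsigned long current_block = 0;
	int i, pix = 0;

	for (i = 0; i < (int)(sizeof(vpns) / sizeof(vpns[0])); i++) {
		/* Pages in the same aligned 8-page window share vpn >> 3. */
		unsigned long block = vpns[i] >> 3;

		if (!pix || block != current_block) {
			/* New 8-page block: flush whatever is queued first. */
			if (pix)
				flush_batch(current_block, pix - 1);
			current_block = block;
			pix = 1;	/* cell 0 is reserved for the block's AVPN */
		}

		pix++;		/* queue this page in the next parameter cell */
		if (pix == BUF_CELLS) {
			/* Buffer full: flush and keep filling the same block. */
			flush_batch(current_block, pix - 1);
			pix = 1;
		}
	}
	if (pix)
		flush_batch(current_block, pix - 1);

	return 0;
}

With the sample input this prints three batches (three, two and one page), showing how consecutive pages stay in one hcall while a jump across the 8-page boundary starts a new one.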
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index f0e30dc94988..88925f8ca8a0 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -242,7 +242,7 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index) static void prrn_update_node(__be32 phandle) { - struct pseries_hp_errorlog *hp_elog; + struct pseries_hp_errorlog hp_elog; struct device_node *dn; /* @@ -255,18 +255,12 @@ static void prrn_update_node(__be32 phandle) return; } - hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL); - if(!hp_elog) - return; - - hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM; - hp_elog->action = PSERIES_HP_ELOG_ACTION_READD; - hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX; - hp_elog->_drc_u.drc_index = phandle; - - queue_hotplug_event(hp_elog, NULL, NULL); + hp_elog.resource = PSERIES_HP_ELOG_RESOURCE_MEM; + hp_elog.action = PSERIES_HP_ELOG_ACTION_READD; + hp_elog.id_type = PSERIES_HP_ELOG_ID_DRC_INDEX; + hp_elog._drc_u.drc_index = phandle; - kfree(hp_elog); + handle_dlpar_errorlog(&hp_elog); } int pseries_devicetree_update(s32 scope) @@ -366,6 +360,8 @@ static ssize_t migration_store(struct class *class, if (rc) return rc; + stop_topology_update(); + do { rc = rtas_ibm_suspend_me(streamid); if (rc == -EAGAIN) @@ -376,6 +372,9 @@ static ssize_t migration_store(struct class *class, return rc; post_mobility_fixup(); + + start_topology_update(); + return count; } diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index b7496948129e..8011b4129e3a 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -203,7 +203,8 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) /* Get the top level device in the PE */ edev = pdn_to_eeh_dev(PCI_DN(dn)); if (edev->pe) - edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); + edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, + entry); dn = pci_device_to_OF_node(edev->pdev); if (!dn) return NULL; diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c new file mode 100644 index 000000000000..ee9372b65ca5 --- /dev/null +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "papr-scm: " fmt + +#include <linux/of.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/ndctl.h> +#include <linux/sched.h> +#include <linux/libnvdimm.h> +#include <linux/platform_device.h> + +#include <asm/plpar_wrappers.h> + +#define BIND_ANY_ADDR (~0ul) + +#define PAPR_SCM_DIMM_CMD_MASK \ + ((1ul << ND_CMD_GET_CONFIG_SIZE) | \ + (1ul << ND_CMD_GET_CONFIG_DATA) | \ + (1ul << ND_CMD_SET_CONFIG_DATA)) + +struct papr_scm_priv { + struct platform_device *pdev; + struct device_node *dn; + uint32_t drc_index; + uint64_t blocks; + uint64_t block_size; + int metadata_size; + + uint64_t bound_addr; + + struct nvdimm_bus_descriptor bus_desc; + struct nvdimm_bus *bus; + struct nvdimm *nvdimm; + struct resource res; + struct nd_region *region; + struct nd_interleave_set nd_set; +}; + +static int drc_pmem_bind(struct papr_scm_priv *p) +{ + unsigned long ret[PLPAR_HCALL_BUFSIZE]; + uint64_t rc, token; + + /* + * When the hypervisor cannot map all the requested memory in a single + * hcall it returns H_BUSY and we call again with the token until + * we get H_SUCCESS. 
Aborting the retry loop before getting H_SUCCESS + * leave the system in an undefined state, so we wait. + */ + token = 0; + + do { + rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, + p->blocks, BIND_ANY_ADDR, token); + token = be64_to_cpu(ret[0]); + cond_resched(); + } while (rc == H_BUSY); + + if (rc) { + dev_err(&p->pdev->dev, "bind err: %lld\n", rc); + return -ENXIO; + } + + p->bound_addr = be64_to_cpu(ret[1]); + + dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); + + return 0; +} + +static int drc_pmem_unbind(struct papr_scm_priv *p) +{ + unsigned long ret[PLPAR_HCALL_BUFSIZE]; + uint64_t rc, token; + + token = 0; + + /* NB: unbind has the same retry requirements mentioned above */ + do { + rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index, + p->bound_addr, p->blocks, token); + token = be64_to_cpu(ret); + cond_resched(); + } while (rc == H_BUSY); + + if (rc) + dev_err(&p->pdev->dev, "unbind error: %lld\n", rc); + + return !!rc; +} + +static int papr_scm_meta_get(struct papr_scm_priv *p, + struct nd_cmd_get_config_data_hdr *hdr) +{ + unsigned long data[PLPAR_HCALL_BUFSIZE]; + int64_t ret; + + if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1) + return -EINVAL; + + ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index, + hdr->in_offset, 1); + + if (ret == H_PARAMETER) /* bad DRC index */ + return -ENODEV; + if (ret) + return -EINVAL; /* other invalid parameter */ + + hdr->out_buf[0] = data[0] & 0xff; + + return 0; +} + +static int papr_scm_meta_set(struct papr_scm_priv *p, + struct nd_cmd_set_config_hdr *hdr) +{ + int64_t ret; + + if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1) + return -EINVAL; + + ret = plpar_hcall_norets(H_SCM_WRITE_METADATA, + p->drc_index, hdr->in_offset, hdr->in_buf[0], 1); + + if (ret == H_PARAMETER) /* bad DRC index */ + return -ENODEV; + if (ret) + return -EINVAL; /* other invalid parameter */ + + return 0; +} + +int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, + unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) +{ + struct nd_cmd_get_config_size *get_size_hdr; + struct papr_scm_priv *p; + + /* Only dimm-specific calls are supported atm */ + if (!nvdimm) + return -EINVAL; + + p = nvdimm_provider_data(nvdimm); + + switch (cmd) { + case ND_CMD_GET_CONFIG_SIZE: + get_size_hdr = buf; + + get_size_hdr->status = 0; + get_size_hdr->max_xfer = 1; + get_size_hdr->config_size = p->metadata_size; + *cmd_rc = 0; + break; + + case ND_CMD_GET_CONFIG_DATA: + *cmd_rc = papr_scm_meta_get(p, buf); + break; + + case ND_CMD_SET_CONFIG_DATA: + *cmd_rc = papr_scm_meta_set(p, buf); + break; + + default: + return -EINVAL; + } + + dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc); + + return 0; +} + +static const struct attribute_group *region_attr_groups[] = { + &nd_region_attribute_group, + &nd_device_attribute_group, + &nd_mapping_attribute_group, + &nd_numa_attribute_group, + NULL, +}; + +static const struct attribute_group *bus_attr_groups[] = { + &nvdimm_bus_attribute_group, + NULL, +}; + +static const struct attribute_group *papr_scm_dimm_groups[] = { + &nvdimm_attribute_group, + &nd_device_attribute_group, + NULL, +}; + +static int papr_scm_nvdimm_init(struct papr_scm_priv *p) +{ + struct device *dev = &p->pdev->dev; + struct nd_mapping_desc mapping; + struct nd_region_desc ndr_desc; + unsigned long dimm_flags; + + p->bus_desc.ndctl = papr_scm_ndctl; + p->bus_desc.module = THIS_MODULE; + p->bus_desc.of_node = p->pdev->dev.of_node; + p->bus_desc.attr_groups = 
bus_attr_groups; + p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL); + + if (!p->bus_desc.provider_name) + return -ENOMEM; + + p->bus = nvdimm_bus_register(NULL, &p->bus_desc); + if (!p->bus) { + dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn); + return -ENXIO; + } + + dimm_flags = 0; + set_bit(NDD_ALIASING, &dimm_flags); + + p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups, + dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL); + if (!p->nvdimm) { + dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn); + goto err; + } + + /* now add the region */ + + memset(&mapping, 0, sizeof(mapping)); + mapping.nvdimm = p->nvdimm; + mapping.start = 0; + mapping.size = p->blocks * p->block_size; // XXX: potential overflow? + + memset(&ndr_desc, 0, sizeof(ndr_desc)); + ndr_desc.attr_groups = region_attr_groups; + ndr_desc.numa_node = dev_to_node(&p->pdev->dev); + ndr_desc.res = &p->res; + ndr_desc.of_node = p->dn; + ndr_desc.provider_data = p; + ndr_desc.mapping = &mapping; + ndr_desc.num_mappings = 1; + ndr_desc.nd_set = &p->nd_set; + set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); + + p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc); + if (!p->region) { + dev_err(dev, "Error registering region %pR from %pOF\n", + ndr_desc.res, p->dn); + goto err; + } + + return 0; + +err: nvdimm_bus_unregister(p->bus); + kfree(p->bus_desc.provider_name); + return -ENXIO; +} + +static int papr_scm_probe(struct platform_device *pdev) +{ + uint32_t drc_index, metadata_size, unit_cap[2]; + struct device_node *dn = pdev->dev.of_node; + struct papr_scm_priv *p; + int rc; + + /* check we have all the required DT properties */ + if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) { + dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn); + return -ENODEV; + } + + if (of_property_read_u32_array(dn, "ibm,unit-capacity", unit_cap, 2)) { + dev_err(&pdev->dev, "%pOF: missing unit-capacity!\n", dn); + return -ENODEV; + } + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + + /* optional DT properties */ + of_property_read_u32(dn, "ibm,metadata-size", &metadata_size); + + p->dn = dn; + p->drc_index = drc_index; + p->block_size = unit_cap[0]; + p->blocks = unit_cap[1]; + + /* might be zero */ + p->metadata_size = metadata_size; + p->pdev = pdev; + + /* request the hypervisor to bind this region to somewhere in memory */ + rc = drc_pmem_bind(p); + if (rc) + goto err; + + /* setup the resource for the newly bound range */ + p->res.start = p->bound_addr; + p->res.end = p->bound_addr + p->blocks * p->block_size; + p->res.name = pdev->name; + p->res.flags = IORESOURCE_MEM; + + rc = papr_scm_nvdimm_init(p); + if (rc) + goto err2; + + platform_set_drvdata(pdev, p); + + return 0; + +err2: drc_pmem_unbind(p); +err: kfree(p); + return rc; +} + +static int papr_scm_remove(struct platform_device *pdev) +{ + struct papr_scm_priv *p = platform_get_drvdata(pdev); + + nvdimm_bus_unregister(p->bus); + drc_pmem_unbind(p); + kfree(p); + + return 0; +} + +static const struct of_device_id papr_scm_match[] = { + { .compatible = "ibm,pmemory" }, + { }, +}; + +static struct platform_driver papr_scm_driver = { + .probe = papr_scm_probe, + .remove = papr_scm_remove, + .driver = { + .name = "papr_scm", + .owner = THIS_MODULE, + .of_match_table = papr_scm_match, + }, +}; + +module_platform_driver(papr_scm_driver); +MODULE_DEVICE_TABLE(of, papr_scm_match); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("IBM Corporation"); diff --git a/arch/powerpc/platforms/pseries/pci.c 
b/arch/powerpc/platforms/pseries/pci.c index eab96637d6cf..41d8a4d1d02e 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -239,6 +239,7 @@ void __init pSeries_final_fixup(void) { pSeries_request_regions(); + eeh_probe_devices(); eeh_addr_cache_build(); #ifdef CONFIG_PCI_IOV diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c new file mode 100644 index 000000000000..a27f40eb57b1 --- /dev/null +++ b/arch/powerpc/platforms/pseries/pmem.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Handles hot and cold plug of persistent memory regions on pseries. + */ + +#define pr_fmt(fmt) "pseries-pmem: " fmt + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/sched.h> /* for idle_task_exit */ +#include <linux/sched/hotplug.h> +#include <linux/cpu.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <asm/prom.h> +#include <asm/rtas.h> +#include <asm/firmware.h> +#include <asm/machdep.h> +#include <asm/vdso_datapage.h> +#include <asm/plpar_wrappers.h> +#include <asm/topology.h> + +#include "pseries.h" +#include "offline_states.h" + +static struct device_node *pmem_node; + +static ssize_t pmem_drc_add_node(u32 drc_index) +{ + struct device_node *dn; + int rc; + + pr_debug("Attempting to add pmem node, drc index: %x\n", drc_index); + + rc = dlpar_acquire_drc(drc_index); + if (rc) { + pr_err("Failed to acquire DRC, rc: %d, drc index: %x\n", + rc, drc_index); + return -EINVAL; + } + + dn = dlpar_configure_connector(cpu_to_be32(drc_index), pmem_node); + if (!dn) { + pr_err("configure-connector failed for drc %x\n", drc_index); + dlpar_release_drc(drc_index); + return -EINVAL; + } + + /* NB: The of reconfig notifier creates platform device from the node */ + rc = dlpar_attach_node(dn, pmem_node); + if (rc) { + pr_err("Failed to attach node %s, rc: %d, drc index: %x\n", + dn->name, rc, drc_index); + + if (dlpar_release_drc(drc_index)) + dlpar_free_cc_nodes(dn); + + return rc; + } + + pr_info("Successfully added %pOF, drc index: %x\n", dn, drc_index); + + return 0; +} + +static ssize_t pmem_drc_remove_node(u32 drc_index) +{ + struct device_node *dn; + uint32_t index; + int rc; + + for_each_child_of_node(pmem_node, dn) { + if (of_property_read_u32(dn, "ibm,my-drc-index", &index)) + continue; + if (index == drc_index) + break; + } + + if (!dn) { + pr_err("Attempting to remove unused DRC index %x\n", drc_index); + return -ENODEV; + } + + pr_debug("Attempting to remove %pOF, drc index: %x\n", dn, drc_index); + + /* * NB: tears down the ibm,pmemory device as a side-effect */ + rc = dlpar_detach_node(dn); + if (rc) + return rc; + + rc = dlpar_release_drc(drc_index); + if (rc) { + pr_err("Failed to release drc (%x) for CPU %s, rc: %d\n", + drc_index, dn->name, rc); + dlpar_attach_node(dn, pmem_node); + return rc; + } + + pr_info("Successfully removed PMEM with drc index: %x\n", drc_index); + + return 0; +} + +int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) +{ + u32 count, drc_index; + int rc; + + /* slim chance, but we might get a hotplug event while booting */ + if (!pmem_node) + pmem_node = of_find_node_by_type(NULL, "ibm,persistent-memory"); + if (!pmem_node) { + pr_err("Hotplug event for a pmem device, but none exists\n"); + return -ENODEV; + } + + if (hp_elog->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX) { + pr_err("Unsupported hotplug event type %d\n", + hp_elog->id_type); + return -EINVAL; + } + + count = 
hp_elog->_drc_u.drc_count; + drc_index = hp_elog->_drc_u.drc_index; + + lock_device_hotplug(); + + if (hp_elog->action == PSERIES_HP_ELOG_ACTION_ADD) { + rc = pmem_drc_add_node(drc_index); + } else if (hp_elog->action == PSERIES_HP_ELOG_ACTION_REMOVE) { + rc = pmem_drc_remove_node(drc_index); + } else { + pr_err("Unsupported hotplug action (%d)\n", hp_elog->action); + rc = -EINVAL; + } + + unlock_device_hotplug(); + return rc; +} + +const struct of_device_id drc_pmem_match[] = { + { .type = "ibm,persistent-memory", }, + {} +}; + +static int pseries_pmem_init(void) +{ + pmem_node = of_find_node_by_type(NULL, "ibm,persistent-memory"); + if (!pmem_node) + return 0; + + /* + * The generic OF bus probe/populate handles creating platform devices + * from the child (ibm,pmemory) nodes. The generic code registers an of + * reconfig notifier to handle the hot-add/remove cases too. + */ + of_platform_bus_probe(pmem_node, drc_pmem_match, NULL); + + return 0; +} +machine_arch_initcall(pseries, pseries_pmem_init); diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 60db2ee511fb..7dee8c5d3363 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -24,6 +24,7 @@ struct pt_regs; extern int pSeries_system_reset_exception(struct pt_regs *regs); extern int pSeries_machine_check_exception(struct pt_regs *regs); +extern long pseries_machine_check_realmode(struct pt_regs *regs); #ifdef CONFIG_SMP extern void smp_init_pseries(void); @@ -59,15 +60,21 @@ extern int dlpar_detach_node(struct device_node *); extern int dlpar_acquire_drc(u32 drc_index); extern int dlpar_release_drc(u32 drc_index); -void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog, - struct completion *hotplug_done, int *rc); +void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog); +int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_errlog); + #ifdef CONFIG_MEMORY_HOTPLUG int dlpar_memory(struct pseries_hp_errorlog *hp_elog); +int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog); #else static inline int dlpar_memory(struct pseries_hp_errorlog *hp_elog) { return -EOPNOTSUPP; } +static inline int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) +{ + return -EOPNOTSUPP; +} #endif #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 851ce326874a..d97d52772789 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -27,6 +27,7 @@ #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/firmware.h> +#include <asm/mce.h> #include "pseries.h" @@ -50,6 +51,101 @@ static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id); static irqreturn_t ras_epow_interrupt(int irq, void *dev_id); static irqreturn_t ras_error_interrupt(int irq, void *dev_id); +/* RTAS pseries MCE errorlog section. */ +struct pseries_mc_errorlog { + __be32 fru_id; + __be32 proc_id; + u8 error_type; + /* + * sub_err_type (1 byte). Bit fields depends on error_type + * + * MSB0 + * | + * V + * 01234567 + * XXXXXXXX + * + * For error_type == MC_ERROR_TYPE_UE + * XXXXXXXX + * X 1: Permanent or Transient UE. + * X 1: Effective address provided. + * X 1: Logical address provided. + * XX 2: Reserved. + * XXX 3: Type of UE error. + * + * For error_type != MC_ERROR_TYPE_UE + * XXXXXXXX + * X 1: Effective address provided. + * XXXXX 5: Reserved. + * XX 2: Type of SLB/ERAT/TLB error. 
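+ *
+ * With the MSB0 numbering above, these map onto the masks used below:
+ * 0x40 (UE) and 0x80 (SLB/ERAT/TLB) test "Effective address provided",
+ * while 0x07 and 0x03 extract the UE and SLB/ERAT/TLB sub types in
+ * rtas_mc_error_sub_type() and rtas_mc_get_effective_addr().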
+ */ + u8 sub_err_type; + u8 reserved_1[6]; + __be64 effective_address; + __be64 logical_address; +} __packed; + +/* RTAS pseries MCE error types */ +#define MC_ERROR_TYPE_UE 0x00 +#define MC_ERROR_TYPE_SLB 0x01 +#define MC_ERROR_TYPE_ERAT 0x02 +#define MC_ERROR_TYPE_TLB 0x04 +#define MC_ERROR_TYPE_D_CACHE 0x05 +#define MC_ERROR_TYPE_I_CACHE 0x07 + +/* RTAS pseries MCE error sub types */ +#define MC_ERROR_UE_INDETERMINATE 0 +#define MC_ERROR_UE_IFETCH 1 +#define MC_ERROR_UE_PAGE_TABLE_WALK_IFETCH 2 +#define MC_ERROR_UE_LOAD_STORE 3 +#define MC_ERROR_UE_PAGE_TABLE_WALK_LOAD_STORE 4 + +#define MC_ERROR_SLB_PARITY 0 +#define MC_ERROR_SLB_MULTIHIT 1 +#define MC_ERROR_SLB_INDETERMINATE 2 + +#define MC_ERROR_ERAT_PARITY 1 +#define MC_ERROR_ERAT_MULTIHIT 2 +#define MC_ERROR_ERAT_INDETERMINATE 3 + +#define MC_ERROR_TLB_PARITY 1 +#define MC_ERROR_TLB_MULTIHIT 2 +#define MC_ERROR_TLB_INDETERMINATE 3 + +static inline u8 rtas_mc_error_sub_type(const struct pseries_mc_errorlog *mlog) +{ + switch (mlog->error_type) { + case MC_ERROR_TYPE_UE: + return (mlog->sub_err_type & 0x07); + case MC_ERROR_TYPE_SLB: + case MC_ERROR_TYPE_ERAT: + case MC_ERROR_TYPE_TLB: + return (mlog->sub_err_type & 0x03); + default: + return 0; + } +} + +static +inline u64 rtas_mc_get_effective_addr(const struct pseries_mc_errorlog *mlog) +{ + __be64 addr = 0; + + switch (mlog->error_type) { + case MC_ERROR_TYPE_UE: + if (mlog->sub_err_type & 0x40) + addr = mlog->effective_address; + break; + case MC_ERROR_TYPE_SLB: + case MC_ERROR_TYPE_ERAT: + case MC_ERROR_TYPE_TLB: + if (mlog->sub_err_type & 0x80) + addr = mlog->effective_address; + default: + break; + } + return be64_to_cpu(addr); +} /* * Enable the hotplug interrupt late because processing them may touch other @@ -237,8 +333,9 @@ static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id) * hotplug events on the ras_log_buf to be handled by rtas_errd. */ if (hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_MEM || - hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_CPU) - queue_hotplug_event(hp_elog, NULL, NULL); + hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_CPU || + hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_PMEM) + queue_hotplug_event(hp_elog); else log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0); @@ -427,6 +524,188 @@ int pSeries_system_reset_exception(struct pt_regs *regs) return 0; /* need to perform reset */ } +#define VAL_TO_STRING(ar, val) \ + (((val) < ARRAY_SIZE(ar)) ? 
ar[(val)] : "Unknown") + +static void pseries_print_mce_info(struct pt_regs *regs, + struct rtas_error_log *errp) +{ + const char *level, *sevstr; + struct pseries_errorlog *pseries_log; + struct pseries_mc_errorlog *mce_log; + u8 error_type, err_sub_type; + u64 addr; + u8 initiator = rtas_error_initiator(errp); + int disposition = rtas_error_disposition(errp); + + static const char * const initiators[] = { + "Unknown", + "CPU", + "PCI", + "ISA", + "Memory", + "Power Mgmt", + }; + static const char * const mc_err_types[] = { + "UE", + "SLB", + "ERAT", + "TLB", + "D-Cache", + "Unknown", + "I-Cache", + }; + static const char * const mc_ue_types[] = { + "Indeterminate", + "Instruction fetch", + "Page table walk ifetch", + "Load/Store", + "Page table walk Load/Store", + }; + + /* SLB sub errors valid values are 0x0, 0x1, 0x2 */ + static const char * const mc_slb_types[] = { + "Parity", + "Multihit", + "Indeterminate", + }; + + /* TLB and ERAT sub errors valid values are 0x1, 0x2, 0x3 */ + static const char * const mc_soft_types[] = { + "Unknown", + "Parity", + "Multihit", + "Indeterminate", + }; + + if (!rtas_error_extended(errp)) { + pr_err("Machine check interrupt: Missing extended error log\n"); + return; + } + + pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE); + if (pseries_log == NULL) + return; + + mce_log = (struct pseries_mc_errorlog *)pseries_log->data; + + error_type = mce_log->error_type; + err_sub_type = rtas_mc_error_sub_type(mce_log); + + switch (rtas_error_severity(errp)) { + case RTAS_SEVERITY_NO_ERROR: + level = KERN_INFO; + sevstr = "Harmless"; + break; + case RTAS_SEVERITY_WARNING: + level = KERN_WARNING; + sevstr = ""; + break; + case RTAS_SEVERITY_ERROR: + case RTAS_SEVERITY_ERROR_SYNC: + level = KERN_ERR; + sevstr = "Severe"; + break; + case RTAS_SEVERITY_FATAL: + default: + level = KERN_ERR; + sevstr = "Fatal"; + break; + } + +#ifdef CONFIG_PPC_BOOK3S_64 + /* Display faulty slb contents for SLB errors. */ + if (error_type == MC_ERROR_TYPE_SLB) + slb_dump_contents(local_paca->mce_faulty_slbs); +#endif + + printk("%s%s Machine check interrupt [%s]\n", level, sevstr, + disposition == RTAS_DISP_FULLY_RECOVERED ? 
+ "Recovered" : "Not recovered"); + if (user_mode(regs)) { + printk("%s NIP: [%016lx] PID: %d Comm: %s\n", level, + regs->nip, current->pid, current->comm); + } else { + printk("%s NIP [%016lx]: %pS\n", level, regs->nip, + (void *)regs->nip); + } + printk("%s Initiator: %s\n", level, + VAL_TO_STRING(initiators, initiator)); + + switch (error_type) { + case MC_ERROR_TYPE_UE: + printk("%s Error type: %s [%s]\n", level, + VAL_TO_STRING(mc_err_types, error_type), + VAL_TO_STRING(mc_ue_types, err_sub_type)); + break; + case MC_ERROR_TYPE_SLB: + printk("%s Error type: %s [%s]\n", level, + VAL_TO_STRING(mc_err_types, error_type), + VAL_TO_STRING(mc_slb_types, err_sub_type)); + break; + case MC_ERROR_TYPE_ERAT: + case MC_ERROR_TYPE_TLB: + printk("%s Error type: %s [%s]\n", level, + VAL_TO_STRING(mc_err_types, error_type), + VAL_TO_STRING(mc_soft_types, err_sub_type)); + break; + default: + printk("%s Error type: %s\n", level, + VAL_TO_STRING(mc_err_types, error_type)); + break; + } + + addr = rtas_mc_get_effective_addr(mce_log); + if (addr) + printk("%s Effective address: %016llx\n", level, addr); +} + +static int mce_handle_error(struct rtas_error_log *errp) +{ + struct pseries_errorlog *pseries_log; + struct pseries_mc_errorlog *mce_log; + int disposition = rtas_error_disposition(errp); + u8 error_type; + + if (!rtas_error_extended(errp)) + goto out; + + pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE); + if (pseries_log == NULL) + goto out; + + mce_log = (struct pseries_mc_errorlog *)pseries_log->data; + error_type = mce_log->error_type; + +#ifdef CONFIG_PPC_BOOK3S_64 + if (disposition == RTAS_DISP_NOT_RECOVERED) { + switch (error_type) { + case MC_ERROR_TYPE_SLB: + case MC_ERROR_TYPE_ERAT: + /* + * Store the old slb content in paca before flushing. + * Print this when we go to virtual mode. + * There are chances that we may hit MCE again if there + * is a parity error on the SLB entry we trying to read + * for saving. Hence limit the slb saving to single + * level of recursion. + */ + if (local_paca->in_mce == 1) + slb_save_contents(local_paca->mce_faulty_slbs); + flush_and_reload_slb(); + disposition = RTAS_DISP_FULLY_RECOVERED; + rtas_set_disposition_recovered(errp); + break; + default: + break; + } + } +#endif + +out: + return disposition; +} + /* * Process MCE rtas errlog event. */ @@ -452,8 +731,11 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err) int recovered = 0; int disposition = rtas_error_disposition(err); + pseries_print_mce_info(regs, err); + if (!(regs->msr & MSR_RI)) { /* If MSR_RI isn't set, we cannot recover */ + pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n"); recovered = 0; } else if (disposition == RTAS_DISP_FULLY_RECOVERED) { @@ -503,11 +785,31 @@ int pSeries_machine_check_exception(struct pt_regs *regs) struct rtas_error_log *errp; if (fwnmi_active) { - errp = fwnmi_get_errinfo(regs); fwnmi_release_errinfo(); + errp = fwnmi_get_errlog(); if (errp && recover_mce(regs, errp)) return 1; } return 0; } + +long pseries_machine_check_realmode(struct pt_regs *regs) +{ + struct rtas_error_log *errp; + int disposition; + + if (fwnmi_active) { + errp = fwnmi_get_errinfo(regs); + /* + * Call to fwnmi_release_errinfo() in real mode causes kernel + * to panic. Hence we will call it as soon as we go into + * virtual mode. 
+ */ + disposition = mce_handle_error(errp); + if (disposition == RTAS_DISP_FULLY_RECOVERED) + return 1; + } + + return 0; +} diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ba1791fd3234..0f553dcfa548 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -107,6 +107,10 @@ static void __init fwnmi_init(void) u8 *mce_data_buf; unsigned int i; int nr_cpus = num_possible_cpus(); +#ifdef CONFIG_PPC_BOOK3S_64 + struct slb_entry *slb_ptr; + size_t size; +#endif int ibm_nmi_register = rtas_token("ibm,nmi-register"); if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE) @@ -132,6 +136,15 @@ static void __init fwnmi_init(void) paca_ptrs[i]->mce_data_buf = mce_data_buf + (RTAS_ERROR_LOG_MAX * i); } + +#ifdef CONFIG_PPC_BOOK3S_64 + /* Allocate per cpu slb area to save old slb contents during MCE */ + size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus; + slb_ptr = __va(memblock_alloc_base(size, sizeof(struct slb_entry), + ppc64_rma_size)); + for_each_possible_cpu(i) + paca_ptrs[i]->mce_faulty_slbs = slb_ptr + (mmu_slb_size * i); +#endif } static void pseries_8259_cascade(struct irq_desc *desc) @@ -1017,6 +1030,7 @@ define_machine(pseries) { .calibrate_decr = generic_calibrate_decr, .progress = rtas_progress, .system_reset_exception = pSeries_system_reset_exception, + .machine_check_early = pseries_machine_check_realmode, .machine_check_exception = pSeries_machine_check_exception, #ifdef CONFIG_KEXEC_CORE .machine_kexec = pSeries_machine_kexec, diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 49e04ec19238..88f1ad1d6309 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -1349,7 +1349,6 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) struct device_node *parent_node; const __be32 *prop; enum vio_dev_family family; - const char *of_node_name = of_node->name ? 
of_node->name : "<unknown>"; /* * Determine if this node is a under the /vdevice node or under the @@ -1362,24 +1361,24 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) else if (!strcmp(parent_node->type, "vdevice")) family = VDEVICE; else { - pr_warn("%s: parent(%pOF) of %s not recognized.\n", + pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n", __func__, parent_node, - of_node_name); + of_node); of_node_put(parent_node); return NULL; } of_node_put(parent_node); } else { - pr_warn("%s: could not determine the parent of node %s.\n", - __func__, of_node_name); + pr_warn("%s: could not determine the parent of node %pOFn.\n", + __func__, of_node); return NULL; } if (family == PFO) { if (of_get_property(of_node, "interrupt-controller", NULL)) { - pr_debug("%s: Skipping the interrupt controller %s.\n", - __func__, of_node_name); + pr_debug("%s: Skipping the interrupt controller %pOFn.\n", + __func__, of_node); return NULL; } } @@ -1399,15 +1398,15 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) if (of_node->type != NULL) viodev->type = of_node->type; else { - pr_warn("%s: node %s is missing the 'device_type' " - "property.\n", __func__, of_node_name); + pr_warn("%s: node %pOFn is missing the 'device_type' " + "property.\n", __func__, of_node); goto out; } prop = of_get_property(of_node, "reg", NULL); if (prop == NULL) { - pr_warn("%s: node %s missing 'reg'\n", - __func__, of_node_name); + pr_warn("%s: node %pOFn missing 'reg'\n", + __func__, of_node); goto out; } unit_address = of_read_number(prop, 1); @@ -1422,8 +1421,8 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) if (prop != NULL) viodev->resource_id = of_read_number(prop, 1); - dev_set_name(&viodev->dev, "%s", of_node_name); - viodev->type = of_node_name; + dev_set_name(&viodev->dev, "%pOFn", of_node); + viodev->type = dev_name(&viodev->dev); viodev->irq = 0; } @@ -1694,7 +1693,7 @@ struct vio_dev *vio_find_node(struct device_node *vnode) snprintf(kobj_name, sizeof(kobj_name), "%x", (uint32_t)of_read_number(prop, 1)); } else if (!strcmp(dev_type, "ibm,platform-facilities")) - snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name); + snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode); else return NULL; diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig index bcef2ac56479..e0dbec780fe9 100644 --- a/arch/powerpc/sysdev/Kconfig +++ b/arch/powerpc/sysdev/Kconfig @@ -6,19 +6,16 @@ config PPC4xx_PCI_EXPRESS bool depends on PCI && 4xx - default n config PPC4xx_HSTA_MSI bool depends on PCI_MSI depends on PCI && 4xx - default n config PPC4xx_MSI bool depends on PCI_MSI depends on PCI && 4xx - default n config PPC_MSI_BITMAP bool @@ -37,11 +34,9 @@ config PPC_SCOM config SCOM_DEBUGFS bool "Expose SCOM controllers via debugfs" depends on PPC_SCOM && DEBUG_FS - default n config GE_FPGA bool - default n config FSL_CORENET_RCPM bool diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index f730539074c4..2caa4defdfb6 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) @@ -56,8 +55,6 @@ obj-$(CONFIG_PPC_SCOM) += scom.o obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror - obj-$(CONFIG_PPC_XICS) += xics/ obj-$(CONFIG_PPC_XIVE) += xive/ diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c 
b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c index 00ccf3e4fcb4..15cbdd4fde06 100644 --- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c +++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c @@ -107,11 +107,11 @@ int __init instantiate_cache_sram(struct platform_device *dev, goto out_free; } - cache_sram->base_virt = ioremap_prot(cache_sram->base_phys, - cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL); + cache_sram->base_virt = ioremap_coherent(cache_sram->base_phys, + cache_sram->size); if (!cache_sram->base_virt) { - dev_err(&dev->dev, "%pOF: ioremap_prot failed\n", - dev->dev.of_node); + dev_err(&dev->dev, "%pOF: ioremap_coherent failed\n", + dev->dev.of_node); ret = -ENOMEM; goto out_release; } diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 535cf1f6941c..6300123ce965 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -846,7 +846,7 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq) u32 ipic_get_mcp_status(void) { - return ipic_read(primary_ipic->regs, IPIC_SERSR); + return primary_ipic ? ipic_read(primary_ipic->regs, IPIC_SERSR) : 0; } void ipic_clear_mcp_status(u32 mask) diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile index 5d438d92472b..ba1e3117b1c0 100644 --- a/arch/powerpc/sysdev/xics/Makefile +++ b/arch/powerpc/sysdev/xics/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror obj-y += xics-common.o obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig index 70ee976e1de0..785c292d104b 100644 --- a/arch/powerpc/sysdev/xive/Kconfig +++ b/arch/powerpc/sysdev/xive/Kconfig @@ -1,17 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 config PPC_XIVE bool - default n select PPC_SMP_MUXED_IPI select HARDIRQS_SW_RESEND config PPC_XIVE_NATIVE bool - default n select PPC_XIVE depends on PPC_POWERNV config PPC_XIVE_SPAPR bool - default n select PPC_XIVE diff --git a/arch/powerpc/sysdev/xive/Makefile b/arch/powerpc/sysdev/xive/Makefile index 536d6e5706e3..dea2abc23f4d 100644 --- a/arch/powerpc/sysdev/xive/Makefile +++ b/arch/powerpc/sysdev/xive/Makefile @@ -1,4 +1,3 @@ -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror obj-y += common.o obj-$(CONFIG_PPC_XIVE_NATIVE) += native.o diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 959a2a62f233..9824074ec1b5 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -1010,12 +1010,13 @@ static void xive_ipi_eoi(struct irq_data *d) { struct xive_cpu *xc = __this_cpu_read(xive_cpu); - DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", - d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); - /* Handle possible race with unplug and drop stale IPIs */ if (!xc) return; + + DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", + d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); + xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data); xive_do_queue_eoi(xc); } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 5b20a678d755..1ca127d052a6 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -238,20 +238,11 @@ static bool xive_native_match(struct device_node *node) #ifdef CONFIG_SMP static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc) { - struct device_node *np; - unsigned int chip_id; s64 irq; - /* Find the chip ID */ - np = of_get_cpu_node(cpu, NULL); - if (np) { - if 
(of_property_read_u32(np, "ibm,chip-id", &chip_id) < 0) - chip_id = 0; - } - /* Allocate an IPI and populate info about it */ for (;;) { - irq = opal_xive_allocate_irq(chip_id); + irq = opal_xive_allocate_irq(xc->chip_id); if (irq == OPAL_BUSY) { msleep(OPAL_BUSY_DELAY_MS); continue; diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 1bc3abb237cd..69e7fb47bcaa 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -1,14 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for xmon -subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror +# Disable clang warning for using setjmp without setjmp.h header +subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header) GCOV_PROFILE := n UBSAN_SANITIZE := n # Disable ftrace for the entire directory ORIG_CFLAGS := $(KBUILD_CFLAGS) -KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))) +KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 4264aedc7775..36b8dc47a3c3 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2378,25 +2378,33 @@ static void dump_one_paca(int cpu) DUMP(p, cpu_start, "%#-*x"); DUMP(p, kexec_state, "%#-*x"); #ifdef CONFIG_PPC_BOOK3S_64 - for (i = 0; i < SLB_NUM_BOLTED; i++) { - u64 esid, vsid; + if (!early_radix_enabled()) { + for (i = 0; i < SLB_NUM_BOLTED; i++) { + u64 esid, vsid; - if (!p->slb_shadow_ptr) - continue; + if (!p->slb_shadow_ptr) + continue; + + esid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].esid); + vsid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].vsid); - esid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].esid); - vsid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].vsid); + if (esid || vsid) { + printf(" %-*s[%d] = 0x%016llx 0x%016llx\n", + 22, "slb_shadow", i, esid, vsid); + } + } + DUMP(p, vmalloc_sllp, "%#-*x"); + DUMP(p, stab_rr, "%#-*x"); + DUMP(p, slb_used_bitmap, "%#-*x"); + DUMP(p, slb_kern_bitmap, "%#-*x"); - if (esid || vsid) { - printf(" %-*s[%d] = 0x%016llx 0x%016llx\n", - 22, "slb_shadow", i, esid, vsid); + if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) { + DUMP(p, slb_cache_ptr, "%#-*x"); + for (i = 0; i < SLB_CACHE_ENTRIES; i++) + printf(" %-*s[%d] = 0x%016x\n", + 22, "slb_cache", i, p->slb_cache[i]); } } - DUMP(p, vmalloc_sllp, "%#-*x"); - DUMP(p, slb_cache_ptr, "%#-*x"); - for (i = 0; i < SLB_CACHE_ENTRIES; i++) - printf(" %-*s[%d] = 0x%016x\n", - 22, "slb_cache", i, p->slb_cache[i]); DUMP(p, rfi_flush_fallback_area, "%-*px"); #endif @@ -2412,7 +2420,9 @@ static void dump_one_paca(int cpu) DUMP(p, __current, "%-*px"); DUMP(p, kstack, "%#-*llx"); printf(" %-*s = 0x%016llx\n", 25, "kstack_base", p->kstack & ~(THREAD_SIZE - 1)); - DUMP(p, stab_rr, "%#-*llx"); +#ifdef CONFIG_STACKPROTECTOR + DUMP(p, canary, "%#-*lx"); +#endif DUMP(p, saved_r1, "%#-*llx"); DUMP(p, trap_save, "%#-*x"); DUMP(p, irq_soft_mask, "%#-*x"); @@ -2444,11 +2454,15 @@ static void dump_one_paca(int cpu) DUMP(p, accounting.utime, "%#-*lx"); DUMP(p, accounting.stime, "%#-*lx"); +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME DUMP(p, accounting.utime_scaled, "%#-*lx"); +#endif DUMP(p, accounting.starttime, "%#-*lx"); DUMP(p, accounting.starttime_user, "%#-*lx"); +#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME DUMP(p, accounting.startspurr, "%#-*lx"); DUMP(p, accounting.utime_sspurr, "%#-*lx"); +#endif DUMP(p, accounting.steal_time, "%#-*lx"); #undef DUMP @@ -2988,15 +3002,17 @@ static void show_task(struct task_struct 
*tsk) #ifdef CONFIG_PPC_BOOK3S_64 void format_pte(void *ptep, unsigned long pte) { + pte_t entry = __pte(pte); + printf("ptep @ 0x%016lx = 0x%016lx\n", (unsigned long)ptep, pte); printf("Maps physical address = 0x%016lx\n", pte & PTE_RPN_MASK); printf("Flags = %s%s%s%s%s\n", - (pte & _PAGE_ACCESSED) ? "Accessed " : "", - (pte & _PAGE_DIRTY) ? "Dirty " : "", - (pte & _PAGE_READ) ? "Read " : "", - (pte & _PAGE_WRITE) ? "Write " : "", - (pte & _PAGE_EXEC) ? "Exec " : ""); + pte_young(entry) ? "Accessed " : "", + pte_dirty(entry) ? "Dirty " : "", + pte_read(entry) ? "Read " : "", + pte_write(entry) ? "Write " : "", + pte_exec(entry) ? "Exec " : ""); } static void show_pte(unsigned long addr) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index a344980287a5..fe451348ae57 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -31,6 +31,7 @@ config RISCV select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_DMA_CONTIGUOUS + select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_GENERIC_DMA_COHERENT select HAVE_PERF_EVENTS select IRQ_DOMAIN @@ -108,10 +109,12 @@ config ARCH_RV32I select GENERIC_LIB_ASHRDI3 select GENERIC_LIB_LSHRDI3 select GENERIC_LIB_UCMPDI2 + select GENERIC_LIB_UMODDI3 config ARCH_RV64I bool "RV64I" select 64BIT + select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FTRACE_MCOUNT_RECORD @@ -208,14 +211,61 @@ config RISCV_BASE_PMU endmenu +config FPU + bool "FPU support" + default y + help + Say N here if you want to disable all floating-point related procedure + in the kernel. + + If you don't know what to do here, say Y. + endmenu -menu "Kernel type" +menu "Kernel features" source "kernel/Kconfig.hz" endmenu +menu "Boot options" + +config CMDLINE_BOOL + bool "Built-in kernel command line" + help + For most platforms, it is firmware or second stage bootloader + that by default specifies the kernel command line options. + However, it might be necessary or advantageous to either override + the default kernel command line or add a few extra options to it. + For such cases, this option allows hardcoding command line options + directly into the kernel. + + For that, choose 'Y' here and fill in the extra boot parameters + in CONFIG_CMDLINE. + + The built-in options will be concatenated to the default command + line if CMDLINE_FORCE is set to 'N'. Otherwise, the default + command line will be ignored and replaced by the built-in string. + +config CMDLINE + string "Built-in kernel command string" + depends on CMDLINE_BOOL + default "" + help + Supply command-line options at build time by entering them here. + +config CMDLINE_FORCE + bool "Built-in command line overrides bootloader arguments" + depends on CMDLINE_BOOL + help + Set this option to 'Y' to have the kernel ignore the bootloader + or firmware command line. Instead, the built-in command line + will be used exclusively. + + If you don't know what to do here, say N. + +endmenu + menu "Bus support" config PCI diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug index 3224ff6ecf6e..c5a72f17c469 100644 --- a/arch/riscv/Kconfig.debug +++ b/arch/riscv/Kconfig.debug @@ -1,37 +1,2 @@ - -config CMDLINE_BOOL - bool "Built-in kernel command line" - help - For most platforms, it is firmware or second stage bootloader - that by default specifies the kernel command line options. - However, it might be necessary or advantageous to either override - the default kernel command line or add a few extra options to it. 
- For such cases, this option allows hardcoding command line options - directly into the kernel. - - For that, choose 'Y' here and fill in the extra boot parameters - in CONFIG_CMDLINE. - - The built-in options will be concatenated to the default command - line if CMDLINE_FORCE is set to 'N'. Otherwise, the default - command line will be ignored and replaced by the built-in string. - -config CMDLINE - string "Built-in kernel command string" - depends on CMDLINE_BOOL - default "" - help - Supply command-line options at build time by entering them here. - -config CMDLINE_FORCE - bool "Built-in command line overrides bootloader arguments" - depends on CMDLINE_BOOL - help - Set this option to 'Y' to have the kernel ignore the bootloader - or firmware command line. Instead, the built-in command line - will be used exclusively. - - If you don't know what to do here, say N. - config EARLY_PRINTK def_bool y diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 61ec42405ec9..d10146197533 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -25,10 +25,7 @@ ifeq ($(CONFIG_ARCH_RV64I),y) KBUILD_CFLAGS += -mabi=lp64 KBUILD_AFLAGS += -mabi=lp64 - - KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128) - KBUILD_MARCH = rv64im KBUILD_LDFLAGS += -melf64lriscv else BITS := 32 @@ -36,22 +33,20 @@ else KBUILD_CFLAGS += -mabi=ilp32 KBUILD_AFLAGS += -mabi=ilp32 - KBUILD_MARCH = rv32im KBUILD_LDFLAGS += -melf32lriscv endif KBUILD_CFLAGS += -Wall -ifeq ($(CONFIG_RISCV_ISA_A),y) - KBUILD_ARCH_A = a -endif -ifeq ($(CONFIG_RISCV_ISA_C),y) - KBUILD_ARCH_C = c -endif - -KBUILD_AFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)fd$(KBUILD_ARCH_C) +# ISA string setting +riscv-march-$(CONFIG_ARCH_RV32I) := rv32im +riscv-march-$(CONFIG_ARCH_RV64I) := rv64im +riscv-march-$(CONFIG_RISCV_ISA_A) := $(riscv-march-y)a +riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd +riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c +KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y)) +KBUILD_AFLAGS += -march=$(riscv-march-y) -KBUILD_CFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)$(KBUILD_ARCH_C) KBUILD_CFLAGS += -mno-save-restore KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET) diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index efdbe311e936..6a646d9ea780 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -13,7 +13,6 @@ generic-y += errno.h generic-y += exec.h generic-y += fb.h generic-y += fcntl.h -generic-y += futex.h generic-y += hardirq.h generic-y += hash.h generic-y += hw_irq.h diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h new file mode 100644 index 000000000000..3b19eba1bc8e --- /dev/null +++ b/arch/riscv/include/asm/futex.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (c) 2018 Jim Wilson (jimw@sifive.com) + */ + +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#ifndef CONFIG_RISCV_ISA_A +/* + * Use the generic interrupt disabling versions if the A extension + * is not supported. 
+ */ +#ifdef CONFIG_SMP +#error "Can't support generic futex calls without A extension on SMP" +#endif +#include <asm-generic/futex.h> + +#else /* CONFIG_RISCV_ISA_A */ + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <linux/errno.h> +#include <asm/asm.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +{ \ + uintptr_t tmp; \ + __enable_user_access(); \ + __asm__ __volatile__ ( \ + "1: " insn " \n" \ + "2: \n" \ + " .section .fixup,\"ax\" \n" \ + " .balign 4 \n" \ + "3: li %[r],%[e] \n" \ + " jump 2b,%[t] \n" \ + " .previous \n" \ + " .section __ex_table,\"a\" \n" \ + " .balign " RISCV_SZPTR " \n" \ + " " RISCV_PTR " 1b, 3b \n" \ + " .previous \n" \ + : [r] "+r" (ret), [ov] "=&r" (oldval), \ + [u] "+m" (*uaddr), [t] "=&r" (tmp) \ + : [op] "Jr" (oparg), [e] "i" (-EFAULT) \ + : "memory"); \ + __disable_user_access(); \ +} + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret = 0; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("amoswap.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("amoadd.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("amoor.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("amoand.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("amoxor.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 val; + uintptr_t tmp; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + __enable_user_access(); + __asm__ __volatile__ ( + "1: lr.w.aqrl %[v],%[u] \n" + " bne %[v],%z[ov],3f \n" + "2: sc.w.aqrl %[t],%z[nv],%[u] \n" + " bnez %[t],1b \n" + "3: \n" + " .section .fixup,\"ax\" \n" + " .balign 4 \n" + "4: li %[r],%[e] \n" + " jump 3b,%[t] \n" + " .previous \n" + " .section __ex_table,\"a\" \n" + " .balign " RISCV_SZPTR " \n" + " " RISCV_PTR " 1b, 4b \n" + " " RISCV_PTR " 2b, 4b \n" + " .previous \n" + : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp) + : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT) + : "memory"); + __disable_user_access(); + + *uval = val; + return ret; +} + +#endif /* CONFIG_RISCV_ISA_A */ +#endif /* _ASM_FUTEX_H */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 3fe4af8147d2..50de774d827a 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -88,7 +88,7 @@ static inline void wait_for_interrupt(void) } struct device_node; -extern int riscv_of_processor_hart(struct device_node *node); +int riscv_of_processor_hartid(struct device_node *node); extern void riscv_fill_hwcap(void); diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h index 36016845461d..41aa73b476f4 100644 --- a/arch/riscv/include/asm/smp.h +++ b/arch/riscv/include/asm/smp.h @@ -14,16 +14,24 @@ #ifndef _ASM_RISCV_SMP_H #define _ASM_RISCV_SMP_H -/* This both needs asm-offsets.h and is used when generating it. 
*/ -#ifndef GENERATING_ASM_OFFSETS -#include <asm/asm-offsets.h> -#endif - #include <linux/cpumask.h> #include <linux/irqreturn.h> +#include <linux/thread_info.h> + +#define INVALID_HARTID ULONG_MAX +/* + * Mapping between linux logical cpu index and hartid. + */ +extern unsigned long __cpuid_to_hartid_map[NR_CPUS]; +#define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu] + +struct seq_file; #ifdef CONFIG_SMP +/* print IPI stats */ +void show_ipi_stats(struct seq_file *p, int prec); + /* SMP initialization hook for setup_arch */ void __init setup_smp(void); @@ -33,14 +41,31 @@ void arch_send_call_function_ipi_mask(struct cpumask *mask); /* Hook for the generic smp_call_function_single() routine. */ void arch_send_call_function_single_ipi(int cpu); +int riscv_hartid_to_cpuid(int hartid); +void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out); + /* - * This is particularly ugly: it appears we can't actually get the definition - * of task_struct here, but we need access to the CPU this task is running on. - * Instead of using C we're using asm-offsets.h to get the current processor - * ID. + * Obtains the hart ID of the currently executing task. This relies on + * THREAD_INFO_IN_TASK, but we define that unconditionally. */ -#define raw_smp_processor_id() (*((int*)((char*)get_current() + TASK_TI_CPU))) +#define raw_smp_processor_id() (current_thread_info()->cpu) -#endif /* CONFIG_SMP */ +#else + +static inline void show_ipi_stats(struct seq_file *p, int prec) +{ +} +static inline int riscv_hartid_to_cpuid(int hartid) +{ + return 0; +} + +static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in, + struct cpumask *out) +{ + cpumask_set_cpu(cpuid_to_hartid_map(0), out); +} + +#endif /* CONFIG_SMP */ #endif /* _ASM_RISCV_SMP_H */ diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index dd6b05bff75b..733559083f24 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -18,6 +18,7 @@ #include <asm/ptrace.h> #include <asm/csr.h> +#ifdef CONFIG_FPU extern void __fstate_save(struct task_struct *save_to); extern void __fstate_restore(struct task_struct *restore_from); @@ -55,6 +56,14 @@ static inline void __switch_to_aux(struct task_struct *prev, fstate_restore(next, task_pt_regs(next)); } +extern bool has_fpu; +#else +#define has_fpu false +#define fstate_save(task, regs) do { } while (0) +#define fstate_restore(task, regs) do { } while (0) +#define __switch_to_aux(__prev, __next) do { } while (0) +#endif + extern struct task_struct *__switch_to(struct task_struct *, struct task_struct *); @@ -62,7 +71,8 @@ extern struct task_struct *__switch_to(struct task_struct *, do { \ struct task_struct *__prev = (prev); \ struct task_struct *__next = (next); \ - __switch_to_aux(__prev, __next); \ + if (has_fpu) \ + __switch_to_aux(__prev, __next); \ ((last) = __switch_to(__prev, __next)); \ } while (0) diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 85c2d8bae957..54fee0cadb1e 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -16,6 +16,7 @@ #define _ASM_RISCV_TLBFLUSH_H #include <linux/mm_types.h> +#include <asm/smp.h> /* * Flush entire local TLB. 
'sfence.vma' implicitly fences with the instruction @@ -49,13 +50,22 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, #include <asm/sbi.h> +static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start, + unsigned long size) +{ + struct cpumask hmask; + + cpumask_clear(&hmask); + riscv_cpuid_to_hartid_mask(cmask, &hmask); + sbi_remote_sfence_vma(hmask.bits, start, size); +} + #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1) #define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0) #define flush_tlb_range(vma, start, end) \ - sbi_remote_sfence_vma(mm_cpumask((vma)->vm_mm)->bits, \ - start, (end) - (start)) + remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start)) #define flush_tlb_mm(mm) \ - sbi_remote_sfence_vma(mm_cpumask(mm)->bits, 0, -1) + remote_sfence_vma(mm_cpumask(mm), 0, -1) #endif /* CONFIG_SMP */ diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h index 0caea01d5cca..eff7aa9aa163 100644 --- a/arch/riscv/include/asm/unistd.h +++ b/arch/riscv/include/asm/unistd.h @@ -16,6 +16,7 @@ * be included multiple times. See uapi/asm/syscalls.h for more info. */ +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SYS_CLONE #include <uapi/asm/unistd.h> #include <uapi/asm/syscalls.h> diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild index 7e91f4850475..5511b9918131 100644 --- a/arch/riscv/include/uapi/asm/Kbuild +++ b/arch/riscv/include/uapi/asm/Kbuild @@ -26,3 +26,4 @@ generic-y += swab.h generic-y += termbits.h generic-y += termios.h generic-y += types.h +generic-y += siginfo.h diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h index 1e0dfc36aab9..644a00ce6e2e 100644 --- a/arch/riscv/include/uapi/asm/elf.h +++ b/arch/riscv/include/uapi/asm/elf.h @@ -19,7 +19,10 @@ typedef unsigned long elf_greg_t; typedef struct user_regs_struct elf_gregset_t; #define ELF_NGREG (sizeof(elf_gregset_t) / sizeof(elf_greg_t)) +/* We don't support f without d, or q. */ +typedef __u64 elf_fpreg_t; typedef union __riscv_fp_state elf_fpregset_t; +#define ELF_NFPREG (sizeof(struct __riscv_d_ext_state) / sizeof(elf_fpreg_t)) #if __riscv_xlen == 64 #define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info) diff --git a/arch/riscv/include/uapi/asm/siginfo.h b/arch/riscv/include/uapi/asm/siginfo.h deleted file mode 100644 index f96849aac662..000000000000 --- a/arch/riscv/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * Copyright (C) 2016 SiFive, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#ifndef __ASM_SIGINFO_H -#define __ASM_SIGINFO_H - -#define __ARCH_SI_PREAMBLE_SIZE (__SIZEOF_POINTER__ == 4 ? 
12 : 16) - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index e1274fc03af4..f13f7f276639 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -31,6 +31,7 @@ obj-y += vdso/ CFLAGS_setup.o := -mcmodel=medany +obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_SMP) += smpboot.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_MODULES) += module.o diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index 0bc86e5f8f3f..cb35ffd8ec6b 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -22,13 +22,6 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, { this_leaf->level = level; this_leaf->type = type; - /* not a sector cache */ - this_leaf->physical_line_partition = 1; - /* TODO: Add to DTS */ - this_leaf->attributes = - CACHE_WRITE_BACK - | CACHE_READ_ALLOCATE - | CACHE_WRITE_ALLOCATE; } static int __init_cache_level(unsigned int cpu) diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index ca6c81e54e37..3a5a2ee31547 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -14,9 +14,13 @@ #include <linux/init.h> #include <linux/seq_file.h> #include <linux/of.h> +#include <asm/smp.h> -/* Return -1 if not a valid hart */ -int riscv_of_processor_hart(struct device_node *node) +/* + * Returns the hart ID of the given device tree node, or -1 if the device tree + * node isn't a RISC-V hart. + */ +int riscv_of_processor_hartid(struct device_node *node) { const char *isa, *status; u32 hart; @@ -58,6 +62,64 @@ int riscv_of_processor_hart(struct device_node *node) #ifdef CONFIG_PROC_FS +static void print_isa(struct seq_file *f, const char *orig_isa) +{ + static const char *ext = "mafdc"; + const char *isa = orig_isa; + const char *e; + + /* + * Linux doesn't support rv32e or rv128i, and we only support booting + * kernels on harts with the same ISA that the kernel is compiled for. + */ +#if defined(CONFIG_32BIT) + if (strncmp(isa, "rv32i", 5) != 0) + return; +#elif defined(CONFIG_64BIT) + if (strncmp(isa, "rv64i", 5) != 0) + return; +#endif + + /* Print the base ISA, as we already know it's legal. */ + seq_puts(f, "isa\t\t: "); + seq_write(f, isa, 5); + isa += 5; + + /* + * Check the rest of the ISA string for valid extensions, printing those + * we find. RISC-V ISA strings define an order, so we only print the + * extension bits when they're in order. + */ + for (e = ext; *e != '\0'; ++e) { + if (isa[0] == e[0]) { + seq_write(f, isa, 1); + isa++; + } + } + seq_puts(f, "\n"); + + /* + * If we were given an unsupported ISA in the device tree then print + * a bit of info describing what went wrong. 
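+ * For example, "rv64imafdc" is printed in full, while a hypothetical
+ * "rv64imayz" stops after "rv64ima" and the leftover "yz" triggers
+ * the message below.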
+ */ + if (isa[0] != '\0') + pr_info("unsupported ISA \"%s\" in device tree", orig_isa); +} + +static void print_mmu(struct seq_file *f, const char *mmu_type) +{ +#if defined(CONFIG_32BIT) + if (strcmp(mmu_type, "riscv,sv32") != 0) + return; +#elif defined(CONFIG_64BIT) + if (strcmp(mmu_type, "riscv,sv39") != 0 && + strcmp(mmu_type, "riscv,sv48") != 0) + return; +#endif + + seq_printf(f, "mmu\t\t: %s\n", mmu_type+6); +} + static void *c_start(struct seq_file *m, loff_t *pos) { *pos = cpumask_next(*pos - 1, cpu_online_mask); @@ -78,21 +140,20 @@ static void c_stop(struct seq_file *m, void *v) static int c_show(struct seq_file *m, void *v) { - unsigned long hart_id = (unsigned long)v - 1; - struct device_node *node = of_get_cpu_node(hart_id, NULL); + unsigned long cpu_id = (unsigned long)v - 1; + struct device_node *node = of_get_cpu_node(cpuid_to_hartid_map(cpu_id), + NULL); const char *compat, *isa, *mmu; - seq_printf(m, "hart\t: %lu\n", hart_id); - if (!of_property_read_string(node, "riscv,isa", &isa) - && isa[0] == 'r' - && isa[1] == 'v') - seq_printf(m, "isa\t: %s\n", isa); - if (!of_property_read_string(node, "mmu-type", &mmu) - && !strncmp(mmu, "riscv,", 6)) - seq_printf(m, "mmu\t: %s\n", mmu+6); + seq_printf(m, "processor\t: %lu\n", cpu_id); + seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id)); + if (!of_property_read_string(node, "riscv,isa", &isa)) + print_isa(m, isa); + if (!of_property_read_string(node, "mmu-type", &mmu)) + print_mmu(m, mmu); if (!of_property_read_string(node, "compatible", &compat) && strcmp(compat, "riscv")) - seq_printf(m, "uarch\t: %s\n", compat); + seq_printf(m, "uarch\t\t: %s\n", compat); seq_puts(m, "\n"); return 0; diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 17011a870044..5493f3228704 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -22,6 +22,9 @@ #include <asm/hwcap.h> unsigned long elf_hwcap __read_mostly; +#ifdef CONFIG_FPU +bool has_fpu __read_mostly; +#endif void riscv_fill_hwcap(void) { @@ -57,5 +60,17 @@ void riscv_fill_hwcap(void) for (i = 0; i < strlen(isa); ++i) elf_hwcap |= isa2hwcap[(unsigned char)(isa[i])]; + /* We don't support systems with F but without D, so mask those out + * here. 
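+ * The rest of the port (ELF_NFPREG, the ptrace regset and the signal
+ * frame code) only understands the __riscv_d_ext_state layout.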
*/ + if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) { + pr_info("This kernel does not support systems with F but not D"); + elf_hwcap &= ~COMPAT_HWCAP_ISA_F; + } + pr_info("elf_hwcap is 0x%lx", elf_hwcap); + +#ifdef CONFIG_FPU + if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)) + has_fpu = true; +#endif } diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index fa2c08e3c05e..13d4826ab2a1 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -168,7 +168,6 @@ ENTRY(handle_exception) /* Handle interrupts */ move a0, sp /* pt_regs */ - move a1, s4 /* scause */ tail do_IRQ 1: /* Exceptions run with interrupts enabled */ @@ -357,93 +356,6 @@ ENTRY(__switch_to) ret ENDPROC(__switch_to) -ENTRY(__fstate_save) - li a2, TASK_THREAD_F0 - add a0, a0, a2 - li t1, SR_FS - csrs sstatus, t1 - frcsr t0 - fsd f0, TASK_THREAD_F0_F0(a0) - fsd f1, TASK_THREAD_F1_F0(a0) - fsd f2, TASK_THREAD_F2_F0(a0) - fsd f3, TASK_THREAD_F3_F0(a0) - fsd f4, TASK_THREAD_F4_F0(a0) - fsd f5, TASK_THREAD_F5_F0(a0) - fsd f6, TASK_THREAD_F6_F0(a0) - fsd f7, TASK_THREAD_F7_F0(a0) - fsd f8, TASK_THREAD_F8_F0(a0) - fsd f9, TASK_THREAD_F9_F0(a0) - fsd f10, TASK_THREAD_F10_F0(a0) - fsd f11, TASK_THREAD_F11_F0(a0) - fsd f12, TASK_THREAD_F12_F0(a0) - fsd f13, TASK_THREAD_F13_F0(a0) - fsd f14, TASK_THREAD_F14_F0(a0) - fsd f15, TASK_THREAD_F15_F0(a0) - fsd f16, TASK_THREAD_F16_F0(a0) - fsd f17, TASK_THREAD_F17_F0(a0) - fsd f18, TASK_THREAD_F18_F0(a0) - fsd f19, TASK_THREAD_F19_F0(a0) - fsd f20, TASK_THREAD_F20_F0(a0) - fsd f21, TASK_THREAD_F21_F0(a0) - fsd f22, TASK_THREAD_F22_F0(a0) - fsd f23, TASK_THREAD_F23_F0(a0) - fsd f24, TASK_THREAD_F24_F0(a0) - fsd f25, TASK_THREAD_F25_F0(a0) - fsd f26, TASK_THREAD_F26_F0(a0) - fsd f27, TASK_THREAD_F27_F0(a0) - fsd f28, TASK_THREAD_F28_F0(a0) - fsd f29, TASK_THREAD_F29_F0(a0) - fsd f30, TASK_THREAD_F30_F0(a0) - fsd f31, TASK_THREAD_F31_F0(a0) - sw t0, TASK_THREAD_FCSR_F0(a0) - csrc sstatus, t1 - ret -ENDPROC(__fstate_save) - -ENTRY(__fstate_restore) - li a2, TASK_THREAD_F0 - add a0, a0, a2 - li t1, SR_FS - lw t0, TASK_THREAD_FCSR_F0(a0) - csrs sstatus, t1 - fld f0, TASK_THREAD_F0_F0(a0) - fld f1, TASK_THREAD_F1_F0(a0) - fld f2, TASK_THREAD_F2_F0(a0) - fld f3, TASK_THREAD_F3_F0(a0) - fld f4, TASK_THREAD_F4_F0(a0) - fld f5, TASK_THREAD_F5_F0(a0) - fld f6, TASK_THREAD_F6_F0(a0) - fld f7, TASK_THREAD_F7_F0(a0) - fld f8, TASK_THREAD_F8_F0(a0) - fld f9, TASK_THREAD_F9_F0(a0) - fld f10, TASK_THREAD_F10_F0(a0) - fld f11, TASK_THREAD_F11_F0(a0) - fld f12, TASK_THREAD_F12_F0(a0) - fld f13, TASK_THREAD_F13_F0(a0) - fld f14, TASK_THREAD_F14_F0(a0) - fld f15, TASK_THREAD_F15_F0(a0) - fld f16, TASK_THREAD_F16_F0(a0) - fld f17, TASK_THREAD_F17_F0(a0) - fld f18, TASK_THREAD_F18_F0(a0) - fld f19, TASK_THREAD_F19_F0(a0) - fld f20, TASK_THREAD_F20_F0(a0) - fld f21, TASK_THREAD_F21_F0(a0) - fld f22, TASK_THREAD_F22_F0(a0) - fld f23, TASK_THREAD_F23_F0(a0) - fld f24, TASK_THREAD_F24_F0(a0) - fld f25, TASK_THREAD_F25_F0(a0) - fld f26, TASK_THREAD_F26_F0(a0) - fld f27, TASK_THREAD_F27_F0(a0) - fld f28, TASK_THREAD_F28_F0(a0) - fld f29, TASK_THREAD_F29_F0(a0) - fld f30, TASK_THREAD_F30_F0(a0) - fld f31, TASK_THREAD_F31_F0(a0) - fscsr t0 - csrc sstatus, t1 - ret -ENDPROC(__fstate_restore) - - .section ".rodata" /* Exception vector table */ ENTRY(excp_vect_table) diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S new file mode 100644 index 000000000000..1defb0618aff --- /dev/null +++ b/arch/riscv/kernel/fpu.S @@ -0,0 +1,106 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017 SiFive + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/linkage.h> + +#include <asm/asm.h> +#include <asm/csr.h> +#include <asm/asm-offsets.h> + +ENTRY(__fstate_save) + li a2, TASK_THREAD_F0 + add a0, a0, a2 + li t1, SR_FS + csrs sstatus, t1 + frcsr t0 + fsd f0, TASK_THREAD_F0_F0(a0) + fsd f1, TASK_THREAD_F1_F0(a0) + fsd f2, TASK_THREAD_F2_F0(a0) + fsd f3, TASK_THREAD_F3_F0(a0) + fsd f4, TASK_THREAD_F4_F0(a0) + fsd f5, TASK_THREAD_F5_F0(a0) + fsd f6, TASK_THREAD_F6_F0(a0) + fsd f7, TASK_THREAD_F7_F0(a0) + fsd f8, TASK_THREAD_F8_F0(a0) + fsd f9, TASK_THREAD_F9_F0(a0) + fsd f10, TASK_THREAD_F10_F0(a0) + fsd f11, TASK_THREAD_F11_F0(a0) + fsd f12, TASK_THREAD_F12_F0(a0) + fsd f13, TASK_THREAD_F13_F0(a0) + fsd f14, TASK_THREAD_F14_F0(a0) + fsd f15, TASK_THREAD_F15_F0(a0) + fsd f16, TASK_THREAD_F16_F0(a0) + fsd f17, TASK_THREAD_F17_F0(a0) + fsd f18, TASK_THREAD_F18_F0(a0) + fsd f19, TASK_THREAD_F19_F0(a0) + fsd f20, TASK_THREAD_F20_F0(a0) + fsd f21, TASK_THREAD_F21_F0(a0) + fsd f22, TASK_THREAD_F22_F0(a0) + fsd f23, TASK_THREAD_F23_F0(a0) + fsd f24, TASK_THREAD_F24_F0(a0) + fsd f25, TASK_THREAD_F25_F0(a0) + fsd f26, TASK_THREAD_F26_F0(a0) + fsd f27, TASK_THREAD_F27_F0(a0) + fsd f28, TASK_THREAD_F28_F0(a0) + fsd f29, TASK_THREAD_F29_F0(a0) + fsd f30, TASK_THREAD_F30_F0(a0) + fsd f31, TASK_THREAD_F31_F0(a0) + sw t0, TASK_THREAD_FCSR_F0(a0) + csrc sstatus, t1 + ret +ENDPROC(__fstate_save) + +ENTRY(__fstate_restore) + li a2, TASK_THREAD_F0 + add a0, a0, a2 + li t1, SR_FS + lw t0, TASK_THREAD_FCSR_F0(a0) + csrs sstatus, t1 + fld f0, TASK_THREAD_F0_F0(a0) + fld f1, TASK_THREAD_F1_F0(a0) + fld f2, TASK_THREAD_F2_F0(a0) + fld f3, TASK_THREAD_F3_F0(a0) + fld f4, TASK_THREAD_F4_F0(a0) + fld f5, TASK_THREAD_F5_F0(a0) + fld f6, TASK_THREAD_F6_F0(a0) + fld f7, TASK_THREAD_F7_F0(a0) + fld f8, TASK_THREAD_F8_F0(a0) + fld f9, TASK_THREAD_F9_F0(a0) + fld f10, TASK_THREAD_F10_F0(a0) + fld f11, TASK_THREAD_F11_F0(a0) + fld f12, TASK_THREAD_F12_F0(a0) + fld f13, TASK_THREAD_F13_F0(a0) + fld f14, TASK_THREAD_F14_F0(a0) + fld f15, TASK_THREAD_F15_F0(a0) + fld f16, TASK_THREAD_F16_F0(a0) + fld f17, TASK_THREAD_F17_F0(a0) + fld f18, TASK_THREAD_F18_F0(a0) + fld f19, TASK_THREAD_F19_F0(a0) + fld f20, TASK_THREAD_F20_F0(a0) + fld f21, TASK_THREAD_F21_F0(a0) + fld f22, TASK_THREAD_F22_F0(a0) + fld f23, TASK_THREAD_F23_F0(a0) + fld f24, TASK_THREAD_F24_F0(a0) + fld f25, TASK_THREAD_F25_F0(a0) + fld f26, TASK_THREAD_F26_F0(a0) + fld f27, TASK_THREAD_F27_F0(a0) + fld f28, TASK_THREAD_F28_F0(a0) + fld f29, TASK_THREAD_F29_F0(a0) + fld f30, TASK_THREAD_F30_F0(a0) + fld f31, TASK_THREAD_F31_F0(a0) + fscsr t0 + csrc sstatus, t1 + ret +ENDPROC(__fstate_restore) diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index c4d2c63f9a29..711190d473d4 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -47,6 +47,8 @@ ENTRY(_start) /* Save hart ID and DTB physical address */ mv s0, a0 mv s1, a1 + la a2, boot_cpu_hartid + REG_S a0, (a2) /* Initialize page tables and 
relocate to virtual addresses */ la sp, init_thread_union + THREAD_SIZE @@ -55,7 +57,7 @@ ENTRY(_start) /* Restore C environment */ la tp, init_task - sw s0, TASK_TI_CPU(tp) + sw zero, TASK_TI_CPU(tp) la sp, init_thread_union li a0, ASM_THREAD_SIZE diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c index 0cfac48a1272..48e6b7db83a1 100644 --- a/arch/riscv/kernel/irq.c +++ b/arch/riscv/kernel/irq.c @@ -8,6 +8,8 @@ #include <linux/interrupt.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> +#include <linux/seq_file.h> +#include <asm/smp.h> /* * Possible interrupt causes: @@ -24,12 +26,18 @@ */ #define INTERRUPT_CAUSE_FLAG (1UL << (__riscv_xlen - 1)) -asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs, unsigned long cause) +int arch_show_interrupts(struct seq_file *p, int prec) +{ + show_ipi_stats(p, prec); + return 0; +} + +asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); - switch (cause & ~INTERRUPT_CAUSE_FLAG) { + switch (regs->scause & ~INTERRUPT_CAUSE_FLAG) { case INTERRUPT_CAUSE_TIMER: riscv_timer_interrupt(); break; diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index 5721624886a1..8a5593ff9ff3 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -75,7 +75,6 @@ ENTRY(return_to_handler) RESTORE_RET_ABI_STATE jalr a1 ENDPROC(return_to_handler) -EXPORT_SYMBOL(return_to_handler) #endif #ifndef CONFIG_DYNAMIC_FTRACE diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index d7c6ca7c95ae..bef19993ea92 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -76,7 +76,9 @@ void show_regs(struct pt_regs *regs) void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { - regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL; + regs->sstatus = SR_SPIE; + if (has_fpu) + regs->sstatus |= SR_FS_INITIAL; regs->sepc = pc; regs->sp = sp; set_fs(USER_DS); @@ -84,12 +86,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc, void flush_thread(void) { +#ifdef CONFIG_FPU /* * Reset FPU context * frm: round to nearest, ties to even (IEEE default) * fflags: accrued exceptions cleared */ memset(&current->thread.fstate, 0, sizeof(current->thread.fstate)); +#endif } int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 9f82a7e34c64..60f1e02eed36 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -28,6 +28,9 @@ enum riscv_regset { REGSET_X, +#ifdef CONFIG_FPU + REGSET_F, +#endif }; static int riscv_gpr_get(struct task_struct *target, @@ -54,6 +57,45 @@ static int riscv_gpr_set(struct task_struct *target, return ret; } +#ifdef CONFIG_FPU +static int riscv_fpr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int ret; + struct __riscv_d_ext_state *fstate = &target->thread.fstate; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0, + offsetof(struct __riscv_d_ext_state, fcsr)); + if (!ret) { + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0, + offsetof(struct __riscv_d_ext_state, fcsr) + + sizeof(fstate->fcsr)); + } + + return ret; +} + +static int riscv_fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct __riscv_d_ext_state *fstate = 
&target->thread.fstate; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0, + offsetof(struct __riscv_d_ext_state, fcsr)); + if (!ret) { + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0, + offsetof(struct __riscv_d_ext_state, fcsr) + + sizeof(fstate->fcsr)); + } + + return ret; +} +#endif static const struct user_regset riscv_user_regset[] = { [REGSET_X] = { @@ -64,6 +106,16 @@ static const struct user_regset riscv_user_regset[] = { .get = &riscv_gpr_get, .set = &riscv_gpr_set, }, +#ifdef CONFIG_FPU + [REGSET_F] = { + .core_note_type = NT_PRFPREG, + .n = ELF_NFPREG, + .size = sizeof(elf_fpreg_t), + .align = sizeof(elf_fpreg_t), + .get = &riscv_fpr_get, + .set = &riscv_fpr_set, + }, +#endif }; static const struct user_regset_view riscv_user_native_view = { diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index b2d26d9d8489..2c290e6aaa6e 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -81,6 +81,16 @@ EXPORT_SYMBOL(empty_zero_page); /* The lucky hart to first increment this variable will boot the other cores */ atomic_t hart_lottery; +unsigned long boot_cpu_hartid; + +unsigned long __cpuid_to_hartid_map[NR_CPUS] = { + [0 ... NR_CPUS-1] = INVALID_HARTID +}; + +void __init smp_setup_processor_id(void) +{ + cpuid_to_hartid_map(0) = boot_cpu_hartid; +} #ifdef CONFIG_BLK_DEV_INITRD static void __init setup_initrd(void) @@ -227,7 +237,10 @@ void __init setup_arch(char **cmdline_p) setup_bootmem(); paging_init(); unflatten_device_tree(); + +#ifdef CONFIG_SWIOTLB swiotlb_init(1); +#endif #ifdef CONFIG_SMP setup_smp(); diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 718d0c984ef0..f9b5e7e352ef 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -37,45 +37,69 @@ struct rt_sigframe { struct ucontext uc; }; -static long restore_d_state(struct pt_regs *regs, - struct __riscv_d_ext_state __user *state) +#ifdef CONFIG_FPU +static long restore_fp_state(struct pt_regs *regs, + union __riscv_fp_state *sc_fpregs) { long err; + struct __riscv_d_ext_state __user *state = &sc_fpregs->d; + size_t i; + err = __copy_from_user(&current->thread.fstate, state, sizeof(*state)); - if (likely(!err)) - fstate_restore(current, regs); + if (unlikely(err)) + return err; + + fstate_restore(current, regs); + + /* We support no other extension state at this time. */ + for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) { + u32 value; + + err = __get_user(value, &sc_fpregs->q.reserved[i]); + if (unlikely(err)) + break; + if (value != 0) + return -EINVAL; + } + return err; } -static long save_d_state(struct pt_regs *regs, - struct __riscv_d_ext_state __user *state) +static long save_fp_state(struct pt_regs *regs, + union __riscv_fp_state *sc_fpregs) { + long err; + struct __riscv_d_ext_state __user *state = &sc_fpregs->d; + size_t i; + fstate_save(current, regs); - return __copy_to_user(state, &current->thread.fstate, sizeof(*state)); + err = __copy_to_user(state, &current->thread.fstate, sizeof(*state)); + if (unlikely(err)) + return err; + + /* We support no other extension state at this time. 
*/ + for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) { + err = __put_user(0, &sc_fpregs->q.reserved[i]); + if (unlikely(err)) + break; + } + + return err; } +#else +#define save_fp_state(task, regs) (0) +#define restore_fp_state(task, regs) (0) +#endif static long restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { long err; - size_t i; /* sc_regs is structured the same as the start of pt_regs */ err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs)); - if (unlikely(err)) - return err; /* Restore the floating-point state. */ - err = restore_d_state(regs, &sc->sc_fpregs.d); - if (unlikely(err)) - return err; - /* We support no other extension state at this time. */ - for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++) { - u32 value; - err = __get_user(value, &sc->sc_fpregs.q.reserved[i]); - if (unlikely(err)) - break; - if (value != 0) - return -EINVAL; - } + if (has_fpu) + err |= restore_fp_state(regs, &sc->sc_fpregs); return err; } @@ -124,14 +148,11 @@ static long setup_sigcontext(struct rt_sigframe __user *frame, { struct sigcontext __user *sc = &frame->uc.uc_mcontext; long err; - size_t i; /* sc_regs is structured the same as the start of pt_regs */ err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs)); /* Save the floating-point state. */ - err |= save_d_state(regs, &sc->sc_fpregs.d); - /* We support no other extension state at this time. */ - for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++) - err |= __put_user(0, &sc->sc_fpregs.q.reserved[i]); + if (has_fpu) + err |= save_fp_state(regs, &sc->sc_fpregs); return err; } diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index 906fe21ea21b..57b1383e5ef7 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -22,23 +22,44 @@ #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/sched.h> +#include <linux/seq_file.h> #include <asm/sbi.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> -/* A collection of single bit ipi messages. */ -static struct { - unsigned long bits ____cacheline_aligned; -} ipi_data[NR_CPUS] __cacheline_aligned; - enum ipi_message_type { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_MAX }; +/* A collection of single bit ipi messages. 
*/ +static struct { + unsigned long stats[IPI_MAX] ____cacheline_aligned; + unsigned long bits ____cacheline_aligned; +} ipi_data[NR_CPUS] __cacheline_aligned; + +int riscv_hartid_to_cpuid(int hartid) +{ + int i = -1; + + for (i = 0; i < NR_CPUS; i++) + if (cpuid_to_hartid_map(i) == hartid) + return i; + pr_err("Couldn't find cpu id for hartid [%d]\n", hartid); + BUG(); + return i; +} + +void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out) +{ + int cpu; + + for_each_cpu(cpu, in) + cpumask_set_cpu(cpuid_to_hartid_map(cpu), out); +} /* Unsupported */ int setup_profiling_timer(unsigned int multiplier) { @@ -48,6 +69,7 @@ int setup_profiling_timer(unsigned int multiplier) void riscv_software_interrupt(void) { unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits; + unsigned long *stats = ipi_data[smp_processor_id()].stats; /* Clear pending IPI */ csr_clear(sip, SIE_SSIE); @@ -62,11 +84,15 @@ void riscv_software_interrupt(void) if (ops == 0) return; - if (ops & (1 << IPI_RESCHEDULE)) + if (ops & (1 << IPI_RESCHEDULE)) { + stats[IPI_RESCHEDULE]++; scheduler_ipi(); + } - if (ops & (1 << IPI_CALL_FUNC)) + if (ops & (1 << IPI_CALL_FUNC)) { + stats[IPI_CALL_FUNC]++; generic_smp_call_function_interrupt(); + } BUG_ON((ops >> IPI_MAX) != 0); @@ -78,14 +104,36 @@ void riscv_software_interrupt(void) static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) { - int i; + int cpuid, hartid; + struct cpumask hartid_mask; + cpumask_clear(&hartid_mask); mb(); - for_each_cpu(i, to_whom) - set_bit(operation, &ipi_data[i].bits); - + for_each_cpu(cpuid, to_whom) { + set_bit(operation, &ipi_data[cpuid].bits); + hartid = cpuid_to_hartid_map(cpuid); + cpumask_set_cpu(hartid, &hartid_mask); + } mb(); - sbi_send_ipi(cpumask_bits(to_whom)); + sbi_send_ipi(cpumask_bits(&hartid_mask)); +} + +static const char * const ipi_names[] = { + [IPI_RESCHEDULE] = "Rescheduling interrupts", + [IPI_CALL_FUNC] = "Function call interrupts", +}; + +void show_ipi_stats(struct seq_file *p, int prec) +{ + unsigned int cpu, i; + + for (i = 0; i < IPI_MAX; i++) { + seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, + prec >= 4 ? 
" " : ""); + for_each_online_cpu(cpu) + seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]); + seq_printf(p, " %s\n", ipi_names[i]); + } } void arch_send_call_function_ipi_mask(struct cpumask *mask) @@ -127,7 +175,7 @@ void smp_send_reschedule(int cpu) void flush_icache_mm(struct mm_struct *mm, bool local) { unsigned int cpu; - cpumask_t others, *mask; + cpumask_t others, hmask, *mask; preempt_disable(); @@ -145,9 +193,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local) */ cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); local |= cpumask_empty(&others); - if (mm != current->active_mm || !local) - sbi_remote_fence_i(others.bits); - else { + if (mm != current->active_mm || !local) { + cpumask_clear(&hmask); + riscv_cpuid_to_hartid_mask(&others, &hmask); + sbi_remote_fence_i(hmask.bits); + } else { /* * It's assumed that at least one strongly ordered operation is * performed on this hart between setting a hart's cpumask bit diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 56abab6a9812..18cda0e8cf94 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -30,6 +30,7 @@ #include <linux/irq.h> #include <linux/of.h> #include <linux/sched/task_stack.h> +#include <linux/sched/mm.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> @@ -50,25 +51,33 @@ void __init smp_prepare_cpus(unsigned int max_cpus) void __init setup_smp(void) { struct device_node *dn = NULL; - int hart, im_okay_therefore_i_am = 0; + int hart; + bool found_boot_cpu = false; + int cpuid = 1; while ((dn = of_find_node_by_type(dn, "cpu"))) { - hart = riscv_of_processor_hart(dn); - if (hart >= 0) { - set_cpu_possible(hart, true); - set_cpu_present(hart, true); - if (hart == smp_processor_id()) { - BUG_ON(im_okay_therefore_i_am); - im_okay_therefore_i_am = 1; - } + hart = riscv_of_processor_hartid(dn); + if (hart < 0) + continue; + + if (hart == cpuid_to_hartid_map(0)) { + BUG_ON(found_boot_cpu); + found_boot_cpu = 1; + continue; } + + cpuid_to_hartid_map(cpuid) = hart; + set_cpu_possible(cpuid, true); + set_cpu_present(cpuid, true); + cpuid++; } - BUG_ON(!im_okay_therefore_i_am); + BUG_ON(!found_boot_cpu); } int __cpu_up(unsigned int cpu, struct task_struct *tidle) { + int hartid = cpuid_to_hartid_map(cpu); tidle->thread_info.cpu = cpu; /* @@ -79,8 +88,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) * the spinning harts that they can continue the boot process. */ smp_mb(); - __cpu_up_stack_pointer[cpu] = task_stack_page(tidle) + THREAD_SIZE; - __cpu_up_task_pointer[cpu] = tidle; + WRITE_ONCE(__cpu_up_stack_pointer[hartid], + task_stack_page(tidle) + THREAD_SIZE); + WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle); while (!cpu_online(cpu)) cpu_relax(); @@ -100,14 +110,22 @@ asmlinkage void __init smp_callin(void) struct mm_struct *mm = &init_mm; /* All kernel threads share the same mm context. */ - atomic_inc(&mm->mm_count); + mmgrab(mm); current->active_mm = mm; trap_init(); notify_cpu_starting(smp_processor_id()); set_cpu_online(smp_processor_id(), 1); + /* + * Remote TLB flushes are ignored while the CPU is offline, so emit + * a local TLB flush right now just in case. + */ local_flush_tlb_all(); - local_irq_enable(); + /* + * Disable preemption before enabling interrupts, so we don't try to + * schedule a CPU that hasn't actually started yet. 
+ */ preempt_disable(); + local_irq_enable(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); } diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index 445ec84f9a47..5739bd05d289 100644 --- a/arch/riscv/lib/Makefile +++ b/arch/riscv/lib/Makefile @@ -2,6 +2,7 @@ lib-y += delay.o lib-y += memcpy.o lib-y += memset.o lib-y += uaccess.o -lib-y += tishift.o + +lib-$(CONFIG_64BIT) += tishift.o lib-$(CONFIG_32BIT) += udivdi3.o diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c index 70ef2724cdf6..bd2f2db557cc 100644 --- a/arch/riscv/mm/ioremap.c +++ b/arch/riscv/mm/ioremap.c @@ -42,7 +42,7 @@ static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size, /* Page-align mappings */ offset = addr & (~PAGE_MASK); - addr &= PAGE_MASK; + addr -= offset; size = PAGE_ALIGN(size + offset); area = get_vm_area_caller(size, VM_IOREMAP, caller); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 039a3417dfc4..8b25e1f45b27 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -783,6 +783,17 @@ config VFIO_CCW To compile this driver as a module, choose M here: the module will be called vfio_ccw. +config VFIO_AP + def_tristate n + prompt "VFIO support for AP devices" + depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM + help + This driver grants access to Adjunct Processor (AP) devices + via the VFIO mediated device interface. + + To compile this driver as a module, choose M here: the module + will be called vfio_ap. + endmenu menu "Dump support" diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 433a994b1a89..54f375627532 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -25,10 +25,6 @@ #include "appldata.h" - -#define LOAD_INT(x) ((x) >> FSHIFT) -#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) - /* * OS data * diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 941d8cc6c9f5..259d1698ac50 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -668,7 +668,6 @@ CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index eb6f75f24208..37fd60c20e22 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -610,7 +610,6 @@ CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index c54cb26eb7f5..812d9498d97b 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -44,7 +44,7 @@ struct s390_aes_ctx { int key_len; unsigned long fc; union { - struct crypto_skcipher *blk; + struct crypto_sync_skcipher *blk; struct crypto_cipher *cip; } fallback; }; @@ -54,7 +54,7 @@ struct s390_xts_ctx { u8 pcc_key[32]; int key_len; unsigned long fc; - struct crypto_skcipher *fallback; + struct crypto_sync_skcipher *fallback; }; struct gcm_sg_walk { @@ -184,14 +184,15 @@ static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key, struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); unsigned int ret; - crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK); - 
crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags & + crypto_sync_skcipher_clear_flags(sctx->fallback.blk, + CRYPTO_TFM_REQ_MASK); + crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len); + ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len); tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) & + tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) & CRYPTO_TFM_RES_MASK; return ret; @@ -204,9 +205,9 @@ static int fallback_blk_dec(struct blkcipher_desc *desc, unsigned int ret; struct crypto_blkcipher *tfm = desc->tfm; struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm); - SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk); + SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk); - skcipher_request_set_tfm(req, sctx->fallback.blk); + skcipher_request_set_sync_tfm(req, sctx->fallback.blk); skcipher_request_set_callback(req, desc->flags, NULL, NULL); skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); @@ -223,9 +224,9 @@ static int fallback_blk_enc(struct blkcipher_desc *desc, unsigned int ret; struct crypto_blkcipher *tfm = desc->tfm; struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm); - SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk); + SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk); - skcipher_request_set_tfm(req, sctx->fallback.blk); + skcipher_request_set_sync_tfm(req, sctx->fallback.blk); skcipher_request_set_callback(req, desc->flags, NULL, NULL); skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); @@ -306,8 +307,7 @@ static int fallback_init_blk(struct crypto_tfm *tfm) const char *name = tfm->__crt_alg->cra_name; struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); - sctx->fallback.blk = crypto_alloc_skcipher(name, 0, - CRYPTO_ALG_ASYNC | + sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(sctx->fallback.blk)) { @@ -323,7 +323,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm) { struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); - crypto_free_skcipher(sctx->fallback.blk); + crypto_free_sync_skcipher(sctx->fallback.blk); } static struct crypto_alg ecb_aes_alg = { @@ -453,14 +453,15 @@ static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key, struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); unsigned int ret; - crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK); - crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags & + crypto_sync_skcipher_clear_flags(xts_ctx->fallback, + CRYPTO_TFM_REQ_MASK); + crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len); + ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len); tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; - tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) & + tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) & CRYPTO_TFM_RES_MASK; return ret; @@ -472,10 +473,10 @@ static int xts_fallback_decrypt(struct blkcipher_desc *desc, { struct crypto_blkcipher *tfm = desc->tfm; struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm); - SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback); + SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback); unsigned int ret; - skcipher_request_set_tfm(req, xts_ctx->fallback); + skcipher_request_set_sync_tfm(req, xts_ctx->fallback); skcipher_request_set_callback(req, 
desc->flags, NULL, NULL); skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); @@ -491,10 +492,10 @@ static int xts_fallback_encrypt(struct blkcipher_desc *desc, { struct crypto_blkcipher *tfm = desc->tfm; struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm); - SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback); + SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback); unsigned int ret; - skcipher_request_set_tfm(req, xts_ctx->fallback); + skcipher_request_set_sync_tfm(req, xts_ctx->fallback); skcipher_request_set_callback(req, desc->flags, NULL, NULL); skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); @@ -611,8 +612,7 @@ static int xts_fallback_init(struct crypto_tfm *tfm) const char *name = tfm->__crt_alg->cra_name; struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); - xts_ctx->fallback = crypto_alloc_skcipher(name, 0, - CRYPTO_ALG_ASYNC | + xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(xts_ctx->fallback)) { @@ -627,7 +627,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm) { struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); - crypto_free_skcipher(xts_ctx->fallback); + crypto_free_sync_skcipher(xts_ctx->fallback); } static struct crypto_alg xts_aes_alg = { diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 20add000dd6d..7cb6a52f727d 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -221,7 +221,6 @@ CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 97db2fba546a..63b46e30b2c3 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -9,6 +9,8 @@ #include <linux/sched/task_stack.h> #include <linux/thread_info.h> +#include <asm-generic/compat.h> + #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \ typeof(0?(__force t)0:0ULL), u64)) @@ -51,34 +53,18 @@ #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "s390\0\0\0\0" -typedef u32 compat_size_t; -typedef s32 compat_ssize_t; -typedef s32 compat_clock_t; -typedef s32 compat_pid_t; typedef u16 __compat_uid_t; typedef u16 __compat_gid_t; typedef u32 __compat_uid32_t; typedef u32 __compat_gid32_t; typedef u16 compat_mode_t; -typedef u32 compat_ino_t; typedef u16 compat_dev_t; -typedef s32 compat_off_t; -typedef s64 compat_loff_t; typedef u16 compat_nlink_t; typedef u16 compat_ipc_pid_t; -typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s32 compat_key_t; -typedef s32 compat_timer_t; - -typedef s32 compat_int_t; -typedef s32 compat_long_t; typedef s64 compat_s64; -typedef u32 compat_uint_t; -typedef u32 compat_ulong_t; typedef u64 compat_u64; -typedef u32 compat_uptr_t; typedef struct { u32 mask; diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 29c940bf8506..d5d24889c3bc 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -44,6 +44,7 @@ #define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2) #define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3) #define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4) +#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5) #define SIGP_CTRL_C 0x80 #define SIGP_CTRL_SCN_MASK 0x3f @@ -186,6 +187,7 @@ struct kvm_s390_sie_block { #define ECA_AIV 0x00200000 #define ECA_VX 0x00020000 #define ECA_PROTEXCI 0x00002000 +#define ECA_APIE 0x00000008 #define ECA_SII 0x00000001 __u32 eca; /* 
0x004c */ #define ICPT_INST 0x04 @@ -237,7 +239,11 @@ struct kvm_s390_sie_block { psw_t gpsw; /* 0x0090 */ __u64 gg14; /* 0x00a0 */ __u64 gg15; /* 0x00a8 */ - __u8 reservedb0[20]; /* 0x00b0 */ + __u8 reservedb0[8]; /* 0x00b0 */ +#define HPID_KVM 0x4 +#define HPID_VSIE 0x5 + __u8 hpid; /* 0x00b8 */ + __u8 reservedb9[11]; /* 0x00b9 */ __u16 extcpuaddr; /* 0x00c4 */ __u16 eic; /* 0x00c6 */ __u32 reservedc8; /* 0x00c8 */ @@ -255,6 +261,8 @@ struct kvm_s390_sie_block { __u8 reservede4[4]; /* 0x00e4 */ __u64 tecmc; /* 0x00e8 */ __u8 reservedf0[12]; /* 0x00f0 */ +#define CRYCB_FORMAT_MASK 0x00000003 +#define CRYCB_FORMAT0 0x00000000 #define CRYCB_FORMAT1 0x00000001 #define CRYCB_FORMAT2 0x00000003 __u32 crycbd; /* 0x00fc */ @@ -715,6 +723,7 @@ struct kvm_s390_crypto { __u32 crycbd; __u8 aes_kw; __u8 dea_kw; + __u8 apie; }; #define APCB0_MASK_SIZE 1 @@ -855,6 +864,10 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct kvm_async_pf *work); +void kvm_arch_crypto_clear_masks(struct kvm *kvm); +void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, + unsigned long *aqm, unsigned long *adm); + extern int sie64a(struct kvm_s390_sie_block *, u64 *); extern char sie_exit; diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index fd79c0d35dc4..a1fbf15d53aa 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -15,6 +15,7 @@ #define __IGNORE_pkey_alloc #define __IGNORE_pkey_free +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_GETHOSTNAME @@ -25,7 +26,6 @@ #define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLD_GETRLIMIT #define __ARCH_WANT_SYS_OLD_MMAP @@ -34,6 +34,7 @@ #define __ARCH_WANT_SYS_SIGPROCMASK # ifdef CONFIG_COMPAT # define __ARCH_WANT_COMPAT_SYS_TIME +# define __ARCH_WANT_SYS_UTIME32 # endif #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_VFORK diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index e364873e0d10..dc38a90cf091 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -18,3 +18,4 @@ generic-y += shmbuf.h generic-y += sockios.h generic-y += swab.h generic-y += termbits.h +generic-y += siginfo.h
\ No newline at end of file diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 9a50f02b9894..16511d97e8dc 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -160,6 +160,8 @@ struct kvm_s390_vm_cpu_subfunc { #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 #define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2 #define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3 +#define KVM_S390_VM_CRYPTO_ENABLE_APIE 4 +#define KVM_S390_VM_CRYPTO_DISABLE_APIE 5 /* kvm attributes for migration mode */ #define KVM_S390_VM_MIGRATION_STOP 0 diff --git a/arch/s390/include/uapi/asm/siginfo.h b/arch/s390/include/uapi/asm/siginfo.h deleted file mode 100644 index 6984820f2f1c..000000000000 --- a/arch/s390/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * S390 version - * - * Derived from "include/asm-i386/siginfo.h" - */ - -#ifndef _S390_SIGINFO_H -#define _S390_SIGINFO_H - -#ifdef __s390x__ -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#endif - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ac5da6b0b862..fe24150ff666 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -40,6 +40,7 @@ #include <asm/sclp.h> #include <asm/cpacf.h> #include <asm/timex.h> +#include <asm/ap.h> #include "kvm-s390.h" #include "gaccess.h" @@ -844,20 +845,24 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) kvm_s390_vcpu_block_all(kvm); - kvm_for_each_vcpu(i, vcpu, kvm) + kvm_for_each_vcpu(i, vcpu, kvm) { kvm_s390_vcpu_crypto_setup(vcpu); + /* recreate the shadow crycb by leaving the VSIE handler */ + kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu); + } kvm_s390_vcpu_unblock_all(kvm); } static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) { - if (!test_kvm_facility(kvm, 76)) - return -EINVAL; - mutex_lock(&kvm->lock); switch (attr->attr) { case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: + if (!test_kvm_facility(kvm, 76)) { + mutex_unlock(&kvm->lock); + return -EINVAL; + } get_random_bytes( kvm->arch.crypto.crycb->aes_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); @@ -865,6 +870,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: + if (!test_kvm_facility(kvm, 76)) { + mutex_unlock(&kvm->lock); + return -EINVAL; + } get_random_bytes( kvm->arch.crypto.crycb->dea_wrapping_key_mask, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); @@ -872,17 +881,39 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: + if (!test_kvm_facility(kvm, 76)) { + mutex_unlock(&kvm->lock); + return -EINVAL; + } kvm->arch.crypto.aes_kw = 0; memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); break; case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: + if (!test_kvm_facility(kvm, 76)) { + mutex_unlock(&kvm->lock); + return -EINVAL; + } kvm->arch.crypto.dea_kw = 0; memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); break; + case KVM_S390_VM_CRYPTO_ENABLE_APIE: + if (!ap_instructions_available()) { 
+ mutex_unlock(&kvm->lock); + return -EOPNOTSUPP; + } + kvm->arch.crypto.apie = 1; + break; + case KVM_S390_VM_CRYPTO_DISABLE_APIE: + if (!ap_instructions_available()) { + mutex_unlock(&kvm->lock); + return -EOPNOTSUPP; + } + kvm->arch.crypto.apie = 0; + break; default: mutex_unlock(&kvm->lock); return -ENXIO; @@ -1491,6 +1522,10 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: ret = 0; break; + case KVM_S390_VM_CRYPTO_ENABLE_APIE: + case KVM_S390_VM_CRYPTO_DISABLE_APIE: + ret = ap_instructions_available() ? 0 : -ENXIO; + break; default: ret = -ENXIO; break; @@ -1992,55 +2027,101 @@ long kvm_arch_vm_ioctl(struct file *filp, return r; } -static int kvm_s390_query_ap_config(u8 *config) -{ - u32 fcn_code = 0x04000000UL; - u32 cc = 0; - - memset(config, 0, 128); - asm volatile( - "lgr 0,%1\n" - "lgr 2,%2\n" - ".long 0xb2af0000\n" /* PQAP(QCI) */ - "0: ipm %0\n" - "srl %0,28\n" - "1:\n" - EX_TABLE(0b, 1b) - : "+r" (cc) - : "r" (fcn_code), "r" (config) - : "cc", "0", "2", "memory" - ); - - return cc; -} - static int kvm_s390_apxa_installed(void) { - u8 config[128]; - int cc; + struct ap_config_info info; - if (test_facility(12)) { - cc = kvm_s390_query_ap_config(config); - - if (cc) - pr_err("PQAP(QCI) failed with cc=%d", cc); - else - return config[0] & 0x40; + if (ap_instructions_available()) { + if (ap_qci(&info) == 0) + return info.apxa; } return 0; } +/* + * The format of the crypto control block (CRYCB) is specified in the 3 low + * order bits of the CRYCB designation (CRYCBD) field as follows: + * Format 0: Neither the message security assist extension 3 (MSAX3) nor the + * AP extended addressing (APXA) facility are installed. + * Format 1: The APXA facility is not installed but the MSAX3 facility is. 
+ * Format 2: Both the APXA and MSAX3 facilities are installed + */ static void kvm_s390_set_crycb_format(struct kvm *kvm) { kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; + /* Clear the CRYCB format bits - i.e., set format 0 by default */ + kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); + + /* Check whether MSAX3 is installed */ + if (!test_kvm_facility(kvm, 76)) + return; + if (kvm_s390_apxa_installed()) kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; else kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; } +void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, + unsigned long *aqm, unsigned long *adm) +{ + struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; + + mutex_lock(&kvm->lock); + kvm_s390_vcpu_block_all(kvm); + + switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { + case CRYCB_FORMAT2: /* APCB1 use 256 bits */ + memcpy(crycb->apcb1.apm, apm, 32); + VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", + apm[0], apm[1], apm[2], apm[3]); + memcpy(crycb->apcb1.aqm, aqm, 32); + VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx", + aqm[0], aqm[1], aqm[2], aqm[3]); + memcpy(crycb->apcb1.adm, adm, 32); + VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx", + adm[0], adm[1], adm[2], adm[3]); + break; + case CRYCB_FORMAT1: + case CRYCB_FORMAT0: /* Fall through both use APCB0 */ + memcpy(crycb->apcb0.apm, apm, 8); + memcpy(crycb->apcb0.aqm, aqm, 2); + memcpy(crycb->apcb0.adm, adm, 2); + VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x", + apm[0], *((unsigned short *)aqm), + *((unsigned short *)adm)); + break; + default: /* Can not happen */ + break; + } + + /* recreate the shadow crycb for each vcpu */ + kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); + kvm_s390_vcpu_unblock_all(kvm); + mutex_unlock(&kvm->lock); +} +EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks); + +void kvm_arch_crypto_clear_masks(struct kvm *kvm) +{ + mutex_lock(&kvm->lock); + kvm_s390_vcpu_block_all(kvm); + + memset(&kvm->arch.crypto.crycb->apcb0, 0, + sizeof(kvm->arch.crypto.crycb->apcb0)); + memset(&kvm->arch.crypto.crycb->apcb1, 0, + sizeof(kvm->arch.crypto.crycb->apcb1)); + + VM_EVENT(kvm, 3, "%s", "CLR CRYCB:"); + /* recreate the shadow crycb for each vcpu */ + kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); + kvm_s390_vcpu_unblock_all(kvm); + mutex_unlock(&kvm->lock); +} +EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks); + static u64 kvm_s390_get_initial_cpuid(void) { struct cpuid cpuid; @@ -2052,12 +2133,12 @@ static u64 kvm_s390_get_initial_cpuid(void) static void kvm_s390_crypto_init(struct kvm *kvm) { - if (!test_kvm_facility(kvm, 76)) - return; - kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; kvm_s390_set_crycb_format(kvm); + if (!test_kvm_facility(kvm, 76)) + return; + /* Enable AES/DEA protected key functions by default */ kvm->arch.crypto.aes_kw = 1; kvm->arch.crypto.dea_kw = 1; @@ -2583,17 +2664,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) { - if (!test_kvm_facility(vcpu->kvm, 76)) + /* + * If the AP instructions are not being interpreted and the MSAX3 + * facility is not configured for the guest, there is nothing to set up. 
+ */ + if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) return; + vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); + vcpu->arch.sie_block->eca &= ~ECA_APIE; + + if (vcpu->kvm->arch.crypto.apie) + vcpu->arch.sie_block->eca |= ECA_APIE; + /* Set up protected key support */ if (vcpu->kvm->arch.crypto.aes_kw) vcpu->arch.sie_block->ecb3 |= ECB3_AES; if (vcpu->kvm->arch.crypto.dea_kw) vcpu->arch.sie_block->ecb3 |= ECB3_DEA; - - vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; } void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) @@ -2685,6 +2774,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; + vcpu->arch.sie_block->hpid = HPID_KVM; + kvm_s390_vcpu_crypto_setup(vcpu); return rc; @@ -2768,18 +2859,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) exit_sie(vcpu); } +bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu) +{ + return atomic_read(&vcpu->arch.sie_block->prog20) & + (PROG_BLOCK_SIE | PROG_REQUEST); +} + static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) { atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); } /* - * Kick a guest cpu out of SIE and wait until SIE is not running. + * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running. * If the CPU is not running (e.g. waiting as idle) the function will * return immediately. */ void exit_sie(struct kvm_vcpu *vcpu) { kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT); + kvm_s390_vsie_kick(vcpu); while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) cpu_relax(); } @@ -3196,6 +3294,8 @@ retry: /* nothing to do, just clear the request */ kvm_clear_request(KVM_REQ_UNHALT, vcpu); + /* we left the vsie handler, nothing to do, just clear the request */ + kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu); return 0; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 981e3ba97461..1f6e36cdce0d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -290,6 +290,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu); +bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu); void exit_sie(struct kvm_vcpu *vcpu); void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu); int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index a2b28cd1e3fe..a153257bf7d9 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -135,14 +135,148 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) atomic_set(&scb_s->cpuflags, newflags); return 0; } +/* Copy to APCB FORMAT1 from APCB FORMAT0 */ +static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s, + unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h) +{ + struct kvm_s390_apcb0 tmp; -/* + if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0))) + return -EFAULT; + + apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0]; + apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL; + apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL; + + return 0; + +} + +/** + * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0 + * @vcpu: pointer to the virtual CPU + * @apcb_s: pointer to start of apcb in the shadow crycb + * @apcb_o: 
pointer to start of original apcb in the guest2 + * @apcb_h: pointer to start of apcb in the guest1 + * + * Returns 0 and -EFAULT on error reading guest apcb + */ +static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s, + unsigned long apcb_o, unsigned long *apcb_h) +{ + if (read_guest_real(vcpu, apcb_o, apcb_s, + sizeof(struct kvm_s390_apcb0))) + return -EFAULT; + + bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0)); + + return 0; +} + +/** + * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB + * @vcpu: pointer to the virtual CPU + * @apcb_s: pointer to start of apcb in the shadow crycb + * @apcb_o: pointer to start of original guest apcb + * @apcb_h: pointer to start of apcb in the host + * + * Returns 0 and -EFAULT on error reading guest apcb + */ +static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s, + unsigned long apcb_o, + unsigned long *apcb_h) +{ + if (read_guest_real(vcpu, apcb_o, apcb_s, + sizeof(struct kvm_s390_apcb1))) + return -EFAULT; + + bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1)); + + return 0; +} + +/** + * setup_apcb - Create a shadow copy of the apcb. + * @vcpu: pointer to the virtual CPU + * @crycb_s: pointer to shadow crycb + * @crycb_o: pointer to original guest crycb + * @crycb_h: pointer to the host crycb + * @fmt_o: format of the original guest crycb. + * @fmt_h: format of the host crycb. + * + * Checks the compatibility between the guest and host crycb and calls the + * appropriate copy function. + * + * Return 0 or an error number if the guest and host crycb are incompatible. + */ +static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s, + const u32 crycb_o, + struct kvm_s390_crypto_cb *crycb_h, + int fmt_o, int fmt_h) +{ + struct kvm_s390_crypto_cb *crycb; + + crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o; + + switch (fmt_o) { + case CRYCB_FORMAT2: + if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK)) + return -EACCES; + if (fmt_h != CRYCB_FORMAT2) + return -EINVAL; + return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1, + (unsigned long) &crycb->apcb1, + (unsigned long *)&crycb_h->apcb1); + case CRYCB_FORMAT1: + switch (fmt_h) { + case CRYCB_FORMAT2: + return setup_apcb10(vcpu, &crycb_s->apcb1, + (unsigned long) &crycb->apcb0, + &crycb_h->apcb1); + case CRYCB_FORMAT1: + return setup_apcb00(vcpu, + (unsigned long *) &crycb_s->apcb0, + (unsigned long) &crycb->apcb0, + (unsigned long *) &crycb_h->apcb0); + } + break; + case CRYCB_FORMAT0: + if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK)) + return -EACCES; + + switch (fmt_h) { + case CRYCB_FORMAT2: + return setup_apcb10(vcpu, &crycb_s->apcb1, + (unsigned long) &crycb->apcb0, + &crycb_h->apcb1); + case CRYCB_FORMAT1: + case CRYCB_FORMAT0: + return setup_apcb00(vcpu, + (unsigned long *) &crycb_s->apcb0, + (unsigned long) &crycb->apcb0, + (unsigned long *) &crycb_h->apcb0); + } + } + return -EINVAL; +} + +/** + * shadow_crycb - Create a shadow copy of the crycb block + * @vcpu: a pointer to the virtual CPU + * @vsie_page: a pointer to internal date used for the vSIE + * * Create a shadow copy of the crycb block and setup key wrapping, if * requested for guest 3 and enabled for guest 2. * - * We only accept format-1 (no AP in g2), but convert it into format-2 + * We accept format-1 or format-2, but we convert format-1 into format-2 + * in the shadow CRYCB. + * Using format-2 enables the firmware to choose the right format when + * scheduling the SIE. 
* There is nothing to do for format-0. * + * This function centralize the issuing of set_validity_icpt() for all + * the subfunctions working on the crycb. + * * Returns: - 0 if shadowed or nothing to do * - > 0 if control has to be given to guest 2 */ @@ -154,23 +288,40 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) const u32 crycb_addr = crycbd_o & 0x7ffffff8U; unsigned long *b1, *b2; u8 ecb3_flags; + int apie_h; + int key_msk = test_kvm_facility(vcpu->kvm, 76); + int fmt_o = crycbd_o & CRYCB_FORMAT_MASK; + int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK; + int ret = 0; scb_s->crycbd = 0; - if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1)) - return 0; - /* format-1 is supported with message-security-assist extension 3 */ - if (!test_kvm_facility(vcpu->kvm, 76)) + + apie_h = vcpu->arch.sie_block->eca & ECA_APIE; + if (!apie_h && !key_msk) return 0; + + if (!crycb_addr) + return set_validity_icpt(scb_s, 0x0039U); + + if (fmt_o == CRYCB_FORMAT1) + if ((crycb_addr & PAGE_MASK) != + ((crycb_addr + 128) & PAGE_MASK)) + return set_validity_icpt(scb_s, 0x003CU); + + if (apie_h && (scb_o->eca & ECA_APIE)) { + ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr, + vcpu->kvm->arch.crypto.crycb, + fmt_o, fmt_h); + if (ret) + goto end; + scb_s->eca |= scb_o->eca & ECA_APIE; + } + /* we may only allow it if enabled for guest 2 */ ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 & (ECB3_AES | ECB3_DEA); if (!ecb3_flags) - return 0; - - if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK)) - return set_validity_icpt(scb_s, 0x003CU); - else if (!crycb_addr) - return set_validity_icpt(scb_s, 0x0039U); + goto end; /* copy only the wrapping keys */ if (read_guest_real(vcpu, crycb_addr + 72, @@ -178,8 +329,6 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) return set_validity_icpt(scb_s, 0x0035U); scb_s->ecb3 |= ecb3_flags; - scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 | - CRYCB_FORMAT2; /* xor both blocks in one run */ b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask; @@ -187,6 +336,16 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask; /* as 56%8 == 0, bitmap_xor won't overwrite any data */ bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56); +end: + switch (ret) { + case -EINVAL: + return set_validity_icpt(scb_s, 0x0020U); + case -EFAULT: + return set_validity_icpt(scb_s, 0x0035U); + case -EACCES: + return set_validity_icpt(scb_s, 0x003CU); + } + scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2; return 0; } @@ -383,6 +542,8 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (test_kvm_facility(vcpu->kvm, 156)) scb_s->ecd |= scb_o->ecd & ECD_ETOKENF; + scb_s->hpid = HPID_VSIE; + prepare_ibc(vcpu, vsie_page); rc = shadow_crycb(vcpu, vsie_page); out: @@ -830,7 +991,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; int guest_bp_isolation; - int rc; + int rc = 0; handle_last_fault(vcpu, vsie_page); @@ -858,7 +1019,18 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) guest_enter_irqoff(); local_irq_enable(); - rc = sie64a(scb_s, vcpu->run->s.regs.gprs); + /* + * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking + * and VCPU requests also hinder the vSIE from running and lead + * 
to an immediate exit. kvm_s390_vsie_kick() has to be used to + * also kick the vSIE. + */ + vcpu->arch.sie_block->prog0c |= PROG_IN_SIE; + barrier(); + if (!kvm_s390_vcpu_sie_inhibited(vcpu)) + rc = sie64a(scb_s, vcpu->run->s.regs.gprs); + barrier(); + vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE; local_irq_disable(); guest_exit_irqoff(); @@ -1005,7 +1177,8 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) if (rc == -EAGAIN) rc = 0; if (rc || scb_s->icptcode || signal_pending(current) || - kvm_s390_vcpu_has_irq(vcpu, 0)) + kvm_s390_vcpu_has_irq(vcpu, 0) || + kvm_s390_vcpu_sie_inhibited(vcpu)) break; } @@ -1122,7 +1295,8 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu) if (unlikely(scb_addr & 0x1ffUL)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0)) + if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) || + kvm_s390_vcpu_sie_inhibited(vcpu)) return 0; vsie_page = get_vsie_page(vcpu->kvm, scb_addr); diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 911c7ded35f1..1e668b95e0c6 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -907,10 +907,16 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr) pmd_t *pmdp; BUG_ON(gmap_is_shadow(gmap)); - spin_lock(&gmap->guest_table_lock); pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1); + if (!pmdp) + return NULL; - if (!pmdp || pmd_none(*pmdp)) { + /* without huge pages, there is no need to take the table lock */ + if (!gmap->mm->context.allow_gmap_hpage_1m) + return pmd_none(*pmdp) ? NULL : pmdp; + + spin_lock(&gmap->guest_table_lock); + if (pmd_none(*pmdp)) { spin_unlock(&gmap->guest_table_lock); return NULL; } diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c index 0c85aedcf9b3..fd788e0f2e5b 100644 --- a/arch/s390/tools/gen_facilities.c +++ b/arch/s390/tools/gen_facilities.c @@ -106,6 +106,8 @@ static struct facility_def facility_defs[] = { .name = "FACILITIES_KVM_CPUMODEL", .bits = (int[]){ + 12, /* AP Query Configuration Information */ + 15, /* AP Facilities Test */ 156, /* etoken facility */ -1 /* END */ } diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c index 26789ad28193..cde370cad4ae 100644 --- a/arch/sh/boards/of-generic.c +++ b/arch/sh/boards/of-generic.c @@ -64,7 +64,7 @@ static void sh_of_smp_probe(void) init_cpu_possible(cpumask_of(0)); - for_each_node_by_type(np, "cpu") { + for_each_of_cpu_node(np) { const __be32 *cell = of_get_property(np, "reg", NULL); u64 id = -1; if (cell) id = of_read_number(cell, of_n_addr_cells(np)); diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h index 735939c0f513..6f025fe18146 100644 --- a/arch/sh/include/asm/hugetlb.h +++ b/arch/sh/include/asm/hugetlb.h @@ -4,8 +4,6 @@ #include <asm/cacheflush.h> #include <asm/page.h> -#include <asm-generic/hugetlb.h> - static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, @@ -17,6 +15,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, * If the arch doesn't supply something else, assume that hugepage * size aligned regions are ok without further preparation. 
*/ +#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE static inline int prepare_hugepage_range(struct file *file, unsigned long addr, unsigned long len) { @@ -27,62 +26,17 @@ static inline int prepare_hugepage_range(struct file *file, return 0; } -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, - unsigned long ceiling) -{ - free_pgd_range(tlb, addr, end, floor, ceiling); -} - -static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - set_pte_at(mm, addr, ptep, pte); -} - -static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - return ptep_get_and_clear(mm, addr, ptep); -} - +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { } -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} - -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - ptep_set_wrprotect(mm, addr, ptep); -} - -static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t pte, int dirty) -{ - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); -} - -static inline pte_t huge_ptep_get(pte_t *ptep) -{ - return *ptep; -} - static inline void arch_clear_hugepage_flags(struct page *page) { clear_bit(PG_dcache_clean, &page->flags); } +#include <asm-generic/hugetlb.h> + #endif /* _ASM_SH_HUGETLB_H */ diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h index b36200af9ce7..a99234b61051 100644 --- a/arch/sh/include/asm/unistd.h +++ b/arch/sh/include/asm/unistd.h @@ -5,6 +5,7 @@ # include <asm/unistd_64.h> # endif +# define __ARCH_WANT_NEW_STAT # define __ARCH_WANT_OLD_READDIR # define __ARCH_WANT_OLD_STAT # define __ARCH_WANT_STAT64 @@ -19,7 +20,6 @@ # define __ARCH_WANT_SYS_SOCKETCALL # define __ARCH_WANT_SYS_FADVISE64 # define __ARCH_WANT_SYS_GETPGRP -# define __ARCH_WANT_SYS_LLSEEK # define __ARCH_WANT_SYS_NICE # define __ARCH_WANT_SYS_OLD_GETRLIMIT # define __ARCH_WANT_SYS_OLD_UNAME diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h index f71ef3729888..316faa0130ba 100644 --- a/arch/sparc/include/asm/cmpxchg_64.h +++ b/arch/sparc/include/asm/cmpxchg_64.h @@ -52,7 +52,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long return val; } -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define xchg(ptr,x) \ +({ __typeof__(*(ptr)) __ret; \ + __ret = (__typeof__(*(ptr))) \ + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ + __ret; \ +}) void __xchg_called_with_bad_pointer(void); diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 4eb51d2dae98..30b1763580b1 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -6,38 +6,23 @@ */ #include <linux/types.h> +#include <asm-generic/compat.h> + #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "sparc\0\0" -typedef u32 compat_size_t; -typedef s32 compat_ssize_t; -typedef s32 compat_clock_t; -typedef s32 compat_pid_t; typedef u16 __compat_uid_t; typedef u16 __compat_gid_t; typedef u32 __compat_uid32_t; typedef u32 __compat_gid32_t; typedef u16 compat_mode_t; -typedef u32 compat_ino_t; typedef u16 compat_dev_t; 
-typedef s32 compat_off_t; -typedef s64 compat_loff_t; typedef s16 compat_nlink_t; typedef u16 compat_ipc_pid_t; -typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s32 compat_key_t; -typedef s32 compat_timer_t; - -typedef s32 compat_int_t; -typedef s32 compat_long_t; typedef s64 compat_s64; -typedef u32 compat_uint_t; -typedef u32 compat_ulong_t; typedef u64 compat_u64; -typedef u32 compat_uptr_t; - struct compat_stat { compat_dev_t st_dev; compat_ino_t st_ino; @@ -47,11 +32,11 @@ struct compat_stat { __compat_gid_t st_gid; compat_dev_t st_rdev; compat_off_t st_size; - compat_time_t st_atime; + old_time32_t st_atime; compat_ulong_t st_atime_nsec; - compat_time_t st_mtime; + old_time32_t st_mtime; compat_ulong_t st_mtime_nsec; - compat_time_t st_ctime; + old_time32_t st_ctime; compat_ulong_t st_ctime_nsec; compat_off_t st_blksize; compat_off_t st_blocks; diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 300557c66698..3963f80d1cb3 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -3,7 +3,6 @@ #define _ASM_SPARC64_HUGETLB_H #include <asm/page.h> -#include <asm-generic/hugetlb.h> #ifdef CONFIG_HUGETLB_PAGE struct pud_huge_patch_entry { @@ -13,9 +12,11 @@ struct pud_huge_patch_entry { extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end; #endif +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); @@ -25,37 +26,13 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, return 0; } -/* - * If the arch doesn't supply something else, assume that hugepage - * size aligned regions are ok without further preparation. 
- */ -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - struct hstate *h = hstate_file(file); - - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (addr & ~huge_page_mask(h)) - return -EINVAL; - return 0; -} - +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { } -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} - -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -63,6 +40,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); } +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) @@ -75,17 +53,15 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, return changed; } -static inline pte_t huge_ptep_get(pte_t *ptep) -{ - return *ptep; -} - static inline void arch_clear_hugepage_flags(struct page *page) { } +#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); +#include <asm-generic/hugetlb.h> + #endif /* _ASM_SPARC64_HUGETLB_H */ diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index 05df5f043053..3c5a1c620f0f 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -21,6 +21,7 @@ */ #define HAS_DMA +#ifdef CONFIG_PARPORT_PC_FIFO static DEFINE_SPINLOCK(dma_spin_lock); #define claim_dma_lock() \ @@ -31,6 +32,7 @@ static DEFINE_SPINLOCK(dma_spin_lock); #define release_dma_lock(__flags) \ spin_unlock_irqrestore(&dma_spin_lock, __flags); +#endif static struct sparc_ebus_info { struct ebus_dma_info info; diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index d955c8df62d6..1902db27ff4b 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h @@ -24,9 +24,6 @@ #include <linux/atomic.h> #include <linux/irqdomain.h> -#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2 -#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 - #define of_compat_cmp(s1, s2, l) strncmp((s1), (s2), (l)) #define of_prop_cmp(s1, s2) strcasecmp((s1), (s2)) #define of_node_cmp(s1, s2) strcmp((s1), (s2)) diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index 4ff29b1406a9..b1d4e2e3210f 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h @@ -67,6 +67,7 @@ do { save_and_clear_fpu(); \ } while(0) void synchronize_user_stack(void); -void fault_in_user_windows(void); +struct pt_regs; +void fault_in_user_windows(struct pt_regs *); #endif /* __SPARC64_SWITCH_TO_64_H */ diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 7fb676360928..20255471e653 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -121,8 +121,12 @@ struct thread_info { } /* how to get the thread information struct from C */ +#ifndef BUILD_VDSO register struct thread_info *current_thread_info_reg asm("g6"); #define current_thread_info() (current_thread_info_reg) +#else +extern struct thread_info 
*current_thread_info(void); +#endif /* thread information allocation */ #if PAGE_SHIFT == 13 diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index b2a6a955113e..00f87dbd0b17 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -21,6 +21,7 @@ #else #define __NR_time 231 /* Linux sparc32 */ #endif +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_ALARM @@ -33,7 +34,6 @@ #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 #define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING @@ -42,6 +42,7 @@ #define __ARCH_WANT_SYS_IPC #else #define __ARCH_WANT_COMPAT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif diff --git a/arch/sparc/include/asm/vdso.h b/arch/sparc/include/asm/vdso.h index 93b628731a5e..59e79d35cd73 100644 --- a/arch/sparc/include/asm/vdso.h +++ b/arch/sparc/include/asm/vdso.h @@ -8,10 +8,8 @@ struct vdso_image { void *data; unsigned long size; /* Always a multiple of PAGE_SIZE */ + long sym_vvar_start; /* Negative offset to the vvar area */ - long sym_vread_tick; /* Start of vread_tick section */ - long sym_vread_tick_patch_start; /* Start of tick read */ - long sym_vread_tick_patch_end; /* End of tick read */ }; #ifdef CONFIG_SPARC64 diff --git a/arch/sparc/include/uapi/asm/siginfo.h b/arch/sparc/include/uapi/asm/siginfo.h index e7049550ac82..68bdde4c2a2e 100644 --- a/arch/sparc/include/uapi/asm/siginfo.h +++ b/arch/sparc/include/uapi/asm/siginfo.h @@ -4,7 +4,6 @@ #if defined(__sparc__) && defined(__arch64__) -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #define __ARCH_SI_BAND_T int #endif /* defined(__sparc__) && defined(__arch64__) */ @@ -17,10 +16,4 @@ #define SI_NOINFO 32767 /* no information in siginfo_t */ -/* - * SIGEMT si_codes - */ -#define EMT_TAGOVF 1 /* tag overflow */ -#define NSIGEMT 1 - #endif /* _UAPI__SPARC_SIGINFO_H */ diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 6c086086ca8f..59eaf6227af1 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -36,6 +36,7 @@ #include <linux/sysrq.h> #include <linux/nmi.h> #include <linux/context_tracking.h> +#include <linux/signal.h> #include <linux/uaccess.h> #include <asm/page.h> @@ -521,7 +522,12 @@ static void stack_unaligned(unsigned long sp) force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0, current); } -void fault_in_user_windows(void) +static const char uwfault32[] = KERN_INFO \ + "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n"; +static const char uwfault64[] = KERN_INFO \ + "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n"; + +void fault_in_user_windows(struct pt_regs *regs) { struct thread_info *t = current_thread_info(); unsigned long window; @@ -534,9 +540,9 @@ void fault_in_user_windows(void) do { struct reg_window *rwin = &t->reg_window[window]; int winsize = sizeof(struct reg_window); - unsigned long sp; + unsigned long sp, orig_sp; - sp = t->rwbuf_stkptrs[window]; + orig_sp = sp = t->rwbuf_stkptrs[window]; if (test_thread_64bit_stack(sp)) sp += STACK_BIAS; @@ -547,8 +553,16 @@ void fault_in_user_windows(void) stack_unaligned(sp); if (unlikely(copy_to_user((char __user *)sp, - rwin, winsize))) + rwin, winsize))) { + if (show_unhandled_signals) + printk_ratelimited(is_compat_task() ? 
+ uwfault32 : uwfault64, + current->comm, current->pid, + sp, orig_sp, + regs->tpc, + regs->u_regs[UREG_I7]); goto barf; + } } while (window--); } set_thread_wsaved(0); @@ -556,8 +570,7 @@ void fault_in_user_windows(void) barf: set_thread_wsaved(window + 1); - user_exit(); - do_exit(SIGILL); + force_sig(SIGSEGV, current); } asmlinkage long sparc_do_fork(unsigned long clone_flags, diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 4073e2b87dd0..29aa34f11720 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -39,6 +39,7 @@ __handle_preemption: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate __handle_user_windows: + add %sp, PTREGS_OFF, %o0 call fault_in_user_windows 661: wrpr %g0, RTRAP_PSTATE, %pstate /* If userspace is using ADI, it could potentially pass diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 44d379db3f64..4c5b3fcbed94 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -371,7 +371,11 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs, get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - do_exit(SIGILL); + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n", + current->comm, current->pid, (unsigned long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig, current); return -EINVAL; } @@ -501,7 +505,11 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs, get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { - do_exit(SIGILL); + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n", + current->comm, current->pid, (unsigned long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig, current); return -EINVAL; } diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 48366e5eb5b2..e9de1803a22e 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -370,7 +370,11 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) get_sigframe(ksig, regs, sf_size); if (invalid_frame_pointer (sf)) { - do_exit(SIGILL); /* won't return, actually */ + if (show_unhandled_signals) + pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n", + current->comm, current->pid, (unsigned long)sf, + regs->tpc, regs->u_regs[UREG_I7]); + force_sigsegv(ksig->sig, current); return -EINVAL; } diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index f0eba72aa1ad..5f356dc8e178 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -53,8 +53,6 @@ DEFINE_SPINLOCK(rtc_lock); -unsigned int __read_mostly vdso_fix_stick; - #ifdef CONFIG_SMP unsigned long profile_pc(struct pt_regs *regs) { @@ -838,7 +836,6 @@ void __init time_init_early(void) } else { init_tick_ops(&tick_operations); clocksource_tick.archdata.vclock_mode = VCLOCK_TICK; - vdso_fix_stick = 1; } } else { init_tick_ops(&stick_operations); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index f396048a0d68..39822f611c01 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1383,6 +1383,7 @@ int __node_distance(int from, int to) } return numa_latency[from][to]; } +EXPORT_SYMBOL(__node_distance); static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) { diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile index dc85570d8839..a6e18ca4cc18 
100644 --- a/arch/sparc/vdso/Makefile +++ b/arch/sparc/vdso/Makefile @@ -33,10 +33,8 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) CPPFLAGS_vdso.lds += -P -C -VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ - -Wl,--no-undefined \ - -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \ - $(DISABLE_LTO) +VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 --no-undefined \ + -z max-page-size=8192 -z common-page-size=8192 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE $(call if_changed,vdso) @@ -54,13 +52,14 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE # Don't omit frame pointers for ease of userspace debugging, but do # optimize sibling calls. # -CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables \ - -m64 -ffixed-g2 -ffixed-g3 -fcall-used-g4 -fcall-used-g5 -ffixed-g6 \ - -ffixed-g7 $(filter -g%,$(KBUILD_CFLAGS)) \ - $(call cc-option, -fno-stack-protector) -fno-omit-frame-pointer \ - -foptimize-sibling-calls -DBUILD_VDSO +CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables -m64 \ + $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ + -fno-omit-frame-pointer -foptimize-sibling-calls \ + -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO -$(vobjs): KBUILD_CFLAGS += $(CFL) +SPARC_REG_CFLAGS = -ffixed-g4 -ffixed-g5 -fcall-used-g5 -fcall-used-g7 + +$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) # # vDSO code runs in userspace and -pg doesn't help with profiling anyway. @@ -73,7 +72,7 @@ $(obj)/%.so: $(obj)/%.so.dbg $(call if_changed,objcopy) CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) -VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf32_sparc,-soname=linux-gate.so.1 +VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1 #This makes sure the $(obj) subdirectory exists even though vdso32/ #is not a kbuild sub-make subdirectory @@ -91,7 +90,8 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -mno-app-regs -ffixed-g7 +KBUILD_CFLAGS_32 := $(filter-out $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS_32)) +KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) KBUILD_CFLAGS_32 += -fno-omit-frame-pointer @@ -109,12 +109,13 @@ $(obj)/vdso32.so.dbg: FORCE \ # The DSO images are built using a special linker script. 
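As an aside on what this Makefile produces: the vDSO exists so that glibc can satisfy clock_gettime() and gettimeofday() from the __vdso_* entry points without entering the kernel, falling back to the real syscall when no usable clocksource is exposed. A minimal consumer looks like the sketch below; it is illustrative only, uses nothing beyond plain POSIX, and is not part of the patch.

/*
 * Minimal consumer of the vDSO fast path (illustration only): glibc
 * resolves clock_gettime()/gettimeofday() to the __vdso_* entries when
 * the kernel provides them and falls back to the syscall otherwise, so
 * this program behaves the same either way.
 */
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main(void)
{
	struct timespec ts;
	struct timeval tv;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("monotonic: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);

	if (gettimeofday(&tv, NULL) == 0)
		printf("walltime:  %lld.%06ld\n",
		       (long long)tv.tv_sec, (long)tv.tv_usec);

	return 0;
}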
# quiet_cmd_vdso = VDSO $@ - cmd_vdso = $(CC) -nostdlib -o $@ \ + cmd_vdso = $(LD) -nostdlib -o $@ \ $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ - -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) + -T $(filter %.lds,$^) $(filter %.o,$^) && \ + sh $(srctree)/$(src)/checkundef.sh '$(OBJDUMP)' '$@' -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ - $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic +VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \ + $(call ld-option, --build-id) -Bsymbolic GCOV_PROFILE := n # diff --git a/arch/sparc/vdso/checkundef.sh b/arch/sparc/vdso/checkundef.sh new file mode 100644 index 000000000000..2d85876ffc32 --- /dev/null +++ b/arch/sparc/vdso/checkundef.sh @@ -0,0 +1,10 @@ +#!/bin/sh +objdump="$1" +file="$2" +$objdump -t "$file" | grep '*UUND*' | grep -v '#scratch' > /dev/null 2>&1 +if [ $? -eq 1 ]; then + exit 0 +else + echo "$file: undefined symbols found" >&2 + exit 1 +fi diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c index 75dca9aab737..55662c3b4513 100644 --- a/arch/sparc/vdso/vclock_gettime.c +++ b/arch/sparc/vdso/vclock_gettime.c @@ -12,11 +12,6 @@ * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. */ -/* Disable profiling for userspace code: */ -#ifndef DISABLE_BRANCH_PROFILING -#define DISABLE_BRANCH_PROFILING -#endif - #include <linux/kernel.h> #include <linux/time.h> #include <linux/string.h> @@ -26,13 +21,6 @@ #include <asm/clocksource.h> #include <asm/vvar.h> -#undef TICK_PRIV_BIT -#ifdef CONFIG_SPARC64 -#define TICK_PRIV_BIT (1UL << 63) -#else -#define TICK_PRIV_BIT (1ULL << 63) -#endif - #ifdef CONFIG_SPARC64 #define SYSCALL_STRING \ "ta 0x6d;" \ @@ -60,24 +48,22 @@ * Compute the vvar page's address in the process address space, and return it * as a pointer to the vvar_data. */ -static notrace noinline struct vvar_data * -get_vvar_data(void) +notrace static __always_inline struct vvar_data *get_vvar_data(void) { unsigned long ret; /* - * vdso data page is the first vDSO page so grab the return address + * vdso data page is the first vDSO page so grab the PC * and move up a page to get to the data page. 
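The checkundef.sh hook added above greps objdump output for undefined symbols in the finished DSO; the vma.c changes later in this diff do related work in-kernel by locating .dynsym/.dynstr in the vDSO image and looking symbols up by name (one_section64()/find_sym64()). A hedged userspace sketch of that kind of lookup, written against <elf.h> only and assuming a native-endian ELF64 object passed on the command line:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct stat st;
	Elf64_Ehdr *eh;
	Elf64_Shdr *sh;
	Elf64_Sym *sym;
	const char *str;
	void *map;
	size_t i, nsyms;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;
	map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	eh = map;
	sh = (Elf64_Shdr *)((char *)map + eh->e_shoff);

	/* Find .dynsym; its sh_link names the matching string table. */
	for (i = 0; i < eh->e_shnum; i++) {
		if (sh[i].sh_type != SHT_DYNSYM)
			continue;
		sym = (Elf64_Sym *)((char *)map + sh[i].sh_offset);
		str = (char *)map + sh[sh[i].sh_link].sh_offset;
		nsyms = sh[i].sh_size / sizeof(Elf64_Sym);
		for (size_t j = 0; j < nsyms; j++) {
			if (!sym[j].st_name)
				continue;
			printf("%s%s\n", str + sym[j].st_name,
			       sym[j].st_shndx == SHN_UNDEF ?
			       " (undefined)" : "");
		}
	}
	munmap(map, st.st_size);
	close(fd);
	return 0;
}

Pointing this at the generated vdso64.so.dbg, for example, lists the exported entries and flags anything left undefined, which is the condition the build-time script rejects.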
*/ - ret = (unsigned long)__builtin_return_address(0); + __asm__("rd %%pc, %0" : "=r" (ret)); ret &= ~(8192 - 1); ret -= 8192; return (struct vvar_data *) ret; } -static notrace long -vdso_fallback_gettime(long clock, struct timespec *ts) +notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) { register long num __asm__("g1") = __NR_clock_gettime; register long o0 __asm__("o0") = clock; @@ -88,8 +74,7 @@ vdso_fallback_gettime(long clock, struct timespec *ts) return o0; } -static notrace __always_inline long -vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz) +notrace static long vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz) { register long num __asm__("g1") = __NR_gettimeofday; register long o0 __asm__("o0") = (long) tv; @@ -101,38 +86,44 @@ vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz) } #ifdef CONFIG_SPARC64 -static notrace noinline u64 -vread_tick(void) { +notrace static __always_inline u64 vread_tick(void) +{ u64 ret; - __asm__ __volatile__("rd %%asr24, %0 \n" - ".section .vread_tick_patch, \"ax\" \n" - "rd %%tick, %0 \n" - ".previous \n" - : "=&r" (ret)); - return ret & ~TICK_PRIV_BIT; + __asm__ __volatile__("rd %%tick, %0" : "=r" (ret)); + return ret; +} + +notrace static __always_inline u64 vread_tick_stick(void) +{ + u64 ret; + + __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret)); + return ret; } #else -static notrace noinline u64 -vread_tick(void) +notrace static __always_inline u64 vread_tick(void) { - unsigned int lo, hi; - - __asm__ __volatile__("rd %%asr24, %%g1\n\t" - "srlx %%g1, 32, %1\n\t" - "srl %%g1, 0, %0\n" - ".section .vread_tick_patch, \"ax\" \n" - "rd %%tick, %%g1\n" - ".previous \n" - : "=&r" (lo), "=&r" (hi) - : - : "g1"); - return lo | ((u64)hi << 32); + register unsigned long long ret asm("o4"); + + __asm__ __volatile__("rd %%tick, %L0\n\t" + "srlx %L0, 32, %H0" + : "=r" (ret)); + return ret; +} + +notrace static __always_inline u64 vread_tick_stick(void) +{ + register unsigned long long ret asm("o4"); + + __asm__ __volatile__("rd %%asr24, %L0\n\t" + "srlx %L0, 32, %H0" + : "=r" (ret)); + return ret; } #endif -static notrace inline u64 -vgetsns(struct vvar_data *vvar) +notrace static __always_inline u64 vgetsns(struct vvar_data *vvar) { u64 v; u64 cycles; @@ -142,13 +133,22 @@ vgetsns(struct vvar_data *vvar) return v * vvar->clock.mult; } -static notrace noinline int -do_realtime(struct vvar_data *vvar, struct timespec *ts) +notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar) +{ + u64 v; + u64 cycles; + + cycles = vread_tick_stick(); + v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask; + return v * vvar->clock.mult; +} + +notrace static __always_inline int do_realtime(struct vvar_data *vvar, + struct timespec *ts) { unsigned long seq; u64 ns; - ts->tv_nsec = 0; do { seq = vvar_read_begin(vvar); ts->tv_sec = vvar->wall_time_sec; @@ -157,18 +157,38 @@ do_realtime(struct vvar_data *vvar, struct timespec *ts) ns >>= vvar->clock.shift; } while (unlikely(vvar_read_retry(vvar, seq))); - timespec_add_ns(ts, ns); + ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); + ts->tv_nsec = ns; + + return 0; +} + +notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar, + struct timespec *ts) +{ + unsigned long seq; + u64 ns; + + do { + seq = vvar_read_begin(vvar); + ts->tv_sec = vvar->wall_time_sec; + ns = vvar->wall_time_snsec; + ns += vgetsns_stick(vvar); + ns >>= vvar->clock.shift; + } while (unlikely(vvar_read_retry(vvar, seq))); + + 
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); + ts->tv_nsec = ns; return 0; } -static notrace noinline int -do_monotonic(struct vvar_data *vvar, struct timespec *ts) +notrace static __always_inline int do_monotonic(struct vvar_data *vvar, + struct timespec *ts) { unsigned long seq; u64 ns; - ts->tv_nsec = 0; do { seq = vvar_read_begin(vvar); ts->tv_sec = vvar->monotonic_time_sec; @@ -177,13 +197,34 @@ do_monotonic(struct vvar_data *vvar, struct timespec *ts) ns >>= vvar->clock.shift; } while (unlikely(vvar_read_retry(vvar, seq))); - timespec_add_ns(ts, ns); + ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); + ts->tv_nsec = ns; + + return 0; +} + +notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar, + struct timespec *ts) +{ + unsigned long seq; + u64 ns; + + do { + seq = vvar_read_begin(vvar); + ts->tv_sec = vvar->monotonic_time_sec; + ns = vvar->monotonic_time_snsec; + ns += vgetsns_stick(vvar); + ns >>= vvar->clock.shift; + } while (unlikely(vvar_read_retry(vvar, seq))); + + ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); + ts->tv_nsec = ns; return 0; } -static notrace noinline int -do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts) +notrace static int do_realtime_coarse(struct vvar_data *vvar, + struct timespec *ts) { unsigned long seq; @@ -195,8 +236,8 @@ do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts) return 0; } -static notrace noinline int -do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts) +notrace static int do_monotonic_coarse(struct vvar_data *vvar, + struct timespec *ts) { unsigned long seq; @@ -238,6 +279,31 @@ clock_gettime(clockid_t, struct timespec *) __attribute__((weak, alias("__vdso_clock_gettime"))); notrace int +__vdso_clock_gettime_stick(clockid_t clock, struct timespec *ts) +{ + struct vvar_data *vvd = get_vvar_data(); + + switch (clock) { + case CLOCK_REALTIME: + if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) + break; + return do_realtime_stick(vvd, ts); + case CLOCK_MONOTONIC: + if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) + break; + return do_monotonic_stick(vvd, ts); + case CLOCK_REALTIME_COARSE: + return do_realtime_coarse(vvd, ts); + case CLOCK_MONOTONIC_COARSE: + return do_monotonic_coarse(vvd, ts); + } + /* + * Unknown clock ID ? Fall back to the syscall. + */ + return vdso_fallback_gettime(clock, ts); +} + +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) { struct vvar_data *vvd = get_vvar_data(); @@ -272,3 +338,36 @@ __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) int gettimeofday(struct timeval *, struct timezone *) __attribute__((weak, alias("__vdso_gettimeofday"))); + +notrace int +__vdso_gettimeofday_stick(struct timeval *tv, struct timezone *tz) +{ + struct vvar_data *vvd = get_vvar_data(); + + if (likely(vvd->vclock_mode != VCLOCK_NONE)) { + if (likely(tv != NULL)) { + union tstv_t { + struct timespec ts; + struct timeval tv; + } *tstv = (union tstv_t *) tv; + do_realtime_stick(vvd, &tstv->ts); + /* + * Assign before dividing to ensure that the division is + * done in the type of tv_usec, not tv_nsec. + * + * There cannot be > 1 billion usec in a second: + * do_realtime() has already distributed such overflow + * into tv_sec. So we can assign it to an int safely. + */ + tstv->tv.tv_usec = tstv->ts.tv_nsec; + tstv->tv.tv_usec /= 1000; + } + if (unlikely(tz != NULL)) { + /* Avoid memcpy. 
Some old compilers fail to inline it */ + tz->tz_minuteswest = vvd->tz_minuteswest; + tz->tz_dsttime = vvd->tz_dsttime; + } + return 0; + } + return vdso_fallback_gettimeofday(tv, tz); +} diff --git a/arch/sparc/vdso/vdso-layout.lds.S b/arch/sparc/vdso/vdso-layout.lds.S index f2c83abaca12..d31e57e8a3bb 100644 --- a/arch/sparc/vdso/vdso-layout.lds.S +++ b/arch/sparc/vdso/vdso-layout.lds.S @@ -73,12 +73,6 @@ SECTIONS .text : { *(.text*) } :text =0x90909090, - .vread_tick_patch : { - vread_tick_patch_start = .; - *(.vread_tick_patch) - vread_tick_patch_end = .; - } - /DISCARD/ : { *(.discard) *(.discard.*) diff --git a/arch/sparc/vdso/vdso.lds.S b/arch/sparc/vdso/vdso.lds.S index f3caa29a331c..629ab6900df7 100644 --- a/arch/sparc/vdso/vdso.lds.S +++ b/arch/sparc/vdso/vdso.lds.S @@ -18,8 +18,10 @@ VERSION { global: clock_gettime; __vdso_clock_gettime; + __vdso_clock_gettime_stick; gettimeofday; __vdso_gettimeofday; + __vdso_gettimeofday_stick; local: *; }; } diff --git a/arch/sparc/vdso/vdso2c.c b/arch/sparc/vdso/vdso2c.c index 9f5b1cd6d51d..ab7504176a7f 100644 --- a/arch/sparc/vdso/vdso2c.c +++ b/arch/sparc/vdso/vdso2c.c @@ -63,9 +63,6 @@ enum { sym_vvar_start, sym_VDSO_FAKE_SECTION_TABLE_START, sym_VDSO_FAKE_SECTION_TABLE_END, - sym_vread_tick, - sym_vread_tick_patch_start, - sym_vread_tick_patch_end }; struct vdso_sym { @@ -81,9 +78,6 @@ struct vdso_sym required_syms[] = { [sym_VDSO_FAKE_SECTION_TABLE_END] = { "VDSO_FAKE_SECTION_TABLE_END", 0 }, - [sym_vread_tick] = {"vread_tick", 1}, - [sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1}, - [sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1} }; __attribute__((format(printf, 1, 2))) __attribute__((noreturn)) diff --git a/arch/sparc/vdso/vdso2c.h b/arch/sparc/vdso/vdso2c.h index 808decb0f7be..60d69acc748f 100644 --- a/arch/sparc/vdso/vdso2c.h +++ b/arch/sparc/vdso/vdso2c.h @@ -17,7 +17,6 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len, unsigned long mapping_size; int i; unsigned long j; - ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr; ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr; ELF(Dyn) *dyn = 0, *dyn_end = 0; diff --git a/arch/sparc/vdso/vdso32/vdso32.lds.S b/arch/sparc/vdso/vdso32/vdso32.lds.S index 53575ee154c4..218930fdff03 100644 --- a/arch/sparc/vdso/vdso32/vdso32.lds.S +++ b/arch/sparc/vdso/vdso32/vdso32.lds.S @@ -17,8 +17,10 @@ VERSION { global: clock_gettime; __vdso_clock_gettime; + __vdso_clock_gettime_stick; gettimeofday; __vdso_gettimeofday; + __vdso_gettimeofday_stick; local: *; }; } diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c index 5eaff3c1aa0c..154fe8adc090 100644 --- a/arch/sparc/vdso/vma.c +++ b/arch/sparc/vdso/vma.c @@ -16,6 +16,8 @@ #include <linux/linkage.h> #include <linux/random.h> #include <linux/elf.h> +#include <asm/cacheflush.h> +#include <asm/spitfire.h> #include <asm/vdso.h> #include <asm/vvar.h> #include <asm/page.h> @@ -40,20 +42,221 @@ static struct vm_special_mapping vdso_mapping32 = { struct vvar_data *vvar_data; -#define SAVE_INSTR_SIZE 4 +struct vdso_elfinfo32 { + Elf32_Ehdr *hdr; + Elf32_Sym *dynsym; + unsigned long dynsymsize; + const char *dynstr; + unsigned long text; +}; + +struct vdso_elfinfo64 { + Elf64_Ehdr *hdr; + Elf64_Sym *dynsym; + unsigned long dynsymsize; + const char *dynstr; + unsigned long text; +}; + +struct vdso_elfinfo { + union { + struct vdso_elfinfo32 elf32; + struct vdso_elfinfo64 elf64; + } u; +}; + +static void *one_section64(struct vdso_elfinfo64 *e, const char *name, + unsigned long *size) +{ + const char *snames; + Elf64_Shdr *shdrs; + unsigned int 
i; + + shdrs = (void *)e->hdr + e->hdr->e_shoff; + snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset; + for (i = 1; i < e->hdr->e_shnum; i++) { + if (!strcmp(snames+shdrs[i].sh_name, name)) { + if (size) + *size = shdrs[i].sh_size; + return (void *)e->hdr + shdrs[i].sh_offset; + } + } + return NULL; +} + +static int find_sections64(const struct vdso_image *image, struct vdso_elfinfo *_e) +{ + struct vdso_elfinfo64 *e = &_e->u.elf64; + + e->hdr = image->data; + e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize); + e->dynstr = one_section64(e, ".dynstr", NULL); + + if (!e->dynsym || !e->dynstr) { + pr_err("VDSO64: Missing symbol sections.\n"); + return -ENODEV; + } + return 0; +} + +static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name) +{ + unsigned int i; + + for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) { + Elf64_Sym *s = &e->dynsym[i]; + if (s->st_name == 0) + continue; + if (!strcmp(e->dynstr + s->st_name, name)) + return s; + } + return NULL; +} + +static int patchsym64(struct vdso_elfinfo *_e, const char *orig, + const char *new) +{ + struct vdso_elfinfo64 *e = &_e->u.elf64; + Elf64_Sym *osym = find_sym64(e, orig); + Elf64_Sym *nsym = find_sym64(e, new); + + if (!nsym || !osym) { + pr_err("VDSO64: Missing symbols.\n"); + return -ENODEV; + } + osym->st_value = nsym->st_value; + osym->st_size = nsym->st_size; + osym->st_info = nsym->st_info; + osym->st_other = nsym->st_other; + osym->st_shndx = nsym->st_shndx; + + return 0; +} + +static void *one_section32(struct vdso_elfinfo32 *e, const char *name, + unsigned long *size) +{ + const char *snames; + Elf32_Shdr *shdrs; + unsigned int i; + + shdrs = (void *)e->hdr + e->hdr->e_shoff; + snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset; + for (i = 1; i < e->hdr->e_shnum; i++) { + if (!strcmp(snames+shdrs[i].sh_name, name)) { + if (size) + *size = shdrs[i].sh_size; + return (void *)e->hdr + shdrs[i].sh_offset; + } + } + return NULL; +} + +static int find_sections32(const struct vdso_image *image, struct vdso_elfinfo *_e) +{ + struct vdso_elfinfo32 *e = &_e->u.elf32; + + e->hdr = image->data; + e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize); + e->dynstr = one_section32(e, ".dynstr", NULL); + + if (!e->dynsym || !e->dynstr) { + pr_err("VDSO32: Missing symbol sections.\n"); + return -ENODEV; + } + return 0; +} + +static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name) +{ + unsigned int i; + + for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) { + Elf32_Sym *s = &e->dynsym[i]; + if (s->st_name == 0) + continue; + if (!strcmp(e->dynstr + s->st_name, name)) + return s; + } + return NULL; +} + +static int patchsym32(struct vdso_elfinfo *_e, const char *orig, + const char *new) +{ + struct vdso_elfinfo32 *e = &_e->u.elf32; + Elf32_Sym *osym = find_sym32(e, orig); + Elf32_Sym *nsym = find_sym32(e, new); + + if (!nsym || !osym) { + pr_err("VDSO32: Missing symbols.\n"); + return -ENODEV; + } + osym->st_value = nsym->st_value; + osym->st_size = nsym->st_size; + osym->st_info = nsym->st_info; + osym->st_other = nsym->st_other; + osym->st_shndx = nsym->st_shndx; + + return 0; +} + +static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e, + bool elf64) +{ + if (elf64) + return find_sections64(image, e); + else + return find_sections32(image, e); +} + +static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig, + const char *new_target, bool elf64) +{ + if (elf64) + return patchsym64(e, orig, new_target); + else + return 
patchsym32(e, orig, new_target); +} + +static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64) +{ + int err; + + err = find_sections(image, e, elf64); + if (err) + return err; + + err = patch_one_symbol(e, + "__vdso_gettimeofday", + "__vdso_gettimeofday_stick", elf64); + if (err) + return err; + + return patch_one_symbol(e, + "__vdso_clock_gettime", + "__vdso_clock_gettime_stick", elf64); + return 0; +} /* * Allocate pages for the vdso and vvar, and copy in the vdso text from the * kernel image. */ int __init init_vdso_image(const struct vdso_image *image, - struct vm_special_mapping *vdso_mapping) + struct vm_special_mapping *vdso_mapping, bool elf64) { - int i; + int cnpages = (image->size) / PAGE_SIZE; struct page *dp, **dpp = NULL; - int dnpages = 0; struct page *cp, **cpp = NULL; - int cnpages = (image->size) / PAGE_SIZE; + struct vdso_elfinfo ei; + int i, dnpages = 0; + + if (tlb_type != spitfire) { + int err = stick_patch(image, &ei, elf64); + if (err) + return err; + } /* * First, the vdso text. This is initialied data, an integral number of @@ -68,22 +271,6 @@ int __init init_vdso_image(const struct vdso_image *image, if (!cpp) goto oom; - if (vdso_fix_stick) { - /* - * If the system uses %tick instead of %stick, patch the VDSO - * with instruction reading %tick instead of %stick. - */ - unsigned int j, k = SAVE_INSTR_SIZE; - unsigned char *data = image->data; - - for (j = image->sym_vread_tick_patch_start; - j < image->sym_vread_tick_patch_end; j++) { - - data[image->sym_vread_tick + k] = data[j]; - k++; - } - } - for (i = 0; i < cnpages; i++) { cp = alloc_page(GFP_KERNEL); if (!cp) @@ -146,13 +333,13 @@ static int __init init_vdso(void) { int err = 0; #ifdef CONFIG_SPARC64 - err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64); + err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true); if (err) return err; #endif #ifdef CONFIG_COMPAT - err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32); + err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false); #endif return err; diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 6b9938919f0b..10c15b8853ae 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -12,6 +12,8 @@ config UML select HAVE_UID16 select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_DEBUG_KMEMLEAK + select HAVE_MEMBLOCK + select NO_BOOTMEM select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index 7adb4e6b658a..4049f2c46387 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S @@ -53,8 +53,6 @@ CON_INITCALL } - SECURITY_INIT - .exitcall : { __exitcall_begin = .; *(.exitcall.exit) diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c index f02596e9931d..296a91a04598 100644 --- a/arch/um/kernel/physmem.c +++ b/arch/um/kernel/physmem.c @@ -5,6 +5,7 @@ #include <linux/module.h> #include <linux/bootmem.h> +#include <linux/memblock.h> #include <linux/mm.h> #include <linux/pfn.h> #include <asm/page.h> @@ -80,28 +81,23 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end, unsigned long len, unsigned long long highmem) { unsigned long reserve = reserve_end - start; - unsigned long pfn = PFN_UP(__pa(reserve_end)); - unsigned long delta = (len - reserve) >> PAGE_SHIFT; - unsigned long offset, bootmap_size; - long map_size; + long map_size = len - reserve; int err; - offset = uml_reserved - uml_physmem; - map_size = len - offset; 
if(map_size <= 0) { os_warn("Too few physical memory! Needed=%lu, given=%lu\n", - offset, len); + reserve, len); exit(1); } physmem_fd = create_mem_file(len + highmem); - err = os_map_memory((void *) uml_reserved, physmem_fd, offset, + err = os_map_memory((void *) reserve_end, physmem_fd, reserve, map_size, 1, 1, 1); if (err < 0) { os_warn("setup_physmem - mapping %ld bytes of memory at 0x%p " "failed - errno = %d\n", map_size, - (void *) uml_reserved, err); + (void *) reserve_end, err); exit(1); } @@ -113,9 +109,11 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end, os_write_file(physmem_fd, __syscall_stub_start, PAGE_SIZE); os_fsync_file(physmem_fd); - bootmap_size = init_bootmem(pfn, pfn + delta); - free_bootmem(__pa(reserve_end) + bootmap_size, - len - bootmap_size - reserve); + memblock_add(__pa(start), len + highmem); + memblock_reserve(__pa(start), reserve); + + min_low_pfn = PFN_UP(__pa(reserve_end)); + max_low_pfn = min_low_pfn + (map_size >> PAGE_SHIFT); } int phys_mapping(unsigned long phys, unsigned long long *offset_out) diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 3a3b40f79558..0c5111b206bd 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -6,6 +6,7 @@ config UNICORE32 select ARCH_MIGHT_HAVE_PC_SERIO select DMA_DIRECT_OPS select HAVE_MEMBLOCK + select NO_BOOTMEM select HAVE_GENERIC_DMA_COHERENT select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 diff --git a/arch/unicore32/include/asm/bug.h b/arch/unicore32/include/asm/bug.h index 93a56f3e2344..83c7687a0e61 100644 --- a/arch/unicore32/include/asm/bug.h +++ b/arch/unicore32/include/asm/bug.h @@ -17,6 +17,7 @@ struct siginfo; extern void die(const char *msg, struct pt_regs *regs, int err); extern void uc32_notify_die(const char *str, struct pt_regs *regs, - struct siginfo *info, unsigned long err, unsigned long trap); + int sig, int code, void __user *addr, + unsigned long err, unsigned long trap); #endif /* __UNICORE_BUG_H__ */ diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h index 65856eaab163..1e8fe5941b8a 100644 --- a/arch/unicore32/include/uapi/asm/unistd.h +++ b/arch/unicore32/include/uapi/asm/unistd.h @@ -15,4 +15,5 @@ /* Use the standard ABI for syscalls. 
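Returning to the UML setup_physmem() conversion above: once bootmem's bitmap bookkeeping is gone, the only derived values left are the reservation size and the low-memory page-frame bounds. A small worked example of that arithmetic, with hypothetical addresses and PFN_UP() re-defined locally purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
/* Round an address up to the next page frame number (local re-definition). */
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start       = 0x10000000UL;   /* hypothetical __pa(start)   */
	unsigned long reserve_end = 0x10200000UL;   /* hypothetical kernel+initrd */
	unsigned long len         = 64UL << 20;     /* 64 MiB of physical memory  */
	unsigned long reserve     = reserve_end - start;
	long map_size             = len - reserve;

	unsigned long min_low_pfn = PFN_UP(reserve_end);
	unsigned long max_low_pfn = min_low_pfn + (map_size >> PAGE_SHIFT);

	printf("reserve     = %lu bytes\n", reserve);
	printf("min_low_pfn = %#lx\n", min_low_pfn);
	printf("max_low_pfn = %#lx\n", max_low_pfn);
	return 0;
}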
*/ #include <asm-generic/unistd.h> +#define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_CLONE diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c index 8594b168f25e..fc5dad32a982 100644 --- a/arch/unicore32/kernel/fpu-ucf64.c +++ b/arch/unicore32/kernel/fpu-ucf64.c @@ -54,14 +54,6 @@ */ void ucf64_raise_sigfpe(struct pt_regs *regs) { - siginfo_t info; - - clear_siginfo(&info); - - info.si_signo = SIGFPE; - info.si_code = FPE_FLTUNK; - info.si_addr = (void __user *)(instruction_pointer(regs) - 4); - /* * This is the same as NWFPE, because it's not clear what * this is used for @@ -69,7 +61,9 @@ void ucf64_raise_sigfpe(struct pt_regs *regs) current->thread.error_code = 0; current->thread.trap_no = 6; - send_sig_info(SIGFPE, &info, current); + send_sig_fault(SIGFPE, FPE_FLTUNK, + (void __user *)(instruction_pointer(regs) - 4), + current); } /* diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index c4ac6043ebb0..fb376d83e043 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -241,13 +241,14 @@ void die(const char *str, struct pt_regs *regs, int err) } void uc32_notify_die(const char *str, struct pt_regs *regs, - struct siginfo *info, unsigned long err, unsigned long trap) + int sig, int code, void __user *addr, + unsigned long err, unsigned long trap) { if (user_mode(regs)) { current->thread.error_code = err; current->thread.trap_no = trap; - force_sig_info(info->si_signo, info, current); + force_sig_fault(sig, code, addr, current); } else die(str, regs, err); } diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index 8f12a5b50a42..b9a3a50644c1 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c @@ -120,17 +120,10 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, unsigned int fsr, unsigned int sig, int code, struct pt_regs *regs) { - struct siginfo si; - tsk->thread.address = addr; tsk->thread.error_code = fsr; tsk->thread.trap_no = 14; - clear_siginfo(&si); - si.si_signo = sig; - si.si_errno = 0; - si.si_code = code; - si.si_addr = (void __user *)addr; - force_sig_info(sig, &si, tsk); + force_sig_fault(sig, code, (void __user *)addr, tsk); } void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs) @@ -466,7 +459,6 @@ asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { const struct fsr_info *inf = fsr_info + fsr_fs(fsr); - struct siginfo info; if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) return; @@ -474,19 +466,14 @@ asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr, printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", inf->name, fsr, addr); - clear_siginfo(&info); - info.si_signo = inf->sig; - info.si_errno = 0; - info.si_code = inf->code; - info.si_addr = (void __user *)addr; - uc32_notify_die("", regs, &info, fsr, 0); + uc32_notify_die("", regs, inf->sig, inf->code, (void __user *)addr, + fsr, 0); } asmlinkage void do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) { const struct fsr_info *inf = fsr_info + fsr_fs(ifsr); - struct siginfo info; if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) return; @@ -494,10 +481,6 @@ asmlinkage void do_PrefetchAbort(unsigned long addr, printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", inf->name, ifsr, addr); - clear_siginfo(&info); - info.si_signo = inf->sig; - info.si_errno = 0; - info.si_code = inf->code; - info.si_addr = (void __user *)addr; - uc32_notify_die("", regs, &info, 
ifsr, 0); + uc32_notify_die("", regs, inf->sig, inf->code, (void __user *)addr, + ifsr, 0); } diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c index 5f72a8d1d953..8f8699e62bd5 100644 --- a/arch/unicore32/mm/init.c +++ b/arch/unicore32/mm/init.c @@ -84,58 +84,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low, } } -static void __init uc32_bootmem_init(unsigned long start_pfn, - unsigned long end_pfn) -{ - struct memblock_region *reg; - unsigned int boot_pages; - phys_addr_t bitmap; - pg_data_t *pgdat; - - /* - * Allocate the bootmem bitmap page. This must be in a region - * of memory which has already been mapped. - */ - boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); - bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES, - __pfn_to_phys(end_pfn)); - - /* - * Initialise the bootmem allocator, handing the - * memory banks over to bootmem. - */ - node_set_online(0); - pgdat = NODE_DATA(0); - init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); - - /* Free the lowmem regions from memblock into bootmem. */ - for_each_memblock(memory, reg) { - unsigned long start = memblock_region_memory_base_pfn(reg); - unsigned long end = memblock_region_memory_end_pfn(reg); - - if (end >= end_pfn) - end = end_pfn; - if (start >= end) - break; - - free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT); - } - - /* Reserve the lowmem memblock reserved regions in bootmem. */ - for_each_memblock(reserved, reg) { - unsigned long start = memblock_region_reserved_base_pfn(reg); - unsigned long end = memblock_region_reserved_end_pfn(reg); - - if (end >= end_pfn) - end = end_pfn; - if (start >= end) - break; - - reserve_bootmem(__pfn_to_phys(start), - (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT); - } -} - static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low, unsigned long max_high) { @@ -232,7 +180,7 @@ void __init bootmem_init(void) find_limits(&min, &max_low, &max_high); - uc32_bootmem_init(min, max_low); + node_set_online(0); /* * Sparsemem tries to allocate bootmem in memory_present(), diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index a450ad573dcb..a4b0007a54e1 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -60,9 +60,6 @@ endif ifeq ($(avx2_supported),yes) obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o - obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/ - obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/ - obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/ obj-$(CONFIG_CRYPTO_MORUS1280_AVX2) += morus1280-avx2.o endif @@ -106,7 +103,7 @@ ifeq ($(avx2_supported),yes) morus1280-avx2-y := morus1280-avx2-asm.o morus1280-avx2-glue.o endif -aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o +aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index acbe7e8336d8..661f7daf43da 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -102,9 +102,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out, asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); -int crypto_fpu_init(void); -void 
crypto_fpu_exit(void); - #define AVX_GEN2_OPTSIZE 640 #define AVX_GEN4_OPTSIZE 4096 @@ -817,7 +814,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, /* Linearize assoc, if not already linear */ if (req->src->length >= assoclen && req->src->length && (!PageHighMem(sg_page(req->src)) || - req->src->offset + req->src->length < PAGE_SIZE)) { + req->src->offset + req->src->length <= PAGE_SIZE)) { scatterwalk_start(&assoc_sg_walk, req->src); assoc = scatterwalk_map(&assoc_sg_walk); } else { @@ -1253,22 +1250,6 @@ static struct skcipher_alg aesni_skciphers[] = { static struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)]; -static struct { - const char *algname; - const char *drvname; - const char *basename; - struct simd_skcipher_alg *simd; -} aesni_simd_skciphers2[] = { -#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \ - IS_BUILTIN(CONFIG_CRYPTO_PCBC) - { - .algname = "pcbc(aes)", - .drvname = "pcbc-aes-aesni", - .basename = "fpu(pcbc(__aes-aesni))", - }, -#endif -}; - #ifdef CONFIG_X86_64 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key, unsigned int key_len) @@ -1422,10 +1403,6 @@ static void aesni_free_simds(void) for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) && aesni_simd_skciphers[i]; i++) simd_skcipher_free(aesni_simd_skciphers[i]); - - for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) - if (aesni_simd_skciphers2[i].simd) - simd_skcipher_free(aesni_simd_skciphers2[i].simd); } static int __init aesni_init(void) @@ -1469,13 +1446,9 @@ static int __init aesni_init(void) #endif #endif - err = crypto_fpu_init(); - if (err) - return err; - err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); if (err) - goto fpu_exit; + return err; err = crypto_register_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers)); @@ -1499,18 +1472,6 @@ static int __init aesni_init(void) aesni_simd_skciphers[i] = simd; } - for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) { - algname = aesni_simd_skciphers2[i].algname; - drvname = aesni_simd_skciphers2[i].drvname; - basename = aesni_simd_skciphers2[i].basename; - simd = simd_skcipher_create_compat(algname, drvname, basename); - err = PTR_ERR(simd); - if (IS_ERR(simd)) - continue; - - aesni_simd_skciphers2[i].simd = simd; - } - return 0; unregister_simds: @@ -1521,8 +1482,6 @@ unregister_skciphers: ARRAY_SIZE(aesni_skciphers)); unregister_algs: crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); -fpu_exit: - crypto_fpu_exit(); return err; } @@ -1533,8 +1492,6 @@ static void __exit aesni_exit(void) crypto_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers)); crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); - - crypto_fpu_exit(); } late_initcall(aesni_init); diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c deleted file mode 100644 index 406680476c52..000000000000 --- a/arch/x86/crypto/fpu.c +++ /dev/null @@ -1,207 +0,0 @@ -/* - * FPU: Wrapper for blkcipher touching fpu - * - * Copyright (c) Intel Corp. - * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
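One small functional change earlier in this aesni-intel_glue.c hunk turns a '<' into '<=' in gcmaes_crypt_by_sg(): associated data that ends exactly at the page boundary still lies entirely within one page and can be used in place, so the stricter test needlessly forced the copy path. A toy check of that arithmetic, assuming a 4 KiB page purely for illustration:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u  /* assumed page size for the illustration */

/* Does [offset, offset + length) stay within a single page? */
static int fits_in_one_page(unsigned int offset, unsigned int length)
{
	return offset + length <= PAGE_SIZE;
}

int main(void)
{
	/* 16 bytes ending exactly at the page boundary: still one page. */
	assert(fits_in_one_page(4080, 16));
	/* The stricter '<' test would have rejected this case. */
	assert(!(4080 + 16 < PAGE_SIZE));
	/* Crossing into the next page genuinely needs the copy path. */
	assert(!fits_in_one_page(4081, 16));
	printf("boundary checks ok\n");
	return 0;
}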
- * - */ - -#include <crypto/internal/skcipher.h> -#include <linux/err.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <asm/fpu/api.h> - -struct crypto_fpu_ctx { - struct crypto_skcipher *child; -}; - -static int crypto_fpu_setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) -{ - struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_skcipher *child = ctx->child; - int err; - - crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_skcipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} - -static int crypto_fpu_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_skcipher *child = ctx->child; - SKCIPHER_REQUEST_ON_STACK(subreq, child); - int err; - - skcipher_request_set_tfm(subreq, child); - skcipher_request_set_callback(subreq, 0, NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, - req->iv); - - kernel_fpu_begin(); - err = crypto_skcipher_encrypt(subreq); - kernel_fpu_end(); - - skcipher_request_zero(subreq); - return err; -} - -static int crypto_fpu_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_skcipher *child = ctx->child; - SKCIPHER_REQUEST_ON_STACK(subreq, child); - int err; - - skcipher_request_set_tfm(subreq, child); - skcipher_request_set_callback(subreq, 0, NULL, NULL); - skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, - req->iv); - - kernel_fpu_begin(); - err = crypto_skcipher_decrypt(subreq); - kernel_fpu_end(); - - skcipher_request_zero(subreq); - return err; -} - -static int crypto_fpu_init_tfm(struct crypto_skcipher *tfm) -{ - struct skcipher_instance *inst = skcipher_alg_instance(tfm); - struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_skcipher_spawn *spawn; - struct crypto_skcipher *cipher; - - spawn = skcipher_instance_ctx(inst); - cipher = crypto_spawn_skcipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - - return 0; -} - -static void crypto_fpu_exit_tfm(struct crypto_skcipher *tfm) -{ - struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_skcipher(ctx->child); -} - -static void crypto_fpu_free(struct skcipher_instance *inst) -{ - crypto_drop_skcipher(skcipher_instance_ctx(inst)); - kfree(inst); -} - -static int crypto_fpu_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct crypto_skcipher_spawn *spawn; - struct skcipher_instance *inst; - struct crypto_attr_type *algt; - struct skcipher_alg *alg; - const char *cipher_name; - int err; - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if ((algt->type ^ (CRYPTO_ALG_INTERNAL | CRYPTO_ALG_TYPE_SKCIPHER)) & - algt->mask) - return -EINVAL; - - if (!(algt->mask & CRYPTO_ALG_INTERNAL)) - return -EINVAL; - - cipher_name = crypto_attr_alg_name(tb[1]); - if (IS_ERR(cipher_name)) - return PTR_ERR(cipher_name); - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; - - spawn = skcipher_instance_ctx(inst); - - crypto_set_skcipher_spawn(spawn, 
skcipher_crypto_instance(inst)); - err = crypto_grab_skcipher(spawn, cipher_name, CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); - if (err) - goto out_free_inst; - - alg = crypto_skcipher_spawn_alg(spawn); - - err = crypto_inst_setname(skcipher_crypto_instance(inst), "fpu", - &alg->base); - if (err) - goto out_drop_skcipher; - - inst->alg.base.cra_flags = CRYPTO_ALG_INTERNAL; - inst->alg.base.cra_priority = alg->base.cra_priority; - inst->alg.base.cra_blocksize = alg->base.cra_blocksize; - inst->alg.base.cra_alignmask = alg->base.cra_alignmask; - - inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); - inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); - inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); - - inst->alg.base.cra_ctxsize = sizeof(struct crypto_fpu_ctx); - - inst->alg.init = crypto_fpu_init_tfm; - inst->alg.exit = crypto_fpu_exit_tfm; - - inst->alg.setkey = crypto_fpu_setkey; - inst->alg.encrypt = crypto_fpu_encrypt; - inst->alg.decrypt = crypto_fpu_decrypt; - - inst->free = crypto_fpu_free; - - err = skcipher_register_instance(tmpl, inst); - if (err) - goto out_drop_skcipher; - -out: - return err; - -out_drop_skcipher: - crypto_drop_skcipher(spawn); -out_free_inst: - kfree(inst); - goto out; -} - -static struct crypto_template crypto_fpu_tmpl = { - .name = "fpu", - .create = crypto_fpu_create, - .module = THIS_MODULE, -}; - -int __init crypto_fpu_init(void) -{ - return crypto_register_template(&crypto_fpu_tmpl); -} - -void crypto_fpu_exit(void) -{ - crypto_unregister_template(&crypto_fpu_tmpl); -} - -MODULE_ALIAS_CRYPTO("fpu"); diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile deleted file mode 100644 index 815ded3ba90e..000000000000 --- a/arch/x86/crypto/sha1-mb/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Arch-specific CryptoAPI modules. -# - -OBJECT_FILES_NON_STANDARD := y - -avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ - $(comma)4)$(comma)%ymm2,yes,no) -ifeq ($(avx2_supported),yes) - obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb.o - sha1-mb-y := sha1_mb.o sha1_mb_mgr_flush_avx2.o \ - sha1_mb_mgr_init_avx2.o sha1_mb_mgr_submit_avx2.o sha1_x8_avx2.o -endif diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c deleted file mode 100644 index b93805664c1d..000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb.c +++ /dev/null @@ -1,1011 +0,0 @@ -/* - * Multi buffer SHA1 algorithm Glue Code - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Tim Chen <tim.c.chen@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/cryptohash.h> -#include <linux/types.h> -#include <linux/list.h> -#include <crypto/scatterwalk.h> -#include <crypto/sha.h> -#include <crypto/mcryptd.h> -#include <crypto/crypto_wq.h> -#include <asm/byteorder.h> -#include <linux/hardirq.h> -#include <asm/fpu/api.h> -#include "sha1_mb_ctx.h" - -#define FLUSH_INTERVAL 1000 /* in usec */ - -static struct mcryptd_alg_state sha1_mb_alg_state; - -struct sha1_mb_ctx { - struct mcryptd_ahash *mcryptd_tfm; -}; - -static inline struct mcryptd_hash_request_ctx - *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx) -{ - struct ahash_request *areq; - - areq = container_of((void *) hash_ctx, struct ahash_request, __ctx); - return container_of(areq, struct mcryptd_hash_request_ctx, areq); -} - -static inline struct ahash_request - *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) -{ - return container_of((void *) ctx, struct ahash_request, __ctx); -} - -static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, - struct ahash_request *areq) -{ - rctx->flag = HASH_UPDATE; -} - -static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state); -static asmlinkage struct job_sha1* (*sha1_job_mgr_submit) - (struct sha1_mb_mgr *state, struct job_sha1 *job); -static asmlinkage struct job_sha1* (*sha1_job_mgr_flush) - (struct sha1_mb_mgr *state); -static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job) - (struct sha1_mb_mgr *state); - -static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2], - uint64_t total_len) -{ - uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1); - - memset(&padblock[i], 0, SHA1_BLOCK_SIZE); - padblock[i] = 0x80; - - i += ((SHA1_BLOCK_SIZE - 1) & - (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1))) - + 1 + SHA1_PADLENGTHFIELD_SIZE; - -#if SHA1_PADLENGTHFIELD_SIZE == 16 - *((uint64_t *) &padblock[i - 16]) = 0; -#endif - - 
*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3); - - /* Number of extra blocks to hash */ - return i >> SHA1_LOG2_BLOCK_SIZE; -} - -static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, - struct sha1_hash_ctx *ctx) -{ - while (ctx) { - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Clear PROCESSING bit */ - ctx->status = HASH_CTX_STS_COMPLETE; - return ctx; - } - - /* - * If the extra blocks are empty, begin hashing what remains - * in the user's buffer. - */ - if (ctx->partial_block_buffer_length == 0 && - ctx->incoming_buffer_length) { - - const void *buffer = ctx->incoming_buffer; - uint32_t len = ctx->incoming_buffer_length; - uint32_t copy_len; - - /* - * Only entire blocks can be hashed. - * Copy remainder to extra blocks buffer. - */ - copy_len = len & (SHA1_BLOCK_SIZE-1); - - if (copy_len) { - len -= copy_len; - memcpy(ctx->partial_block_buffer, - ((const char *) buffer + len), - copy_len); - ctx->partial_block_buffer_length = copy_len; - } - - ctx->incoming_buffer_length = 0; - - /* len should be a multiple of the block size now */ - assert((len % SHA1_BLOCK_SIZE) == 0); - - /* Set len to the number of blocks to be hashed */ - len >>= SHA1_LOG2_BLOCK_SIZE; - - if (len) { - - ctx->job.buffer = (uint8_t *) buffer; - ctx->job.len = len; - ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr, - &ctx->job); - continue; - } - } - - /* - * If the extra blocks are not empty, then we are - * either on the last block(s) or we need more - * user input before continuing. - */ - if (ctx->status & HASH_CTX_STS_LAST) { - - uint8_t *buf = ctx->partial_block_buffer; - uint32_t n_extra_blocks = - sha1_pad(buf, ctx->total_length); - - ctx->status = (HASH_CTX_STS_PROCESSING | - HASH_CTX_STS_COMPLETE); - ctx->job.buffer = buf; - ctx->job.len = (uint32_t) n_extra_blocks; - ctx = (struct sha1_hash_ctx *) - sha1_job_mgr_submit(&mgr->mgr, &ctx->job); - continue; - } - - ctx->status = HASH_CTX_STS_IDLE; - return ctx; - } - - return NULL; -} - -static struct sha1_hash_ctx - *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr) -{ - /* - * If get_comp_job returns NULL, there are no jobs complete. - * If get_comp_job returns a job, verify that it is safe to return to - * the user. - * If it is not ready, resubmit the job to finish processing. - * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. - * Otherwise, all jobs currently being managed by the hash_ctx_mgr - * still need processing. - */ - struct sha1_hash_ctx *ctx; - - ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr); - return sha1_ctx_mgr_resubmit(mgr, ctx); -} - -static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr) -{ - sha1_job_mgr_init(&mgr->mgr); -} - -static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr, - struct sha1_hash_ctx *ctx, - const void *buffer, - uint32_t len, - int flags) -{ - if (flags & ~(HASH_UPDATE | HASH_LAST)) { - /* User should not pass anything other than UPDATE or LAST */ - ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; - return ctx; - } - - if (ctx->status & HASH_CTX_STS_PROCESSING) { - /* Cannot submit to a currently processing job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; - return ctx; - } - - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Cannot update a finished job. 
*/ - ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; - return ctx; - } - - /* - * If we made it here, there were no errors during this call to - * submit - */ - ctx->error = HASH_CTX_ERROR_NONE; - - /* Store buffer ptr info from user */ - ctx->incoming_buffer = buffer; - ctx->incoming_buffer_length = len; - - /* - * Store the user's request flags and mark this ctx as currently - * being processed. - */ - ctx->status = (flags & HASH_LAST) ? - (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : - HASH_CTX_STS_PROCESSING; - - /* Advance byte counter */ - ctx->total_length += len; - - /* - * If there is anything currently buffered in the extra blocks, - * append to it until it contains a whole block. - * Or if the user's buffer contains less than a whole block, - * append as much as possible to the extra block. - */ - if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) { - /* - * Compute how many bytes to copy from user buffer into - * extra block - */ - uint32_t copy_len = SHA1_BLOCK_SIZE - - ctx->partial_block_buffer_length; - if (len < copy_len) - copy_len = len; - - if (copy_len) { - /* Copy and update relevant pointers and counters */ - memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length], - buffer, copy_len); - - ctx->partial_block_buffer_length += copy_len; - ctx->incoming_buffer = (const void *) - ((const char *)buffer + copy_len); - ctx->incoming_buffer_length = len - copy_len; - } - - /* - * The extra block should never contain more than 1 block - * here - */ - assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE); - - /* - * If the extra block buffer contains exactly 1 block, it can - * be hashed. - */ - if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) { - ctx->partial_block_buffer_length = 0; - - ctx->job.buffer = ctx->partial_block_buffer; - ctx->job.len = 1; - ctx = (struct sha1_hash_ctx *) - sha1_job_mgr_submit(&mgr->mgr, &ctx->job); - } - } - - return sha1_ctx_mgr_resubmit(mgr, ctx); -} - -static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr) -{ - struct sha1_hash_ctx *ctx; - - while (1) { - ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr); - - /* If flush returned 0, there are no more jobs in flight. */ - if (!ctx) - return NULL; - - /* - * If flush returned a job, resubmit the job to finish - * processing. - */ - ctx = sha1_ctx_mgr_resubmit(mgr, ctx); - - /* - * If sha1_ctx_mgr_resubmit returned a job, it is ready to be - * returned. Otherwise, all jobs currently being managed by the - * sha1_ctx_mgr still need processing. Loop. 
- */ - if (ctx) - return ctx; - } -} - -static int sha1_mb_init(struct ahash_request *areq) -{ - struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); - - hash_ctx_init(sctx); - sctx->job.result_digest[0] = SHA1_H0; - sctx->job.result_digest[1] = SHA1_H1; - sctx->job.result_digest[2] = SHA1_H2; - sctx->job.result_digest[3] = SHA1_H3; - sctx->job.result_digest[4] = SHA1_H4; - sctx->total_length = 0; - sctx->partial_block_buffer_length = 0; - sctx->status = HASH_CTX_STS_IDLE; - - return 0; -} - -static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx) -{ - int i; - struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq); - __be32 *dst = (__be32 *) rctx->out; - - for (i = 0; i < 5; ++i) - dst[i] = cpu_to_be32(sctx->job.result_digest[i]); - - return 0; -} - -static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, - struct mcryptd_alg_cstate *cstate, bool flush) -{ - int flag = HASH_UPDATE; - int nbytes, err = 0; - struct mcryptd_hash_request_ctx *rctx = *ret_rctx; - struct sha1_hash_ctx *sha_ctx; - - /* more work ? */ - while (!(rctx->flag & HASH_DONE)) { - nbytes = crypto_ahash_walk_done(&rctx->walk, 0); - if (nbytes < 0) { - err = nbytes; - goto out; - } - /* check if the walk is done */ - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - if (rctx->flag & HASH_FINAL) - flag |= HASH_LAST; - - } - sha_ctx = (struct sha1_hash_ctx *) - ahash_request_ctx(&rctx->areq); - kernel_fpu_begin(); - sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, - rctx->walk.data, nbytes, flag); - if (!sha_ctx) { - if (flush) - sha_ctx = sha1_ctx_mgr_flush(cstate->mgr); - } - kernel_fpu_end(); - if (sha_ctx) - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - else { - rctx = NULL; - goto out; - } - } - - /* copy the results */ - if (rctx->flag & HASH_FINAL) - sha1_mb_set_results(rctx); - -out: - *ret_rctx = rctx; - return err; -} - -static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate, - int err) -{ - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha1_hash_ctx *sha_ctx; - struct mcryptd_hash_request_ctx *req_ctx; - int ret; - - /* remove from work list */ - spin_lock(&cstate->work_lock); - list_del(&rctx->waiter); - spin_unlock(&cstate->work_lock); - - if (irqs_disabled()) - rctx->complete(&req->base, err); - else { - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); - } - - /* check to see if there are other jobs that are done */ - sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); - while (sha_ctx) { - req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&req_ctx, cstate, false); - if (req_ctx) { - spin_lock(&cstate->work_lock); - list_del(&req_ctx->waiter); - spin_unlock(&cstate->work_lock); - - req = cast_mcryptd_ctx_to_req(req_ctx); - if (irqs_disabled()) - req_ctx->complete(&req->base, ret); - else { - local_bh_disable(); - req_ctx->complete(&req->base, ret); - local_bh_enable(); - } - } - sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr); - } - - return 0; -} - -static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate) -{ - unsigned long next_flush; - unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); - - /* initialize tag */ - rctx->tag.arrival = jiffies; /* tag the arrival time */ - rctx->tag.seq_num = cstate->next_seq_num++; - next_flush = rctx->tag.arrival + delay; - rctx->tag.expire = next_flush; - - spin_lock(&cstate->work_lock); - list_add_tail(&rctx->waiter, &cstate->work_list); - 
spin_unlock(&cstate->work_lock); - - mcryptd_arm_flusher(cstate, delay); -} - -static int sha1_mb_update(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha1_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha1_hash_ctx *sha_ctx; - int ret = 0, nbytes; - - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) - rctx->flag |= HASH_DONE; - - /* submit */ - sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); - sha1_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, - nbytes, HASH_UPDATE); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha1_mb_finup(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha1_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha1_hash_ctx *sha_ctx; - int ret = 0, flag = HASH_UPDATE, nbytes; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - flag = HASH_LAST; - } - - /* submit */ - rctx->flag |= HASH_FINAL; - sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); - sha1_mb_add_list(rctx, cstate); - - kernel_fpu_begin(); - sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, - nbytes, flag); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha1_mb_final(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha1_mb_alg_state.alg_cstate); - - struct sha1_hash_ctx *sha_ctx; - int ret = 0; - u8 data; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - rctx->flag |= HASH_DONE | HASH_FINAL; - - sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); - /* flag HASH_FINAL and 0 data size */ - sha1_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = 
sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, - HASH_LAST); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha1_mb_export(struct ahash_request *areq, void *out) -{ - struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(out, sctx, sizeof(*sctx)); - - return 0; -} - -static int sha1_mb_import(struct ahash_request *areq, const void *in) -{ - struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(sctx, in, sizeof(*sctx)); - - return 0; -} - -static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm) -{ - struct mcryptd_ahash *mcryptd_tfm; - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); - struct mcryptd_hash_ctx *mctx; - - mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", - CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(mcryptd_tfm)) - return PTR_ERR(mcryptd_tfm); - mctx = crypto_ahash_ctx(&mcryptd_tfm->base); - mctx->alg_state = &sha1_mb_alg_state; - ctx->mcryptd_tfm = mcryptd_tfm; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(&mcryptd_tfm->base)); - - return 0; -} - -static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm) -{ - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - sizeof(struct sha1_hash_ctx)); - - return 0; -} - -static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static struct ahash_alg sha1_mb_areq_alg = { - .init = sha1_mb_init, - .update = sha1_mb_update, - .final = sha1_mb_final, - .finup = sha1_mb_finup, - .export = sha1_mb_export, - .import = sha1_mb_import, - .halg = { - .digestsize = SHA1_DIGEST_SIZE, - .statesize = sizeof(struct sha1_hash_ctx), - .base = { - .cra_name = "__sha1-mb", - .cra_driver_name = "__intel_sha1-mb", - .cra_priority = 100, - /* - * use ASYNC flag as some buffers in multi-buffer - * algo may not have completed before hashing thread - * sleep - */ - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_INTERNAL, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha1_mb_areq_alg.halg.base.cra_list), - .cra_init = sha1_mb_areq_init_tfm, - .cra_exit = sha1_mb_areq_exit_tfm, - .cra_ctxsize = sizeof(struct sha1_hash_ctx), - } - } -}; - -static int sha1_mb_async_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_init(mcryptd_req); -} - -static int sha1_mb_async_update(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - 
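sha1_mb_init() and sha1_mb_set_results() above only touch the five 32-bit chaining words: the former seeds them with the standard SHA-1 initial values (what SHA1_H0..SHA1_H4 expand to), the latter byte-swaps each word into the caller's digest buffer. A self-contained userspace sketch of those two steps follows; the helper names are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* FIPS 180-1 initial hash value, the same constants SHA1_H0..SHA1_H4 carry */
static void sha1_seed(uint32_t h[5])
{
	h[0] = 0x67452301; h[1] = 0xEFCDAB89; h[2] = 0x98BADCFE;
	h[3] = 0x10325476; h[4] = 0xC3D2E1F0;
}

/* Equivalent of the cpu_to_be32() loop in sha1_mb_set_results(): the digest
 * is the five state words serialized big-endian, 20 bytes total. */
static void sha1_serialize(const uint32_t h[5], uint8_t out[20])
{
	for (int i = 0; i < 5; i++) {
		out[4 * i + 0] = (uint8_t)(h[i] >> 24);
		out[4 * i + 1] = (uint8_t)(h[i] >> 16);
		out[4 * i + 2] = (uint8_t)(h[i] >> 8);
		out[4 * i + 3] = (uint8_t)(h[i]);
	}
}

int main(void)
{
	uint32_t h[5];
	uint8_t digest[20];

	sha1_seed(h);
	sha1_serialize(h, digest);	/* hex dump of the freshly seeded state */
	for (int i = 0; i < 20; i++)
		printf("%02x", digest[i]);
	putchar('\n');
	return 0;
}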
memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_update(mcryptd_req); -} - -static int sha1_mb_async_finup(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_finup(mcryptd_req); -} - -static int sha1_mb_async_final(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_final(mcryptd_req); -} - -static int sha1_mb_async_digest(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_digest(mcryptd_req); -} - -static int sha1_mb_async_export(struct ahash_request *req, void *out) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_export(mcryptd_req, out); -} - -static int sha1_mb_async_import(struct ahash_request *req, const void *in) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm); - struct mcryptd_hash_request_ctx *rctx; - struct ahash_request *areq; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - rctx = ahash_request_ctx(mcryptd_req); - areq = &rctx->areq; - - ahash_request_set_tfm(areq, child); - ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, - rctx->complete, req); - - return crypto_ahash_import(mcryptd_req, in); -} - -static struct ahash_alg sha1_mb_async_alg = { - .init = sha1_mb_async_init, - .update = sha1_mb_async_update, - .final = sha1_mb_async_final, - .finup = sha1_mb_async_finup, - .digest = sha1_mb_async_digest, - .export = sha1_mb_async_export, - .import = sha1_mb_async_import, - .halg = { - .digestsize = SHA1_DIGEST_SIZE, - .statesize = sizeof(struct sha1_hash_ctx), - .base = { - .cra_name = "sha1", - .cra_driver_name = "sha1_mb", - /* - * Low priority, since with few concurrent hash requests - * this is extremely slow due to the flush delay. Users - * whose workloads would benefit from this can request - * it explicitly by driver name, or can increase its - * priority at runtime using NETLINK_CRYPTO. 
- */ - .cra_priority = 50, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list), - .cra_init = sha1_mb_async_init_tfm, - .cra_exit = sha1_mb_async_exit_tfm, - .cra_ctxsize = sizeof(struct sha1_mb_ctx), - .cra_alignmask = 0, - }, - }, -}; - -static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate) -{ - struct mcryptd_hash_request_ctx *rctx; - unsigned long cur_time; - unsigned long next_flush = 0; - struct sha1_hash_ctx *sha_ctx; - - - cur_time = jiffies; - - while (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - if (time_before(cur_time, rctx->tag.expire)) - break; - kernel_fpu_begin(); - sha_ctx = (struct sha1_hash_ctx *) - sha1_ctx_mgr_flush(cstate->mgr); - kernel_fpu_end(); - if (!sha_ctx) { - pr_err("sha1_mb error: nothing got flushed for non-empty list\n"); - break; - } - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - sha_finish_walk(&rctx, cstate, true); - sha_complete_job(rctx, cstate, 0); - } - - if (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - /* get the hash context and then flush time */ - next_flush = rctx->tag.expire; - mcryptd_arm_flusher(cstate, get_delay(next_flush)); - } - return next_flush; -} - -static int __init sha1_mb_mod_init(void) -{ - - int cpu; - int err; - struct mcryptd_alg_cstate *cpu_state; - - /* check for dependent cpu features */ - if (!boot_cpu_has(X86_FEATURE_AVX2) || - !boot_cpu_has(X86_FEATURE_BMI2)) - return -ENODEV; - - /* initialize multibuffer structures */ - sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate); - - sha1_job_mgr_init = sha1_mb_mgr_init_avx2; - sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2; - sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2; - sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2; - - if (!sha1_mb_alg_state.alg_cstate) - return -ENOMEM; - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); - cpu_state->next_flush = 0; - cpu_state->next_seq_num = 0; - cpu_state->flusher_engaged = false; - INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); - cpu_state->cpu = cpu; - cpu_state->alg_state = &sha1_mb_alg_state; - cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr), - GFP_KERNEL); - if (!cpu_state->mgr) - goto err2; - sha1_ctx_mgr_init(cpu_state->mgr); - INIT_LIST_HEAD(&cpu_state->work_list); - spin_lock_init(&cpu_state->work_lock); - } - sha1_mb_alg_state.flusher = &sha1_mb_flusher; - - err = crypto_register_ahash(&sha1_mb_areq_alg); - if (err) - goto err2; - err = crypto_register_ahash(&sha1_mb_async_alg); - if (err) - goto err1; - - - return 0; -err1: - crypto_unregister_ahash(&sha1_mb_areq_alg); -err2: - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha1_mb_alg_state.alg_cstate); - return -ENODEV; -} - -static void __exit sha1_mb_mod_fini(void) -{ - int cpu; - struct mcryptd_alg_cstate *cpu_state; - - crypto_unregister_ahash(&sha1_mb_async_alg); - crypto_unregister_ahash(&sha1_mb_areq_alg); - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha1_mb_alg_state.alg_cstate); -} - -module_init(sha1_mb_mod_init); -module_exit(sha1_mb_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, 
multi buffer accelerated"); - -MODULE_ALIAS_CRYPTO("sha1"); diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h deleted file mode 100644 index 9454bd16f9f8..000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Header file for multi buffer SHA context - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Tim Chen <tim.c.chen@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _SHA_MB_CTX_INTERNAL_H -#define _SHA_MB_CTX_INTERNAL_H - -#include "sha1_mb_mgr.h" - -#define HASH_UPDATE 0x00 -#define HASH_LAST 0x01 -#define HASH_DONE 0x02 -#define HASH_FINAL 0x04 - -#define HASH_CTX_STS_IDLE 0x00 -#define HASH_CTX_STS_PROCESSING 0x01 -#define HASH_CTX_STS_LAST 0x02 -#define HASH_CTX_STS_COMPLETE 0x04 - -enum hash_ctx_error { - HASH_CTX_ERROR_NONE = 0, - HASH_CTX_ERROR_INVALID_FLAGS = -1, - HASH_CTX_ERROR_ALREADY_PROCESSING = -2, - HASH_CTX_ERROR_ALREADY_COMPLETED = -3, - -#ifdef HASH_CTX_DEBUG - HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4, -#endif -}; - - -#define hash_ctx_user_data(ctx) ((ctx)->user_data) -#define hash_ctx_digest(ctx) ((ctx)->job.result_digest) -#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING) -#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE) -#define hash_ctx_status(ctx) ((ctx)->status) -#define hash_ctx_error(ctx) ((ctx)->error) -#define hash_ctx_init(ctx) \ - do { \ - (ctx)->error = HASH_CTX_ERROR_NONE; \ - (ctx)->status = HASH_CTX_STS_COMPLETE; \ - } while (0) - - -/* Hash Constants and Typedefs */ -#define SHA1_DIGEST_LENGTH 5 -#define SHA1_LOG2_BLOCK_SIZE 6 - -#define SHA1_PADLENGTHFIELD_SIZE 8 - -#ifdef SHA_MB_DEBUG -#define assert(expr) \ -do { \ - if (unlikely(!(expr))) { \ - printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr, __FILE__, __func__, __LINE__); \ - } \ -} while (0) -#else -#define assert(expr) do {} while (0) -#endif - -struct sha1_ctx_mgr { - struct sha1_mb_mgr mgr; -}; - -/* typedef struct sha1_ctx_mgr sha1_ctx_mgr; */ - -struct sha1_hash_ctx { - /* Must be at struct offset 0 */ - struct job_sha1 job; - /* status flag */ - int status; - /* error flag */ - int error; - - uint64_t total_length; - const void *incoming_buffer; - uint32_t incoming_buffer_length; - uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2]; - uint32_t partial_block_buffer_length; - void *user_data; -}; - -#endif diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h deleted file mode 100644 index 08ad1a9acfd7..000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Header file for multi buffer SHA1 algorithm manager - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * James Guilford <james.guilford@intel.com> - * Tim Chen <tim.c.chen@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __SHA_MB_MGR_H -#define __SHA_MB_MGR_H - - -#include <linux/types.h> - -#define NUM_SHA1_DIGEST_WORDS 5 - -enum job_sts { STS_UNKNOWN = 0, - STS_BEING_PROCESSED = 1, - STS_COMPLETED = 2, - STS_INTERNAL_ERROR = 3, - STS_ERROR = 4 -}; - -struct job_sha1 { - u8 *buffer; - u32 len; - u32 result_digest[NUM_SHA1_DIGEST_WORDS] __aligned(32); - enum job_sts status; - void *user_data; -}; - -/* SHA1 out-of-order scheduler */ - -/* typedef uint32_t sha1_digest_array[5][8]; */ - -struct sha1_args_x8 { - uint32_t digest[5][8]; - uint8_t *data_ptr[8]; -}; - -struct sha1_lane_data { - struct job_sha1 *job_in_lane; -}; - -struct sha1_mb_mgr { - struct sha1_args_x8 args; - - uint32_t lens[8]; - - /* each byte is index (0...7) of unused lanes */ - uint64_t unused_lanes; - /* byte 4 is set to FF as a flag */ - struct sha1_lane_data ldata[8]; -}; - - -#define SHA1_MB_MGR_NUM_LANES_AVX2 8 - -void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state); -struct job_sha1 *sha1_mb_mgr_submit_avx2(struct sha1_mb_mgr *state, - struct job_sha1 *job); -struct job_sha1 *sha1_mb_mgr_flush_avx2(struct sha1_mb_mgr *state); -struct job_sha1 *sha1_mb_mgr_get_comp_job_avx2(struct sha1_mb_mgr *state); - -#endif diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S deleted file mode 100644 index 86688c6e7a25..000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Header file for multi buffer SHA1 algorithm data structure - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * Contact Information: - * James Guilford <james.guilford@intel.com> - * Tim Chen <tim.c.chen@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -# Macros for defining data structures - -# Usage example - -#START_FIELDS # JOB_AES -### name size align -#FIELD _plaintext, 8, 8 # pointer to plaintext -#FIELD _ciphertext, 8, 8 # pointer to ciphertext -#FIELD _IV, 16, 8 # IV -#FIELD _keys, 8, 8 # pointer to keys -#FIELD _len, 4, 4 # length in bytes -#FIELD _status, 4, 4 # status enumeration -#FIELD _user_data, 8, 8 # pointer to user data -#UNION _union, size1, align1, \ -# size2, align2, \ -# size3, align3, \ -# ... -#END_FIELDS -#%assign _JOB_AES_size _FIELD_OFFSET -#%assign _JOB_AES_align _STRUCT_ALIGN - -######################################################################### - -# Alternate "struc-like" syntax: -# STRUCT job_aes2 -# RES_Q .plaintext, 1 -# RES_Q .ciphertext, 1 -# RES_DQ .IV, 1 -# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN -# RES_U .union, size1, align1, \ -# size2, align2, \ -# ... -# ENDSTRUCT -# # Following only needed if nesting -# %assign job_aes2_size _FIELD_OFFSET -# %assign job_aes2_align _STRUCT_ALIGN -# -# RES_* macros take a name, a count and an optional alignment. -# The count in in terms of the base size of the macro, and the -# default alignment is the base size. -# The macros are: -# Macro Base size -# RES_B 1 -# RES_W 2 -# RES_D 4 -# RES_Q 8 -# RES_DQ 16 -# RES_Y 32 -# RES_Z 64 -# -# RES_U defines a union. It's arguments are a name and two or more -# pairs of "size, alignment" -# -# The two assigns are only needed if this structure is being nested -# within another. Even if the assigns are not done, one can still use -# STRUCT_NAME_size as the size of the structure. -# -# Note that for nesting, you still need to assign to STRUCT_NAME_size. 
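The START_FIELDS/FIELD/END_FIELDS assembler macros described in the comments above are pure integer bookkeeping: each FIELD rounds the running offset up to the field's alignment, records that offset under the field's name, and keeps the largest alignment seen so END_FIELDS can pad the final size. A small C model of that arithmetic; the C helper names are made up, while the sizes and alignments mirror the JOB_SHA1 layout defined further down in this file.

#include <stdio.h>

/* Round "off" up to the next multiple of "align" (a power of 2),
 * exactly what the FIELD macro does with _FIELD_OFFSET. */
static unsigned long align_up(unsigned long off, unsigned long align)
{
	return (off + align - 1) & ~(align - 1);
}

struct layout {
	unsigned long offset;		/* plays the role of _FIELD_OFFSET */
	unsigned long struct_align;	/* plays the role of _STRUCT_ALIGN */
};

static unsigned long field(struct layout *l, unsigned long size, unsigned long align)
{
	unsigned long pos = align_up(l->offset, align);

	l->offset = pos + size;
	if (align > l->struct_align)
		l->struct_align = align;
	return pos;			/* the offset bound to the field's symbol */
}

int main(void)
{
	struct layout l = { 0, 0 };

	/* JOB_SHA1: buffer(8,8), len(4,4), result_digest(20,32), status(4,4), user_data(8,8) */
	unsigned long buffer        = field(&l, 8, 8);
	unsigned long len           = field(&l, 4, 4);
	unsigned long result_digest = field(&l, 20, 32);
	unsigned long status        = field(&l, 4, 4);
	unsigned long user_data     = field(&l, 8, 8);
	unsigned long size          = align_up(l.offset, l.struct_align); /* END_FIELDS */

	printf("buffer=%lu len=%lu digest=%lu status=%lu user_data=%lu size=%lu align=%lu\n",
	       buffer, len, result_digest, status, user_data, size, l.struct_align);
	return 0;
}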
-# -# The differences between this and using "struc" directly are that each -# type is implicitly aligned to its natural length (although this can be -# over-ridden with an explicit third parameter), and that the structure -# is padded at the end to its overall alignment. -# - -######################################################################### - -#ifndef _SHA1_MB_MGR_DATASTRUCT_ASM_ -#define _SHA1_MB_MGR_DATASTRUCT_ASM_ - -## START_FIELDS -.macro START_FIELDS - _FIELD_OFFSET = 0 - _STRUCT_ALIGN = 0 -.endm - -## FIELD name size align -.macro FIELD name size align - _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1)) - \name = _FIELD_OFFSET - _FIELD_OFFSET = _FIELD_OFFSET + (\size) -.if (\align > _STRUCT_ALIGN) - _STRUCT_ALIGN = \align -.endif -.endm - -## END_FIELDS -.macro END_FIELDS - _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1)) -.endm - -######################################################################## - -.macro STRUCT p1 -START_FIELDS -.struc \p1 -.endm - -.macro ENDSTRUCT - tmp = _FIELD_OFFSET - END_FIELDS - tmp = (_FIELD_OFFSET - %%tmp) -.if (tmp > 0) - .lcomm tmp -.endif -.endstruc -.endm - -## RES_int name size align -.macro RES_int p1 p2 p3 - name = \p1 - size = \p2 - align = .\p3 - - _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1)) -.align align -.lcomm name size - _FIELD_OFFSET = _FIELD_OFFSET + (size) -.if (align > _STRUCT_ALIGN) - _STRUCT_ALIGN = align -.endif -.endm - - - -# macro RES_B name, size [, align] -.macro RES_B _name, _size, _align=1 -RES_int _name _size _align -.endm - -# macro RES_W name, size [, align] -.macro RES_W _name, _size, _align=2 -RES_int _name 2*(_size) _align -.endm - -# macro RES_D name, size [, align] -.macro RES_D _name, _size, _align=4 -RES_int _name 4*(_size) _align -.endm - -# macro RES_Q name, size [, align] -.macro RES_Q _name, _size, _align=8 -RES_int _name 8*(_size) _align -.endm - -# macro RES_DQ name, size [, align] -.macro RES_DQ _name, _size, _align=16 -RES_int _name 16*(_size) _align -.endm - -# macro RES_Y name, size [, align] -.macro RES_Y _name, _size, _align=32 -RES_int _name 32*(_size) _align -.endm - -# macro RES_Z name, size [, align] -.macro RES_Z _name, _size, _align=64 -RES_int _name 64*(_size) _align -.endm - - -#endif - -######################################################################## -#### Define constants -######################################################################## - -######################################################################## -#### Define SHA1 Out Of Order Data Structures -######################################################################## - -START_FIELDS # LANE_DATA -### name size align -FIELD _job_in_lane, 8, 8 # pointer to job object -END_FIELDS - -_LANE_DATA_size = _FIELD_OFFSET -_LANE_DATA_align = _STRUCT_ALIGN - -######################################################################## - -START_FIELDS # SHA1_ARGS_X8 -### name size align -FIELD _digest, 4*5*8, 16 # transposed digest -FIELD _data_ptr, 8*8, 8 # array of pointers to data -END_FIELDS - -_SHA1_ARGS_X4_size = _FIELD_OFFSET -_SHA1_ARGS_X4_align = _STRUCT_ALIGN -_SHA1_ARGS_X8_size = _FIELD_OFFSET -_SHA1_ARGS_X8_align = _STRUCT_ALIGN - -######################################################################## - -START_FIELDS # MB_MGR -### name size align -FIELD _args, _SHA1_ARGS_X4_size, _SHA1_ARGS_X4_align -FIELD _lens, 4*8, 8 -FIELD _unused_lanes, 8, 8 -FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align -END_FIELDS - -_MB_MGR_size = _FIELD_OFFSET 
-_MB_MGR_align = _STRUCT_ALIGN - -_args_digest = _args + _digest -_args_data_ptr = _args + _data_ptr - - -######################################################################## -#### Define constants -######################################################################## - -#define STS_UNKNOWN 0 -#define STS_BEING_PROCESSED 1 -#define STS_COMPLETED 2 - -######################################################################## -#### Define JOB_SHA1 structure -######################################################################## - -START_FIELDS # JOB_SHA1 - -### name size align -FIELD _buffer, 8, 8 # pointer to buffer -FIELD _len, 4, 4 # length in bytes -FIELD _result_digest, 5*4, 32 # Digest (output) -FIELD _status, 4, 4 -FIELD _user_data, 8, 8 -END_FIELDS - -_JOB_SHA1_size = _FIELD_OFFSET -_JOB_SHA1_align = _STRUCT_ALIGN diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S deleted file mode 100644 index 7cfba738f104..000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Flush routine for SHA1 multibuffer - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * James Guilford <james.guilford@intel.com> - * Tim Chen <tim.c.chen@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#include <linux/linkage.h> -#include <asm/frame.h> -#include "sha1_mb_mgr_datastruct.S" - - -.extern sha1_x8_avx2 - -# LINUX register definitions -#define arg1 %rdi -#define arg2 %rsi - -# Common definitions -#define state arg1 -#define job arg2 -#define len2 arg2 - -# idx must be a register not clobbered by sha1_x8_avx2 -#define idx %r8 -#define DWORD_idx %r8d - -#define unused_lanes %rbx -#define lane_data %rbx -#define tmp2 %rbx -#define tmp2_w %ebx - -#define job_rax %rax -#define tmp1 %rax -#define size_offset %rax -#define tmp %rax -#define start_offset %rax - -#define tmp3 %arg1 - -#define extra_blocks %arg2 -#define p %arg2 - -.macro LABEL prefix n -\prefix\n\(): -.endm - -.macro JNE_SKIP i -jne skip_\i -.endm - -.altmacro -.macro SET_OFFSET _offset -offset = \_offset -.endm -.noaltmacro - -# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state) -# arg 1 : rcx : state -ENTRY(sha1_mb_mgr_flush_avx2) - FRAME_BEGIN - push %rbx - - # If bit (32+3) is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $32+3, unused_lanes - jc return_null - - # find a lane with a non-null job - xor idx, idx - offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne one(%rip), idx - offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne two(%rip), idx - offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne three(%rip), idx - offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne four(%rip), idx - offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne five(%rip), idx - offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne six(%rip), idx - offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne seven(%rip), idx - - # copy idx to empty lanes -copy_lane_data: - offset = (_args + _data_ptr) - mov offset(state,idx,8), tmp - - I = 0 -.rep 8 - offset = (_ldata + I * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) -.altmacro - JNE_SKIP %I - offset = (_args + _data_ptr + 8*I) - mov tmp, offset(state) - offset = (_lens + 4*I) - movl $0xFFFFFFFF, offset(state) -LABEL skip_ %I - I = (I+1) -.noaltmacro -.endr - - # Find min length - vmovdqu _lens+0*16(state), %xmm0 - vmovdqu _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword - - vmovd %xmm2, DWORD_idx - mov idx, len2 - and $0xF, idx - shr $4, len2 - jz len_is_0 - - vpand clear_low_nibble(%rip), %xmm2, %xmm2 - vpshufd $0, %xmm2, %xmm2 - - vpsubd %xmm2, %xmm0, %xmm0 - vpsubd %xmm2, %xmm1, %xmm1 - - vmovdqu %xmm0, _lens+0*16(state) - vmovdqu %xmm1, _lens+1*16(state) - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha1_x8_avx2 - # state and idx are intact - - -len_is_0: - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state, idx, 4) - - vmovd _args_digest(state , idx, 4) , %xmm0 - vpinsrd $1, 
_args_digest+1*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - movl _args_digest+4*32(state, idx, 4), tmp2_w - - vmovdqu %xmm0, _result_digest(job_rax) - offset = (_result_digest + 1*16) - mov tmp2_w, offset(job_rax) - -return: - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return -ENDPROC(sha1_mb_mgr_flush_avx2) - - -################################################################# - -.align 16 -ENTRY(sha1_mb_mgr_get_comp_job_avx2) - push %rbx - - ## if bit 32+3 is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $(32+3), unused_lanes - jc .return_null - - # Find min length - vmovdqu _lens(state), %xmm0 - vmovdqu _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword - - vmovd %xmm2, DWORD_idx - test $~0xF, idx - jnz .return_null - - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state, idx, 4) - - vmovd _args_digest(state, idx, 4), %xmm0 - vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - movl _args_digest+4*32(state, idx, 4), tmp2_w - - vmovdqu %xmm0, _result_digest(job_rax) - movl tmp2_w, _result_digest+1*16(job_rax) - - pop %rbx - - ret - -.return_null: - xor job_rax, job_rax - pop %rbx - ret -ENDPROC(sha1_mb_mgr_get_comp_job_avx2) - -.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 -.align 16 -clear_low_nibble: -.octa 0x000000000000000000000000FFFFFFF0 - -.section .rodata.cst8, "aM", @progbits, 8 -.align 8 -one: -.quad 1 -two: -.quad 2 -three: -.quad 3 -four: -.quad 4 -five: -.quad 5 -six: -.quad 6 -seven: -.quad 7 diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c deleted file mode 100644 index d2add0d35f43..000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Initialization code for multi buffer SHA1 algorithm for AVX2 - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Tim Chen <tim.c.chen@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. 
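The flush and get_comp_job paths above reassemble a finished lane's digest from the transposed sha1_args_x8 layout: digests are stored as digest[word][lane], so word w of lane i lives at byte offset w*32 + i*4, which is what the _args_digest+N*32(state, idx, 4) addressing expresses. A userspace sketch of that gather; the array and function names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define NUM_LANES 8
#define DIGEST_WORDS 5

/* Transposed layout, as in struct sha1_args_x8: one row per digest word,
 * one column per lane, so the SIMD code can update 8 lanes at once. */
static uint32_t args_digest[DIGEST_WORDS][NUM_LANES];

/* Copy lane "idx" out into a contiguous 5-word digest, the scalar
 * equivalent of the vmovd/vpinsrd sequence above. */
static void gather_lane_digest(int idx, uint32_t out[DIGEST_WORDS])
{
	for (int w = 0; w < DIGEST_WORDS; w++)
		out[w] = args_digest[w][idx];	/* byte offset w*32 + idx*4 */
}

int main(void)
{
	uint32_t digest[DIGEST_WORDS];

	for (int w = 0; w < DIGEST_WORDS; w++)
		for (int i = 0; i < NUM_LANES; i++)
			args_digest[w][i] = (uint32_t)(w * 0x10 + i);	/* dummy data */

	gather_lane_digest(3, digest);
	for (int w = 0; w < DIGEST_WORDS; w++)
		printf("%08x ", digest[w]);
	putchar('\n');
	return 0;
}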
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "sha1_mb_mgr.h" - -void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state) -{ - unsigned int j; - state->unused_lanes = 0xF76543210ULL; - for (j = 0; j < 8; j++) { - state->lens[j] = 0xFFFFFFFF; - state->ldata[j].job_in_lane = NULL; - } -} diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S deleted file mode 100644 index 7a93b1c0d69a..000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Buffer submit code for multi buffer SHA1 algorithm - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * James Guilford <james.guilford@intel.com> - * Tim Chen <tim.c.chen@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
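sha1_mb_mgr_init_avx2() above seeds unused_lanes with 0xF76543210: the free lane numbers 0 through 7 packed one per nibble, with 0xF on top as a sentinel. The submit path pops the low nibble (the "and $0xF" / "shr $4" pair) and job completion pushes the lane back ("shl $4" / "or"). A small C model of that nibble stack; the helper names are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Pop the lowest nibble: the next free lane. */
static unsigned pop_lane(uint64_t *unused_lanes)
{
	unsigned lane = (unsigned)(*unused_lanes & 0xF);

	*unused_lanes >>= 4;
	return lane;
}

/* Push a lane back onto the stack when its job completes. */
static void push_lane(uint64_t *unused_lanes, unsigned lane)
{
	*unused_lanes = (*unused_lanes << 4) | lane;
}

/* With every lane free, the 0xF sentinel sits in the nibble covering bit 35,
 * which is the "bt $(32+3)" test used by the flush and get_comp_job paths. */
static int all_lanes_free(uint64_t unused_lanes)
{
	return (int)((unused_lanes >> (32 + 3)) & 1);
}

int main(void)
{
	uint64_t unused_lanes = 0xF76543210ULL;	/* initial state from init_avx2 */

	unsigned a = pop_lane(&unused_lanes);	/* lane 0 */
	unsigned b = pop_lane(&unused_lanes);	/* lane 1 */
	printf("got lanes %u and %u, all free: %d\n", a, b,
	       all_lanes_free(unused_lanes));	/* 0 and 1, not all free */

	push_lane(&unused_lanes, b);
	push_lane(&unused_lanes, a);
	printf("after returning them, all free: %d\n",
	       all_lanes_free(unused_lanes));	/* back to 0xF76543210 */
	return 0;
}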
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <linux/linkage.h> -#include <asm/frame.h> -#include "sha1_mb_mgr_datastruct.S" - - -.extern sha1_x8_avx - -# LINUX register definitions -arg1 = %rdi -arg2 = %rsi -size_offset = %rcx -tmp2 = %rcx -extra_blocks = %rdx - -# Common definitions -#define state arg1 -#define job %rsi -#define len2 arg2 -#define p2 arg2 - -# idx must be a register not clobberred by sha1_x8_avx2 -idx = %r8 -DWORD_idx = %r8d -last_len = %r8 - -p = %r11 -start_offset = %r11 - -unused_lanes = %rbx -BYTE_unused_lanes = %bl - -job_rax = %rax -len = %rax -DWORD_len = %eax - -lane = %r12 -tmp3 = %r12 - -tmp = %r9 -DWORD_tmp = %r9d - -lane_data = %r10 - -# JOB* submit_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job) -# arg 1 : rcx : state -# arg 2 : rdx : job -ENTRY(sha1_mb_mgr_submit_avx2) - FRAME_BEGIN - push %rbx - push %r12 - - mov _unused_lanes(state), unused_lanes - mov unused_lanes, lane - and $0xF, lane - shr $4, unused_lanes - imul $_LANE_DATA_size, lane, lane_data - movl $STS_BEING_PROCESSED, _status(job) - lea _ldata(state, lane_data), lane_data - mov unused_lanes, _unused_lanes(state) - movl _len(job), DWORD_len - - mov job, _job_in_lane(lane_data) - shl $4, len - or lane, len - - movl DWORD_len, _lens(state , lane, 4) - - # Load digest words from result_digest - vmovdqu _result_digest(job), %xmm0 - mov _result_digest+1*16(job), DWORD_tmp - vmovd %xmm0, _args_digest(state, lane, 4) - vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4) - vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4) - vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4) - movl DWORD_tmp, _args_digest+4*32(state , lane, 4) - - mov _buffer(job), p - mov p, _args_data_ptr(state, lane, 8) - - cmp $0xF, unused_lanes - jne return_null - -start_loop: - # Find min length - vmovdqa _lens(state), %xmm0 - vmovdqa _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword - - vmovd %xmm2, DWORD_idx - mov idx, len2 - and $0xF, idx - shr $4, len2 - jz len_is_0 - - vpand clear_low_nibble(%rip), %xmm2, %xmm2 - vpshufd $0, %xmm2, %xmm2 - - vpsubd %xmm2, %xmm0, %xmm0 - vpsubd %xmm2, %xmm1, %xmm1 - - vmovdqa %xmm0, _lens + 0*16(state) - vmovdqa %xmm1, _lens + 1*16(state) - - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha1_x8_avx2 - - # state and idx are intact - -len_is_0: - # 
process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - mov _unused_lanes(state), unused_lanes - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state, idx, 4) - - vmovd _args_digest(state, idx, 4), %xmm0 - vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0 - movl _args_digest+4*32(state, idx, 4), DWORD_tmp - - vmovdqu %xmm0, _result_digest(job_rax) - movl DWORD_tmp, _result_digest+1*16(job_rax) - -return: - pop %r12 - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return - -ENDPROC(sha1_mb_mgr_submit_avx2) - -.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 -.align 16 -clear_low_nibble: - .octa 0x000000000000000000000000FFFFFFF0 diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S deleted file mode 100644 index 20f77aa633de..000000000000 --- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S +++ /dev/null @@ -1,492 +0,0 @@ -/* - * Multi-buffer SHA1 algorithm hash compute routine - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * James Guilford <james.guilford@intel.com> - * Tim Chen <tim.c.chen@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <linux/linkage.h> -#include "sha1_mb_mgr_datastruct.S" - -## code to compute oct SHA1 using SSE-256 -## outer calling routine takes care of save and restore of XMM registers - -## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15# ymm0-15 -## -## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15 -## Linux preserves: rdi rbp r8 -## -## clobbers ymm0-15 - - -# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1 -# "transpose" data in {r0...r7} using temps {t0...t1} -# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7} -# r0 = {a7 a6 a5 a4 a3 a2 a1 a0} -# r1 = {b7 b6 b5 b4 b3 b2 b1 b0} -# r2 = {c7 c6 c5 c4 c3 c2 c1 c0} -# r3 = {d7 d6 d5 d4 d3 d2 d1 d0} -# r4 = {e7 e6 e5 e4 e3 e2 e1 e0} -# r5 = {f7 f6 f5 f4 f3 f2 f1 f0} -# r6 = {g7 g6 g5 g4 g3 g2 g1 g0} -# r7 = {h7 h6 h5 h4 h3 h2 h1 h0} -# -# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7} -# r0 = {h0 g0 f0 e0 d0 c0 b0 a0} -# r1 = {h1 g1 f1 e1 d1 c1 b1 a1} -# r2 = {h2 g2 f2 e2 d2 c2 b2 a2} -# r3 = {h3 g3 f3 e3 d3 c3 b3 a3} -# r4 = {h4 g4 f4 e4 d4 c4 b4 a4} -# r5 = {h5 g5 f5 e5 d5 c5 b5 a5} -# r6 = {h6 g6 f6 e6 d6 c6 b6 a6} -# r7 = {h7 g7 f7 e7 d7 c7 b7 a7} -# - -.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1 - # process top half (r0..r3) {a...d} - vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0} - vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2} - vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0} - vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2} - vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1} - vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2} - vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3} - vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0} - - # use r2 in place of t0 - # process bottom half (r4..r7) {e...h} - vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0} - vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2} - vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0} - vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2} - vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1} - vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2} - vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3} - vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0} - - vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6 - vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2 - vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5 - vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1 - vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7 - vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3 - vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4 - vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0 - -.endm -## -## Magic functions defined in FIPS 180-1 -## -# macro MAGIC_F0 F,B,C,D,T ## F = (D ^ (B & (C ^ D))) -.macro MAGIC_F0 regF regB regC regD regT - vpxor \regD, \regC, \regF - vpand \regB, \regF, \regF - vpxor \regD, \regF, \regF -.endm - -# macro MAGIC_F1 F,B,C,D,T ## F = (B ^ C ^ D) -.macro MAGIC_F1 regF regB regC 
regD regT - vpxor \regC, \regD, \regF - vpxor \regB, \regF, \regF -.endm - -# macro MAGIC_F2 F,B,C,D,T ## F = ((B & C) | (B & D) | (C & D)) -.macro MAGIC_F2 regF regB regC regD regT - vpor \regC, \regB, \regF - vpand \regC, \regB, \regT - vpand \regD, \regF, \regF - vpor \regT, \regF, \regF -.endm - -# macro MAGIC_F3 F,B,C,D,T ## F = (B ^ C ^ D) -.macro MAGIC_F3 regF regB regC regD regT - MAGIC_F1 \regF,\regB,\regC,\regD,\regT -.endm - -# PROLD reg, imm, tmp -.macro PROLD reg imm tmp - vpsrld $(32-\imm), \reg, \tmp - vpslld $\imm, \reg, \reg - vpor \tmp, \reg, \reg -.endm - -.macro PROLD_nd reg imm tmp src - vpsrld $(32-\imm), \src, \tmp - vpslld $\imm, \src, \reg - vpor \tmp, \reg, \reg -.endm - -.macro SHA1_STEP_00_15 regA regB regC regD regE regT regF memW immCNT MAGIC - vpaddd \immCNT, \regE, \regE - vpaddd \memW*32(%rsp), \regE, \regE - PROLD_nd \regT, 5, \regF, \regA - vpaddd \regT, \regE, \regE - \MAGIC \regF, \regB, \regC, \regD, \regT - PROLD \regB, 30, \regT - vpaddd \regF, \regE, \regE -.endm - -.macro SHA1_STEP_16_79 regA regB regC regD regE regT regF memW immCNT MAGIC - vpaddd \immCNT, \regE, \regE - offset = ((\memW - 14) & 15) * 32 - vmovdqu offset(%rsp), W14 - vpxor W14, W16, W16 - offset = ((\memW - 8) & 15) * 32 - vpxor offset(%rsp), W16, W16 - offset = ((\memW - 3) & 15) * 32 - vpxor offset(%rsp), W16, W16 - vpsrld $(32-1), W16, \regF - vpslld $1, W16, W16 - vpor W16, \regF, \regF - - ROTATE_W - - offset = ((\memW - 0) & 15) * 32 - vmovdqu \regF, offset(%rsp) - vpaddd \regF, \regE, \regE - PROLD_nd \regT, 5, \regF, \regA - vpaddd \regT, \regE, \regE - \MAGIC \regF,\regB,\regC,\regD,\regT ## FUN = MAGIC_Fi(B,C,D) - PROLD \regB,30, \regT - vpaddd \regF, \regE, \regE -.endm - -######################################################################## -######################################################################## -######################################################################## - -## FRAMESZ plus pushes must be an odd multiple of 8 -YMM_SAVE = (15-15)*32 -FRAMESZ = 32*16 + YMM_SAVE -_YMM = FRAMESZ - YMM_SAVE - -#define VMOVPS vmovups - -IDX = %rax -inp0 = %r9 -inp1 = %r10 -inp2 = %r11 -inp3 = %r12 -inp4 = %r13 -inp5 = %r14 -inp6 = %r15 -inp7 = %rcx -arg1 = %rdi -arg2 = %rsi -RSP_SAVE = %rdx - -# ymm0 A -# ymm1 B -# ymm2 C -# ymm3 D -# ymm4 E -# ymm5 F AA -# ymm6 T0 BB -# ymm7 T1 CC -# ymm8 T2 DD -# ymm9 T3 EE -# ymm10 T4 TMP -# ymm11 T5 FUN -# ymm12 T6 K -# ymm13 T7 W14 -# ymm14 T8 W15 -# ymm15 T9 W16 - - -A = %ymm0 -B = %ymm1 -C = %ymm2 -D = %ymm3 -E = %ymm4 -F = %ymm5 -T0 = %ymm6 -T1 = %ymm7 -T2 = %ymm8 -T3 = %ymm9 -T4 = %ymm10 -T5 = %ymm11 -T6 = %ymm12 -T7 = %ymm13 -T8 = %ymm14 -T9 = %ymm15 - -AA = %ymm5 -BB = %ymm6 -CC = %ymm7 -DD = %ymm8 -EE = %ymm9 -TMP = %ymm10 -FUN = %ymm11 -K = %ymm12 -W14 = %ymm13 -W15 = %ymm14 -W16 = %ymm15 - -.macro ROTATE_ARGS - TMP_ = E - E = D - D = C - C = B - B = A - A = TMP_ -.endm - -.macro ROTATE_W -TMP_ = W16 -W16 = W15 -W15 = W14 -W14 = TMP_ -.endm - -# 8 streams x 5 32bit words per digest x 4 bytes per word -#define DIGEST_SIZE (8*5*4) - -.align 32 - -# void sha1_x8_avx2(void **input_data, UINT128 *digest, UINT32 size) -# arg 1 : pointer to array[4] of pointer to input data -# arg 2 : size (in blocks) ;; assumed to be >= 1 -# -ENTRY(sha1_x8_avx2) - - # save callee-saved clobbered registers to comply with C function ABI - push %r12 - push %r13 - push %r14 - push %r15 - - #save rsp - mov %rsp, RSP_SAVE - sub $FRAMESZ, %rsp - - #align rsp to 32 Bytes - and $~0x1F, %rsp - - ## Initialize digests - vmovdqu 0*32(arg1), A - 
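The MAGIC_F0..MAGIC_F3 macros above are the four round functions from FIPS 180-1, and each SHA1_STEP_* macro performs one round per lane: E += rol(A,5) + F(B,C,D) + K + W, then B is rotated left by 30 and the registers are renamed. A scalar C version of a single round, for comparison with the vectorized macros; it processes one message, not eight, and the function names are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t x, int n)	/* what the PROLD macro computes */
{
	return (x << n) | (x >> (32 - n));
}

/* Round functions F0..F3, matching MAGIC_F0..MAGIC_F3 */
static uint32_t magic_f(int round, uint32_t b, uint32_t c, uint32_t d)
{
	if (round < 20)
		return d ^ (b & (c ^ d));		/* MAGIC_F0 */
	if (round < 40)
		return b ^ c ^ d;			/* MAGIC_F1 */
	if (round < 60)
		return (b & c) | (b & d) | (c & d);	/* MAGIC_F2 */
	return b ^ c ^ d;				/* MAGIC_F3 */
}

/* Round constants, matching K00_19, K20_39, K40_59, K60_79 */
static uint32_t round_k(int round)
{
	if (round < 20) return 0x5A827999;
	if (round < 40) return 0x6ED9EBA1;
	if (round < 60) return 0x8F1BBCDC;
	return 0xCA62C1D6;
}

/* One SHA-1 round: the scalar counterpart of SHA1_STEP_00_15/16_79. */
static void sha1_round(int round, uint32_t h[5], uint32_t w)
{
	uint32_t a = h[0], b = h[1], c = h[2], d = h[3], e = h[4];
	uint32_t t = rol32(a, 5) + magic_f(round, b, c, d) + e + round_k(round) + w;

	h[4] = d;		/* the ROTATE_ARGS register renaming */
	h[3] = c;
	h[2] = rol32(b, 30);
	h[1] = a;
	h[0] = t;
}

int main(void)
{
	uint32_t h[5] = { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 };

	sha1_round(0, h, 0);	/* one round on an all-zero schedule word */
	printf("%08x %08x %08x %08x %08x\n", h[0], h[1], h[2], h[3], h[4]);
	return 0;
}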
vmovdqu 1*32(arg1), B - vmovdqu 2*32(arg1), C - vmovdqu 3*32(arg1), D - vmovdqu 4*32(arg1), E - - ## transpose input onto stack - mov _data_ptr+0*8(arg1),inp0 - mov _data_ptr+1*8(arg1),inp1 - mov _data_ptr+2*8(arg1),inp2 - mov _data_ptr+3*8(arg1),inp3 - mov _data_ptr+4*8(arg1),inp4 - mov _data_ptr+5*8(arg1),inp5 - mov _data_ptr+6*8(arg1),inp6 - mov _data_ptr+7*8(arg1),inp7 - - xor IDX, IDX -lloop: - vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), F - I=0 -.rep 2 - VMOVPS (inp0, IDX), T0 - VMOVPS (inp1, IDX), T1 - VMOVPS (inp2, IDX), T2 - VMOVPS (inp3, IDX), T3 - VMOVPS (inp4, IDX), T4 - VMOVPS (inp5, IDX), T5 - VMOVPS (inp6, IDX), T6 - VMOVPS (inp7, IDX), T7 - - TRANSPOSE8 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9 - vpshufb F, T0, T0 - vmovdqu T0, (I*8)*32(%rsp) - vpshufb F, T1, T1 - vmovdqu T1, (I*8+1)*32(%rsp) - vpshufb F, T2, T2 - vmovdqu T2, (I*8+2)*32(%rsp) - vpshufb F, T3, T3 - vmovdqu T3, (I*8+3)*32(%rsp) - vpshufb F, T4, T4 - vmovdqu T4, (I*8+4)*32(%rsp) - vpshufb F, T5, T5 - vmovdqu T5, (I*8+5)*32(%rsp) - vpshufb F, T6, T6 - vmovdqu T6, (I*8+6)*32(%rsp) - vpshufb F, T7, T7 - vmovdqu T7, (I*8+7)*32(%rsp) - add $32, IDX - I = (I+1) -.endr - # save old digests - vmovdqu A,AA - vmovdqu B,BB - vmovdqu C,CC - vmovdqu D,DD - vmovdqu E,EE - -## -## perform 0-79 steps -## - vmovdqu K00_19(%rip), K -## do rounds 0...15 - I = 0 -.rep 16 - SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0 - ROTATE_ARGS - I = (I+1) -.endr - -## do rounds 16...19 - vmovdqu ((16 - 16) & 15) * 32 (%rsp), W16 - vmovdqu ((16 - 15) & 15) * 32 (%rsp), W15 -.rep 4 - SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0 - ROTATE_ARGS - I = (I+1) -.endr - -## do rounds 20...39 - vmovdqu K20_39(%rip), K -.rep 20 - SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1 - ROTATE_ARGS - I = (I+1) -.endr - -## do rounds 40...59 - vmovdqu K40_59(%rip), K -.rep 20 - SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2 - ROTATE_ARGS - I = (I+1) -.endr - -## do rounds 60...79 - vmovdqu K60_79(%rip), K -.rep 20 - SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3 - ROTATE_ARGS - I = (I+1) -.endr - - vpaddd AA,A,A - vpaddd BB,B,B - vpaddd CC,C,C - vpaddd DD,D,D - vpaddd EE,E,E - - sub $1, arg2 - jne lloop - - # write out digests - vmovdqu A, 0*32(arg1) - vmovdqu B, 1*32(arg1) - vmovdqu C, 2*32(arg1) - vmovdqu D, 3*32(arg1) - vmovdqu E, 4*32(arg1) - - # update input pointers - add IDX, inp0 - add IDX, inp1 - add IDX, inp2 - add IDX, inp3 - add IDX, inp4 - add IDX, inp5 - add IDX, inp6 - add IDX, inp7 - mov inp0, _data_ptr (arg1) - mov inp1, _data_ptr + 1*8(arg1) - mov inp2, _data_ptr + 2*8(arg1) - mov inp3, _data_ptr + 3*8(arg1) - mov inp4, _data_ptr + 4*8(arg1) - mov inp5, _data_ptr + 5*8(arg1) - mov inp6, _data_ptr + 6*8(arg1) - mov inp7, _data_ptr + 7*8(arg1) - - ################ - ## Postamble - - mov RSP_SAVE, %rsp - - # restore callee-saved clobbered registers - pop %r15 - pop %r14 - pop %r13 - pop %r12 - - ret -ENDPROC(sha1_x8_avx2) - - -.section .rodata.cst32.K00_19, "aM", @progbits, 32 -.align 32 -K00_19: -.octa 0x5A8279995A8279995A8279995A827999 -.octa 0x5A8279995A8279995A8279995A827999 - -.section .rodata.cst32.K20_39, "aM", @progbits, 32 -.align 32 -K20_39: -.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1 -.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1 - -.section .rodata.cst32.K40_59, "aM", @progbits, 32 -.align 32 -K40_59: -.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC -.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC - -.section .rodata.cst32.K60_79, "aM", @progbits, 32 -.align 32 -K60_79: -.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6 -.octa 
0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6 - -.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 -.align 32 -PSHUFFLE_BYTE_FLIP_MASK: -.octa 0x0c0d0e0f08090a0b0405060700010203 -.octa 0x0c0d0e0f08090a0b0405060700010203 diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile deleted file mode 100644 index 53ad6e7db747..000000000000 --- a/arch/x86/crypto/sha256-mb/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Arch-specific CryptoAPI modules. -# - -OBJECT_FILES_NON_STANDARD := y - -avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ - $(comma)4)$(comma)%ymm2,yes,no) -ifeq ($(avx2_supported),yes) - obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o - sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \ - sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o -endif diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c deleted file mode 100644 index 97c5fc43e115..000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb.c +++ /dev/null @@ -1,1013 +0,0 @@ -/* - * Multi buffer SHA256 algorithm Glue Code - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/cryptohash.h> -#include <linux/types.h> -#include <linux/list.h> -#include <crypto/scatterwalk.h> -#include <crypto/sha.h> -#include <crypto/mcryptd.h> -#include <crypto/crypto_wq.h> -#include <asm/byteorder.h> -#include <linux/hardirq.h> -#include <asm/fpu/api.h> -#include "sha256_mb_ctx.h" - -#define FLUSH_INTERVAL 1000 /* in usec */ - -static struct mcryptd_alg_state sha256_mb_alg_state; - -struct sha256_mb_ctx { - struct mcryptd_ahash *mcryptd_tfm; -}; - -static inline struct mcryptd_hash_request_ctx - *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx) -{ - struct ahash_request *areq; - - areq = container_of((void *) hash_ctx, struct ahash_request, __ctx); - return container_of(areq, struct mcryptd_hash_request_ctx, areq); -} - -static inline struct ahash_request - *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) -{ - return container_of((void *) ctx, struct ahash_request, __ctx); -} - -static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, - struct ahash_request *areq) -{ - rctx->flag = HASH_UPDATE; -} - -static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state); -static asmlinkage struct job_sha256* (*sha256_job_mgr_submit) - (struct sha256_mb_mgr *state, struct job_sha256 *job); -static asmlinkage struct job_sha256* (*sha256_job_mgr_flush) - (struct sha256_mb_mgr *state); -static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job) - (struct sha256_mb_mgr *state); - -inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], - uint64_t total_len) -{ - uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1); - - memset(&padblock[i], 0, SHA256_BLOCK_SIZE); - padblock[i] = 0x80; - - i += ((SHA256_BLOCK_SIZE - 1) & - (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1))) - + 1 + SHA256_PADLENGTHFIELD_SIZE; - -#if SHA256_PADLENGTHFIELD_SIZE == 16 - *((uint64_t *) &padblock[i - 16]) = 0; -#endif - - *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3); - - /* Number of extra blocks to hash */ - return i >> SHA256_LOG2_BLOCK_SIZE; -} - -static struct sha256_hash_ctx - *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr, - struct sha256_hash_ctx *ctx) -{ - while (ctx) { - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Clear PROCESSING bit */ - ctx->status = HASH_CTX_STS_COMPLETE; - return ctx; - } - - /* - * If the extra blocks are empty, begin hashing what remains - * in the user's buffer. - */ - if (ctx->partial_block_buffer_length == 0 && - ctx->incoming_buffer_length) { - - const void *buffer = ctx->incoming_buffer; - uint32_t len = ctx->incoming_buffer_length; - uint32_t copy_len; - - /* - * Only entire blocks can be hashed. - * Copy remainder to extra blocks buffer. 
- */ - copy_len = len & (SHA256_BLOCK_SIZE-1); - - if (copy_len) { - len -= copy_len; - memcpy(ctx->partial_block_buffer, - ((const char *) buffer + len), - copy_len); - ctx->partial_block_buffer_length = copy_len; - } - - ctx->incoming_buffer_length = 0; - - /* len should be a multiple of the block size now */ - assert((len % SHA256_BLOCK_SIZE) == 0); - - /* Set len to the number of blocks to be hashed */ - len >>= SHA256_LOG2_BLOCK_SIZE; - - if (len) { - - ctx->job.buffer = (uint8_t *) buffer; - ctx->job.len = len; - ctx = (struct sha256_hash_ctx *) - sha256_job_mgr_submit(&mgr->mgr, &ctx->job); - continue; - } - } - - /* - * If the extra blocks are not empty, then we are - * either on the last block(s) or we need more - * user input before continuing. - */ - if (ctx->status & HASH_CTX_STS_LAST) { - - uint8_t *buf = ctx->partial_block_buffer; - uint32_t n_extra_blocks = - sha256_pad(buf, ctx->total_length); - - ctx->status = (HASH_CTX_STS_PROCESSING | - HASH_CTX_STS_COMPLETE); - ctx->job.buffer = buf; - ctx->job.len = (uint32_t) n_extra_blocks; - ctx = (struct sha256_hash_ctx *) - sha256_job_mgr_submit(&mgr->mgr, &ctx->job); - continue; - } - - ctx->status = HASH_CTX_STS_IDLE; - return ctx; - } - - return NULL; -} - -static struct sha256_hash_ctx - *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr) -{ - /* - * If get_comp_job returns NULL, there are no jobs complete. - * If get_comp_job returns a job, verify that it is safe to return to - * the user. If it is not ready, resubmit the job to finish processing. - * If sha256_ctx_mgr_resubmit returned a job, it is ready to be - * returned. Otherwise, all jobs currently being managed by the - * hash_ctx_mgr still need processing. - */ - struct sha256_hash_ctx *ctx; - - ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr); - return sha256_ctx_mgr_resubmit(mgr, ctx); -} - -static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr) -{ - sha256_job_mgr_init(&mgr->mgr); -} - -static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr, - struct sha256_hash_ctx *ctx, - const void *buffer, - uint32_t len, - int flags) -{ - if (flags & ~(HASH_UPDATE | HASH_LAST)) { - /* User should not pass anything other than UPDATE or LAST */ - ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; - return ctx; - } - - if (ctx->status & HASH_CTX_STS_PROCESSING) { - /* Cannot submit to a currently processing job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; - return ctx; - } - - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Cannot update a finished job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; - return ctx; - } - - /* If we made it here, there was no error during this call to submit */ - ctx->error = HASH_CTX_ERROR_NONE; - - /* Store buffer ptr info from user */ - ctx->incoming_buffer = buffer; - ctx->incoming_buffer_length = len; - - /* - * Store the user's request flags and mark this ctx as currently - * being processed. - */ - ctx->status = (flags & HASH_LAST) ? - (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : - HASH_CTX_STS_PROCESSING; - - /* Advance byte counter */ - ctx->total_length += len; - - /* - * If there is anything currently buffered in the extra blocks, - * append to it until it contains a whole block. - * Or if the user's buffer contains less than a whole block, - * append as much as possible to the extra block. 
- */ - if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) { - /* - * Compute how many bytes to copy from user buffer into - * extra block - */ - uint32_t copy_len = SHA256_BLOCK_SIZE - - ctx->partial_block_buffer_length; - if (len < copy_len) - copy_len = len; - - if (copy_len) { - /* Copy and update relevant pointers and counters */ - memcpy( - &ctx->partial_block_buffer[ctx->partial_block_buffer_length], - buffer, copy_len); - - ctx->partial_block_buffer_length += copy_len; - ctx->incoming_buffer = (const void *) - ((const char *)buffer + copy_len); - ctx->incoming_buffer_length = len - copy_len; - } - - /* The extra block should never contain more than 1 block */ - assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE); - - /* - * If the extra block buffer contains exactly 1 block, - * it can be hashed. - */ - if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) { - ctx->partial_block_buffer_length = 0; - - ctx->job.buffer = ctx->partial_block_buffer; - ctx->job.len = 1; - ctx = (struct sha256_hash_ctx *) - sha256_job_mgr_submit(&mgr->mgr, &ctx->job); - } - } - - return sha256_ctx_mgr_resubmit(mgr, ctx); -} - -static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr) -{ - struct sha256_hash_ctx *ctx; - - while (1) { - ctx = (struct sha256_hash_ctx *) - sha256_job_mgr_flush(&mgr->mgr); - - /* If flush returned 0, there are no more jobs in flight. */ - if (!ctx) - return NULL; - - /* - * If flush returned a job, resubmit the job to finish - * processing. - */ - ctx = sha256_ctx_mgr_resubmit(mgr, ctx); - - /* - * If sha256_ctx_mgr_resubmit returned a job, it is ready to - * be returned. Otherwise, all jobs currently being managed by - * the sha256_ctx_mgr still need processing. Loop. - */ - if (ctx) - return ctx; - } -} - -static int sha256_mb_init(struct ahash_request *areq) -{ - struct sha256_hash_ctx *sctx = ahash_request_ctx(areq); - - hash_ctx_init(sctx); - sctx->job.result_digest[0] = SHA256_H0; - sctx->job.result_digest[1] = SHA256_H1; - sctx->job.result_digest[2] = SHA256_H2; - sctx->job.result_digest[3] = SHA256_H3; - sctx->job.result_digest[4] = SHA256_H4; - sctx->job.result_digest[5] = SHA256_H5; - sctx->job.result_digest[6] = SHA256_H6; - sctx->job.result_digest[7] = SHA256_H7; - sctx->total_length = 0; - sctx->partial_block_buffer_length = 0; - sctx->status = HASH_CTX_STS_IDLE; - - return 0; -} - -static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx) -{ - int i; - struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq); - __be32 *dst = (__be32 *) rctx->out; - - for (i = 0; i < 8; ++i) - dst[i] = cpu_to_be32(sctx->job.result_digest[i]); - - return 0; -} - -static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, - struct mcryptd_alg_cstate *cstate, bool flush) -{ - int flag = HASH_UPDATE; - int nbytes, err = 0; - struct mcryptd_hash_request_ctx *rctx = *ret_rctx; - struct sha256_hash_ctx *sha_ctx; - - /* more work ? 
*/ - while (!(rctx->flag & HASH_DONE)) { - nbytes = crypto_ahash_walk_done(&rctx->walk, 0); - if (nbytes < 0) { - err = nbytes; - goto out; - } - /* check if the walk is done */ - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - if (rctx->flag & HASH_FINAL) - flag |= HASH_LAST; - - } - sha_ctx = (struct sha256_hash_ctx *) - ahash_request_ctx(&rctx->areq); - kernel_fpu_begin(); - sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, - rctx->walk.data, nbytes, flag); - if (!sha_ctx) { - if (flush) - sha_ctx = sha256_ctx_mgr_flush(cstate->mgr); - } - kernel_fpu_end(); - if (sha_ctx) - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - else { - rctx = NULL; - goto out; - } - } - - /* copy the results */ - if (rctx->flag & HASH_FINAL) - sha256_mb_set_results(rctx); - -out: - *ret_rctx = rctx; - return err; -} - -static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate, - int err) -{ - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha256_hash_ctx *sha_ctx; - struct mcryptd_hash_request_ctx *req_ctx; - int ret; - - /* remove from work list */ - spin_lock(&cstate->work_lock); - list_del(&rctx->waiter); - spin_unlock(&cstate->work_lock); - - if (irqs_disabled()) - rctx->complete(&req->base, err); - else { - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); - } - - /* check to see if there are other jobs that are done */ - sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr); - while (sha_ctx) { - req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&req_ctx, cstate, false); - if (req_ctx) { - spin_lock(&cstate->work_lock); - list_del(&req_ctx->waiter); - spin_unlock(&cstate->work_lock); - - req = cast_mcryptd_ctx_to_req(req_ctx); - if (irqs_disabled()) - req_ctx->complete(&req->base, ret); - else { - local_bh_disable(); - req_ctx->complete(&req->base, ret); - local_bh_enable(); - } - } - sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr); - } - - return 0; -} - -static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate) -{ - unsigned long next_flush; - unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); - - /* initialize tag */ - rctx->tag.arrival = jiffies; /* tag the arrival time */ - rctx->tag.seq_num = cstate->next_seq_num++; - next_flush = rctx->tag.arrival + delay; - rctx->tag.expire = next_flush; - - spin_lock(&cstate->work_lock); - list_add_tail(&rctx->waiter, &cstate->work_list); - spin_unlock(&cstate->work_lock); - - mcryptd_arm_flusher(cstate, delay); -} - -static int sha256_mb_update(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha256_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha256_hash_ctx *sha_ctx; - int ret = 0, nbytes; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) - rctx->flag |= HASH_DONE; - - /* submit */ - sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq); - sha256_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, - nbytes, HASH_UPDATE); - 
kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha256_mb_finup(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha256_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha256_hash_ctx *sha_ctx; - int ret = 0, flag = HASH_UPDATE, nbytes; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - flag = HASH_LAST; - } - - /* submit */ - rctx->flag |= HASH_FINAL; - sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq); - sha256_mb_add_list(rctx, cstate); - - kernel_fpu_begin(); - sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, - nbytes, flag); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha256_mb_final(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, - areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha256_mb_alg_state.alg_cstate); - - struct sha256_hash_ctx *sha_ctx; - int ret = 0; - u8 data; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - rctx->flag |= HASH_DONE | HASH_FINAL; - - sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq); - /* flag HASH_FINAL and 0 data size */ - sha256_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, - HASH_LAST); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha256_mb_export(struct ahash_request *areq, void *out) -{ - struct sha256_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(out, sctx, sizeof(*sctx)); - - return 0; -} - -static int sha256_mb_import(struct ahash_request *areq, const void *in) -{ - struct sha256_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(sctx, in, sizeof(*sctx)); - - return 0; -} - -static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm) -{ - struct mcryptd_ahash *mcryptd_tfm; - struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm); - struct mcryptd_hash_ctx *mctx; - 
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb", - CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(mcryptd_tfm)) - return PTR_ERR(mcryptd_tfm); - mctx = crypto_ahash_ctx(&mcryptd_tfm->base); - mctx->alg_state = &sha256_mb_alg_state; - ctx->mcryptd_tfm = mcryptd_tfm; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(&mcryptd_tfm->base)); - - return 0; -} - -static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm) -{ - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - sizeof(struct sha256_hash_ctx)); - - return 0; -} - -static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static struct ahash_alg sha256_mb_areq_alg = { - .init = sha256_mb_init, - .update = sha256_mb_update, - .final = sha256_mb_final, - .finup = sha256_mb_finup, - .export = sha256_mb_export, - .import = sha256_mb_import, - .halg = { - .digestsize = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct sha256_hash_ctx), - .base = { - .cra_name = "__sha256-mb", - .cra_driver_name = "__intel_sha256-mb", - .cra_priority = 100, - /* - * use ASYNC flag as some buffers in multi-buffer - * algo may not have completed before hashing thread - * sleep - */ - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_INTERNAL, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha256_mb_areq_alg.halg.base.cra_list), - .cra_init = sha256_mb_areq_init_tfm, - .cra_exit = sha256_mb_areq_exit_tfm, - .cra_ctxsize = sizeof(struct sha256_hash_ctx), - } - } -}; - -static int sha256_mb_async_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_init(mcryptd_req); -} - -static int sha256_mb_async_update(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_update(mcryptd_req); -} - -static int sha256_mb_async_finup(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_finup(mcryptd_req); -} - -static int sha256_mb_async_final(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, 
&mcryptd_tfm->base); - return crypto_ahash_final(mcryptd_req); -} - -static int sha256_mb_async_digest(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_digest(mcryptd_req); -} - -static int sha256_mb_async_export(struct ahash_request *req, void *out) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_export(mcryptd_req, out); -} - -static int sha256_mb_async_import(struct ahash_request *req, const void *in) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm); - struct mcryptd_hash_request_ctx *rctx; - struct ahash_request *areq; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - rctx = ahash_request_ctx(mcryptd_req); - areq = &rctx->areq; - - ahash_request_set_tfm(areq, child); - ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, - rctx->complete, req); - - return crypto_ahash_import(mcryptd_req, in); -} - -static struct ahash_alg sha256_mb_async_alg = { - .init = sha256_mb_async_init, - .update = sha256_mb_async_update, - .final = sha256_mb_async_final, - .finup = sha256_mb_async_finup, - .export = sha256_mb_async_export, - .import = sha256_mb_async_import, - .digest = sha256_mb_async_digest, - .halg = { - .digestsize = SHA256_DIGEST_SIZE, - .statesize = sizeof(struct sha256_hash_ctx), - .base = { - .cra_name = "sha256", - .cra_driver_name = "sha256_mb", - /* - * Low priority, since with few concurrent hash requests - * this is extremely slow due to the flush delay. Users - * whose workloads would benefit from this can request - * it explicitly by driver name, or can increase its - * priority at runtime using NETLINK_CRYPTO. 
- */ - .cra_priority = 50, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha256_mb_async_alg.halg.base.cra_list), - .cra_init = sha256_mb_async_init_tfm, - .cra_exit = sha256_mb_async_exit_tfm, - .cra_ctxsize = sizeof(struct sha256_mb_ctx), - .cra_alignmask = 0, - }, - }, -}; - -static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate) -{ - struct mcryptd_hash_request_ctx *rctx; - unsigned long cur_time; - unsigned long next_flush = 0; - struct sha256_hash_ctx *sha_ctx; - - - cur_time = jiffies; - - while (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - if (time_before(cur_time, rctx->tag.expire)) - break; - kernel_fpu_begin(); - sha_ctx = (struct sha256_hash_ctx *) - sha256_ctx_mgr_flush(cstate->mgr); - kernel_fpu_end(); - if (!sha_ctx) { - pr_err("sha256_mb error: nothing got" - " flushed for non-empty list\n"); - break; - } - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - sha_finish_walk(&rctx, cstate, true); - sha_complete_job(rctx, cstate, 0); - } - - if (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - /* get the hash context and then flush time */ - next_flush = rctx->tag.expire; - mcryptd_arm_flusher(cstate, get_delay(next_flush)); - } - return next_flush; -} - -static int __init sha256_mb_mod_init(void) -{ - - int cpu; - int err; - struct mcryptd_alg_cstate *cpu_state; - - /* check for dependent cpu features */ - if (!boot_cpu_has(X86_FEATURE_AVX2) || - !boot_cpu_has(X86_FEATURE_BMI2)) - return -ENODEV; - - /* initialize multibuffer structures */ - sha256_mb_alg_state.alg_cstate = alloc_percpu - (struct mcryptd_alg_cstate); - - sha256_job_mgr_init = sha256_mb_mgr_init_avx2; - sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2; - sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2; - sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2; - - if (!sha256_mb_alg_state.alg_cstate) - return -ENOMEM; - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu); - cpu_state->next_flush = 0; - cpu_state->next_seq_num = 0; - cpu_state->flusher_engaged = false; - INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); - cpu_state->cpu = cpu; - cpu_state->alg_state = &sha256_mb_alg_state; - cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr), - GFP_KERNEL); - if (!cpu_state->mgr) - goto err2; - sha256_ctx_mgr_init(cpu_state->mgr); - INIT_LIST_HEAD(&cpu_state->work_list); - spin_lock_init(&cpu_state->work_lock); - } - sha256_mb_alg_state.flusher = &sha256_mb_flusher; - - err = crypto_register_ahash(&sha256_mb_areq_alg); - if (err) - goto err2; - err = crypto_register_ahash(&sha256_mb_async_alg); - if (err) - goto err1; - - - return 0; -err1: - crypto_unregister_ahash(&sha256_mb_areq_alg); -err2: - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha256_mb_alg_state.alg_cstate); - return -ENODEV; -} - -static void __exit sha256_mb_mod_fini(void) -{ - int cpu; - struct mcryptd_alg_cstate *cpu_state; - - crypto_unregister_ahash(&sha256_mb_async_alg); - crypto_unregister_ahash(&sha256_mb_areq_alg); - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha256_mb_alg_state.alg_cstate); -} - -module_init(sha256_mb_mod_init); 
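(Editor's aside, not part of the diff: the "sha256_mb" driver removed here registers under cra_name "sha256" at priority 50, so, as the comment in sha256_mb_async_alg notes, callers normally had to ask for it by driver name. A minimal hypothetical caller is sketched below; the helper name is invented, the input is assumed to be a contiguous lowmem buffer, and error handling is trimmed.)

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sha256_mb_digest_example(const void *data, unsigned int len,
				    u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Request the multi-buffer implementation by its driver name. */
	tfm = crypto_alloc_ahash("sha256_mb", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* sha256_mb usually returns -EINPROGRESS; wait for completion. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}

(With only one request in flight this path is dominated by the FLUSH_INTERVAL delay, which is why the default priority is kept below the single-buffer sha256 drivers.)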
-module_exit(sha256_mb_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated"); - -MODULE_ALIAS_CRYPTO("sha256"); diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h deleted file mode 100644 index 7c432543dc7f..000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Header file for multi buffer SHA256 context - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _SHA_MB_CTX_INTERNAL_H -#define _SHA_MB_CTX_INTERNAL_H - -#include "sha256_mb_mgr.h" - -#define HASH_UPDATE 0x00 -#define HASH_LAST 0x01 -#define HASH_DONE 0x02 -#define HASH_FINAL 0x04 - -#define HASH_CTX_STS_IDLE 0x00 -#define HASH_CTX_STS_PROCESSING 0x01 -#define HASH_CTX_STS_LAST 0x02 -#define HASH_CTX_STS_COMPLETE 0x04 - -enum hash_ctx_error { - HASH_CTX_ERROR_NONE = 0, - HASH_CTX_ERROR_INVALID_FLAGS = -1, - HASH_CTX_ERROR_ALREADY_PROCESSING = -2, - HASH_CTX_ERROR_ALREADY_COMPLETED = -3, - -#ifdef HASH_CTX_DEBUG - HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4, -#endif -}; - - -#define hash_ctx_user_data(ctx) ((ctx)->user_data) -#define hash_ctx_digest(ctx) ((ctx)->job.result_digest) -#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING) -#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE) -#define hash_ctx_status(ctx) ((ctx)->status) -#define hash_ctx_error(ctx) ((ctx)->error) -#define hash_ctx_init(ctx) \ - do { \ - (ctx)->error = HASH_CTX_ERROR_NONE; \ - (ctx)->status = HASH_CTX_STS_COMPLETE; \ - } while (0) - - -/* Hash Constants and Typedefs */ -#define SHA256_DIGEST_LENGTH 8 -#define SHA256_LOG2_BLOCK_SIZE 6 - -#define SHA256_PADLENGTHFIELD_SIZE 8 - -#ifdef SHA_MB_DEBUG -#define assert(expr) \ -do { \ - if (unlikely(!(expr))) { \ - printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr, __FILE__, __func__, __LINE__); \ - } \ -} while (0) -#else -#define assert(expr) do {} while (0) -#endif - -struct sha256_ctx_mgr { - struct sha256_mb_mgr mgr; -}; - -/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */ - -struct sha256_hash_ctx { - /* Must be at struct offset 0 */ - struct job_sha256 job; - /* status flag */ - int status; - /* error flag */ - int error; - - uint64_t total_length; - const void *incoming_buffer; - uint32_t incoming_buffer_length; - uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2]; - uint32_t partial_block_buffer_length; - void *user_data; -}; - -#endif diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h deleted file mode 100644 index b01ae408c56d..000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Header file for multi buffer SHA256 algorithm manager - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __SHA_MB_MGR_H -#define __SHA_MB_MGR_H - -#include <linux/types.h> - -#define NUM_SHA256_DIGEST_WORDS 8 - -enum job_sts { STS_UNKNOWN = 0, - STS_BEING_PROCESSED = 1, - STS_COMPLETED = 2, - STS_INTERNAL_ERROR = 3, - STS_ERROR = 4 -}; - -struct job_sha256 { - u8 *buffer; - u32 len; - u32 result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32); - enum job_sts status; - void *user_data; -}; - -/* SHA256 out-of-order scheduler */ - -/* typedef uint32_t sha8_digest_array[8][8]; */ - -struct sha256_args_x8 { - uint32_t digest[8][8]; - uint8_t *data_ptr[8]; -}; - -struct sha256_lane_data { - struct job_sha256 *job_in_lane; -}; - -struct sha256_mb_mgr { - struct sha256_args_x8 args; - - uint32_t lens[8]; - - /* each byte is index (0...7) of unused lanes */ - uint64_t unused_lanes; - /* byte 4 is set to FF as a flag */ - struct sha256_lane_data ldata[8]; -}; - - -#define SHA256_MB_MGR_NUM_LANES_AVX2 8 - -void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state); -struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state, - struct job_sha256 *job); -struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state); -struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state); - -#endif diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S deleted file mode 100644 index 5c377bac21d0..000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Header file for multi buffer SHA256 algorithm data structure - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -# Macros for defining data structures - -# Usage example - -#START_FIELDS # JOB_AES -### name size align -#FIELD _plaintext, 8, 8 # pointer to plaintext -#FIELD _ciphertext, 8, 8 # pointer to ciphertext -#FIELD _IV, 16, 8 # IV -#FIELD _keys, 8, 8 # pointer to keys -#FIELD _len, 4, 4 # length in bytes -#FIELD _status, 4, 4 # status enumeration -#FIELD _user_data, 8, 8 # pointer to user data -#UNION _union, size1, align1, \ -# size2, align2, \ -# size3, align3, \ -# ... -#END_FIELDS -#%assign _JOB_AES_size _FIELD_OFFSET -#%assign _JOB_AES_align _STRUCT_ALIGN - -######################################################################### - -# Alternate "struc-like" syntax: -# STRUCT job_aes2 -# RES_Q .plaintext, 1 -# RES_Q .ciphertext, 1 -# RES_DQ .IV, 1 -# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN -# RES_U .union, size1, align1, \ -# size2, align2, \ -# ... -# ENDSTRUCT -# # Following only needed if nesting -# %assign job_aes2_size _FIELD_OFFSET -# %assign job_aes2_align _STRUCT_ALIGN -# -# RES_* macros take a name, a count and an optional alignment. -# The count in in terms of the base size of the macro, and the -# default alignment is the base size. -# The macros are: -# Macro Base size -# RES_B 1 -# RES_W 2 -# RES_D 4 -# RES_Q 8 -# RES_DQ 16 -# RES_Y 32 -# RES_Z 64 -# -# RES_U defines a union. It's arguments are a name and two or more -# pairs of "size, alignment" -# -# The two assigns are only needed if this structure is being nested -# within another. Even if the assigns are not done, one can still use -# STRUCT_NAME_size as the size of the structure. -# -# Note that for nesting, you still need to assign to STRUCT_NAME_size. 
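# (Editor's worked example, not in the original file: with the FIELD macros
#  defined further down, each offset is rounded up to the field's alignment
#  and END_FIELDS rounds the total size up to the largest alignment seen:
#
#      START_FIELDS
#      FIELD _a, 4, 4     # _a = 0,  running offset -> 4
#      FIELD _b, 8, 8     # padded 4 -> 8, so _b = 8, running offset -> 16
#      END_FIELDS         # _FIELD_OFFSET = 16 (size), _STRUCT_ALIGN = 8
#
#  The _SHA256_ARGS_X4_size, _MB_MGR_size, etc. values below are produced
#  the same way.)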
-# -# The differences between this and using "struc" directly are that each -# type is implicitly aligned to its natural length (although this can be -# over-ridden with an explicit third parameter), and that the structure -# is padded at the end to its overall alignment. -# - -######################################################################### - -#ifndef _DATASTRUCT_ASM_ -#define _DATASTRUCT_ASM_ - -#define SZ8 8*SHA256_DIGEST_WORD_SIZE -#define ROUNDS 64*SZ8 -#define PTR_SZ 8 -#define SHA256_DIGEST_WORD_SIZE 4 -#define MAX_SHA256_LANES 8 -#define SHA256_DIGEST_WORDS 8 -#define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE) -#define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS) -#define SHA256_BLK_SZ 64 - -# START_FIELDS -.macro START_FIELDS - _FIELD_OFFSET = 0 - _STRUCT_ALIGN = 0 -.endm - -# FIELD name size align -.macro FIELD name size align - _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1)) - \name = _FIELD_OFFSET - _FIELD_OFFSET = _FIELD_OFFSET + (\size) -.if (\align > _STRUCT_ALIGN) - _STRUCT_ALIGN = \align -.endif -.endm - -# END_FIELDS -.macro END_FIELDS - _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1)) -.endm - -######################################################################## - -.macro STRUCT p1 -START_FIELDS -.struc \p1 -.endm - -.macro ENDSTRUCT - tmp = _FIELD_OFFSET - END_FIELDS - tmp = (_FIELD_OFFSET - %%tmp) -.if (tmp > 0) - .lcomm tmp -.endif -.endstruc -.endm - -## RES_int name size align -.macro RES_int p1 p2 p3 - name = \p1 - size = \p2 - align = .\p3 - - _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1)) -.align align -.lcomm name size - _FIELD_OFFSET = _FIELD_OFFSET + (size) -.if (align > _STRUCT_ALIGN) - _STRUCT_ALIGN = align -.endif -.endm - -# macro RES_B name, size [, align] -.macro RES_B _name, _size, _align=1 -RES_int _name _size _align -.endm - -# macro RES_W name, size [, align] -.macro RES_W _name, _size, _align=2 -RES_int _name 2*(_size) _align -.endm - -# macro RES_D name, size [, align] -.macro RES_D _name, _size, _align=4 -RES_int _name 4*(_size) _align -.endm - -# macro RES_Q name, size [, align] -.macro RES_Q _name, _size, _align=8 -RES_int _name 8*(_size) _align -.endm - -# macro RES_DQ name, size [, align] -.macro RES_DQ _name, _size, _align=16 -RES_int _name 16*(_size) _align -.endm - -# macro RES_Y name, size [, align] -.macro RES_Y _name, _size, _align=32 -RES_int _name 32*(_size) _align -.endm - -# macro RES_Z name, size [, align] -.macro RES_Z _name, _size, _align=64 -RES_int _name 64*(_size) _align -.endm - -#endif - - -######################################################################## -#### Define SHA256 Out Of Order Data Structures -######################################################################## - -START_FIELDS # LANE_DATA -### name size align -FIELD _job_in_lane, 8, 8 # pointer to job object -END_FIELDS - - _LANE_DATA_size = _FIELD_OFFSET - _LANE_DATA_align = _STRUCT_ALIGN - -######################################################################## - -START_FIELDS # SHA256_ARGS_X4 -### name size align -FIELD _digest, 4*8*8, 4 # transposed digest -FIELD _data_ptr, 8*8, 8 # array of pointers to data -END_FIELDS - - _SHA256_ARGS_X4_size = _FIELD_OFFSET - _SHA256_ARGS_X4_align = _STRUCT_ALIGN - _SHA256_ARGS_X8_size = _FIELD_OFFSET - _SHA256_ARGS_X8_align = _STRUCT_ALIGN - -####################################################################### - -START_FIELDS # MB_MGR -### name size align -FIELD _args, _SHA256_ARGS_X4_size, 
_SHA256_ARGS_X4_align -FIELD _lens, 4*8, 8 -FIELD _unused_lanes, 8, 8 -FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align -END_FIELDS - - _MB_MGR_size = _FIELD_OFFSET - _MB_MGR_align = _STRUCT_ALIGN - -_args_digest = _args + _digest -_args_data_ptr = _args + _data_ptr - -####################################################################### - -START_FIELDS #STACK_FRAME -### name size align -FIELD _data, 16*SZ8, 1 # transposed digest -FIELD _digest, 8*SZ8, 1 # array of pointers to data -FIELD _ytmp, 4*SZ8, 1 -FIELD _rsp, 8, 1 -END_FIELDS - - _STACK_FRAME_size = _FIELD_OFFSET - _STACK_FRAME_align = _STRUCT_ALIGN - -####################################################################### - -######################################################################## -#### Define constants -######################################################################## - -#define STS_UNKNOWN 0 -#define STS_BEING_PROCESSED 1 -#define STS_COMPLETED 2 - -######################################################################## -#### Define JOB_SHA256 structure -######################################################################## - -START_FIELDS # JOB_SHA256 - -### name size align -FIELD _buffer, 8, 8 # pointer to buffer -FIELD _len, 8, 8 # length in bytes -FIELD _result_digest, 8*4, 32 # Digest (output) -FIELD _status, 4, 4 -FIELD _user_data, 8, 8 -END_FIELDS - - _JOB_SHA256_size = _FIELD_OFFSET - _JOB_SHA256_align = _STRUCT_ALIGN diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S deleted file mode 100644 index d2364c55bbde..000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Flush routine for SHA256 multibuffer - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#include <linux/linkage.h> -#include <asm/frame.h> -#include "sha256_mb_mgr_datastruct.S" - -.extern sha256_x8_avx2 - -#LINUX register definitions -#define arg1 %rdi -#define arg2 %rsi - -# Common register definitions -#define state arg1 -#define job arg2 -#define len2 arg2 - -# idx must be a register not clobberred by sha1_mult -#define idx %r8 -#define DWORD_idx %r8d - -#define unused_lanes %rbx -#define lane_data %rbx -#define tmp2 %rbx -#define tmp2_w %ebx - -#define job_rax %rax -#define tmp1 %rax -#define size_offset %rax -#define tmp %rax -#define start_offset %rax - -#define tmp3 %arg1 - -#define extra_blocks %arg2 -#define p %arg2 - -.macro LABEL prefix n -\prefix\n\(): -.endm - -.macro JNE_SKIP i -jne skip_\i -.endm - -.altmacro -.macro SET_OFFSET _offset -offset = \_offset -.endm -.noaltmacro - -# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state) -# arg 1 : rcx : state -ENTRY(sha256_mb_mgr_flush_avx2) - FRAME_BEGIN - push %rbx - - # If bit (32+3) is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $32+3, unused_lanes - jc return_null - - # find a lane with a non-null job - xor idx, idx - offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne one(%rip), idx - offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne two(%rip), idx - offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne three(%rip), idx - offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne four(%rip), idx - offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne five(%rip), idx - offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne six(%rip), idx - offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne seven(%rip), idx - - # copy idx to empty lanes -copy_lane_data: - offset = (_args + _data_ptr) - mov offset(state,idx,8), tmp - - I = 0 -.rep 8 - offset = (_ldata + I * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) -.altmacro - JNE_SKIP %I - offset = (_args + _data_ptr + 8*I) - mov tmp, offset(state) - offset = (_lens + 4*I) - movl $0xFFFFFFFF, offset(state) -LABEL skip_ %I - I = (I+1) -.noaltmacro -.endr - - # Find min length - vmovdqu _lens+0*16(state), %xmm0 - vmovdqu _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword - - vmovd %xmm2, DWORD_idx - mov idx, len2 - and $0xF, idx - shr $4, len2 - jz len_is_0 - - vpand clear_low_nibble(%rip), %xmm2, %xmm2 - vpshufd $0, %xmm2, %xmm2 - - vpsubd %xmm2, %xmm0, %xmm0 - vpsubd %xmm2, %xmm1, %xmm1 - - vmovdqu %xmm0, _lens+0*16(state) - vmovdqu %xmm1, _lens+1*16(state) - - # "state" and "args" are the same address, 
arg1 - # len is arg2 - call sha256_x8_avx2 - # state and idx are intact - -len_is_0: - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $4, unused_lanes - or idx, unused_lanes - - mov unused_lanes, _unused_lanes(state) - movl $0xFFFFFFFF, _lens(state,idx,4) - - vmovd _args_digest(state , idx, 4) , %xmm0 - vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - vmovd _args_digest+4*32(state, idx, 4), %xmm1 - vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 - vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 - vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 - - vmovdqu %xmm0, _result_digest(job_rax) - offset = (_result_digest + 1*16) - vmovdqu %xmm1, offset(job_rax) - -return: - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return -ENDPROC(sha256_mb_mgr_flush_avx2) - -############################################################################## - -.align 16 -ENTRY(sha256_mb_mgr_get_comp_job_avx2) - push %rbx - - ## if bit 32+3 is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $(32+3), unused_lanes - jc .return_null - - # Find min length - vmovdqu _lens(state), %xmm0 - vmovdqu _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword - - vmovd %xmm2, DWORD_idx - test $~0xF, idx - jnz .return_null - - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state, idx, 4) - - vmovd _args_digest(state, idx, 4), %xmm0 - vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 - vmovd _args_digest+4*32(state, idx, 4), %xmm1 - vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 - vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 - vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 - - vmovdqu %xmm0, _result_digest(job_rax) - offset = (_result_digest + 1*16) - vmovdqu %xmm1, offset(job_rax) - - pop %rbx - - ret - -.return_null: - xor job_rax, job_rax - pop %rbx - ret -ENDPROC(sha256_mb_mgr_get_comp_job_avx2) - -.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 -.align 16 -clear_low_nibble: -.octa 0x000000000000000000000000FFFFFFF0 - -.section .rodata.cst8, "aM", @progbits, 8 -.align 8 -one: -.quad 1 -two: -.quad 2 -three: -.quad 3 -four: -.quad 4 -five: -.quad 5 -six: -.quad 6 -seven: -.quad 7 diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c deleted file mode 100644 index b0c498371e67..000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c +++ /dev/null @@ -1,65 +0,0 
@@ -/* - * Initialization code for multi buffer SHA256 algorithm for AVX2 - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "sha256_mb_mgr.h" - -void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state) -{ - unsigned int j; - - state->unused_lanes = 0xF76543210ULL; - for (j = 0; j < 8; j++) { - state->lens[j] = 0xFFFFFFFF; - state->ldata[j].job_in_lane = NULL; - } -} diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S deleted file mode 100644 index b36ae7454084..000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Buffer submit code for multi buffer SHA256 algorithm - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
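The 0xF76543210 value that sha256_mb_mgr_init_avx2() stores in unused_lanes behaves as a small stack of 4-bit lane indices with 0xF left on top as a guard nibble: the submit path pops the low nibble ("and $0xF" / "shr $4") and the completion paths push a lane back ("shl $4" / "or idx"). A minimal C sketch of that encoding (helper names here are illustrative, not taken from the deleted sources):

    #include <stdint.h>

    /* Pop the next free lane index from the low nibble of the stack. */
    static unsigned int pop_lane(uint64_t *unused_lanes)
    {
        unsigned int lane = (unsigned int)(*unused_lanes & 0xF);

        *unused_lanes >>= 4;            /* drop it from the stack */
        return lane;
    }

    /* Return a finished lane to the top of the stack. */
    static void push_lane(uint64_t *unused_lanes, unsigned int lane)
    {
        *unused_lanes = (*unused_lanes << 4) | lane;
    }

With this encoding, all eight lanes free means the value is 0xF76543210 again, which is why the flush path can test bit 35 ("bt $(32+3)") to detect an empty manager, and why the submit path only starts sha256_x8_avx2 once the stack has shrunk to the bare 0xF guard, i.e. all lanes are occupied.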
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include <linux/linkage.h> -#include <asm/frame.h> -#include "sha256_mb_mgr_datastruct.S" - -.extern sha256_x8_avx2 - -# LINUX register definitions -arg1 = %rdi -arg2 = %rsi -size_offset = %rcx -tmp2 = %rcx -extra_blocks = %rdx - -# Common definitions -#define state arg1 -#define job %rsi -#define len2 arg2 -#define p2 arg2 - -# idx must be a register not clobberred by sha1_x8_avx2 -idx = %r8 -DWORD_idx = %r8d -last_len = %r8 - -p = %r11 -start_offset = %r11 - -unused_lanes = %rbx -BYTE_unused_lanes = %bl - -job_rax = %rax -len = %rax -DWORD_len = %eax - -lane = %r12 -tmp3 = %r12 - -tmp = %r9 -DWORD_tmp = %r9d - -lane_data = %r10 - -# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job) -# arg 1 : rcx : state -# arg 2 : rdx : job -ENTRY(sha256_mb_mgr_submit_avx2) - FRAME_BEGIN - push %rbx - push %r12 - - mov _unused_lanes(state), unused_lanes - mov unused_lanes, lane - and $0xF, lane - shr $4, unused_lanes - imul $_LANE_DATA_size, lane, lane_data - movl $STS_BEING_PROCESSED, _status(job) - lea _ldata(state, lane_data), lane_data - mov unused_lanes, _unused_lanes(state) - movl _len(job), DWORD_len - - mov job, _job_in_lane(lane_data) - shl $4, len - or lane, len - - movl DWORD_len, _lens(state , lane, 4) - - # Load digest words from result_digest - vmovdqu _result_digest(job), %xmm0 - vmovdqu _result_digest+1*16(job), %xmm1 - vmovd %xmm0, _args_digest(state, lane, 4) - vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4) - vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4) - vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4) - vmovd %xmm1, _args_digest+4*32(state , lane, 4) - - vpextrd $1, %xmm1, _args_digest+5*32(state , lane, 4) - vpextrd $2, %xmm1, _args_digest+6*32(state , lane, 4) - vpextrd $3, %xmm1, _args_digest+7*32(state , lane, 4) - - mov _buffer(job), p - mov p, _args_data_ptr(state, lane, 8) - - cmp $0xF, unused_lanes - jne return_null - -start_loop: - # Find min length - vmovdqa _lens(state), %xmm0 - vmovdqa _lens+1*16(state), %xmm1 - - vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} - vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F} - vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E} - vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword - - vmovd %xmm2, DWORD_idx - mov idx, len2 - and $0xF, idx - shr $4, len2 - jz len_is_0 - - vpand clear_low_nibble(%rip), %xmm2, %xmm2 - vpshufd $0, %xmm2, %xmm2 - - vpsubd %xmm2, %xmm0, %xmm0 - vpsubd %xmm2, %xmm1, %xmm1 - - vmovdqa %xmm0, _lens + 0*16(state) - vmovdqa %xmm1, _lens + 1*16(state) - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha256_x8_avx2 - - # state and idx are intact - -len_is_0: - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - mov _unused_lanes(state), unused_lanes - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - shl $4, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens(state,idx,4) - - vmovd _args_digest(state, idx, 4), %xmm0 - vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0 - vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0 - vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0 - vmovd _args_digest+4*32(state, idx, 4), %xmm1 - - vpinsrd $1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1 - vpinsrd $2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1 - vpinsrd $3, 
_args_digest+7*32(state , idx, 4), %xmm1, %xmm1 - - vmovdqu %xmm0, _result_digest(job_rax) - vmovdqu %xmm1, _result_digest+1*16(job_rax) - -return: - pop %r12 - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return - -ENDPROC(sha256_mb_mgr_submit_avx2) - -.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16 -.align 16 -clear_low_nibble: - .octa 0x000000000000000000000000FFFFFFF0 diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S deleted file mode 100644 index 1687c80c5995..000000000000 --- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S +++ /dev/null @@ -1,598 +0,0 @@ -/* - * Multi-buffer SHA256 algorithm hash compute routine - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
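Each _lens entry written by the submit path packs the job length and lane as (blocks << 4) | lane, so the vpminud/vpalignr reduction above finds both the shortest outstanding job and its lane in one pass. A scalar sketch of the same computation (illustration only; find_min_job is not a function from the deleted files):

    #include <stdint.h>

    /* Return the smallest remaining length (in blocks) across the eight
     * lanes and report which lane holds it. */
    static uint32_t find_min_job(const uint32_t lens[8], unsigned int *lane)
    {
        uint32_t min = lens[0];
        int i;

        for (i = 1; i < 8; i++)
            if (lens[i] < min)
                min = lens[i];

        *lane = min & 0xF;      /* low nibble: lane index */
        return min >> 4;        /* upper bits: length in blocks */
    }

Idle and freshly completed lanes are parked at 0xFFFFFFFF so they never win the minimum, and a minimum with zero blocks remaining ("jz len_is_0") means the corresponding job has already finished.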
- */ - -#include <linux/linkage.h> -#include "sha256_mb_mgr_datastruct.S" - -## code to compute oct SHA256 using SSE-256 -## outer calling routine takes care of save and restore of XMM registers -## Logic designed/laid out by JDG - -## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15 -## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15 -## Linux preserves: rdi rbp r8 -## -## clobbers %ymm0-15 - -arg1 = %rdi -arg2 = %rsi -reg3 = %rcx -reg4 = %rdx - -# Common definitions -STATE = arg1 -INP_SIZE = arg2 - -IDX = %rax -ROUND = %rbx -TBL = reg3 - -inp0 = %r9 -inp1 = %r10 -inp2 = %r11 -inp3 = %r12 -inp4 = %r13 -inp5 = %r14 -inp6 = %r15 -inp7 = reg4 - -a = %ymm0 -b = %ymm1 -c = %ymm2 -d = %ymm3 -e = %ymm4 -f = %ymm5 -g = %ymm6 -h = %ymm7 - -T1 = %ymm8 - -a0 = %ymm12 -a1 = %ymm13 -a2 = %ymm14 -TMP = %ymm15 -TMP0 = %ymm6 -TMP1 = %ymm7 - -TT0 = %ymm8 -TT1 = %ymm9 -TT2 = %ymm10 -TT3 = %ymm11 -TT4 = %ymm12 -TT5 = %ymm13 -TT6 = %ymm14 -TT7 = %ymm15 - -# Define stack usage - -# Assume stack aligned to 32 bytes before call -# Therefore FRAMESZ mod 32 must be 32-8 = 24 - -#define FRAMESZ 0x388 - -#define VMOVPS vmovups - -# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1 -# "transpose" data in {r0...r7} using temps {t0...t1} -# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7} -# r0 = {a7 a6 a5 a4 a3 a2 a1 a0} -# r1 = {b7 b6 b5 b4 b3 b2 b1 b0} -# r2 = {c7 c6 c5 c4 c3 c2 c1 c0} -# r3 = {d7 d6 d5 d4 d3 d2 d1 d0} -# r4 = {e7 e6 e5 e4 e3 e2 e1 e0} -# r5 = {f7 f6 f5 f4 f3 f2 f1 f0} -# r6 = {g7 g6 g5 g4 g3 g2 g1 g0} -# r7 = {h7 h6 h5 h4 h3 h2 h1 h0} -# -# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7} -# r0 = {h0 g0 f0 e0 d0 c0 b0 a0} -# r1 = {h1 g1 f1 e1 d1 c1 b1 a1} -# r2 = {h2 g2 f2 e2 d2 c2 b2 a2} -# r3 = {h3 g3 f3 e3 d3 c3 b3 a3} -# r4 = {h4 g4 f4 e4 d4 c4 b4 a4} -# r5 = {h5 g5 f5 e5 d5 c5 b5 a5} -# r6 = {h6 g6 f6 e6 d6 c6 b6 a6} -# r7 = {h7 g7 f7 e7 d7 c7 b7 a7} -# - -.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1 - # process top half (r0..r3) {a...d} - vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0} - vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2} - vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0} - vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2} - vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1} - vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2} - vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3} - vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0} - - # use r2 in place of t0 - # process bottom half (r4..r7) {e...h} - vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0} - vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2} - vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0} - vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2} - vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1} - vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2} - vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3} - vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0} - - vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6 - vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2 - vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5 - vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1 - vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7 - vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3 - vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4 - vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0 - -.endm - -.macro ROTATE_ARGS -TMP_ = h -h = g -g = f -f = e -e = d -d = c -c = b -b = a -a = TMP_ -.endm - 
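Functionally, TRANSPOSE8 is an 8x8 transpose of 32-bit words: on input each ymm register holds eight consecutive words from one message lane, and on output each register holds the same word position from all eight lanes, which is the layout the interleaved rounds need. A plain C reference of the data movement (illustration only, not a drop-in replacement for the macro):

    #include <stdint.h>

    /* r[i][j]: word j of lane i on entry; word i of lane j on return. */
    static void transpose8x8(uint32_t r[8][8])
    {
        uint32_t t[8][8];
        int i, j;

        for (i = 0; i < 8; i++)
            for (j = 0; j < 8; j++)
                t[i][j] = r[j][i];      /* gather element i of every input row */

        for (i = 0; i < 8; i++)
            for (j = 0; j < 8; j++)
                r[i][j] = t[i][j];
    }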
-.macro _PRORD reg imm tmp - vpslld $(32-\imm),\reg,\tmp - vpsrld $\imm,\reg, \reg - vpor \tmp,\reg, \reg -.endm - -# PRORD_nd reg, imm, tmp, src -.macro _PRORD_nd reg imm tmp src - vpslld $(32-\imm), \src, \tmp - vpsrld $\imm, \src, \reg - vpor \tmp, \reg, \reg -.endm - -# PRORD dst/src, amt -.macro PRORD reg imm - _PRORD \reg,\imm,TMP -.endm - -# PRORD_nd dst, src, amt -.macro PRORD_nd reg tmp imm - _PRORD_nd \reg, \imm, TMP, \tmp -.endm - -# arguments passed implicitly in preprocessor symbols i, a...h -.macro ROUND_00_15 _T1 i - PRORD_nd a0,e,5 # sig1: a0 = (e >> 5) - - vpxor g, f, a2 # ch: a2 = f^g - vpand e,a2, a2 # ch: a2 = (f^g)&e - vpxor g, a2, a2 # a2 = ch - - PRORD_nd a1,e,25 # sig1: a1 = (e >> 25) - - vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp) - vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K - vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5) - PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11) - vpaddd a2, h, h # h = h + ch - PRORD_nd a2,a,11 # sig0: a2 = (a >> 11) - vpaddd \_T1,h, h # h = h + ch + W + K - vpxor a1, a0, a0 # a0 = sigma1 - PRORD_nd a1,a,22 # sig0: a1 = (a >> 22) - vpxor c, a, \_T1 # maj: T1 = a^c - add $SZ8, ROUND # ROUND++ - vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b - vpaddd a0, h, h - vpaddd h, d, d - vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11) - PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13) - vpxor a1, a2, a2 # a2 = sig0 - vpand c, a, a1 # maj: a1 = a&c - vpor \_T1, a1, a1 # a1 = maj - vpaddd a1, h, h # h = h + ch + W + K + maj - vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0 - ROTATE_ARGS -.endm - -# arguments passed implicitly in preprocessor symbols i, a...h -.macro ROUND_16_XX _T1 i - vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1 - vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1 - vmovdqu \_T1, a0 - PRORD \_T1,11 - vmovdqu a1, a2 - PRORD a1,2 - vpxor a0, \_T1, \_T1 - PRORD \_T1, 7 - vpxor a2, a1, a1 - PRORD a1, 17 - vpsrld $3, a0, a0 - vpxor a0, \_T1, \_T1 - vpsrld $10, a2, a2 - vpxor a2, a1, a1 - vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1 - vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1 - vpaddd a1, \_T1, \_T1 - - ROUND_00_15 \_T1,\i -.endm - -# SHA256_ARGS: -# UINT128 digest[8]; // transposed digests -# UINT8 *data_ptr[4]; - -# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes); -# arg 1 : STATE : pointer to array of pointers to input data -# arg 2 : INP_SIZE : size of input in blocks - # general registers preserved in outer calling routine - # outer calling routine saves all the XMM registers - # save rsp, allocate 32-byte aligned for local variables -ENTRY(sha256_x8_avx2) - - # save callee-saved clobbered registers to comply with C function ABI - push %r12 - push %r13 - push %r14 - push %r15 - - mov %rsp, IDX - sub $FRAMESZ, %rsp - and $~0x1F, %rsp - mov IDX, _rsp(%rsp) - - # Load the pre-transposed incoming digest. 
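The PRORD macros build 32-bit right-rotates from shift/or pairs, and ROUND_00_15/ROUND_16_XX evaluate the standard SHA-256 round and message-schedule expressions on eight lanes at once. For reference, the per-lane scalar computation they parallelize looks roughly like this (illustrative C, not taken from the deleted files):

    #include <stdint.h>

    static uint32_t rotr32(uint32_t x, unsigned int n)
    {
        return (x >> n) | (x << (32 - n));
    }

    /* One SHA-256 round: s[] is the working state a..h, w the schedule
     * word W[i], k the round constant K[i]. */
    static void sha256_round(uint32_t s[8], uint32_t w, uint32_t k)
    {
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
        uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

        uint32_t sig1 = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
        uint32_t sig0 = rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
        uint32_t ch   = (e & f) ^ (~e & g);
        uint32_t maj  = (a & b) ^ (a & c) ^ (b & c);
        uint32_t t1   = h + sig1 + ch + k + w;
        uint32_t t2   = sig0 + maj;

        s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
        s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
    }

    /* ROUND_16_XX additionally extends the message schedule before the
     * round:  W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16],
     * with sigma0(x) = ROTR(x,7) ^ ROTR(x,18) ^ (x >> 3)
     * and  sigma1(x) = ROTR(x,17) ^ ROTR(x,19) ^ (x >> 10). */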
- vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a - vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b - vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c - vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d - vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e - vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f - vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g - vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h - - lea K256_8(%rip),TBL - - # load the address of each of the 4 message lanes - # getting ready to transpose input onto stack - mov _args_data_ptr+0*PTR_SZ(STATE),inp0 - mov _args_data_ptr+1*PTR_SZ(STATE),inp1 - mov _args_data_ptr+2*PTR_SZ(STATE),inp2 - mov _args_data_ptr+3*PTR_SZ(STATE),inp3 - mov _args_data_ptr+4*PTR_SZ(STATE),inp4 - mov _args_data_ptr+5*PTR_SZ(STATE),inp5 - mov _args_data_ptr+6*PTR_SZ(STATE),inp6 - mov _args_data_ptr+7*PTR_SZ(STATE),inp7 - - xor IDX, IDX -lloop: - xor ROUND, ROUND - - # save old digest - vmovdqu a, _digest(%rsp) - vmovdqu b, _digest+1*SZ8(%rsp) - vmovdqu c, _digest+2*SZ8(%rsp) - vmovdqu d, _digest+3*SZ8(%rsp) - vmovdqu e, _digest+4*SZ8(%rsp) - vmovdqu f, _digest+5*SZ8(%rsp) - vmovdqu g, _digest+6*SZ8(%rsp) - vmovdqu h, _digest+7*SZ8(%rsp) - i = 0 -.rep 2 - VMOVPS i*32(inp0, IDX), TT0 - VMOVPS i*32(inp1, IDX), TT1 - VMOVPS i*32(inp2, IDX), TT2 - VMOVPS i*32(inp3, IDX), TT3 - VMOVPS i*32(inp4, IDX), TT4 - VMOVPS i*32(inp5, IDX), TT5 - VMOVPS i*32(inp6, IDX), TT6 - VMOVPS i*32(inp7, IDX), TT7 - vmovdqu g, _ytmp(%rsp) - vmovdqu h, _ytmp+1*SZ8(%rsp) - TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1 - vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1 - vmovdqu _ytmp(%rsp), g - vpshufb TMP1, TT0, TT0 - vpshufb TMP1, TT1, TT1 - vpshufb TMP1, TT2, TT2 - vpshufb TMP1, TT3, TT3 - vpshufb TMP1, TT4, TT4 - vpshufb TMP1, TT5, TT5 - vpshufb TMP1, TT6, TT6 - vpshufb TMP1, TT7, TT7 - vmovdqu _ytmp+1*SZ8(%rsp), h - vmovdqu TT4, _ytmp(%rsp) - vmovdqu TT5, _ytmp+1*SZ8(%rsp) - vmovdqu TT6, _ytmp+2*SZ8(%rsp) - vmovdqu TT7, _ytmp+3*SZ8(%rsp) - ROUND_00_15 TT0,(i*8+0) - vmovdqu _ytmp(%rsp), TT0 - ROUND_00_15 TT1,(i*8+1) - vmovdqu _ytmp+1*SZ8(%rsp), TT1 - ROUND_00_15 TT2,(i*8+2) - vmovdqu _ytmp+2*SZ8(%rsp), TT2 - ROUND_00_15 TT3,(i*8+3) - vmovdqu _ytmp+3*SZ8(%rsp), TT3 - ROUND_00_15 TT0,(i*8+4) - ROUND_00_15 TT1,(i*8+5) - ROUND_00_15 TT2,(i*8+6) - ROUND_00_15 TT3,(i*8+7) - i = (i+1) -.endr - add $64, IDX - i = (i*8) - - jmp Lrounds_16_xx -.align 16 -Lrounds_16_xx: -.rep 16 - ROUND_16_XX T1, i - i = (i+1) -.endr - - cmp $ROUNDS,ROUND - jb Lrounds_16_xx - - # add old digest - vpaddd _digest+0*SZ8(%rsp), a, a - vpaddd _digest+1*SZ8(%rsp), b, b - vpaddd _digest+2*SZ8(%rsp), c, c - vpaddd _digest+3*SZ8(%rsp), d, d - vpaddd _digest+4*SZ8(%rsp), e, e - vpaddd _digest+5*SZ8(%rsp), f, f - vpaddd _digest+6*SZ8(%rsp), g, g - vpaddd _digest+7*SZ8(%rsp), h, h - - sub $1, INP_SIZE # unit is blocks - jne lloop - - # write back to memory (state object) the transposed digest - vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE) - vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE) - - # update input pointers - add IDX, inp0 - mov inp0, _args_data_ptr+0*8(STATE) - add IDX, inp1 - mov inp1, _args_data_ptr+1*8(STATE) - add IDX, inp2 - mov inp2, _args_data_ptr+2*8(STATE) - add IDX, inp3 - mov inp3, _args_data_ptr+3*8(STATE) - add IDX, inp4 - mov inp4, _args_data_ptr+4*8(STATE) - add IDX, inp5 - 
mov inp5, _args_data_ptr+5*8(STATE) - add IDX, inp6 - mov inp6, _args_data_ptr+6*8(STATE) - add IDX, inp7 - mov inp7, _args_data_ptr+7*8(STATE) - - # Postamble - mov _rsp(%rsp), %rsp - - # restore callee-saved clobbered registers - pop %r15 - pop %r14 - pop %r13 - pop %r12 - - ret -ENDPROC(sha256_x8_avx2) - -.section .rodata.K256_8, "a", @progbits -.align 64 -K256_8: - .octa 0x428a2f98428a2f98428a2f98428a2f98 - .octa 0x428a2f98428a2f98428a2f98428a2f98 - .octa 0x71374491713744917137449171374491 - .octa 0x71374491713744917137449171374491 - .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf - .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf - .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5 - .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5 - .octa 0x3956c25b3956c25b3956c25b3956c25b - .octa 0x3956c25b3956c25b3956c25b3956c25b - .octa 0x59f111f159f111f159f111f159f111f1 - .octa 0x59f111f159f111f159f111f159f111f1 - .octa 0x923f82a4923f82a4923f82a4923f82a4 - .octa 0x923f82a4923f82a4923f82a4923f82a4 - .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5 - .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5 - .octa 0xd807aa98d807aa98d807aa98d807aa98 - .octa 0xd807aa98d807aa98d807aa98d807aa98 - .octa 0x12835b0112835b0112835b0112835b01 - .octa 0x12835b0112835b0112835b0112835b01 - .octa 0x243185be243185be243185be243185be - .octa 0x243185be243185be243185be243185be - .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3 - .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3 - .octa 0x72be5d7472be5d7472be5d7472be5d74 - .octa 0x72be5d7472be5d7472be5d7472be5d74 - .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe - .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe - .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7 - .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7 - .octa 0xc19bf174c19bf174c19bf174c19bf174 - .octa 0xc19bf174c19bf174c19bf174c19bf174 - .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1 - .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1 - .octa 0xefbe4786efbe4786efbe4786efbe4786 - .octa 0xefbe4786efbe4786efbe4786efbe4786 - .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6 - .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6 - .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc - .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc - .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f - .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f - .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa - .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa - .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc - .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc - .octa 0x76f988da76f988da76f988da76f988da - .octa 0x76f988da76f988da76f988da76f988da - .octa 0x983e5152983e5152983e5152983e5152 - .octa 0x983e5152983e5152983e5152983e5152 - .octa 0xa831c66da831c66da831c66da831c66d - .octa 0xa831c66da831c66da831c66da831c66d - .octa 0xb00327c8b00327c8b00327c8b00327c8 - .octa 0xb00327c8b00327c8b00327c8b00327c8 - .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7 - .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7 - .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3 - .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3 - .octa 0xd5a79147d5a79147d5a79147d5a79147 - .octa 0xd5a79147d5a79147d5a79147d5a79147 - .octa 0x06ca635106ca635106ca635106ca6351 - .octa 0x06ca635106ca635106ca635106ca6351 - .octa 0x14292967142929671429296714292967 - .octa 0x14292967142929671429296714292967 - .octa 0x27b70a8527b70a8527b70a8527b70a85 - .octa 0x27b70a8527b70a8527b70a8527b70a85 - .octa 0x2e1b21382e1b21382e1b21382e1b2138 - .octa 0x2e1b21382e1b21382e1b21382e1b2138 - .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc - .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc - .octa 0x53380d1353380d1353380d1353380d13 - .octa 0x53380d1353380d1353380d1353380d13 - .octa 0x650a7354650a7354650a7354650a7354 - .octa 0x650a7354650a7354650a7354650a7354 - 
.octa 0x766a0abb766a0abb766a0abb766a0abb - .octa 0x766a0abb766a0abb766a0abb766a0abb - .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e - .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e - .octa 0x92722c8592722c8592722c8592722c85 - .octa 0x92722c8592722c8592722c8592722c85 - .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1 - .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1 - .octa 0xa81a664ba81a664ba81a664ba81a664b - .octa 0xa81a664ba81a664ba81a664ba81a664b - .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70 - .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70 - .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3 - .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3 - .octa 0xd192e819d192e819d192e819d192e819 - .octa 0xd192e819d192e819d192e819d192e819 - .octa 0xd6990624d6990624d6990624d6990624 - .octa 0xd6990624d6990624d6990624d6990624 - .octa 0xf40e3585f40e3585f40e3585f40e3585 - .octa 0xf40e3585f40e3585f40e3585f40e3585 - .octa 0x106aa070106aa070106aa070106aa070 - .octa 0x106aa070106aa070106aa070106aa070 - .octa 0x19a4c11619a4c11619a4c11619a4c116 - .octa 0x19a4c11619a4c11619a4c11619a4c116 - .octa 0x1e376c081e376c081e376c081e376c08 - .octa 0x1e376c081e376c081e376c081e376c08 - .octa 0x2748774c2748774c2748774c2748774c - .octa 0x2748774c2748774c2748774c2748774c - .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5 - .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5 - .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3 - .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3 - .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a - .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a - .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f - .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f - .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3 - .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3 - .octa 0x748f82ee748f82ee748f82ee748f82ee - .octa 0x748f82ee748f82ee748f82ee748f82ee - .octa 0x78a5636f78a5636f78a5636f78a5636f - .octa 0x78a5636f78a5636f78a5636f78a5636f - .octa 0x84c8781484c8781484c8781484c87814 - .octa 0x84c8781484c8781484c8781484c87814 - .octa 0x8cc702088cc702088cc702088cc70208 - .octa 0x8cc702088cc702088cc702088cc70208 - .octa 0x90befffa90befffa90befffa90befffa - .octa 0x90befffa90befffa90befffa90befffa - .octa 0xa4506ceba4506ceba4506ceba4506ceb - .octa 0xa4506ceba4506ceba4506ceba4506ceb - .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7 - .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7 - .octa 0xc67178f2c67178f2c67178f2c67178f2 - .octa 0xc67178f2c67178f2c67178f2c67178f2 - -.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 -.align 32 -PSHUFFLE_BYTE_FLIP_MASK: -.octa 0x0c0d0e0f08090a0b0405060700010203 -.octa 0x0c0d0e0f08090a0b0405060700010203 - -.section .rodata.cst256.K256, "aM", @progbits, 256 -.align 64 -.global K256 -K256: - .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - .int 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 diff --git a/arch/x86/crypto/sha512-mb/Makefile b/arch/x86/crypto/sha512-mb/Makefile deleted file mode 100644 index 
90f1ef69152e..000000000000 --- a/arch/x86/crypto/sha512-mb/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# -# Arch-specific CryptoAPI modules. -# - -avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ - $(comma)4)$(comma)%ymm2,yes,no) -ifeq ($(avx2_supported),yes) - obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o - sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \ - sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o -endif diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c deleted file mode 100644 index 26b85678012d..000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb.c +++ /dev/null @@ -1,1047 +0,0 @@ -/* - * Multi buffer SHA512 algorithm Glue Code - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/mm.h> -#include <linux/cryptohash.h> -#include <linux/types.h> -#include <linux/list.h> -#include <crypto/scatterwalk.h> -#include <crypto/sha.h> -#include <crypto/mcryptd.h> -#include <crypto/crypto_wq.h> -#include <asm/byteorder.h> -#include <linux/hardirq.h> -#include <asm/fpu/api.h> -#include "sha512_mb_ctx.h" - -#define FLUSH_INTERVAL 1000 /* in usec */ - -static struct mcryptd_alg_state sha512_mb_alg_state; - -struct sha512_mb_ctx { - struct mcryptd_ahash *mcryptd_tfm; -}; - -static inline struct mcryptd_hash_request_ctx - *cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx) -{ - struct ahash_request *areq; - - areq = container_of((void *) hash_ctx, struct ahash_request, __ctx); - return container_of(areq, struct mcryptd_hash_request_ctx, areq); -} - -static inline struct ahash_request - *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) -{ - return container_of((void *) ctx, struct ahash_request, __ctx); -} - -static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, - struct ahash_request *areq) -{ - rctx->flag = HASH_UPDATE; -} - -static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state); -static asmlinkage struct job_sha512* (*sha512_job_mgr_submit) - (struct sha512_mb_mgr *state, - struct job_sha512 *job); -static asmlinkage struct job_sha512* (*sha512_job_mgr_flush) - (struct sha512_mb_mgr *state); -static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job) - (struct sha512_mb_mgr *state); - -inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2], - uint64_t total_len) -{ - uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1); - - memset(&padblock[i], 0, SHA512_BLOCK_SIZE); - padblock[i] = 0x80; - - i += ((SHA512_BLOCK_SIZE - 1) & - (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1))) - + 1 + SHA512_PADLENGTHFIELD_SIZE; - -#if SHA512_PADLENGTHFIELD_SIZE == 16 - *((uint64_t *) &padblock[i - 16]) = 0; -#endif - - *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3); - - /* Number of extra blocks to hash */ - return i >> SHA512_LOG2_BLOCK_SIZE; -} - -static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit - (struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx) -{ - while (ctx) { - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Clear PROCESSING bit */ - ctx->status = HASH_CTX_STS_COMPLETE; - return ctx; - } - - /* - * If the extra blocks are empty, begin hashing what remains - * in the user's buffer. - */ - if (ctx->partial_block_buffer_length == 0 && - ctx->incoming_buffer_length) { - - const void *buffer = ctx->incoming_buffer; - uint32_t len = ctx->incoming_buffer_length; - uint32_t copy_len; - - /* - * Only entire blocks can be hashed. - * Copy remainder to extra blocks buffer. 
- */ - copy_len = len & (SHA512_BLOCK_SIZE-1); - - if (copy_len) { - len -= copy_len; - memcpy(ctx->partial_block_buffer, - ((const char *) buffer + len), - copy_len); - ctx->partial_block_buffer_length = copy_len; - } - - ctx->incoming_buffer_length = 0; - - /* len should be a multiple of the block size now */ - assert((len % SHA512_BLOCK_SIZE) == 0); - - /* Set len to the number of blocks to be hashed */ - len >>= SHA512_LOG2_BLOCK_SIZE; - - if (len) { - - ctx->job.buffer = (uint8_t *) buffer; - ctx->job.len = len; - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_submit(&mgr->mgr, - &ctx->job); - continue; - } - } - - /* - * If the extra blocks are not empty, then we are - * either on the last block(s) or we need more - * user input before continuing. - */ - if (ctx->status & HASH_CTX_STS_LAST) { - - uint8_t *buf = ctx->partial_block_buffer; - uint32_t n_extra_blocks = - sha512_pad(buf, ctx->total_length); - - ctx->status = (HASH_CTX_STS_PROCESSING | - HASH_CTX_STS_COMPLETE); - ctx->job.buffer = buf; - ctx->job.len = (uint32_t) n_extra_blocks; - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_submit(&mgr->mgr, &ctx->job); - continue; - } - - if (ctx) - ctx->status = HASH_CTX_STS_IDLE; - return ctx; - } - - return NULL; -} - -static struct sha512_hash_ctx - *sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate) -{ - /* - * If get_comp_job returns NULL, there are no jobs complete. - * If get_comp_job returns a job, verify that it is safe to return to - * the user. - * If it is not ready, resubmit the job to finish processing. - * If sha512_ctx_mgr_resubmit returned a job, it is ready to be - * returned. - * Otherwise, all jobs currently being managed by the hash_ctx_mgr - * still need processing. - */ - struct sha512_ctx_mgr *mgr; - struct sha512_hash_ctx *ctx; - unsigned long flags; - - mgr = cstate->mgr; - spin_lock_irqsave(&cstate->work_lock, flags); - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_get_comp_job(&mgr->mgr); - ctx = sha512_ctx_mgr_resubmit(mgr, ctx); - spin_unlock_irqrestore(&cstate->work_lock, flags); - return ctx; -} - -static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr) -{ - sha512_job_mgr_init(&mgr->mgr); -} - -static struct sha512_hash_ctx - *sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate, - struct sha512_hash_ctx *ctx, - const void *buffer, - uint32_t len, - int flags) -{ - struct sha512_ctx_mgr *mgr; - unsigned long irqflags; - - mgr = cstate->mgr; - spin_lock_irqsave(&cstate->work_lock, irqflags); - if (flags & ~(HASH_UPDATE | HASH_LAST)) { - /* User should not pass anything other than UPDATE or LAST */ - ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; - goto unlock; - } - - if (ctx->status & HASH_CTX_STS_PROCESSING) { - /* Cannot submit to a currently processing job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; - goto unlock; - } - - if (ctx->status & HASH_CTX_STS_COMPLETE) { - /* Cannot update a finished job. */ - ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; - goto unlock; - } - - /* - * If we made it here, there were no errors during this call to - * submit - */ - ctx->error = HASH_CTX_ERROR_NONE; - - /* Store buffer ptr info from user */ - ctx->incoming_buffer = buffer; - ctx->incoming_buffer_length = len; - - /* - * Store the user's request flags and mark this ctx as currently being - * processed. - */ - ctx->status = (flags & HASH_LAST) ? 
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : - HASH_CTX_STS_PROCESSING; - - /* Advance byte counter */ - ctx->total_length += len; - - /* - * If there is anything currently buffered in the extra blocks, - * append to it until it contains a whole block. - * Or if the user's buffer contains less than a whole block, - * append as much as possible to the extra block. - */ - if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) { - /* Compute how many bytes to copy from user buffer into extra - * block - */ - uint32_t copy_len = SHA512_BLOCK_SIZE - - ctx->partial_block_buffer_length; - if (len < copy_len) - copy_len = len; - - if (copy_len) { - /* Copy and update relevant pointers and counters */ - memcpy - (&ctx->partial_block_buffer[ctx->partial_block_buffer_length], - buffer, copy_len); - - ctx->partial_block_buffer_length += copy_len; - ctx->incoming_buffer = (const void *) - ((const char *)buffer + copy_len); - ctx->incoming_buffer_length = len - copy_len; - } - - /* The extra block should never contain more than 1 block - * here - */ - assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE); - - /* If the extra block buffer contains exactly 1 block, it can - * be hashed. - */ - if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) { - ctx->partial_block_buffer_length = 0; - - ctx->job.buffer = ctx->partial_block_buffer; - ctx->job.len = 1; - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_submit(&mgr->mgr, &ctx->job); - } - } - - ctx = sha512_ctx_mgr_resubmit(mgr, ctx); -unlock: - spin_unlock_irqrestore(&cstate->work_lock, irqflags); - return ctx; -} - -static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate) -{ - struct sha512_ctx_mgr *mgr; - struct sha512_hash_ctx *ctx; - unsigned long flags; - - mgr = cstate->mgr; - spin_lock_irqsave(&cstate->work_lock, flags); - while (1) { - ctx = (struct sha512_hash_ctx *) - sha512_job_mgr_flush(&mgr->mgr); - - /* If flush returned 0, there are no more jobs in flight. */ - if (!ctx) - break; - - /* - * If flush returned a job, resubmit the job to finish - * processing. - */ - ctx = sha512_ctx_mgr_resubmit(mgr, ctx); - - /* - * If sha512_ctx_mgr_resubmit returned a job, it is ready to - * be returned. Otherwise, all jobs currently being managed by - * the sha512_ctx_mgr still need processing. Loop. 
- */ - if (ctx) - break; - } - spin_unlock_irqrestore(&cstate->work_lock, flags); - return ctx; -} - -static int sha512_mb_init(struct ahash_request *areq) -{ - struct sha512_hash_ctx *sctx = ahash_request_ctx(areq); - - hash_ctx_init(sctx); - sctx->job.result_digest[0] = SHA512_H0; - sctx->job.result_digest[1] = SHA512_H1; - sctx->job.result_digest[2] = SHA512_H2; - sctx->job.result_digest[3] = SHA512_H3; - sctx->job.result_digest[4] = SHA512_H4; - sctx->job.result_digest[5] = SHA512_H5; - sctx->job.result_digest[6] = SHA512_H6; - sctx->job.result_digest[7] = SHA512_H7; - sctx->total_length = 0; - sctx->partial_block_buffer_length = 0; - sctx->status = HASH_CTX_STS_IDLE; - - return 0; -} - -static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx) -{ - int i; - struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq); - __be64 *dst = (__be64 *) rctx->out; - - for (i = 0; i < 8; ++i) - dst[i] = cpu_to_be64(sctx->job.result_digest[i]); - - return 0; -} - -static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx, - struct mcryptd_alg_cstate *cstate, bool flush) -{ - int flag = HASH_UPDATE; - int nbytes, err = 0; - struct mcryptd_hash_request_ctx *rctx = *ret_rctx; - struct sha512_hash_ctx *sha_ctx; - - /* more work ? */ - while (!(rctx->flag & HASH_DONE)) { - nbytes = crypto_ahash_walk_done(&rctx->walk, 0); - if (nbytes < 0) { - err = nbytes; - goto out; - } - /* check if the walk is done */ - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - if (rctx->flag & HASH_FINAL) - flag |= HASH_LAST; - - } - sha_ctx = (struct sha512_hash_ctx *) - ahash_request_ctx(&rctx->areq); - kernel_fpu_begin(); - sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, - rctx->walk.data, nbytes, flag); - if (!sha_ctx) { - if (flush) - sha_ctx = sha512_ctx_mgr_flush(cstate); - } - kernel_fpu_end(); - if (sha_ctx) - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - else { - rctx = NULL; - goto out; - } - } - - /* copy the results */ - if (rctx->flag & HASH_FINAL) - sha512_mb_set_results(rctx); - -out: - *ret_rctx = rctx; - return err; -} - -static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate, - int err) -{ - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha512_hash_ctx *sha_ctx; - struct mcryptd_hash_request_ctx *req_ctx; - int ret; - unsigned long flags; - - /* remove from work list */ - spin_lock_irqsave(&cstate->work_lock, flags); - list_del(&rctx->waiter); - spin_unlock_irqrestore(&cstate->work_lock, flags); - - if (irqs_disabled()) - rctx->complete(&req->base, err); - else { - local_bh_disable(); - rctx->complete(&req->base, err); - local_bh_enable(); - } - - /* check to see if there are other jobs that are done */ - sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate); - while (sha_ctx) { - req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&req_ctx, cstate, false); - if (req_ctx) { - spin_lock_irqsave(&cstate->work_lock, flags); - list_del(&req_ctx->waiter); - spin_unlock_irqrestore(&cstate->work_lock, flags); - - req = cast_mcryptd_ctx_to_req(req_ctx); - if (irqs_disabled()) - req_ctx->complete(&req->base, ret); - else { - local_bh_disable(); - req_ctx->complete(&req->base, ret); - local_bh_enable(); - } - } - sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate); - } - - return 0; -} - -static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx, - struct mcryptd_alg_cstate *cstate) -{ - unsigned long next_flush; - unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL); - unsigned 
long flags; - - /* initialize tag */ - rctx->tag.arrival = jiffies; /* tag the arrival time */ - rctx->tag.seq_num = cstate->next_seq_num++; - next_flush = rctx->tag.arrival + delay; - rctx->tag.expire = next_flush; - - spin_lock_irqsave(&cstate->work_lock, flags); - list_add_tail(&rctx->waiter, &cstate->work_list); - spin_unlock_irqrestore(&cstate->work_lock, flags); - - mcryptd_arm_flusher(cstate, delay); -} - -static int sha512_mb_update(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, - areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha512_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha512_hash_ctx *sha_ctx; - int ret = 0, nbytes; - - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) - rctx->flag |= HASH_DONE; - - /* submit */ - sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq); - sha512_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data, - nbytes, HASH_UPDATE); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha512_mb_finup(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, - areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha512_mb_alg_state.alg_cstate); - - struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx); - struct sha512_hash_ctx *sha_ctx; - int ret = 0, flag = HASH_UPDATE, nbytes; - - /* sanity check */ - if (rctx->tag.cpu != smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - nbytes = crypto_ahash_walk_first(req, &rctx->walk); - - if (nbytes < 0) { - ret = nbytes; - goto done; - } - - if (crypto_ahash_walk_last(&rctx->walk)) { - rctx->flag |= HASH_DONE; - flag = HASH_LAST; - } - - /* submit */ - rctx->flag |= HASH_FINAL; - sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq); - sha512_mb_add_list(rctx, cstate); - - kernel_fpu_begin(); - sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data, - nbytes, flag); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha512_mb_final(struct ahash_request *areq) -{ - struct mcryptd_hash_request_ctx *rctx = - container_of(areq, struct mcryptd_hash_request_ctx, - areq); - struct mcryptd_alg_cstate *cstate = - this_cpu_ptr(sha512_mb_alg_state.alg_cstate); - - struct sha512_hash_ctx *sha_ctx; - int ret = 0; - u8 data; - - /* sanity check */ - if (rctx->tag.cpu != 
smp_processor_id()) { - pr_err("mcryptd error: cpu clash\n"); - goto done; - } - - /* need to init context */ - req_ctx_init(rctx, areq); - - rctx->flag |= HASH_DONE | HASH_FINAL; - - sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq); - /* flag HASH_FINAL and 0 data size */ - sha512_mb_add_list(rctx, cstate); - kernel_fpu_begin(); - sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST); - kernel_fpu_end(); - - /* check if anything is returned */ - if (!sha_ctx) - return -EINPROGRESS; - - if (sha_ctx->error) { - ret = sha_ctx->error; - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - goto done; - } - - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - ret = sha_finish_walk(&rctx, cstate, false); - if (!rctx) - return -EINPROGRESS; -done: - sha_complete_job(rctx, cstate, ret); - return ret; -} - -static int sha512_mb_export(struct ahash_request *areq, void *out) -{ - struct sha512_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(out, sctx, sizeof(*sctx)); - - return 0; -} - -static int sha512_mb_import(struct ahash_request *areq, const void *in) -{ - struct sha512_hash_ctx *sctx = ahash_request_ctx(areq); - - memcpy(sctx, in, sizeof(*sctx)); - - return 0; -} - -static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm) -{ - struct mcryptd_ahash *mcryptd_tfm; - struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm); - struct mcryptd_hash_ctx *mctx; - - mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb", - CRYPTO_ALG_INTERNAL, - CRYPTO_ALG_INTERNAL); - if (IS_ERR(mcryptd_tfm)) - return PTR_ERR(mcryptd_tfm); - mctx = crypto_ahash_ctx(&mcryptd_tfm->base); - mctx->alg_state = &sha512_mb_alg_state; - ctx->mcryptd_tfm = mcryptd_tfm; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(&mcryptd_tfm->base)); - - return 0; -} - -static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm) -{ - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - sizeof(struct sha512_hash_ctx)); - - return 0; -} - -static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm) -{ - struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm); - - mcryptd_free_ahash(ctx->mcryptd_tfm); -} - -static struct ahash_alg sha512_mb_areq_alg = { - .init = sha512_mb_init, - .update = sha512_mb_update, - .final = sha512_mb_final, - .finup = sha512_mb_finup, - .export = sha512_mb_export, - .import = sha512_mb_import, - .halg = { - .digestsize = SHA512_DIGEST_SIZE, - .statesize = sizeof(struct sha512_hash_ctx), - .base = { - .cra_name = "__sha512-mb", - .cra_driver_name = "__intel_sha512-mb", - .cra_priority = 100, - /* - * use ASYNC flag as some buffers in multi-buffer - * algo may not have completed before hashing thread - * sleep - */ - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_INTERNAL, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha512_mb_areq_alg.halg.base.cra_list), - .cra_init = sha512_mb_areq_init_tfm, - .cra_exit = sha512_mb_areq_exit_tfm, - .cra_ctxsize = sizeof(struct sha512_hash_ctx), - } - } -}; - -static int sha512_mb_async_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, 
sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_init(mcryptd_req); -} - -static int sha512_mb_async_update(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_update(mcryptd_req); -} - -static int sha512_mb_async_finup(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_finup(mcryptd_req); -} - -static int sha512_mb_async_final(struct ahash_request *req) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_final(mcryptd_req); -} - -static int sha512_mb_async_digest(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_digest(mcryptd_req); -} - -static int sha512_mb_async_export(struct ahash_request *req, void *out) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - return crypto_ahash_export(mcryptd_req, out); -} - -static int sha512_mb_async_import(struct ahash_request *req, const void *in) -{ - struct ahash_request *mcryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm); - struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; - struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm); - struct mcryptd_hash_request_ctx *rctx; - struct ahash_request *areq; - - memcpy(mcryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); - rctx = ahash_request_ctx(mcryptd_req); - - areq = &rctx->areq; - - ahash_request_set_tfm(areq, child); - ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, - rctx->complete, req); - - return crypto_ahash_import(mcryptd_req, in); -} - -static struct ahash_alg sha512_mb_async_alg = { - .init = sha512_mb_async_init, - .update = sha512_mb_async_update, - .final = sha512_mb_async_final, - .finup = sha512_mb_async_finup, - .digest = sha512_mb_async_digest, - .export = sha512_mb_async_export, - .import = sha512_mb_async_import, - .halg = { - .digestsize = SHA512_DIGEST_SIZE, - .statesize = sizeof(struct sha512_hash_ctx), - .base = { - .cra_name = "sha512", 
- .cra_driver_name = "sha512_mb", - /* - * Low priority, since with few concurrent hash requests - * this is extremely slow due to the flush delay. Users - * whose workloads would benefit from this can request - * it explicitly by driver name, or can increase its - * priority at runtime using NETLINK_CRYPTO. - */ - .cra_priority = 50, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT - (sha512_mb_async_alg.halg.base.cra_list), - .cra_init = sha512_mb_async_init_tfm, - .cra_exit = sha512_mb_async_exit_tfm, - .cra_ctxsize = sizeof(struct sha512_mb_ctx), - .cra_alignmask = 0, - }, - }, -}; - -static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate) -{ - struct mcryptd_hash_request_ctx *rctx; - unsigned long cur_time; - unsigned long next_flush = 0; - struct sha512_hash_ctx *sha_ctx; - - - cur_time = jiffies; - - while (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - if time_before(cur_time, rctx->tag.expire) - break; - kernel_fpu_begin(); - sha_ctx = (struct sha512_hash_ctx *) - sha512_ctx_mgr_flush(cstate); - kernel_fpu_end(); - if (!sha_ctx) { - pr_err("sha512_mb error: nothing got flushed for" - " non-empty list\n"); - break; - } - rctx = cast_hash_to_mcryptd_ctx(sha_ctx); - sha_finish_walk(&rctx, cstate, true); - sha_complete_job(rctx, cstate, 0); - } - - if (!list_empty(&cstate->work_list)) { - rctx = list_entry(cstate->work_list.next, - struct mcryptd_hash_request_ctx, waiter); - /* get the hash context and then flush time */ - next_flush = rctx->tag.expire; - mcryptd_arm_flusher(cstate, get_delay(next_flush)); - } - return next_flush; -} - -static int __init sha512_mb_mod_init(void) -{ - - int cpu; - int err; - struct mcryptd_alg_cstate *cpu_state; - - /* check for dependent cpu features */ - if (!boot_cpu_has(X86_FEATURE_AVX2) || - !boot_cpu_has(X86_FEATURE_BMI2)) - return -ENODEV; - - /* initialize multibuffer structures */ - sha512_mb_alg_state.alg_cstate = - alloc_percpu(struct mcryptd_alg_cstate); - - sha512_job_mgr_init = sha512_mb_mgr_init_avx2; - sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2; - sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2; - sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2; - - if (!sha512_mb_alg_state.alg_cstate) - return -ENOMEM; - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu); - cpu_state->next_flush = 0; - cpu_state->next_seq_num = 0; - cpu_state->flusher_engaged = false; - INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher); - cpu_state->cpu = cpu; - cpu_state->alg_state = &sha512_mb_alg_state; - cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr), - GFP_KERNEL); - if (!cpu_state->mgr) - goto err2; - sha512_ctx_mgr_init(cpu_state->mgr); - INIT_LIST_HEAD(&cpu_state->work_list); - spin_lock_init(&cpu_state->work_lock); - } - sha512_mb_alg_state.flusher = &sha512_mb_flusher; - - err = crypto_register_ahash(&sha512_mb_areq_alg); - if (err) - goto err2; - err = crypto_register_ahash(&sha512_mb_async_alg); - if (err) - goto err1; - - - return 0; -err1: - crypto_unregister_ahash(&sha512_mb_areq_alg); -err2: - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha512_mb_alg_state.alg_cstate); - return -ENODEV; -} - -static void __exit sha512_mb_mod_fini(void) -{ - int cpu; - struct mcryptd_alg_cstate *cpu_state; - - 
crypto_unregister_ahash(&sha512_mb_async_alg); - crypto_unregister_ahash(&sha512_mb_areq_alg); - for_each_possible_cpu(cpu) { - cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu); - kfree(cpu_state->mgr); - } - free_percpu(sha512_mb_alg_state.alg_cstate); -} - -module_init(sha512_mb_mod_init); -module_exit(sha512_mb_mod_fini); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated"); - -MODULE_ALIAS("sha512"); diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h deleted file mode 100644 index e5c465bd821e..000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Header file for multi buffer SHA512 context - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _SHA_MB_CTX_INTERNAL_H -#define _SHA_MB_CTX_INTERNAL_H - -#include "sha512_mb_mgr.h" - -#define HASH_UPDATE 0x00 -#define HASH_LAST 0x01 -#define HASH_DONE 0x02 -#define HASH_FINAL 0x04 - -#define HASH_CTX_STS_IDLE 0x00 -#define HASH_CTX_STS_PROCESSING 0x01 -#define HASH_CTX_STS_LAST 0x02 -#define HASH_CTX_STS_COMPLETE 0x04 - -enum hash_ctx_error { - HASH_CTX_ERROR_NONE = 0, - HASH_CTX_ERROR_INVALID_FLAGS = -1, - HASH_CTX_ERROR_ALREADY_PROCESSING = -2, - HASH_CTX_ERROR_ALREADY_COMPLETED = -3, -}; - -#define hash_ctx_user_data(ctx) ((ctx)->user_data) -#define hash_ctx_digest(ctx) ((ctx)->job.result_digest) -#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING) -#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE) -#define hash_ctx_status(ctx) ((ctx)->status) -#define hash_ctx_error(ctx) ((ctx)->error) -#define hash_ctx_init(ctx) \ - do { \ - (ctx)->error = HASH_CTX_ERROR_NONE; \ - (ctx)->status = HASH_CTX_STS_COMPLETE; \ - } while (0) - -/* Hash Constants and Typedefs */ -#define SHA512_DIGEST_LENGTH 8 -#define SHA512_LOG2_BLOCK_SIZE 7 - -#define SHA512_PADLENGTHFIELD_SIZE 16 - -#ifdef SHA_MB_DEBUG -#define assert(expr) \ -do { \ - if (unlikely(!(expr))) { \ - printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ - #expr, __FILE__, __func__, __LINE__); \ - } \ -} while (0) -#else -#define assert(expr) do {} while (0) -#endif - -struct sha512_ctx_mgr { - struct sha512_mb_mgr mgr; -}; - -/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */ - -struct sha512_hash_ctx { - /* Must be at struct offset 0 */ - struct job_sha512 job; - /* status flag */ - int status; - /* error flag */ - int error; - - uint64_t total_length; - const void *incoming_buffer; - uint32_t incoming_buffer_length; - uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2]; - uint32_t partial_block_buffer_length; - void *user_data; -}; - -#endif diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h deleted file mode 100644 index 178f17eef382..000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Header file for multi buffer SHA512 algorithm manager - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __SHA_MB_MGR_H -#define __SHA_MB_MGR_H - -#include <linux/types.h> - -#define NUM_SHA512_DIGEST_WORDS 8 - -enum job_sts {STS_UNKNOWN = 0, - STS_BEING_PROCESSED = 1, - STS_COMPLETED = 2, - STS_INTERNAL_ERROR = 3, - STS_ERROR = 4 -}; - -struct job_sha512 { - u8 *buffer; - u64 len; - u64 result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32); - enum job_sts status; - void *user_data; -}; - -struct sha512_args_x4 { - uint64_t digest[8][4]; - uint8_t *data_ptr[4]; -}; - -struct sha512_lane_data { - struct job_sha512 *job_in_lane; -}; - -struct sha512_mb_mgr { - struct sha512_args_x4 args; - - uint64_t lens[4]; - - /* each byte is index (0...7) of unused lanes */ - uint64_t unused_lanes; - /* byte 4 is set to FF as a flag */ - struct sha512_lane_data ldata[4]; -}; - -#define SHA512_MB_MGR_NUM_LANES_AVX2 4 - -void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state); -struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state, - struct job_sha512 *job); -struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state); -struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state); - -#endif diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S deleted file mode 100644 index cf2636d4c9ba..000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Header file for multi buffer SHA256 algorithm data structure - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -# Macros for defining data structures - -# Usage example - -#START_FIELDS # JOB_AES -### name size align -#FIELD _plaintext, 8, 8 # pointer to plaintext -#FIELD _ciphertext, 8, 8 # pointer to ciphertext -#FIELD _IV, 16, 8 # IV -#FIELD _keys, 8, 8 # pointer to keys -#FIELD _len, 4, 4 # length in bytes -#FIELD _status, 4, 4 # status enumeration -#FIELD _user_data, 8, 8 # pointer to user data -#UNION _union, size1, align1, \ -# size2, align2, \ -# size3, align3, \ -# ... -#END_FIELDS -#%assign _JOB_AES_size _FIELD_OFFSET -#%assign _JOB_AES_align _STRUCT_ALIGN - -######################################################################### - -# Alternate "struc-like" syntax: -# STRUCT job_aes2 -# RES_Q .plaintext, 1 -# RES_Q .ciphertext, 1 -# RES_DQ .IV, 1 -# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN -# RES_U .union, size1, align1, \ -# size2, align2, \ -# ... -# ENDSTRUCT -# # Following only needed if nesting -# %assign job_aes2_size _FIELD_OFFSET -# %assign job_aes2_align _STRUCT_ALIGN -# -# RES_* macros take a name, a count and an optional alignment. -# The count in in terms of the base size of the macro, and the -# default alignment is the base size. -# The macros are: -# Macro Base size -# RES_B 1 -# RES_W 2 -# RES_D 4 -# RES_Q 8 -# RES_DQ 16 -# RES_Y 32 -# RES_Z 64 -# -# RES_U defines a union. It's arguments are a name and two or more -# pairs of "size, alignment" -# -# The two assigns are only needed if this structure is being nested -# within another. Even if the assigns are not done, one can still use -# STRUCT_NAME_size as the size of the structure. -# -# Note that for nesting, you still need to assign to STRUCT_NAME_size. -# -# The differences between this and using "struc" directly are that each -# type is implicitly aligned to its natural length (although this can be -# over-ridden with an explicit third parameter), and that the structure -# is padded at the end to its overall alignment. 
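For readers unfamiliar with this style of assembler metaprogramming: the START_FIELDS/FIELD/END_FIELDS macros defined just below build structure layouts at assembly time by rounding each field's offset up to its alignment, binding that offset to the field's name, and tracking the largest alignment seen so END_FIELDS can pad the total size to it. The following C sketch is illustrative only and mirrors that arithmetic; the struct and helper names here are invented for the example and do not exist in the kernel.

/*
 * Illustrative sketch of the offset/alignment bookkeeping done by the
 * START_FIELDS/FIELD/END_FIELDS macros below.  Names are made up.
 */
#include <stdio.h>

struct field_layout {
	unsigned long field_offset;	/* running offset (_FIELD_OFFSET) */
	unsigned long struct_align;	/* largest alignment seen (_STRUCT_ALIGN) */
};

/* Equivalent of "FIELD name, size, align": round up, place, advance. */
static unsigned long layout_field(struct field_layout *l,
				  unsigned long size, unsigned long align)
{
	unsigned long off;

	l->field_offset = (l->field_offset + align - 1) & ~(align - 1);
	off = l->field_offset;			/* what "\name" gets assigned */
	l->field_offset += size;
	if (align > l->struct_align)
		l->struct_align = align;
	return off;
}

int main(void)
{
	struct field_layout l = { 0, 0 };	/* START_FIELDS */

	/* LANE_DATA from the structure definitions further down */
	unsigned long job_in_lane = layout_field(&l, 8, 8);

	/* END_FIELDS: pad the total size to the overall alignment */
	unsigned long size =
		(l.field_offset + l.struct_align - 1) & ~(l.struct_align - 1);

	printf("_job_in_lane=%lu size=%lu align=%lu\n",
	       job_in_lane, size, l.struct_align);
	return 0;
}

Running this for the single 8-byte _job_in_lane field reproduces the _LANE_DATA_size and _LANE_DATA_align values computed by the real macros further down in this file.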
-# - -######################################################################### - -#ifndef _DATASTRUCT_ASM_ -#define _DATASTRUCT_ASM_ - -#define PTR_SZ 8 -#define SHA512_DIGEST_WORD_SIZE 8 -#define SHA512_MB_MGR_NUM_LANES_AVX2 4 -#define NUM_SHA512_DIGEST_WORDS 8 -#define SZ4 4*SHA512_DIGEST_WORD_SIZE -#define ROUNDS 80*SZ4 -#define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8) - -# START_FIELDS -.macro START_FIELDS - _FIELD_OFFSET = 0 - _STRUCT_ALIGN = 0 -.endm - -# FIELD name size align -.macro FIELD name size align - _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1)) - \name = _FIELD_OFFSET - _FIELD_OFFSET = _FIELD_OFFSET + (\size) -.if (\align > _STRUCT_ALIGN) - _STRUCT_ALIGN = \align -.endif -.endm - -# END_FIELDS -.macro END_FIELDS - _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1)) -.endm - -.macro STRUCT p1 -START_FIELDS -.struc \p1 -.endm - -.macro ENDSTRUCT - tmp = _FIELD_OFFSET - END_FIELDS - tmp = (_FIELD_OFFSET - ##tmp) -.if (tmp > 0) - .lcomm tmp -.endm - -## RES_int name size align -.macro RES_int p1 p2 p3 - name = \p1 - size = \p2 - align = .\p3 - - _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1)) -.align align -.lcomm name size - _FIELD_OFFSET = _FIELD_OFFSET + (size) -.if (align > _STRUCT_ALIGN) - _STRUCT_ALIGN = align -.endif -.endm - -# macro RES_B name, size [, align] -.macro RES_B _name, _size, _align=1 -RES_int _name _size _align -.endm - -# macro RES_W name, size [, align] -.macro RES_W _name, _size, _align=2 -RES_int _name 2*(_size) _align -.endm - -# macro RES_D name, size [, align] -.macro RES_D _name, _size, _align=4 -RES_int _name 4*(_size) _align -.endm - -# macro RES_Q name, size [, align] -.macro RES_Q _name, _size, _align=8 -RES_int _name 8*(_size) _align -.endm - -# macro RES_DQ name, size [, align] -.macro RES_DQ _name, _size, _align=16 -RES_int _name 16*(_size) _align -.endm - -# macro RES_Y name, size [, align] -.macro RES_Y _name, _size, _align=32 -RES_int _name 32*(_size) _align -.endm - -# macro RES_Z name, size [, align] -.macro RES_Z _name, _size, _align=64 -RES_int _name 64*(_size) _align -.endm - -#endif - -################################################################### -### Define SHA512 Out Of Order Data Structures -################################################################### - -START_FIELDS # LANE_DATA -### name size align -FIELD _job_in_lane, 8, 8 # pointer to job object -END_FIELDS - - _LANE_DATA_size = _FIELD_OFFSET - _LANE_DATA_align = _STRUCT_ALIGN - -#################################################################### - -START_FIELDS # SHA512_ARGS_X4 -### name size align -FIELD _digest, 8*8*4, 4 # transposed digest -FIELD _data_ptr, 8*4, 8 # array of pointers to data -END_FIELDS - - _SHA512_ARGS_X4_size = _FIELD_OFFSET - _SHA512_ARGS_X4_align = _STRUCT_ALIGN - -##################################################################### - -START_FIELDS # MB_MGR -### name size align -FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align -FIELD _lens, 8*4, 8 -FIELD _unused_lanes, 8, 8 -FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align -END_FIELDS - - _MB_MGR_size = _FIELD_OFFSET - _MB_MGR_align = _STRUCT_ALIGN - -_args_digest = _args + _digest -_args_data_ptr = _args + _data_ptr - -####################################################################### - -####################################################################### -#### Define constants -####################################################################### - -#define STS_UNKNOWN 0 -#define 
STS_BEING_PROCESSED 1 -#define STS_COMPLETED 2 - -####################################################################### -#### Define JOB_SHA512 structure -####################################################################### - -START_FIELDS # JOB_SHA512 -### name size align -FIELD _buffer, 8, 8 # pointer to buffer -FIELD _len, 8, 8 # length in bytes -FIELD _result_digest, 8*8, 32 # Digest (output) -FIELD _status, 4, 4 -FIELD _user_data, 8, 8 -END_FIELDS - - _JOB_SHA512_size = _FIELD_OFFSET - _JOB_SHA512_align = _STRUCT_ALIGN diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S deleted file mode 100644 index 7c629caebc05..000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Flush routine for SHA512 multibuffer - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include <linux/linkage.h> -#include <asm/frame.h> -#include "sha512_mb_mgr_datastruct.S" - -.extern sha512_x4_avx2 - -# LINUX register definitions -#define arg1 %rdi -#define arg2 %rsi - -# idx needs to be other than arg1, arg2, rbx, r12 -#define idx %rdx - -# Common definitions -#define state arg1 -#define job arg2 -#define len2 arg2 - -#define unused_lanes %rbx -#define lane_data %rbx -#define tmp2 %rbx - -#define job_rax %rax -#define tmp1 %rax -#define size_offset %rax -#define tmp %rax -#define start_offset %rax - -#define tmp3 arg1 - -#define extra_blocks arg2 -#define p arg2 - -#define tmp4 %r8 -#define lens0 %r8 - -#define lens1 %r9 -#define lens2 %r10 -#define lens3 %r11 - -.macro LABEL prefix n -\prefix\n\(): -.endm - -.macro JNE_SKIP i -jne skip_\i -.endm - -.altmacro -.macro SET_OFFSET _offset -offset = \_offset -.endm -.noaltmacro - -# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state) -# arg 1 : rcx : state -ENTRY(sha512_mb_mgr_flush_avx2) - FRAME_BEGIN - push %rbx - - # If bit (32+3) is set, then all lanes are empty - mov _unused_lanes(state), unused_lanes - bt $32+7, unused_lanes - jc return_null - - # find a lane with a non-null job - xor idx, idx - offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne one(%rip), idx - offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne two(%rip), idx - offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) - cmovne three(%rip), idx - - # copy idx to empty lanes -copy_lane_data: - offset = (_args + _data_ptr) - mov offset(state,idx,8), tmp - - I = 0 -.rep 4 - offset = (_ldata + I * _LANE_DATA_size + _job_in_lane) - cmpq $0, offset(state) -.altmacro - JNE_SKIP %I - offset = (_args + _data_ptr + 8*I) - mov tmp, offset(state) - offset = (_lens + 8*I +4) - movl $0xFFFFFFFF, offset(state) -LABEL skip_ %I - I = (I+1) -.noaltmacro -.endr - - # Find min length - mov _lens + 0*8(state),lens0 - mov lens0,idx - mov _lens + 1*8(state),lens1 - cmp idx,lens1 - cmovb lens1,idx - mov _lens + 2*8(state),lens2 - cmp idx,lens2 - cmovb lens2,idx - mov _lens + 3*8(state),lens3 - cmp idx,lens3 - cmovb lens3,idx - mov idx,len2 - and $0xF,idx - and $~0xFF,len2 - jz len_is_0 - - sub len2, lens0 - sub len2, lens1 - sub len2, lens2 - sub len2, lens3 - shr $32,len2 - mov lens0, _lens + 0*8(state) - mov lens1, _lens + 1*8(state) - mov lens2, _lens + 2*8(state) - mov lens3, _lens + 3*8(state) - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha512_x4_avx2 - # state and idx are intact - -len_is_0: - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $8, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens+4(state, idx, 8) - - vmovq _args_digest+0*32(state, idx, 8), %xmm0 - vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0 - vmovq _args_digest+2*32(state, idx, 8), %xmm1 - vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1 - vmovq _args_digest+4*32(state, idx, 8), %xmm2 - vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2 - vmovq _args_digest+6*32(state, idx, 8), %xmm3 - vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3 - - vmovdqu %xmm0, _result_digest(job_rax) - vmovdqu %xmm1, _result_digest+1*16(job_rax) - vmovdqu %xmm2, 
_result_digest+2*16(job_rax) - vmovdqu %xmm3, _result_digest+3*16(job_rax) - -return: - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return -ENDPROC(sha512_mb_mgr_flush_avx2) -.align 16 - -ENTRY(sha512_mb_mgr_get_comp_job_avx2) - push %rbx - - mov _unused_lanes(state), unused_lanes - bt $(32+7), unused_lanes - jc .return_null - - # Find min length - mov _lens(state),lens0 - mov lens0,idx - mov _lens+1*8(state),lens1 - cmp idx,lens1 - cmovb lens1,idx - mov _lens+2*8(state),lens2 - cmp idx,lens2 - cmovb lens2,idx - mov _lens+3*8(state),lens3 - cmp idx,lens3 - cmovb lens3,idx - test $~0xF,idx - jnz .return_null - and $0xF,idx - - #process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), lane_data - - mov _job_in_lane(lane_data), job_rax - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - mov _unused_lanes(state), unused_lanes - shl $8, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF, _lens+4(state, idx, 8) - - vmovq _args_digest(state, idx, 8), %xmm0 - vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0 - vmovq _args_digest+2*32(state, idx, 8), %xmm1 - vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1 - vmovq _args_digest+4*32(state, idx, 8), %xmm2 - vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2 - vmovq _args_digest+6*32(state, idx, 8), %xmm3 - vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3 - - vmovdqu %xmm0, _result_digest+0*16(job_rax) - vmovdqu %xmm1, _result_digest+1*16(job_rax) - vmovdqu %xmm2, _result_digest+2*16(job_rax) - vmovdqu %xmm3, _result_digest+3*16(job_rax) - - pop %rbx - - ret - -.return_null: - xor job_rax, job_rax - pop %rbx - ret -ENDPROC(sha512_mb_mgr_get_comp_job_avx2) - -.section .rodata.cst8.one, "aM", @progbits, 8 -.align 8 -one: -.quad 1 - -.section .rodata.cst8.two, "aM", @progbits, 8 -.align 8 -two: -.quad 2 - -.section .rodata.cst8.three, "aM", @progbits, 8 -.align 8 -three: -.quad 3 diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c deleted file mode 100644 index d08805032f01..000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Initialization code for multi buffer SHA256 algorithm for AVX2 - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "sha512_mb_mgr.h" - -void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state) -{ - unsigned int j; - - /* initially all lanes are unused */ - state->lens[0] = 0xFFFFFFFF00000000; - state->lens[1] = 0xFFFFFFFF00000001; - state->lens[2] = 0xFFFFFFFF00000002; - state->lens[3] = 0xFFFFFFFF00000003; - - state->unused_lanes = 0xFF03020100; - for (j = 0; j < 4; j++) - state->ldata[j].job_in_lane = NULL; -} diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S deleted file mode 100644 index 4ba709ba78e5..000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Buffer submit code for multi buffer SHA512 algorithm - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include <linux/linkage.h> -#include <asm/frame.h> -#include "sha512_mb_mgr_datastruct.S" - -.extern sha512_x4_avx2 - -#define arg1 %rdi -#define arg2 %rsi - -#define idx %rdx -#define last_len %rdx - -#define size_offset %rcx -#define tmp2 %rcx - -# Common definitions -#define state arg1 -#define job arg2 -#define len2 arg2 -#define p2 arg2 - -#define p %r11 -#define start_offset %r11 - -#define unused_lanes %rbx - -#define job_rax %rax -#define len %rax - -#define lane %r12 -#define tmp3 %r12 -#define lens3 %r12 - -#define extra_blocks %r8 -#define lens0 %r8 - -#define tmp %r9 -#define lens1 %r9 - -#define lane_data %r10 -#define lens2 %r10 - -#define DWORD_len %eax - -# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job) -# arg 1 : rcx : state -# arg 2 : rdx : job -ENTRY(sha512_mb_mgr_submit_avx2) - FRAME_BEGIN - push %rbx - push %r12 - - mov _unused_lanes(state), unused_lanes - movzb %bl,lane - shr $8, unused_lanes - imul $_LANE_DATA_size, lane,lane_data - movl $STS_BEING_PROCESSED, _status(job) - lea _ldata(state, lane_data), lane_data - mov unused_lanes, _unused_lanes(state) - movl _len(job), DWORD_len - - mov job, _job_in_lane(lane_data) - movl DWORD_len,_lens+4(state , lane, 8) - - # Load digest words from result_digest - vmovdqu _result_digest+0*16(job), %xmm0 - vmovdqu _result_digest+1*16(job), %xmm1 - vmovdqu _result_digest+2*16(job), %xmm2 - vmovdqu _result_digest+3*16(job), %xmm3 - - vmovq %xmm0, _args_digest(state, lane, 8) - vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8) - vmovq %xmm1, _args_digest+2*32(state , lane, 8) - vpextrq $1, %xmm1, _args_digest+3*32(state , lane, 8) - vmovq %xmm2, _args_digest+4*32(state , lane, 8) - vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8) - vmovq %xmm3, _args_digest+6*32(state , lane, 8) - vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8) - - mov _buffer(job), p - mov p, _args_data_ptr(state, lane, 8) - - cmp $0xFF, unused_lanes - jne return_null - -start_loop: - - # Find min length - mov _lens+0*8(state),lens0 - mov lens0,idx - mov _lens+1*8(state),lens1 - cmp idx,lens1 - cmovb lens1, idx - mov _lens+2*8(state),lens2 - cmp idx,lens2 - cmovb lens2,idx - mov _lens+3*8(state),lens3 - cmp idx,lens3 - cmovb lens3,idx - mov idx,len2 - and $0xF,idx - and $~0xFF,len2 - jz len_is_0 - - sub len2,lens0 - sub len2,lens1 - sub len2,lens2 - sub len2,lens3 - shr $32,len2 - mov lens0, _lens + 0*8(state) - mov lens1, _lens + 1*8(state) - mov lens2, _lens + 2*8(state) - mov lens3, _lens + 3*8(state) - - # "state" and "args" are the same address, arg1 - # len is arg2 - call sha512_x4_avx2 - # state and idx are intact - -len_is_0: - - # process completed job "idx" - imul $_LANE_DATA_size, idx, lane_data - lea _ldata(state, lane_data), 
lane_data - - mov _job_in_lane(lane_data), job_rax - mov _unused_lanes(state), unused_lanes - movq $0, _job_in_lane(lane_data) - movl $STS_COMPLETED, _status(job_rax) - shl $8, unused_lanes - or idx, unused_lanes - mov unused_lanes, _unused_lanes(state) - - movl $0xFFFFFFFF,_lens+4(state,idx,8) - vmovq _args_digest+0*32(state , idx, 8), %xmm0 - vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0 - vmovq _args_digest+2*32(state , idx, 8), %xmm1 - vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1 - vmovq _args_digest+4*32(state , idx, 8), %xmm2 - vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2 - vmovq _args_digest+6*32(state , idx, 8), %xmm3 - vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3 - - vmovdqu %xmm0, _result_digest + 0*16(job_rax) - vmovdqu %xmm1, _result_digest + 1*16(job_rax) - vmovdqu %xmm2, _result_digest + 2*16(job_rax) - vmovdqu %xmm3, _result_digest + 3*16(job_rax) - -return: - pop %r12 - pop %rbx - FRAME_END - ret - -return_null: - xor job_rax, job_rax - jmp return -ENDPROC(sha512_mb_mgr_submit_avx2) - -/* UNUSED? -.section .rodata.cst16, "aM", @progbits, 16 -.align 16 -H0: .int 0x6a09e667 -H1: .int 0xbb67ae85 -H2: .int 0x3c6ef372 -H3: .int 0xa54ff53a -H4: .int 0x510e527f -H5: .int 0x9b05688c -H6: .int 0x1f83d9ab -H7: .int 0x5be0cd19 -*/ diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S deleted file mode 100644 index e22e907643a6..000000000000 --- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S +++ /dev/null @@ -1,531 +0,0 @@ -/* - * Multi-buffer SHA512 algorithm hash compute routine - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * Megha Dey <megha.dey@linux.intel.com> - * - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -# code to compute quad SHA512 using AVX2 -# use YMMs to tackle the larger digest size -# outer calling routine takes care of save and restore of XMM registers -# Logic designed/laid out by JDG - -# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15 -# Stack must be aligned to 32 bytes before call -# Linux clobbers: rax rbx rcx rsi r8 r9 r10 r11 r12 -# Linux preserves: rcx rdx rdi rbp r13 r14 r15 -# clobbers ymm0-15 - -#include <linux/linkage.h> -#include "sha512_mb_mgr_datastruct.S" - -arg1 = %rdi -arg2 = %rsi - -# Common definitions -STATE = arg1 -INP_SIZE = arg2 - -IDX = %rax -ROUND = %rbx -TBL = %r8 - -inp0 = %r9 -inp1 = %r10 -inp2 = %r11 -inp3 = %r12 - -a = %ymm0 -b = %ymm1 -c = %ymm2 -d = %ymm3 -e = %ymm4 -f = %ymm5 -g = %ymm6 -h = %ymm7 - -a0 = %ymm8 -a1 = %ymm9 -a2 = %ymm10 - -TT0 = %ymm14 -TT1 = %ymm13 -TT2 = %ymm12 -TT3 = %ymm11 -TT4 = %ymm10 -TT5 = %ymm9 - -T1 = %ymm14 -TMP = %ymm15 - -# Define stack usage -STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24 - -#define VMOVPD vmovupd -_digest = SZ4*16 - -# transpose r0, r1, r2, r3, t0, t1 -# "transpose" data in {r0..r3} using temps {t0..t3} -# Input looks like: {r0 r1 r2 r3} -# r0 = {a7 a6 a5 a4 a3 a2 a1 a0} -# r1 = {b7 b6 b5 b4 b3 b2 b1 b0} -# r2 = {c7 c6 c5 c4 c3 c2 c1 c0} -# r3 = {d7 d6 d5 d4 d3 d2 d1 d0} -# -# output looks like: {t0 r1 r0 r3} -# t0 = {d1 d0 c1 c0 b1 b0 a1 a0} -# r1 = {d3 d2 c3 c2 b3 b2 a3 a2} -# r0 = {d5 d4 c5 c4 b5 b4 a5 a4} -# r3 = {d7 d6 c7 c6 b7 b6 a7 a6} - -.macro TRANSPOSE r0 r1 r2 r3 t0 t1 - vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0} - vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2} - vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0} - vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2} - - vperm2f128 $0x20, \r2, \r0, \r1 # h6...a6 - vperm2f128 $0x31, \r2, \r0, \r3 # h2...a2 - vperm2f128 $0x31, \t1, \t0, \r0 # h5...a5 - vperm2f128 $0x20, \t1, \t0, \t0 # h1...a1 -.endm - -.macro ROTATE_ARGS -TMP_ = h -h = g -g = f -f = e -e = d -d = c -c = b -b = a -a = TMP_ -.endm - -# PRORQ reg, imm, tmp -# packed-rotate-right-double -# does a rotate by doing two shifts and an or -.macro _PRORQ reg imm tmp - vpsllq $(64-\imm),\reg,\tmp - vpsrlq $\imm,\reg, \reg - vpor \tmp,\reg, \reg -.endm - -# non-destructive -# PRORQ_nd reg, imm, tmp, src -.macro _PRORQ_nd reg imm tmp src - vpsllq $(64-\imm), \src, \tmp - vpsrlq $\imm, \src, \reg - vpor \tmp, \reg, \reg -.endm - -# PRORQ dst/src, amt -.macro PRORQ reg imm - _PRORQ \reg, \imm, TMP -.endm - -# PRORQ_nd dst, src, amt -.macro PRORQ_nd reg tmp imm - _PRORQ_nd \reg, \imm, TMP, \tmp -.endm - -#; arguments passed implicitly in preprocessor symbols i, a...h -.macro ROUND_00_15 _T1 i - PRORQ_nd a0, e, (18-14) # sig1: a0 = (e >> 4) - - vpxor g, f, a2 # ch: a2 = f^g - vpand e,a2, a2 # ch: a2 = (f^g)&e - vpxor g, a2, a2 # a2 = ch - - PRORQ_nd a1,e,41 # sig1: a1 = (e >> 25) - - offset = SZ4*(\i & 0xf) - vmovdqu \_T1,offset(%rsp) - vpaddq (TBL,ROUND,1), \_T1, \_T1 # 
T1 = W + K - vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5) - PRORQ a0, 14 # sig1: a0 = (e >> 6) ^ (e >> 11) - vpaddq a2, h, h # h = h + ch - PRORQ_nd a2,a,6 # sig0: a2 = (a >> 11) - vpaddq \_T1,h, h # h = h + ch + W + K - vpxor a1, a0, a0 # a0 = sigma1 - vmovdqu a,\_T1 - PRORQ_nd a1,a,39 # sig0: a1 = (a >> 22) - vpxor c, \_T1, \_T1 # maj: T1 = a^c - add $SZ4, ROUND # ROUND++ - vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b - vpaddq a0, h, h - vpaddq h, d, d - vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11) - PRORQ a2,28 # sig0: a2 = (a >> 2) ^ (a >> 13) - vpxor a1, a2, a2 # a2 = sig0 - vpand c, a, a1 # maj: a1 = a&c - vpor \_T1, a1, a1 # a1 = maj - vpaddq a1, h, h # h = h + ch + W + K + maj - vpaddq a2, h, h # h = h + ch + W + K + maj + sigma0 - ROTATE_ARGS -.endm - - -#; arguments passed implicitly in preprocessor symbols i, a...h -.macro ROUND_16_XX _T1 i - vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1 - vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1 - vmovdqu \_T1, a0 - PRORQ \_T1,7 - vmovdqu a1, a2 - PRORQ a1,42 - vpxor a0, \_T1, \_T1 - PRORQ \_T1, 1 - vpxor a2, a1, a1 - PRORQ a1, 19 - vpsrlq $7, a0, a0 - vpxor a0, \_T1, \_T1 - vpsrlq $6, a2, a2 - vpxor a2, a1, a1 - vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1 - vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1 - vpaddq a1, \_T1, \_T1 - - ROUND_00_15 \_T1,\i -.endm - - -# void sha512_x4_avx2(void *STATE, const int INP_SIZE) -# arg 1 : STATE : pointer to input data -# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1) -ENTRY(sha512_x4_avx2) - # general registers preserved in outer calling routine - # outer calling routine saves all the XMM registers - # save callee-saved clobbered registers to comply with C function ABI - push %r12 - push %r13 - push %r14 - push %r15 - - sub $STACK_SPACE1, %rsp - - # Load the pre-transposed incoming digest. 
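A note on the "pre-transposed" digest mentioned in the comment above: struct sha512_args_x4 (removed earlier in this series) keeps digest[8][4], one row per SHA-512 digest word and one column per lane, so each vmovdqu below pulls the same digest word for all four jobs into one ymm register. The submit and flush paths scatter and gather a single job's digest by walking its lane column with a 32-byte row stride. The C sketch below shows that mapping under those assumptions; the struct and helper names are invented for the example.

/*
 * Illustrative only: how one job's digest maps into the transposed
 * digest[word][lane] array that sha512_x4_avx2 loads row by row.
 */
#include <stdint.h>

#define SHA512_MB_NUM_LANES	4
#define SHA512_DIGEST_WORDS	8

struct sha512_args_x4_example {
	uint64_t digest[SHA512_DIGEST_WORDS][SHA512_MB_NUM_LANES];
	uint8_t *data_ptr[SHA512_MB_NUM_LANES];
};

/* Scatter one job's digest into its lane column (what submit does). */
static void scatter_digest(struct sha512_args_x4_example *args,
			   const uint64_t job_digest[SHA512_DIGEST_WORDS],
			   unsigned int lane)
{
	unsigned int w;

	for (w = 0; w < SHA512_DIGEST_WORDS; w++)
		args->digest[w][lane] = job_digest[w];
}

/* Gather a finished lane back into its job (what flush/get_comp_job do). */
static void gather_digest(const struct sha512_args_x4_example *args,
			  uint64_t job_digest[SHA512_DIGEST_WORDS],
			  unsigned int lane)
{
	unsigned int w;

	for (w = 0; w < SHA512_DIGEST_WORDS; w++)
		job_digest[w] = args->digest[w][lane];
}

The vmovq/vpinsrq and vmovq/vpextrq sequences in the flush and submit routines above are hand-unrolled equivalents of gather_digest() and scatter_digest() for a single lane.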
- vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE),a - vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE),b - vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE),c - vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE),d - vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE),e - vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE),f - vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE),g - vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE),h - - lea K512_4(%rip),TBL - - # load the address of each of the 4 message lanes - # getting ready to transpose input onto stack - mov _data_ptr+0*PTR_SZ(STATE),inp0 - mov _data_ptr+1*PTR_SZ(STATE),inp1 - mov _data_ptr+2*PTR_SZ(STATE),inp2 - mov _data_ptr+3*PTR_SZ(STATE),inp3 - - xor IDX, IDX -lloop: - xor ROUND, ROUND - - # save old digest - vmovdqu a, _digest(%rsp) - vmovdqu b, _digest+1*SZ4(%rsp) - vmovdqu c, _digest+2*SZ4(%rsp) - vmovdqu d, _digest+3*SZ4(%rsp) - vmovdqu e, _digest+4*SZ4(%rsp) - vmovdqu f, _digest+5*SZ4(%rsp) - vmovdqu g, _digest+6*SZ4(%rsp) - vmovdqu h, _digest+7*SZ4(%rsp) - i = 0 -.rep 4 - vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP - VMOVPD i*32(inp0, IDX), TT2 - VMOVPD i*32(inp1, IDX), TT1 - VMOVPD i*32(inp2, IDX), TT4 - VMOVPD i*32(inp3, IDX), TT3 - TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5 - vpshufb TMP, TT0, TT0 - vpshufb TMP, TT1, TT1 - vpshufb TMP, TT2, TT2 - vpshufb TMP, TT3, TT3 - ROUND_00_15 TT0,(i*4+0) - ROUND_00_15 TT1,(i*4+1) - ROUND_00_15 TT2,(i*4+2) - ROUND_00_15 TT3,(i*4+3) - i = (i+1) -.endr - add $128, IDX - - i = (i*4) - - jmp Lrounds_16_xx -.align 16 -Lrounds_16_xx: -.rep 16 - ROUND_16_XX T1, i - i = (i+1) -.endr - cmp $0xa00,ROUND - jb Lrounds_16_xx - - # add old digest - vpaddq _digest(%rsp), a, a - vpaddq _digest+1*SZ4(%rsp), b, b - vpaddq _digest+2*SZ4(%rsp), c, c - vpaddq _digest+3*SZ4(%rsp), d, d - vpaddq _digest+4*SZ4(%rsp), e, e - vpaddq _digest+5*SZ4(%rsp), f, f - vpaddq _digest+6*SZ4(%rsp), g, g - vpaddq _digest+7*SZ4(%rsp), h, h - - sub $1, INP_SIZE # unit is blocks - jne lloop - - # write back to memory (state object) the transposed digest - vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE) - vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE) - - # update input data pointers - add IDX, inp0 - mov inp0, _data_ptr+0*PTR_SZ(STATE) - add IDX, inp1 - mov inp1, _data_ptr+1*PTR_SZ(STATE) - add IDX, inp2 - mov inp2, _data_ptr+2*PTR_SZ(STATE) - add IDX, inp3 - mov inp3, _data_ptr+3*PTR_SZ(STATE) - - #;;;;;;;;;;;;;;; - #; Postamble - add $STACK_SPACE1, %rsp - # restore callee-saved clobbered registers - - pop %r15 - pop %r14 - pop %r13 - pop %r12 - - # outer calling routine restores XMM and other GP registers - ret -ENDPROC(sha512_x4_avx2) - -.section .rodata.K512_4, "a", @progbits -.align 64 -K512_4: - .octa 0x428a2f98d728ae22428a2f98d728ae22,\ - 0x428a2f98d728ae22428a2f98d728ae22 - .octa 0x7137449123ef65cd7137449123ef65cd,\ - 0x7137449123ef65cd7137449123ef65cd - .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\ - 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f - .octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\ - 0xe9b5dba58189dbbce9b5dba58189dbbc - .octa 0x3956c25bf348b5383956c25bf348b538,\ - 0x3956c25bf348b5383956c25bf348b538 - .octa 0x59f111f1b605d01959f111f1b605d019,\ - 0x59f111f1b605d01959f111f1b605d019 - .octa 0x923f82a4af194f9b923f82a4af194f9b,\ - 0x923f82a4af194f9b923f82a4af194f9b - .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\ - 0xab1c5ed5da6d8118ab1c5ed5da6d8118 - .octa 
0xd807aa98a3030242d807aa98a3030242,\ - 0xd807aa98a3030242d807aa98a3030242 - .octa 0x12835b0145706fbe12835b0145706fbe,\ - 0x12835b0145706fbe12835b0145706fbe - .octa 0x243185be4ee4b28c243185be4ee4b28c,\ - 0x243185be4ee4b28c243185be4ee4b28c - .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\ - 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2 - .octa 0x72be5d74f27b896f72be5d74f27b896f,\ - 0x72be5d74f27b896f72be5d74f27b896f - .octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\ - 0x80deb1fe3b1696b180deb1fe3b1696b1 - .octa 0x9bdc06a725c712359bdc06a725c71235,\ - 0x9bdc06a725c712359bdc06a725c71235 - .octa 0xc19bf174cf692694c19bf174cf692694,\ - 0xc19bf174cf692694c19bf174cf692694 - .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\ - 0xe49b69c19ef14ad2e49b69c19ef14ad2 - .octa 0xefbe4786384f25e3efbe4786384f25e3,\ - 0xefbe4786384f25e3efbe4786384f25e3 - .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\ - 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5 - .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\ - 0x240ca1cc77ac9c65240ca1cc77ac9c65 - .octa 0x2de92c6f592b02752de92c6f592b0275,\ - 0x2de92c6f592b02752de92c6f592b0275 - .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\ - 0x4a7484aa6ea6e4834a7484aa6ea6e483 - .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\ - 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4 - .octa 0x76f988da831153b576f988da831153b5,\ - 0x76f988da831153b576f988da831153b5 - .octa 0x983e5152ee66dfab983e5152ee66dfab,\ - 0x983e5152ee66dfab983e5152ee66dfab - .octa 0xa831c66d2db43210a831c66d2db43210,\ - 0xa831c66d2db43210a831c66d2db43210 - .octa 0xb00327c898fb213fb00327c898fb213f,\ - 0xb00327c898fb213fb00327c898fb213f - .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\ - 0xbf597fc7beef0ee4bf597fc7beef0ee4 - .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\ - 0xc6e00bf33da88fc2c6e00bf33da88fc2 - .octa 0xd5a79147930aa725d5a79147930aa725,\ - 0xd5a79147930aa725d5a79147930aa725 - .octa 0x06ca6351e003826f06ca6351e003826f,\ - 0x06ca6351e003826f06ca6351e003826f - .octa 0x142929670a0e6e70142929670a0e6e70,\ - 0x142929670a0e6e70142929670a0e6e70 - .octa 0x27b70a8546d22ffc27b70a8546d22ffc,\ - 0x27b70a8546d22ffc27b70a8546d22ffc - .octa 0x2e1b21385c26c9262e1b21385c26c926,\ - 0x2e1b21385c26c9262e1b21385c26c926 - .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\ - 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed - .octa 0x53380d139d95b3df53380d139d95b3df,\ - 0x53380d139d95b3df53380d139d95b3df - .octa 0x650a73548baf63de650a73548baf63de,\ - 0x650a73548baf63de650a73548baf63de - .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\ - 0x766a0abb3c77b2a8766a0abb3c77b2a8 - .octa 0x81c2c92e47edaee681c2c92e47edaee6,\ - 0x81c2c92e47edaee681c2c92e47edaee6 - .octa 0x92722c851482353b92722c851482353b,\ - 0x92722c851482353b92722c851482353b - .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\ - 0xa2bfe8a14cf10364a2bfe8a14cf10364 - .octa 0xa81a664bbc423001a81a664bbc423001,\ - 0xa81a664bbc423001a81a664bbc423001 - .octa 0xc24b8b70d0f89791c24b8b70d0f89791,\ - 0xc24b8b70d0f89791c24b8b70d0f89791 - .octa 0xc76c51a30654be30c76c51a30654be30,\ - 0xc76c51a30654be30c76c51a30654be30 - .octa 0xd192e819d6ef5218d192e819d6ef5218,\ - 0xd192e819d6ef5218d192e819d6ef5218 - .octa 0xd69906245565a910d69906245565a910,\ - 0xd69906245565a910d69906245565a910 - .octa 0xf40e35855771202af40e35855771202a,\ - 0xf40e35855771202af40e35855771202a - .octa 0x106aa07032bbd1b8106aa07032bbd1b8,\ - 0x106aa07032bbd1b8106aa07032bbd1b8 - .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\ - 0x19a4c116b8d2d0c819a4c116b8d2d0c8 - .octa 0x1e376c085141ab531e376c085141ab53,\ - 0x1e376c085141ab531e376c085141ab53 - .octa 0x2748774cdf8eeb992748774cdf8eeb99,\ - 0x2748774cdf8eeb992748774cdf8eeb99 - .octa 
0x34b0bcb5e19b48a834b0bcb5e19b48a8,\ - 0x34b0bcb5e19b48a834b0bcb5e19b48a8 - .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\ - 0x391c0cb3c5c95a63391c0cb3c5c95a63 - .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\ - 0x4ed8aa4ae3418acb4ed8aa4ae3418acb - .octa 0x5b9cca4f7763e3735b9cca4f7763e373,\ - 0x5b9cca4f7763e3735b9cca4f7763e373 - .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\ - 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3 - .octa 0x748f82ee5defb2fc748f82ee5defb2fc,\ - 0x748f82ee5defb2fc748f82ee5defb2fc - .octa 0x78a5636f43172f6078a5636f43172f60,\ - 0x78a5636f43172f6078a5636f43172f60 - .octa 0x84c87814a1f0ab7284c87814a1f0ab72,\ - 0x84c87814a1f0ab7284c87814a1f0ab72 - .octa 0x8cc702081a6439ec8cc702081a6439ec,\ - 0x8cc702081a6439ec8cc702081a6439ec - .octa 0x90befffa23631e2890befffa23631e28,\ - 0x90befffa23631e2890befffa23631e28 - .octa 0xa4506cebde82bde9a4506cebde82bde9,\ - 0xa4506cebde82bde9a4506cebde82bde9 - .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\ - 0xbef9a3f7b2c67915bef9a3f7b2c67915 - .octa 0xc67178f2e372532bc67178f2e372532b,\ - 0xc67178f2e372532bc67178f2e372532b - .octa 0xca273eceea26619cca273eceea26619c,\ - 0xca273eceea26619cca273eceea26619c - .octa 0xd186b8c721c0c207d186b8c721c0c207,\ - 0xd186b8c721c0c207d186b8c721c0c207 - .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\ - 0xeada7dd6cde0eb1eeada7dd6cde0eb1e - .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\ - 0xf57d4f7fee6ed178f57d4f7fee6ed178 - .octa 0x06f067aa72176fba06f067aa72176fba,\ - 0x06f067aa72176fba06f067aa72176fba - .octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\ - 0x0a637dc5a2c898a60a637dc5a2c898a6 - .octa 0x113f9804bef90dae113f9804bef90dae,\ - 0x113f9804bef90dae113f9804bef90dae - .octa 0x1b710b35131c471b1b710b35131c471b,\ - 0x1b710b35131c471b1b710b35131c471b - .octa 0x28db77f523047d8428db77f523047d84,\ - 0x28db77f523047d8428db77f523047d84 - .octa 0x32caab7b40c7249332caab7b40c72493,\ - 0x32caab7b40c7249332caab7b40c72493 - .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\ - 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc - .octa 0x431d67c49c100d4c431d67c49c100d4c,\ - 0x431d67c49c100d4c431d67c49c100d4c - .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\ - 0x4cc5d4becb3e42b64cc5d4becb3e42b6 - .octa 0x597f299cfc657e2a597f299cfc657e2a,\ - 0x597f299cfc657e2a597f299cfc657e2a - .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\ - 0x5fcb6fab3ad6faec5fcb6fab3ad6faec - .octa 0x6c44198c4a4758176c44198c4a475817,\ - 0x6c44198c4a4758176c44198c4a475817 - -.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32 -.align 32 -PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607 - .octa 0x18191a1b1c1d1e1f1011121314151617 diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 3f9d43f26f63..7eb878561910 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -39,7 +39,7 @@ void __init init_vdso_image(const struct vdso_image *image) struct linux_binprm; -static int vdso_fault(const struct vm_special_mapping *sm, +static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) { const struct vdso_image *image = vma->vm_mm->context.vdso_image; @@ -84,12 +84,11 @@ static int vdso_mremap(const struct vm_special_mapping *sm, return 0; } -static int vvar_fault(const struct vm_special_mapping *sm, +static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) { const struct vdso_image *image = vma->vm_mm->context.vdso_image; long sym_offset; - int ret = -EFAULT; if (!image) return VM_FAULT_SIGBUS; @@ -108,29 +107,24 @@ static int vvar_fault(const struct 
vm_special_mapping *sm, return VM_FAULT_SIGBUS; if (sym_offset == image->sym_vvar_page) { - ret = vm_insert_pfn(vma, vmf->address, - __pa_symbol(&__vvar_page) >> PAGE_SHIFT); + return vmf_insert_pfn(vma, vmf->address, + __pa_symbol(&__vvar_page) >> PAGE_SHIFT); } else if (sym_offset == image->sym_pvclock_page) { struct pvclock_vsyscall_time_info *pvti = pvclock_get_pvti_cpu0_va(); if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) { - ret = vm_insert_pfn_prot( - vma, - vmf->address, - __pa(pvti) >> PAGE_SHIFT, - pgprot_decrypted(vma->vm_page_prot)); + return vmf_insert_pfn_prot(vma, vmf->address, + __pa(pvti) >> PAGE_SHIFT, + pgprot_decrypted(vma->vm_page_prot)); } } else if (sym_offset == image->sym_hvclock_page) { struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page(); if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK)) - ret = vm_insert_pfn(vma, vmf->address, - vmalloc_to_pfn(tsc_pg)); + return vmf_insert_pfn(vma, vmf->address, + vmalloc_to_pfn(tsc_pg)); } - if (ret == 0 || ret == -EBUSY) - return VM_FAULT_NOPAGE; - return VM_FAULT_SIGBUS; } diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 82ed001e8909..85fd85d52ffd 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -100,20 +100,13 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size) */ if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) { - siginfo_t info; struct thread_struct *thread = ¤t->thread; thread->error_code = 6; /* user fault, no page, write */ thread->cr2 = ptr; thread->trap_nr = X86_TRAP_PF; - clear_siginfo(&info); - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = SEGV_MAPERR; - info.si_addr = (void __user *)ptr; - - force_sig_info(SIGSEGV, &info, current); + force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)ptr, current); return false; } else { return true; diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index fb97cf7c4137..fab4df16a3c4 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -12,38 +12,23 @@ #include <asm/user32.h> #include <asm/unistd.h> +#include <asm-generic/compat.h> + #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "i686\0\0" -typedef u32 compat_size_t; -typedef s32 compat_ssize_t; -typedef s32 compat_clock_t; -typedef s32 compat_pid_t; typedef u16 __compat_uid_t; typedef u16 __compat_gid_t; typedef u32 __compat_uid32_t; typedef u32 __compat_gid32_t; typedef u16 compat_mode_t; -typedef u32 compat_ino_t; typedef u16 compat_dev_t; -typedef s32 compat_off_t; -typedef s64 compat_loff_t; typedef u16 compat_nlink_t; typedef u16 compat_ipc_pid_t; -typedef s32 compat_daddr_t; typedef u32 compat_caddr_t; typedef __kernel_fsid_t compat_fsid_t; -typedef s32 compat_timer_t; -typedef s32 compat_key_t; - -typedef s32 compat_int_t; -typedef s32 compat_long_t; typedef s64 __attribute__((aligned(4))) compat_s64; -typedef u32 compat_uint_t; -typedef u32 compat_ulong_t; -typedef u32 compat_u32; typedef u64 __attribute__((aligned(4))) compat_u64; -typedef u32 compat_uptr_t; struct compat_stat { compat_dev_t st_dev; @@ -240,6 +225,6 @@ static inline bool in_compat_syscall(void) struct compat_siginfo; int __copy_siginfo_to_user32(struct compat_siginfo __user *to, - const siginfo_t *from, bool x32_ABI); + const kernel_siginfo_t *from, bool x32_ABI); #endif /* _ASM_X86_COMPAT_H */ diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h index 5ed826da5e07..7469d321f072 100644 --- a/arch/x86/include/asm/hugetlb.h +++ 
b/arch/x86/include/asm/hugetlb.h @@ -13,75 +13,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, return 0; } -/* - * If the arch doesn't supply something else, assume that hugepage - * size aligned regions are ok without further preparation. - */ -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - struct hstate *h = hstate_file(file); - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (addr & ~huge_page_mask(h)) - return -EINVAL; - return 0; -} - -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, - unsigned long ceiling) -{ - free_pgd_range(tlb, addr, end, floor, ceiling); -} - -static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - set_pte_at(mm, addr, ptep, pte); -} - -static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - return ptep_get_and_clear(mm, addr, ptep); -} - -static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep) -{ - ptep_clear_flush(vma, addr, ptep); -} - -static inline int huge_pte_none(pte_t pte) -{ - return pte_none(pte); -} - -static inline pte_t huge_pte_wrprotect(pte_t pte) -{ - return pte_wrprotect(pte); -} - -static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - ptep_set_wrprotect(mm, addr, ptep); -} - -static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, - pte_t pte, int dirty) -{ - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); -} - -static inline pte_t huge_ptep_get(pte_t *ptep) -{ - return *ptep; -} - static inline void arch_clear_hugepage_flags(struct page *page) { } diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h index 5f26962eff42..67ed72f31cc2 100644 --- a/arch/x86/include/asm/irq_remapping.h +++ b/arch/x86/include/asm/irq_remapping.h @@ -45,6 +45,8 @@ struct vcpu_data { #ifdef CONFIG_IRQ_REMAP +extern raw_spinlock_t irq_2_ir_lock; + extern bool irq_remapping_cap(enum irq_remap_cap cap); extern void set_irq_remapping_broken(void); extern int irq_remapping_prepare(void); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 09b2e3e2cf1b..55e51ff7e421 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -102,7 +102,15 @@ #define UNMAPPED_GVA (~(gpa_t)0) /* KVM Hugepage definitions for x86 */ -#define KVM_NR_PAGE_SIZES 3 +enum { + PT_PAGE_TABLE_LEVEL = 1, + PT_DIRECTORY_LEVEL = 2, + PT_PDPE_LEVEL = 3, + /* set max level to the biggest one */ + PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL, +}; +#define KVM_NR_PAGE_SIZES (PT_MAX_HUGEPAGE_LEVEL - \ + PT_PAGE_TABLE_LEVEL + 1) #define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9) #define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x)) #define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x)) @@ -177,6 +185,7 @@ enum { #define DR6_BD (1 << 13) #define DR6_BS (1 << 14) +#define DR6_BT (1 << 15) #define DR6_RTM (1 << 16) #define DR6_FIXED_1 0xfffe0ff0 #define DR6_INIT 0xffff0ff0 @@ -247,7 +256,7 @@ struct kvm_mmu_memory_cache { * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp. 
*/ union kvm_mmu_page_role { - unsigned word; + u32 word; struct { unsigned level:4; unsigned cr4_pae:1; @@ -273,6 +282,34 @@ union kvm_mmu_page_role { }; }; +union kvm_mmu_extended_role { +/* + * This structure complements kvm_mmu_page_role caching everything needed for + * MMU configuration. If nothing in both these structures changed, MMU + * re-configuration can be skipped. @valid bit is set on first usage so we don't + * treat all-zero structure as valid data. + */ + u32 word; + struct { + unsigned int valid:1; + unsigned int execonly:1; + unsigned int cr0_pg:1; + unsigned int cr4_pse:1; + unsigned int cr4_pke:1; + unsigned int cr4_smap:1; + unsigned int cr4_smep:1; + unsigned int cr4_la57:1; + }; +}; + +union kvm_mmu_role { + u64 as_u64; + struct { + union kvm_mmu_page_role base; + union kvm_mmu_extended_role ext; + }; +}; + struct kvm_rmap_head { unsigned long val; }; @@ -280,18 +317,18 @@ struct kvm_rmap_head { struct kvm_mmu_page { struct list_head link; struct hlist_node hash_link; + bool unsync; /* * The following two entries are used to key the shadow page in the * hash table. */ - gfn_t gfn; union kvm_mmu_page_role role; + gfn_t gfn; u64 *spt; /* hold the gfn of each spte inside spt */ gfn_t *gfns; - bool unsync; int root_count; /* Currently serving as active root */ unsigned int unsync_children; struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ @@ -360,7 +397,7 @@ struct kvm_mmu { void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte); hpa_t root_hpa; - union kvm_mmu_page_role base_role; + union kvm_mmu_role mmu_role; u8 root_level; u8 shadow_root_level; u8 ept_ad; @@ -490,7 +527,7 @@ struct kvm_vcpu_hv { struct kvm_hyperv_exit exit; struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT]; DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); - cpumask_t tlb_lush; + cpumask_t tlb_flush; }; struct kvm_vcpu_arch { @@ -534,7 +571,13 @@ struct kvm_vcpu_arch { * the paging mode of the l1 guest. This context is always used to * handle faults. 
*/ - struct kvm_mmu mmu; + struct kvm_mmu *mmu; + + /* Non-nested MMU for L1 */ + struct kvm_mmu root_mmu; + + /* L1 MMU when running nested */ + struct kvm_mmu guest_mmu; /* * Paging state of an L2 guest (used for nested npt) @@ -585,6 +628,8 @@ struct kvm_vcpu_arch { bool has_error_code; u8 nr; u32 error_code; + unsigned long payload; + bool has_payload; u8 nested_apf; } exception; @@ -781,6 +826,9 @@ struct kvm_hv { u64 hv_reenlightenment_control; u64 hv_tsc_emulation_control; u64 hv_tsc_emulation_status; + + /* How many vCPUs have VP index != vCPU index */ + atomic_t num_mismatched_vp_indexes; }; enum kvm_irqchip_mode { @@ -871,6 +919,7 @@ struct kvm_arch { bool x2apic_broadcast_quirk_disabled; bool guest_can_read_msr_platform_info; + bool exception_payload_enabled; }; struct kvm_vm_stat { @@ -1133,6 +1182,9 @@ struct kvm_x86_ops { int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*get_msr_feature)(struct kvm_msr_entry *entry); + + int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version); }; struct kvm_arch_async_pf { @@ -1170,7 +1222,6 @@ void kvm_mmu_module_exit(void); void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); -void kvm_mmu_setup(struct kvm_vcpu *vcpu); void kvm_mmu_init_vm(struct kvm *kvm); void kvm_mmu_uninit_vm(struct kvm *kvm); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, @@ -1324,7 +1375,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); -void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free); +void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + ulong roots_to_free); gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, struct x86_exception *exception); gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h index 61eb4b63c5ec..d0b1434fb0b6 100644 --- a/arch/x86/include/asm/mpx.h +++ b/arch/x86/include/asm/mpx.h @@ -57,8 +57,14 @@ #define MPX_BNDCFG_ADDR_MASK (~((1UL<<MPX_BNDCFG_TAIL)-1)) #define MPX_BNDSTA_ERROR_CODE 0x3 +struct mpx_fault_info { + void __user *addr; + void __user *lower; + void __user *upper; +}; + #ifdef CONFIG_X86_INTEL_MPX -siginfo_t *mpx_generate_siginfo(struct pt_regs *regs); +int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs); int mpx_handle_bd_fault(void); static inline int kernel_managing_mpx_tables(struct mm_struct *mm) { @@ -78,9 +84,9 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags); #else -static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) +static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs) { - return NULL; + return -EINVAL; } static inline int mpx_handle_bd_fault(void) { diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index f236bcd5485d..143c99499531 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -293,7 +293,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, #define arch_has_block_step() (boot_cpu_data.x86 >= 6) #endif -#define ARCH_HAS_USER_SINGLE_STEP_INFO +#define ARCH_HAS_USER_SINGLE_STEP_REPORT /* * When hitting ptrace_stop(), we cannot return using SYSRET because diff --git 
a/arch/x86/include/asm/trace/mpx.h b/arch/x86/include/asm/trace/mpx.h index 7bd92db09e8d..54133017267c 100644 --- a/arch/x86/include/asm/trace/mpx.h +++ b/arch/x86/include/asm/trace/mpx.h @@ -11,12 +11,12 @@ TRACE_EVENT(mpx_bounds_register_exception, - TP_PROTO(void *addr_referenced, + TP_PROTO(void __user *addr_referenced, const struct mpx_bndreg *bndreg), TP_ARGS(addr_referenced, bndreg), TP_STRUCT__entry( - __field(void *, addr_referenced) + __field(void __user *, addr_referenced) __field(u64, lower_bound) __field(u64, upper_bound) ), diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h index 51c4eee00732..dc4ed8bc2382 100644 --- a/arch/x86/include/asm/unistd.h +++ b/arch/x86/include/asm/unistd.h @@ -24,6 +24,7 @@ # include <asm/unistd_64.h> # include <asm/unistd_64_x32.h> # define __ARCH_WANT_COMPAT_SYS_TIME +# define __ARCH_WANT_SYS_UTIME32 # define __ARCH_WANT_COMPAT_SYS_PREADV64 # define __ARCH_WANT_COMPAT_SYS_PWRITEV64 # define __ARCH_WANT_COMPAT_SYS_PREADV64V2 @@ -31,13 +32,13 @@ # endif +# define __ARCH_WANT_NEW_STAT # define __ARCH_WANT_OLD_READDIR # define __ARCH_WANT_OLD_STAT # define __ARCH_WANT_SYS_ALARM # define __ARCH_WANT_SYS_FADVISE64 # define __ARCH_WANT_SYS_GETHOSTNAME # define __ARCH_WANT_SYS_GETPGRP -# define __ARCH_WANT_SYS_LLSEEK # define __ARCH_WANT_SYS_NICE # define __ARCH_WANT_SYS_OLDUMOUNT # define __ARCH_WANT_SYS_OLD_GETRLIMIT diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index e05e0d309244..1fc7a0d1e877 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -40,7 +40,7 @@ static inline int cpu_has_vmx(void) */ static inline void cpu_vmxoff(void) { - asm volatile (ASM_VMX_VMXOFF : : : "cc"); + asm volatile ("vmxoff"); cr4_clear_bits(X86_CR4_VMXE); } diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 9527ba5d62da..ade0f153947d 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -503,19 +503,6 @@ enum vmcs_field { #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul - -#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" -#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" -#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" -#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" -#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" -#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" -#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" -#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" -#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" -#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" -#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" - struct vmx_msr_entry { u32 index; u32 reserved; diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index fd23d5778ea1..dabfcf7c3941 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -288,6 +288,7 @@ struct kvm_reinject_control { #define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002 #define KVM_VCPUEVENT_VALID_SHADOW 0x00000004 #define KVM_VCPUEVENT_VALID_SMM 0x00000008 +#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010 /* Interrupt shadow states */ #define KVM_X86_SHADOW_INT_MOV_SS 0x01 @@ -299,7 +300,7 @@ struct kvm_vcpu_events { __u8 injected; __u8 nr; __u8 has_error_code; - __u8 pad; + __u8 pending; __u32 error_code; } exception; struct { @@ -322,7 +323,9 @@ struct kvm_vcpu_events { __u8 smm_inside_nmi; __u8 latched_init; } smi; - __u32 reserved[9]; + __u8 reserved[27]; + __u8 exception_has_payload; + 
__u64 exception_payload; }; /* for KVM_GET/SET_DEBUGREGS */ @@ -381,6 +384,7 @@ struct kvm_sync_regs { #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 +#define KVM_STATE_NESTED_EVMCS 0x00000004 #define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_SMM_VMXON 0x00000002 diff --git a/arch/x86/include/uapi/asm/siginfo.h b/arch/x86/include/uapi/asm/siginfo.h index b3d157957177..6642d8be40c4 100644 --- a/arch/x86/include/uapi/asm/siginfo.h +++ b/arch/x86/include/uapi/asm/siginfo.h @@ -7,8 +7,6 @@ typedef long long __kernel_si_clock_t __attribute__((aligned(4))); # define __ARCH_SI_CLOCK_T __kernel_si_clock_t # define __ARCH_SI_ATTRIBUTES __attribute__((aligned(8))) -# else /* x86-64 */ -# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) # endif #endif diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index f39f3a06c26f..7299dcbf8e85 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -140,7 +140,7 @@ static void __init dtb_cpu_setup(void) int ret; version = GET_APIC_VERSION(apic_read(APIC_LVR)); - for_each_node_by_type(dn, "cpu") { + for_each_of_cpu_node(dn) { ret = of_property_read_u32(dn, "reg", &apic_id); if (ret < 0) { pr_warn("%pOF: missing local APIC ID\n", dn); diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index c88c23c658c1..d1f25c831447 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1248,7 +1248,6 @@ void __init e820__memblock_setup(void) { int i; u64 end; - u64 addr = 0; /* * The bootstrap memblock region count maximum is 128 entries @@ -1265,21 +1264,13 @@ void __init e820__memblock_setup(void) struct e820_entry *entry = &e820_table->entries[i]; end = entry->addr + entry->size; - if (addr < entry->addr) - memblock_reserve(addr, entry->addr - addr); - addr = end; if (end != (resource_size_t)end) continue; - /* - * all !E820_TYPE_RAM ranges (including gap ranges) are put - * into memblock.reserved to make sure that struct pages in - * such regions are not left uninitialized after bootup. - */ if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) - memblock_reserve(entry->addr, entry->size); - else - memblock_add(entry->addr, entry->size); + continue; + + memblock_add(entry->addr, entry->size); } /* Throw away partial pages: */ diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index d8f49c7384a3..ffae9b9740fd 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -1359,33 +1359,18 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) #endif } -static void fill_sigtrap_info(struct task_struct *tsk, - struct pt_regs *regs, - int error_code, int si_code, - struct siginfo *info) +void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, + int error_code, int si_code) { tsk->thread.trap_nr = X86_TRAP_DB; tsk->thread.error_code = error_code; - info->si_signo = SIGTRAP; - info->si_code = si_code; - info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL; -} - -void user_single_step_siginfo(struct task_struct *tsk, - struct pt_regs *regs, - struct siginfo *info) -{ - fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info); + /* Send us the fake SIGTRAP */ + force_sig_fault(SIGTRAP, si_code, + user_mode(regs) ? 
(void __user *)regs->ip : NULL, tsk); } -void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, - int error_code, int si_code) +void user_single_step_report(struct pt_regs *regs) { - struct siginfo info; - - clear_siginfo(&info); - fill_sigtrap_info(tsk, regs, error_code, si_code, &info); - /* Send us the fake SIGTRAP */ - force_sig_info(SIGTRAP, &info, tsk); + send_sigtrap(current, regs, 0, TRAP_BRKPT); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5bd0a997d81e..8f6dcd88202e 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -189,7 +189,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr) } static nokprobe_inline int -do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, +do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str, struct pt_regs *regs, long error_code) { if (v8086_mode(regs)) { @@ -202,10 +202,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, error_code, trapnr)) return 0; } - return -1; - } - - if (!user_mode(regs)) { + } else if (!user_mode(regs)) { if (fixup_exception(regs, trapnr, error_code, 0)) return 0; @@ -214,49 +211,6 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, die(str, regs, error_code); } - return -1; -} - -static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr, - siginfo_t *info) -{ - unsigned long siaddr; - int sicode; - - switch (trapnr) { - default: - return SEND_SIG_PRIV; - - case X86_TRAP_DE: - sicode = FPE_INTDIV; - siaddr = uprobe_get_trap_addr(regs); - break; - case X86_TRAP_UD: - sicode = ILL_ILLOPN; - siaddr = uprobe_get_trap_addr(regs); - break; - case X86_TRAP_AC: - sicode = BUS_ADRALN; - siaddr = 0; - break; - } - - info->si_signo = signr; - info->si_errno = 0; - info->si_code = sicode; - info->si_addr = (void __user *)siaddr; - return info; -} - -static void -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, - long error_code, siginfo_t *info) -{ - struct task_struct *tsk = current; - - - if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code)) - return; /* * We want error_code and trap_nr set for userspace faults and * kernelspace faults which result in die(), but not @@ -269,24 +223,45 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, tsk->thread.error_code = error_code; tsk->thread.trap_nr = trapnr; + return -1; +} + +static void show_signal(struct task_struct *tsk, int signr, + const char *type, const char *desc, + struct pt_regs *regs, long error_code) +{ if (show_unhandled_signals && unhandled_signal(tsk, signr) && printk_ratelimit()) { - pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", - tsk->comm, tsk->pid, str, + pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx", + tsk->comm, task_pid_nr(tsk), type, desc, regs->ip, regs->sp, error_code); print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } +} + +static void +do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, + long error_code, int sicode, void __user *addr) +{ + struct task_struct *tsk = current; + + + if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code)) + return; + + show_signal(tsk, signr, "trap ", str, regs, error_code); - force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk); + if (!sicode) + force_sig(signr, tsk); + else + force_sig_fault(signr, sicode, addr, tsk); } NOKPROBE_SYMBOL(do_trap); static void do_error_trap(struct pt_regs *regs, long error_code, char *str, - unsigned long trapnr, int signr) + unsigned long trapnr, int signr, int sicode, void __user *addr) { - siginfo_t 
info; - RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); /* @@ -299,26 +274,26 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str, if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != NOTIFY_STOP) { cond_local_irq_enable(regs); - clear_siginfo(&info); - do_trap(trapnr, signr, str, regs, error_code, - fill_trap_info(regs, signr, trapnr, &info)); + do_trap(trapnr, signr, str, regs, error_code, sicode, addr); } } -#define DO_ERROR(trapnr, signr, str, name) \ -dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ -{ \ - do_error_trap(regs, error_code, str, trapnr, signr); \ +#define IP ((void __user *)uprobe_get_trap_addr(regs)) +#define DO_ERROR(trapnr, signr, sicode, addr, str, name) \ +dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ +{ \ + do_error_trap(regs, error_code, str, trapnr, signr, sicode, addr); \ } -DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error) -DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) -DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) -DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun) -DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) -DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) -DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) -DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) +DO_ERROR(X86_TRAP_DE, SIGFPE, FPE_INTDIV, IP, "divide error", divide_error) +DO_ERROR(X86_TRAP_OF, SIGSEGV, 0, NULL, "overflow", overflow) +DO_ERROR(X86_TRAP_UD, SIGILL, ILL_ILLOPN, IP, "invalid opcode", invalid_op) +DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, 0, NULL, "coprocessor segment overrun", coprocessor_segment_overrun) +DO_ERROR(X86_TRAP_TS, SIGSEGV, 0, NULL, "invalid TSS", invalid_TSS) +DO_ERROR(X86_TRAP_NP, SIGBUS, 0, NULL, "segment not present", segment_not_present) +DO_ERROR(X86_TRAP_SS, SIGBUS, 0, NULL, "stack segment", stack_segment) +DO_ERROR(X86_TRAP_AC, SIGBUS, BUS_ADRALN, NULL, "alignment check", alignment_check) +#undef IP #ifdef CONFIG_VMAP_STACK __visible void __noreturn handle_stack_overflow(const char *message, @@ -459,7 +434,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) { const struct mpx_bndcsr *bndcsr; - siginfo_t *info; RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); if (notify_die(DIE_TRAP, "bounds", regs, error_code, @@ -497,8 +471,11 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) goto exit_trap; break; /* Success, it was handled */ case 1: /* Bound violation. */ - info = mpx_generate_siginfo(regs); - if (IS_ERR(info)) { + { + struct task_struct *tsk = current; + struct mpx_fault_info mpx; + + if (mpx_fault_info(&mpx, regs)) { /* * We failed to decode the MPX instruction. Act as if * the exception was not caused by MPX. @@ -507,14 +484,20 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) } /* * Success, we decoded the instruction and retrieved - * an 'info' containing the address being accessed + * an 'mpx' containing the address being accessed * which caused the exception. This information * allows and application to possibly handle the * #BR exception itself. 
*/ - do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info); - kfree(info); + if (!do_trap_no_signal(tsk, X86_TRAP_BR, "bounds", regs, + error_code)) + break; + + show_signal(tsk, SIGSEGV, "trap ", "bounds", regs, error_code); + + force_sig_bnderr(mpx.addr, mpx.lower, mpx.upper); break; + } case 0: /* No exception caused by Intel MPX operations. */ goto exit_trap; default: @@ -531,12 +514,13 @@ exit_trap: * up here if the kernel has MPX turned off at compile * time.. */ - do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL); + do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, 0, NULL); } dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code) { + const char *desc = "general protection fault"; struct task_struct *tsk; RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); @@ -570,25 +554,18 @@ do_general_protection(struct pt_regs *regs, long error_code) kprobe_fault_handler(regs, X86_TRAP_GP)) return; - if (notify_die(DIE_GPF, "general protection fault", regs, error_code, + if (notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) - die("general protection fault", regs, error_code); + die(desc, regs, error_code); return; } tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; - if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && - printk_ratelimit()) { - pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx", - tsk->comm, task_pid_nr(tsk), - regs->ip, regs->sp, error_code); - print_vma_addr(KERN_CONT " in ", regs->ip); - pr_cont("\n"); - } + show_signal(tsk, SIGSEGV, "", desc, regs, error_code); - force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); + force_sig(SIGSEGV, tsk); } NOKPROBE_SYMBOL(do_general_protection); @@ -631,7 +608,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) goto exit; cond_local_irq_enable(regs); - do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); + do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, 0, NULL); cond_local_irq_disable(regs); exit: @@ -845,7 +822,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) { struct task_struct *task = current; struct fpu *fpu = &task->thread.fpu; - siginfo_t info; + int si_code; char *str = (trapnr == X86_TRAP_MF) ? 
"fpu exception" : "simd exception"; @@ -871,18 +848,14 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) task->thread.trap_nr = trapnr; task->thread.error_code = error_code; - clear_siginfo(&info); - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_addr = (void __user *)uprobe_get_trap_addr(regs); - - info.si_code = fpu__exception_code(fpu, trapnr); + si_code = fpu__exception_code(fpu, trapnr); /* Retry when we get spurious exceptions: */ - if (!info.si_code) + if (!si_code) return; - force_sig_info(SIGFPE, &info, task); + force_sig_fault(SIGFPE, si_code, + (void __user *)uprobe_get_trap_addr(regs), task); } dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) @@ -942,20 +915,13 @@ NOKPROBE_SYMBOL(do_device_not_available); #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) { - siginfo_t info; - RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); local_irq_enable(); - clear_siginfo(&info); - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_BADSTK; - info.si_addr = NULL; if (notify_die(DIE_TRAP, "iret exception", regs, error_code, X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) { do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, - &info); + ILL_BADSTK, (void __user *)NULL); } } #endif diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index ff20b35e98dd..f8f3cfda01ae 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -271,19 +271,13 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst, */ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs) { - siginfo_t info; struct task_struct *tsk = current; tsk->thread.cr2 = (unsigned long)addr; tsk->thread.error_code = X86_PF_USER | X86_PF_WRITE; tsk->thread.trap_nr = X86_TRAP_PF; - clear_siginfo(&info); - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = SEGV_MAPERR; - info.si_addr = addr; - force_sig_info(SIGSEGV, &info, tsk); + force_sig_fault(SIGSEGV, SEGV_MAPERR, addr, tsk); if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV))) return; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index deb576b23b7c..843feb94a950 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -1086,7 +1086,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n", current->pid, regs->sp, regs->ip); - force_sig_info(SIGSEGV, SEND_SIG_FORCED, current); + force_sig(SIGSEGV, current); } return -1; diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 01d209ab5481..4e80080f277a 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -36,6 +36,8 @@ #include "trace.h" +#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64) + static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint) { return atomic64_read(&synic->sint[sint]); @@ -132,8 +134,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) struct kvm_vcpu *vcpu = NULL; int i; - if (vpidx < KVM_MAX_VCPUS) - vcpu = kvm_get_vcpu(kvm, vpidx); + if (vpidx >= KVM_MAX_VCPUS) + return NULL; + + vcpu = kvm_get_vcpu(kvm, vpidx); if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) @@ -689,6 +693,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) stimer_cleanup(&hv_vcpu->stimer[i]); } +bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) +{ + if 
(!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) + return false; + return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; +} +EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled); + +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, + struct hv_vp_assist_page *assist_page) +{ + if (!kvm_hv_assist_page_enabled(vcpu)) + return false; + return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, + assist_page, sizeof(*assist_page)); +} +EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page); + static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer) { struct hv_message *msg = &stimer->msg; @@ -1040,21 +1062,41 @@ static u64 current_task_runtime_100ns(void) static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) { - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; switch (msr) { - case HV_X64_MSR_VP_INDEX: - if (!host) + case HV_X64_MSR_VP_INDEX: { + struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; + int vcpu_idx = kvm_vcpu_get_idx(vcpu); + u32 new_vp_index = (u32)data; + + if (!host || new_vp_index >= KVM_MAX_VCPUS) return 1; - hv->vp_index = (u32)data; + + if (new_vp_index == hv_vcpu->vp_index) + return 0; + + /* + * The VP index is initialized to vcpu_index by + * kvm_hv_vcpu_postcreate so they initially match. Now the + * VP index is changing, adjust num_mismatched_vp_indexes if + * it now matches or no longer matches vcpu_idx. + */ + if (hv_vcpu->vp_index == vcpu_idx) + atomic_inc(&hv->num_mismatched_vp_indexes); + else if (new_vp_index == vcpu_idx) + atomic_dec(&hv->num_mismatched_vp_indexes); + + hv_vcpu->vp_index = new_vp_index; break; + } case HV_X64_MSR_VP_ASSIST_PAGE: { u64 gfn; unsigned long addr; if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) { - hv->hv_vapic = data; - if (kvm_lapic_enable_pv_eoi(vcpu, 0)) + hv_vcpu->hv_vapic = data; + if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0)) return 1; break; } @@ -1062,12 +1104,19 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); if (kvm_is_error_hva(addr)) return 1; - if (__clear_user((void __user *)addr, PAGE_SIZE)) + + /* + * Clear apic_assist portion of f(struct hv_vp_assist_page + * only, there can be valuable data in the rest which needs + * to be preserved e.g. on migration. 
+ */ + if (__clear_user((void __user *)addr, sizeof(u32))) return 1; - hv->hv_vapic = data; + hv_vcpu->hv_vapic = data; kvm_vcpu_mark_page_dirty(vcpu, gfn); if (kvm_lapic_enable_pv_eoi(vcpu, - gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) + gfn_to_gpa(gfn) | KVM_MSR_ENABLED, + sizeof(struct hv_vp_assist_page))) return 1; break; } @@ -1080,7 +1129,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) case HV_X64_MSR_VP_RUNTIME: if (!host) return 1; - hv->runtime_offset = data - current_task_runtime_100ns(); + hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); break; case HV_X64_MSR_SCONTROL: case HV_X64_MSR_SVERSION: @@ -1172,11 +1221,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) { u64 data = 0; - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; switch (msr) { case HV_X64_MSR_VP_INDEX: - data = hv->vp_index; + data = hv_vcpu->vp_index; break; case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); @@ -1185,10 +1234,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); case HV_X64_MSR_VP_ASSIST_PAGE: - data = hv->hv_vapic; + data = hv_vcpu->hv_vapic; break; case HV_X64_MSR_VP_RUNTIME: - data = current_task_runtime_100ns() + hv->runtime_offset; + data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; break; case HV_X64_MSR_SCONTROL: case HV_X64_MSR_SVERSION: @@ -1255,32 +1304,47 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) return kvm_hv_get_msr(vcpu, msr, pdata, host); } -static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no) +static __always_inline unsigned long *sparse_set_to_vcpu_mask( + struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask, + u64 *vp_bitmap, unsigned long *vcpu_bitmap) { - int i = 0, j; + struct kvm_hv *hv = &kvm->arch.hyperv; + struct kvm_vcpu *vcpu; + int i, bank, sbank = 0; - if (!(valid_bank_mask & BIT_ULL(bank_no))) - return -1; + memset(vp_bitmap, 0, + KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap)); + for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, + KVM_HV_MAX_SPARSE_VCPU_SET_BITS) + vp_bitmap[bank] = sparse_banks[sbank++]; - for (j = 0; j < bank_no; j++) - if (valid_bank_mask & BIT_ULL(j)) - i++; + if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) { + /* for all vcpus vp_index == vcpu_idx */ + return (unsigned long *)vp_bitmap; + } - return i; + bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); + kvm_for_each_vcpu(i, vcpu, kvm) { + if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index, + (unsigned long *)vp_bitmap)) + __set_bit(i, vcpu_bitmap); + } + return vcpu_bitmap; } static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, u16 rep_cnt, bool ex) { struct kvm *kvm = current_vcpu->kvm; - struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv; + struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv; struct hv_tlb_flush_ex flush_ex; struct hv_tlb_flush flush; - struct kvm_vcpu *vcpu; - unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0}; - unsigned long valid_bank_mask = 0; + u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; + DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); + unsigned long *vcpu_mask; + u64 valid_bank_mask; u64 sparse_banks[64]; - int sparse_banks_len, i; + int sparse_banks_len; bool all_cpus; if (!ex) { @@ -1290,6 +1354,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, 
trace_kvm_hv_flush_tlb(flush.processor_mask, flush.address_space, flush.flags); + valid_bank_mask = BIT_ULL(0); sparse_banks[0] = flush.processor_mask; all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS; } else { @@ -1306,7 +1371,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, all_cpus = flush_ex.hv_vp_set.format != HV_GENERIC_SET_SPARSE_4K; - sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) * + sparse_banks_len = + bitmap_weight((unsigned long *)&valid_bank_mask, 64) * sizeof(sparse_banks[0]); if (!sparse_banks_len && !all_cpus) @@ -1321,48 +1387,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, return HV_STATUS_INVALID_HYPERCALL_INPUT; } - cpumask_clear(&hv_current->tlb_lush); - - kvm_for_each_vcpu(i, vcpu, kvm) { - struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; - int bank = hv->vp_index / 64, sbank = 0; - - if (!all_cpus) { - /* Banks >64 can't be represented */ - if (bank >= 64) - continue; - - /* Non-ex hypercalls can only address first 64 vCPUs */ - if (!ex && bank) - continue; - - if (ex) { - /* - * Check is the bank of this vCPU is in sparse - * set and get the sparse bank number. - */ - sbank = get_sparse_bank_no(valid_bank_mask, - bank); - - if (sbank < 0) - continue; - } - - if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64))) - continue; - } + cpumask_clear(&hv_vcpu->tlb_flush); - /* - * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we - * can't analyze it here, flush TLB regardless of the specified - * address space. - */ - __set_bit(i, vcpu_bitmap); - } + vcpu_mask = all_cpus ? NULL : + sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, + vp_bitmap, vcpu_bitmap); + /* + * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't + * analyze it here, flush TLB regardless of the specified address space. + */ kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, - vcpu_bitmap, &hv_current->tlb_lush); + vcpu_mask, &hv_vcpu->tlb_flush); ret_success: /* We always do full TLB flush, set rep_done = rep_cnt. 
*/ @@ -1370,6 +1407,99 @@ ret_success: ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); } +static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector, + unsigned long *vcpu_bitmap) +{ + struct kvm_lapic_irq irq = { + .delivery_mode = APIC_DM_FIXED, + .vector = vector + }; + struct kvm_vcpu *vcpu; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (vcpu_bitmap && !test_bit(i, vcpu_bitmap)) + continue; + + /* We fail only when APIC is disabled */ + kvm_apic_set_irq(vcpu, &irq, NULL); + } +} + +static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa, + bool ex, bool fast) +{ + struct kvm *kvm = current_vcpu->kvm; + struct hv_send_ipi_ex send_ipi_ex; + struct hv_send_ipi send_ipi; + u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; + DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); + unsigned long *vcpu_mask; + unsigned long valid_bank_mask; + u64 sparse_banks[64]; + int sparse_banks_len; + u32 vector; + bool all_cpus; + + if (!ex) { + if (!fast) { + if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi, + sizeof(send_ipi)))) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + sparse_banks[0] = send_ipi.cpu_mask; + vector = send_ipi.vector; + } else { + /* 'reserved' part of hv_send_ipi should be 0 */ + if (unlikely(ingpa >> 32 != 0)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + sparse_banks[0] = outgpa; + vector = (u32)ingpa; + } + all_cpus = false; + valid_bank_mask = BIT_ULL(0); + + trace_kvm_hv_send_ipi(vector, sparse_banks[0]); + } else { + if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex, + sizeof(send_ipi_ex)))) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + + trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector, + send_ipi_ex.vp_set.format, + send_ipi_ex.vp_set.valid_bank_mask); + + vector = send_ipi_ex.vector; + valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask; + sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) * + sizeof(sparse_banks[0]); + + all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL; + + if (!sparse_banks_len) + goto ret_success; + + if (!all_cpus && + kvm_read_guest(kvm, + ingpa + offsetof(struct hv_send_ipi_ex, + vp_set.bank_contents), + sparse_banks, + sparse_banks_len)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } + + if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + + vcpu_mask = all_cpus ? 
NULL : + sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, + vp_bitmap, vcpu_bitmap); + + kvm_send_ipi_to_many(kvm, vector, vcpu_mask); + +ret_success: + return HV_STATUS_SUCCESS; +} + bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE; @@ -1539,6 +1669,20 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) } ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true); break; + case HVCALL_SEND_IPI: + if (unlikely(rep)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } + ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast); + break; + case HVCALL_SEND_IPI_EX: + if (unlikely(fast || rep)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + break; + } + ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false); + break; default: ret = HV_STATUS_INVALID_HYPERCALL_CODE; break; diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h index d6aa969e20f1..0e66c12ed2c3 100644 --- a/arch/x86/kvm/hyperv.h +++ b/arch/x86/kvm/hyperv.h @@ -62,6 +62,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu); +bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu); +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu, + struct hv_vp_assist_page *assist_page); + static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu, int timer_index) { diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index fbb0e6df121b..3cd227ff807f 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -70,6 +70,11 @@ #define APIC_BROADCAST 0xFF #define X2APIC_BROADCAST 0xFFFFFFFFul +static bool lapic_timer_advance_adjust_done = false; +#define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100 +/* step-by-step approximation to mitigate fluctuation */ +#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8 + static inline int apic_test_vector(int vec, void *bitmap) { return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); @@ -955,14 +960,14 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, map = rcu_dereference(kvm->arch.apic_map); ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap); - if (ret) + if (ret) { + *r = 0; for_each_set_bit(i, &bitmap, 16) { if (!dst[i]) continue; - if (*r < 0) - *r = 0; *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map); } + } rcu_read_unlock(); return ret; @@ -1472,7 +1477,7 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu) void wait_lapic_expire(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - u64 guest_tsc, tsc_deadline; + u64 guest_tsc, tsc_deadline, ns; if (!lapic_in_kernel(vcpu)) return; @@ -1492,6 +1497,24 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu) if (guest_tsc < tsc_deadline) __delay(min(tsc_deadline - guest_tsc, nsec_to_cycles(vcpu, lapic_timer_advance_ns))); + + if (!lapic_timer_advance_adjust_done) { + /* too early */ + if (guest_tsc < tsc_deadline) { + ns = (tsc_deadline - guest_tsc) * 1000000ULL; + do_div(ns, vcpu->arch.virtual_tsc_khz); + lapic_timer_advance_ns -= min((unsigned int)ns, + lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP); + } else { + /* too late */ + ns = (guest_tsc - tsc_deadline) * 1000000ULL; + do_div(ns, vcpu->arch.virtual_tsc_khz); + lapic_timer_advance_ns += min((unsigned int)ns, + lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP); + } + if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE) + lapic_timer_advance_adjust_done = true; + } } static void start_sw_tscdeadline(struct 
kvm_lapic *apic) @@ -2621,17 +2644,25 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) return 0; } -int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len) { u64 addr = data & ~KVM_MSR_ENABLED; + struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data; + unsigned long new_len; + if (!IS_ALIGNED(addr, 4)) return 1; vcpu->arch.pv_eoi.msr_val = data; if (!pv_eoi_enabled(vcpu)) return 0; - return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, - addr, sizeof(u8)); + + if (addr == ghc->gpa && len <= ghc->len) + new_len = ghc->len; + else + new_len = len; + + return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len); } void kvm_apic_accept_events(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index ed0ed39abd36..ff6ef9c3d760 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; } -int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data); +int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len); void kvm_lapic_init(void); void kvm_lapic_exit(void); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 51b953ad9d4e..cf5f572f2305 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -932,7 +932,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, while (cache->nobjs < ARRAY_SIZE(cache->objects)) { obj = kmem_cache_zalloc(base_cache, GFP_KERNEL); if (!obj) - return -ENOMEM; + return cache->nobjs >= min ? 0 : -ENOMEM; cache->objects[cache->nobjs++] = obj; } return 0; @@ -960,7 +960,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, while (cache->nobjs < ARRAY_SIZE(cache->objects)) { page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); if (!page) - return -ENOMEM; + return cache->nobjs >= min ? 
0 : -ENOMEM; cache->objects[cache->nobjs++] = page; } return 0; @@ -1265,24 +1265,24 @@ pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, mmu_free_pte_list_desc(desc); } -static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) +static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) { struct pte_list_desc *desc; struct pte_list_desc *prev_desc; int i; if (!rmap_head->val) { - printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte); + pr_err("%s: %p 0->BUG\n", __func__, spte); BUG(); } else if (!(rmap_head->val & 1)) { - rmap_printk("pte_list_remove: %p 1->0\n", spte); + rmap_printk("%s: %p 1->0\n", __func__, spte); if ((u64 *)rmap_head->val != spte) { - printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte); + pr_err("%s: %p 1->BUG\n", __func__, spte); BUG(); } rmap_head->val = 0; } else { - rmap_printk("pte_list_remove: %p many->many\n", spte); + rmap_printk("%s: %p many->many\n", __func__, spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); prev_desc = NULL; while (desc) { @@ -1296,11 +1296,17 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) prev_desc = desc; desc = desc->more; } - pr_err("pte_list_remove: %p many->many\n", spte); + pr_err("%s: %p many->many\n", __func__, spte); BUG(); } } +static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep) +{ + mmu_spte_clear_track_bits(sptep); + __pte_list_remove(sptep, rmap_head); +} + static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level, struct kvm_memory_slot *slot) { @@ -1349,7 +1355,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) sp = page_header(__pa(spte)); gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); rmap_head = gfn_to_rmap(kvm, gfn, sp); - pte_list_remove(spte, rmap_head); + __pte_list_remove(spte, rmap_head); } /* @@ -1685,7 +1691,7 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) while ((sptep = rmap_get_first(rmap_head, &iter))) { rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); - drop_spte(kvm, sptep); + pte_list_remove(rmap_head, sptep); flush = true; } @@ -1721,7 +1727,7 @@ restart: need_flush = 1; if (pte_write(*ptep)) { - drop_spte(kvm, sptep); + pte_list_remove(rmap_head, sptep); goto restart; } else { new_spte = *sptep & ~PT64_BASE_ADDR_MASK; @@ -1988,7 +1994,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, u64 *parent_pte) { - pte_list_remove(parent_pte, &sp->parent_ptes); + __pte_list_remove(parent_pte, &sp->parent_ptes); } static void drop_parent_pte(struct kvm_mmu_page *sp, @@ -2181,7 +2187,7 @@ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { if (sp->role.cr4_pae != !!is_pae(vcpu) - || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) { + || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); return false; } @@ -2375,14 +2381,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, int collisions = 0; LIST_HEAD(invalid_list); - role = vcpu->arch.mmu.base_role; + role = vcpu->arch.mmu->mmu_role.base; role.level = level; role.direct = direct; if (role.direct) role.cr4_pae = 0; role.access = access; - if (!vcpu->arch.mmu.direct_map - && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { + if (!vcpu->arch.mmu->direct_map + && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * 
level)) - 1; role.quadrant = quadrant; @@ -2457,11 +2463,11 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato { iterator->addr = addr; iterator->shadow_addr = root; - iterator->level = vcpu->arch.mmu.shadow_root_level; + iterator->level = vcpu->arch.mmu->shadow_root_level; if (iterator->level == PT64_ROOT_4LEVEL && - vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL && - !vcpu->arch.mmu.direct_map) + vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && + !vcpu->arch.mmu->direct_map) --iterator->level; if (iterator->level == PT32E_ROOT_LEVEL) { @@ -2469,10 +2475,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato * prev_root is currently only used for 64-bit hosts. So only * the active root_hpa is valid here. */ - BUG_ON(root != vcpu->arch.mmu.root_hpa); + BUG_ON(root != vcpu->arch.mmu->root_hpa); iterator->shadow_addr - = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; + = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; iterator->shadow_addr &= PT64_BASE_ADDR_MASK; --iterator->level; if (!iterator->shadow_addr) @@ -2483,7 +2489,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, struct kvm_vcpu *vcpu, u64 addr) { - shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa, + shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa, addr); } @@ -3095,7 +3101,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable, int emulate = 0; gfn_t pseudo_gfn; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return 0; for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { @@ -3125,16 +3131,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable, static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) { - siginfo_t info; - - clear_siginfo(&info); - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_MCEERR_AR; - info.si_addr = (void __user *)address; - info.si_addr_lsb = PAGE_SHIFT; - - send_sig_info(SIGBUS, &info, tsk); + send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk); } static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) @@ -3310,7 +3307,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, u64 spte = 0ull; uint retry_count = 0; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return false; if (!page_fault_can_be_fast(error_code)) @@ -3480,11 +3477,11 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, } /* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */ -void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free) +void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + ulong roots_to_free) { int i; LIST_HEAD(invalid_list); - struct kvm_mmu *mmu = &vcpu->arch.mmu; bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT; BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); @@ -3544,20 +3541,20 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) struct kvm_mmu_page *sp; unsigned i; - if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) { + if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { spin_lock(&vcpu->kvm->mmu_lock); if(make_mmu_pages_available(vcpu) < 0) { spin_unlock(&vcpu->kvm->mmu_lock); return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, 0, 0, - vcpu->arch.mmu.shadow_root_level, 1, 
ACC_ALL); + vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.root_hpa = __pa(sp->spt); - } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { + vcpu->arch.mmu->root_hpa = __pa(sp->spt); + } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) { for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu.pae_root[i]; + hpa_t root = vcpu->arch.mmu->pae_root[i]; MMU_WARN_ON(VALID_PAGE(root)); spin_lock(&vcpu->kvm->mmu_lock); @@ -3570,9 +3567,9 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) root = __pa(sp->spt); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; + vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK; } - vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); } else BUG(); @@ -3586,7 +3583,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) gfn_t root_gfn; int i; - root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; + root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) return 1; @@ -3595,8 +3592,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) * Do we shadow a long mode page table? If so we need to * write-protect the guests page table root. */ - if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { - hpa_t root = vcpu->arch.mmu.root_hpa; + if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { + hpa_t root = vcpu->arch.mmu->root_hpa; MMU_WARN_ON(VALID_PAGE(root)); @@ -3606,11 +3603,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) return -ENOSPC; } sp = kvm_mmu_get_page(vcpu, root_gfn, 0, - vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL); + vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL); root = __pa(sp->spt); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.root_hpa = root; + vcpu->arch.mmu->root_hpa = root; return 0; } @@ -3620,17 +3617,17 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) * the shadow page table may be a PAE or a long mode page table. */ pm_mask = PT_PRESENT_MASK; - if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) + if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu.pae_root[i]; + hpa_t root = vcpu->arch.mmu->pae_root[i]; MMU_WARN_ON(VALID_PAGE(root)); - if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { - pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); + if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) { + pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i); if (!(pdptr & PT_PRESENT_MASK)) { - vcpu->arch.mmu.pae_root[i] = 0; + vcpu->arch.mmu->pae_root[i] = 0; continue; } root_gfn = pdptr >> PAGE_SHIFT; @@ -3648,16 +3645,16 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); - vcpu->arch.mmu.pae_root[i] = root | pm_mask; + vcpu->arch.mmu->pae_root[i] = root | pm_mask; } - vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); /* * If we shadow a 32 bit page table with a long mode page * table we enter this path. */ - if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) { - if (vcpu->arch.mmu.lm_root == NULL) { + if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { + if (vcpu->arch.mmu->lm_root == NULL) { /* * The additional page necessary for this is only * allocated on demand. 
@@ -3669,12 +3666,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) if (lm_root == NULL) return 1; - lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; + lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; - vcpu->arch.mmu.lm_root = lm_root; + vcpu->arch.mmu->lm_root = lm_root; } - vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); } return 0; @@ -3682,7 +3679,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) static int mmu_alloc_roots(struct kvm_vcpu *vcpu) { - if (vcpu->arch.mmu.direct_map) + if (vcpu->arch.mmu->direct_map) return mmu_alloc_direct_roots(vcpu); else return mmu_alloc_shadow_roots(vcpu); @@ -3693,17 +3690,16 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) int i; struct kvm_mmu_page *sp; - if (vcpu->arch.mmu.direct_map) + if (vcpu->arch.mmu->direct_map) return; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return; vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); - if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { - hpa_t root = vcpu->arch.mmu.root_hpa; - + if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { + hpa_t root = vcpu->arch.mmu->root_hpa; sp = page_header(root); /* @@ -3734,7 +3730,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu.pae_root[i]; + hpa_t root = vcpu->arch.mmu->pae_root[i]; if (root && VALID_PAGE(root)) { root &= PT64_BASE_ADDR_MASK; @@ -3808,7 +3804,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) int root, leaf; bool reserved = false; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) goto exit; walk_shadow_page_lockless_begin(vcpu); @@ -3825,7 +3821,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) if (!is_shadow_present_pte(spte)) break; - reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, + reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte, iterator.level); } @@ -3904,7 +3900,7 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) struct kvm_shadow_walk_iterator iterator; u64 spte; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return; walk_shadow_page_lockless_begin(vcpu); @@ -3931,7 +3927,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, if (r) return r; - MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); + MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); return nonpaging_map(vcpu, gva & PAGE_MASK, @@ -3944,8 +3940,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; arch.gfn = gfn; - arch.direct_map = vcpu->arch.mmu.direct_map; - arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); + arch.direct_map = vcpu->arch.mmu->direct_map; + arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu); return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); } @@ -4051,7 +4047,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, int write = error_code & PFERR_WRITE_MASK; bool map_writable; - MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); + MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); if (page_fault_handle_page_track(vcpu, error_code, gfn)) return RET_PF_EMULATE; @@ -4127,7 +4123,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, { uint i; struct kvm_mmu_root_info root; - struct 
kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu *mmu = vcpu->arch.mmu; root.cr3 = mmu->get_cr3(vcpu); root.hpa = mmu->root_hpa; @@ -4150,7 +4146,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3, union kvm_mmu_page_role new_role, bool skip_tlb_flush) { - struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu *mmu = vcpu->arch.mmu; /* * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid @@ -4201,7 +4197,8 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush) { if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush)) - kvm_mmu_free_roots(vcpu, KVM_MMU_ROOT_CURRENT); + kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, + KVM_MMU_ROOT_CURRENT); } void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush) @@ -4219,7 +4216,7 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu) static void inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { - vcpu->arch.mmu.inject_page_fault(vcpu, fault); + vcpu->arch.mmu->inject_page_fault(vcpu, fault); } static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, @@ -4423,7 +4420,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { - bool uses_nx = context->nx || context->base_role.smep_andnot_wp; + bool uses_nx = context->nx || + context->mmu_role.base.smep_andnot_wp; struct rsvd_bits_validate *shadow_zero_check; int i; @@ -4562,7 +4560,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, * SMAP:kernel-mode data accesses from user-mode * mappings should fault. A fault is considered * as a SMAP violation if all of the following - * conditions are ture: + * conditions are true: * - X86_CR4_SMAP is set in CR4 * - A user page is accessed * - The access is not a fetch @@ -4723,27 +4721,65 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu, paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); } -static union kvm_mmu_page_role -kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu) +static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) +{ + union kvm_mmu_extended_role ext = {0}; + + ext.cr0_pg = !!is_paging(vcpu); + ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); + ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); + ext.cr4_pse = !!is_pse(vcpu); + ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); + ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); + + ext.valid = 1; + + return ext; +} + +static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, + bool base_only) { - union kvm_mmu_page_role role = {0}; + union kvm_mmu_role role = {0}; - role.guest_mode = is_guest_mode(vcpu); - role.smm = is_smm(vcpu); - role.ad_disabled = (shadow_accessed_mask == 0); - role.level = kvm_x86_ops->get_tdp_level(vcpu); - role.direct = true; - role.access = ACC_ALL; + role.base.access = ACC_ALL; + role.base.nxe = !!is_nx(vcpu); + role.base.cr4_pae = !!is_pae(vcpu); + role.base.cr0_wp = is_write_protection(vcpu); + role.base.smm = is_smm(vcpu); + role.base.guest_mode = is_guest_mode(vcpu); + + if (base_only) + return role; + + role.ext = kvm_calc_mmu_role_ext(vcpu); + + return role; +} + +static union kvm_mmu_role +kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) +{ + union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); + + role.base.ad_disabled = (shadow_accessed_mask == 0); + role.base.level = kvm_x86_ops->get_tdp_level(vcpu); + 
role.base.direct = true; return role; } static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) { - struct kvm_mmu *context = &vcpu->arch.mmu; + struct kvm_mmu *context = vcpu->arch.mmu; + union kvm_mmu_role new_role = + kvm_calc_tdp_mmu_root_page_role(vcpu, false); + + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == context->mmu_role.as_u64) + return; - context->base_role.word = mmu_base_role_mask.word & - kvm_calc_tdp_mmu_root_page_role(vcpu).word; + context->mmu_role.as_u64 = new_role.as_u64; context->page_fault = tdp_page_fault; context->sync_page = nonpaging_sync_page; context->invlpg = nonpaging_invlpg; @@ -4783,36 +4819,36 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) reset_tdp_shadow_zero_bits_mask(vcpu, context); } -static union kvm_mmu_page_role -kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu) -{ - union kvm_mmu_page_role role = {0}; - bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); - bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); - - role.nxe = is_nx(vcpu); - role.cr4_pae = !!is_pae(vcpu); - role.cr0_wp = is_write_protection(vcpu); - role.smep_andnot_wp = smep && !is_write_protection(vcpu); - role.smap_andnot_wp = smap && !is_write_protection(vcpu); - role.guest_mode = is_guest_mode(vcpu); - role.smm = is_smm(vcpu); - role.direct = !is_paging(vcpu); - role.access = ACC_ALL; +static union kvm_mmu_role +kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only) +{ + union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only); + + role.base.smep_andnot_wp = role.ext.cr4_smep && + !is_write_protection(vcpu); + role.base.smap_andnot_wp = role.ext.cr4_smap && + !is_write_protection(vcpu); + role.base.direct = !is_paging(vcpu); if (!is_long_mode(vcpu)) - role.level = PT32E_ROOT_LEVEL; + role.base.level = PT32E_ROOT_LEVEL; else if (is_la57_mode(vcpu)) - role.level = PT64_ROOT_5LEVEL; + role.base.level = PT64_ROOT_5LEVEL; else - role.level = PT64_ROOT_4LEVEL; + role.base.level = PT64_ROOT_4LEVEL; return role; } void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) { - struct kvm_mmu *context = &vcpu->arch.mmu; + struct kvm_mmu *context = vcpu->arch.mmu; + union kvm_mmu_role new_role = + kvm_calc_shadow_mmu_root_page_role(vcpu, false); + + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == context->mmu_role.as_u64) + return; if (!is_paging(vcpu)) nonpaging_init_context(vcpu, context); @@ -4823,22 +4859,28 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) else paging32_init_context(vcpu, context); - context->base_role.word = mmu_base_role_mask.word & - kvm_calc_shadow_mmu_root_page_role(vcpu).word; + context->mmu_role.as_u64 = new_role.as_u64; reset_shadow_zero_bits_mask(vcpu, context); } EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); -static union kvm_mmu_page_role -kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty) +static union kvm_mmu_role +kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, + bool execonly) { - union kvm_mmu_page_role role = vcpu->arch.mmu.base_role; + union kvm_mmu_role role; + + /* Base role is inherited from root_mmu */ + role.base.word = vcpu->arch.root_mmu.mmu_role.base.word; + role.ext = kvm_calc_mmu_role_ext(vcpu); - role.level = PT64_ROOT_4LEVEL; - role.direct = false; - role.ad_disabled = !accessed_dirty; - role.guest_mode = true; - role.access = ACC_ALL; + role.base.level = PT64_ROOT_4LEVEL; + role.base.direct = false; + role.base.ad_disabled = !accessed_dirty; + role.base.guest_mode = true; + role.base.access = ACC_ALL; + + 
role.ext.execonly = execonly; return role; } @@ -4846,11 +4888,17 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty) void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, bool accessed_dirty, gpa_t new_eptp) { - struct kvm_mmu *context = &vcpu->arch.mmu; - union kvm_mmu_page_role root_page_role = - kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty); + struct kvm_mmu *context = vcpu->arch.mmu; + union kvm_mmu_role new_role = + kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty, + execonly); + + __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false); + + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == context->mmu_role.as_u64) + return; - __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false); context->shadow_root_level = PT64_ROOT_4LEVEL; context->nx = true; @@ -4862,7 +4910,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->update_pte = ept_update_pte; context->root_level = PT64_ROOT_4LEVEL; context->direct_map = false; - context->base_role.word = root_page_role.word & mmu_base_role_mask.word; + context->mmu_role.as_u64 = new_role.as_u64; + update_permission_bitmask(vcpu, context, true); update_pkru_bitmask(vcpu, context, true); update_last_nonleaf_level(vcpu, context); @@ -4873,7 +4922,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); static void init_kvm_softmmu(struct kvm_vcpu *vcpu) { - struct kvm_mmu *context = &vcpu->arch.mmu; + struct kvm_mmu *context = vcpu->arch.mmu; kvm_init_shadow_mmu(vcpu); context->set_cr3 = kvm_x86_ops->set_cr3; @@ -4884,14 +4933,20 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu) static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) { + union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false); struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; + new_role.base.word &= mmu_base_role_mask.word; + if (new_role.as_u64 == g_context->mmu_role.as_u64) + return; + + g_context->mmu_role.as_u64 = new_role.as_u64; g_context->get_cr3 = get_cr3; g_context->get_pdptr = kvm_pdptr_read; g_context->inject_page_fault = kvm_inject_page_fault; /* - * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using + * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using * L1's nested page tables (e.g. EPT12). 
The nested translation * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using * L2's page tables as the first level of translation and L1's @@ -4930,10 +4985,10 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots) if (reset_roots) { uint i; - vcpu->arch.mmu.root_hpa = INVALID_PAGE; + vcpu->arch.mmu->root_hpa = INVALID_PAGE; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; + vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; } if (mmu_is_nested(vcpu)) @@ -4948,10 +5003,14 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu); static union kvm_mmu_page_role kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu) { + union kvm_mmu_role role; + if (tdp_enabled) - return kvm_calc_tdp_mmu_root_page_role(vcpu); + role = kvm_calc_tdp_mmu_root_page_role(vcpu, true); else - return kvm_calc_shadow_mmu_root_page_role(vcpu); + role = kvm_calc_shadow_mmu_root_page_role(vcpu, true); + + return role.base; } void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) @@ -4981,8 +5040,10 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load); void kvm_mmu_unload(struct kvm_vcpu *vcpu) { - kvm_mmu_free_roots(vcpu, KVM_MMU_ROOTS_ALL); - WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); + kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); + WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); } EXPORT_SYMBOL_GPL(kvm_mmu_unload); @@ -4996,7 +5057,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, } ++vcpu->kvm->stat.mmu_pte_updated; - vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); + vcpu->arch.mmu->update_pte(vcpu, sp, spte, new); } static bool need_remote_flush(u64 old, u64 new) @@ -5173,10 +5234,12 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, local_flush = true; while (npte--) { + u32 base_role = vcpu->arch.mmu->mmu_role.base.word; + entry = *spte; mmu_page_zap_pte(vcpu->kvm, sp, spte); if (gentry && - !((sp->role.word ^ vcpu->arch.mmu.base_role.word) + !((sp->role.word ^ base_role) & mmu_base_role_mask.word) && rmap_can_add(vcpu)) mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); if (need_remote_flush(entry, *spte)) @@ -5194,7 +5257,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) gpa_t gpa; int r; - if (vcpu->arch.mmu.direct_map) + if (vcpu->arch.mmu->direct_map) return 0; gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); @@ -5230,10 +5293,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, { int r, emulation_type = 0; enum emulation_result er; - bool direct = vcpu->arch.mmu.direct_map; + bool direct = vcpu->arch.mmu->direct_map; /* With shadow page tables, fault_address contains a GVA or nGPA. */ - if (vcpu->arch.mmu.direct_map) { + if (vcpu->arch.mmu->direct_map) { vcpu->arch.gpa_available = true; vcpu->arch.gpa_val = cr2; } @@ -5246,8 +5309,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, } if (r == RET_PF_INVALID) { - r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code), - false); + r = vcpu->arch.mmu->page_fault(vcpu, cr2, + lower_32_bits(error_code), + false); WARN_ON(r == RET_PF_INVALID); } @@ -5263,7 +5327,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, * paging in both guests. If true, we simply unprotect the page * and resume the guest. 
*/ - if (vcpu->arch.mmu.direct_map && + if (vcpu->arch.mmu->direct_map && (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2)); return 1; @@ -5311,7 +5375,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { - struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu *mmu = vcpu->arch.mmu; int i; /* INVLPG on a * non-canonical address is a NOP according to the SDM. */ @@ -5342,7 +5406,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) { - struct kvm_mmu *mmu = &vcpu->arch.mmu; + struct kvm_mmu *mmu = vcpu->arch.mmu; bool tlb_flush = false; uint i; @@ -5386,8 +5450,8 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp); static void free_mmu_pages(struct kvm_vcpu *vcpu) { - free_page((unsigned long)vcpu->arch.mmu.pae_root); - free_page((unsigned long)vcpu->arch.mmu.lm_root); + free_page((unsigned long)vcpu->arch.mmu->pae_root); + free_page((unsigned long)vcpu->arch.mmu->lm_root); } static int alloc_mmu_pages(struct kvm_vcpu *vcpu) @@ -5407,9 +5471,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu) if (!page) return -ENOMEM; - vcpu->arch.mmu.pae_root = page_address(page); + vcpu->arch.mmu->pae_root = page_address(page); for (i = 0; i < 4; ++i) - vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; + vcpu->arch.mmu->pae_root[i] = INVALID_PAGE; return 0; } @@ -5418,27 +5482,21 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) { uint i; - vcpu->arch.walk_mmu = &vcpu->arch.mmu; - vcpu->arch.mmu.root_hpa = INVALID_PAGE; - vcpu->arch.mmu.translate_gpa = translate_gpa; - vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; + vcpu->arch.mmu = &vcpu->arch.root_mmu; + vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; + vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.root_mmu.translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; - - return alloc_mmu_pages(vcpu); -} + vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; -void kvm_mmu_setup(struct kvm_vcpu *vcpu) -{ - MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); + vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.guest_mmu.translate_gpa = translate_gpa; + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) + vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; - /* - * kvm_mmu_setup() is called only on vCPU initialization. - * Therefore, no need to reset mmu roots as they are not yet - * initialized. - */ - kvm_init_mmu(vcpu, false); + vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; + return alloc_mmu_pages(vcpu); } static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, @@ -5621,7 +5679,7 @@ restart: if (sp->role.direct && !kvm_is_reserved_pfn(pfn) && PageTransCompoundMap(pfn_to_page(pfn))) { - drop_spte(kvm, sptep); + pte_list_remove(rmap_head, sptep); need_tlb_flush = 1; goto restart; } @@ -5878,6 +5936,16 @@ int kvm_mmu_module_init(void) { int ret = -ENOMEM; + /* + * MMU roles use union aliasing which is, generally speaking, an + * undefined behavior. However, we supposedly know how compilers behave + * and the current status quo is unlikely to change. Guardians below are + * supposed to let us know if the assumption becomes false. 
+ */ + BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32)); + BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32)); + BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64)); + kvm_mmu_reset_all_pte_masks(); pte_list_desc_cache = kmem_cache_create("pte_list_desc", @@ -5907,7 +5975,7 @@ out: } /* - * Caculate mmu pages needed for kvm. + * Calculate mmu pages needed for kvm. */ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) { diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 1fab69c0b2f3..c7b333147c4a 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -43,11 +43,6 @@ #define PT32_ROOT_LEVEL 2 #define PT32E_ROOT_LEVEL 3 -#define PT_PDPE_LEVEL 3 -#define PT_DIRECTORY_LEVEL 2 -#define PT_PAGE_TABLE_LEVEL 1 -#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1) - static inline u64 rsvd_bits(int s, int e) { if (e < s) @@ -80,7 +75,7 @@ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) { - if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE)) + if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE)) return 0; return kvm_mmu_load(vcpu); @@ -102,9 +97,9 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu) static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu) { - if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) - vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa | - kvm_get_active_pcid(vcpu)); + if (VALID_PAGE(vcpu->arch.mmu->root_hpa)) + vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa | + kvm_get_active_pcid(vcpu)); } /* diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index 1272861e77b9..abac7e208853 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c @@ -59,19 +59,19 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn) int i; struct kvm_mmu_page *sp; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return; - if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { - hpa_t root = vcpu->arch.mmu.root_hpa; + if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { + hpa_t root = vcpu->arch.mmu->root_hpa; sp = page_header(root); - __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu.root_level); + __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level); return; } for (i = 0; i < 4; ++i) { - hpa_t root = vcpu->arch.mmu.pae_root[i]; + hpa_t root = vcpu->arch.mmu->pae_root[i]; if (root && VALID_PAGE(root)) { root &= PT64_BASE_ADDR_MASK; @@ -122,7 +122,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level) hpa = pfn << PAGE_SHIFT; if ((*sptep & PT64_BASE_ADDR_MASK) != hpa) audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx " - "ent %llxn", vcpu->arch.mmu.root_level, pfn, + "ent %llxn", vcpu->arch.mmu->root_level, pfn, hpa, *sptep); } diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 14ffd973df54..7cf2185b7eb5 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -158,14 +158,15 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte) { - if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) + if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) goto no_present; if (!FNAME(is_present_gpte)(gpte)) goto no_present; /* if accessed bit is not supported prefetch non accessed gpte */ - if (PT_HAVE_ACCESSED_DIRTY(&vcpu->arch.mmu) && !(gpte & PT_GUEST_ACCESSED_MASK)) + if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) && + 
!(gpte & PT_GUEST_ACCESSED_MASK)) goto no_present; return false; @@ -480,7 +481,7 @@ error: static int FNAME(walk_addr)(struct guest_walker *walker, struct kvm_vcpu *vcpu, gva_t addr, u32 access) { - return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr, + return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, access); } @@ -509,7 +510,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, gfn = gpte_to_gfn(gpte); pte_access = sp->role.access & FNAME(gpte_access)(gpte); - FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte); + FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, no_dirty_log && (pte_access & ACC_WRITE_MASK)); if (is_error_pfn(pfn)) @@ -604,7 +605,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, direct_access = gw->pte_access; - top_level = vcpu->arch.mmu.root_level; + top_level = vcpu->arch.mmu->root_level; if (top_level == PT32E_ROOT_LEVEL) top_level = PT32_ROOT_LEVEL; /* @@ -616,7 +617,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, if (FNAME(gpte_changed)(vcpu, gw, top_level)) goto out_gpte_changed; - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) goto out_gpte_changed; for (shadow_walk_init(&it, vcpu, addr); @@ -1004,7 +1005,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) gfn = gpte_to_gfn(gpte); pte_access = sp->role.access; pte_access &= FNAME(gpte_access)(gpte); - FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte); + FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access, &nr_present)) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 61ccfb13899e..0e21ccc46792 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -809,6 +809,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu) nested_svm_check_exception(svm, nr, has_error_code, error_code)) return; + kvm_deliver_exception_payload(&svm->vcpu); + if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) { unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); @@ -2922,18 +2924,18 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); kvm_init_shadow_mmu(vcpu); - vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3; - vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; - vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; - vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; - vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu); - reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu); + vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3; + vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3; + vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr; + vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit; + vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu); + reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu); vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; } static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) { - vcpu->arch.walk_mmu = &vcpu->arch.mmu; + vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; } static int nested_svm_check_permissions(struct vcpu_svm *svm) @@ -2969,16 +2971,13 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, svm->vmcb->control.exit_info_1 = error_code; /* - * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception. 
- * The fix is to add the ancillary datum (CR2 or DR6) to structs - * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be - * written only when inject_pending_event runs (DR6 would written here - * too). This should be conditional on a new capability---if the - * capability is disabled, kvm_multiple_exception would write the - * ancillary information to CR2 or DR6, for backwards ABI-compatibility. + * EXITINFO2 is undefined for all exception intercepts other + * than #PF. */ if (svm->vcpu.arch.exception.nested_apf) svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token; + else if (svm->vcpu.arch.exception.has_payload) + svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload; else svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; @@ -5642,26 +5641,24 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%[svm]) \n\t" "mov %%r14, %c[r14](%[svm]) \n\t" "mov %%r15, %c[r15](%[svm]) \n\t" -#endif /* * Clear host registers marked as clobbered to prevent * speculative use. */ - "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t" - "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t" - "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t" - "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t" - "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t" -#ifdef CONFIG_X86_64 - "xor %%r8, %%r8 \n\t" - "xor %%r9, %%r9 \n\t" - "xor %%r10, %%r10 \n\t" - "xor %%r11, %%r11 \n\t" - "xor %%r12, %%r12 \n\t" - "xor %%r13, %%r13 \n\t" - "xor %%r14, %%r14 \n\t" - "xor %%r15, %%r15 \n\t" + "xor %%r8d, %%r8d \n\t" + "xor %%r9d, %%r9d \n\t" + "xor %%r10d, %%r10d \n\t" + "xor %%r11d, %%r11d \n\t" + "xor %%r12d, %%r12d \n\t" + "xor %%r13d, %%r13d \n\t" + "xor %%r14d, %%r14d \n\t" + "xor %%r15d, %%r15d \n\t" #endif + "xor %%ebx, %%ebx \n\t" + "xor %%ecx, %%ecx \n\t" + "xor %%edx, %%edx \n\t" + "xor %%esi, %%esi \n\t" + "xor %%edi, %%edi \n\t" "pop %%" _ASM_BP : : [svm]"a"(svm), @@ -7040,6 +7037,13 @@ failed: return ret; } +static int nested_enable_evmcs(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version) +{ + /* Intel-only feature */ + return -ENODEV; +} + static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, @@ -7169,6 +7173,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, .mem_enc_unreg_region = svm_unregister_enc_region, + + .nested_enable_evmcs = nested_enable_evmcs, }; static int __init svm_init(void) diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 0f997683404f..0659465a745c 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -1418,6 +1418,48 @@ TRACE_EVENT(kvm_hv_flush_tlb_ex, __entry->valid_bank_mask, __entry->format, __entry->address_space, __entry->flags) ); + +/* + * Tracepoints for kvm_hv_send_ipi. 
+ */ +TRACE_EVENT(kvm_hv_send_ipi, + TP_PROTO(u32 vector, u64 processor_mask), + TP_ARGS(vector, processor_mask), + + TP_STRUCT__entry( + __field(u32, vector) + __field(u64, processor_mask) + ), + + TP_fast_assign( + __entry->vector = vector; + __entry->processor_mask = processor_mask; + ), + + TP_printk("vector %x processor_mask 0x%llx", + __entry->vector, __entry->processor_mask) +); + +TRACE_EVENT(kvm_hv_send_ipi_ex, + TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask), + TP_ARGS(vector, format, valid_bank_mask), + + TP_STRUCT__entry( + __field(u32, vector) + __field(u64, format) + __field(u64, valid_bank_mask) + ), + + TP_fast_assign( + __entry->vector = vector; + __entry->format = format; + __entry->valid_bank_mask = valid_bank_mask; + ), + + TP_printk("vector %x format %llx valid_bank_mask 0x%llx", + __entry->vector, __entry->format, + __entry->valid_bank_mask) +); #endif /* _TRACE_KVM_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e665aa7167cf..4555077d69ce 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -20,6 +20,7 @@ #include "mmu.h" #include "cpuid.h" #include "lapic.h" +#include "hyperv.h" #include <linux/kvm_host.h> #include <linux/module.h> @@ -61,7 +62,7 @@ #define __ex(x) __kvm_handle_fault_on_reboot(x) #define __ex_clear(x, reg) \ - ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg) + ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg) MODULE_AUTHOR("Qumranet"); MODULE_LICENSE("GPL"); @@ -107,9 +108,12 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); * VMX and be a hypervisor for its own guests. If nested=0, guests may not * use VMX instructions. */ -static bool __read_mostly nested = 0; +static bool __read_mostly nested = 1; module_param(nested, bool, S_IRUGO); +static bool __read_mostly nested_early_check = 0; +module_param(nested_early_check, bool, S_IRUGO); + static u64 __read_mostly host_xss; static bool __read_mostly enable_pml = 1; @@ -131,7 +135,7 @@ static bool __read_mostly enable_preemption_timer = 1; module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); #endif -#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD) +#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE #define KVM_VM_CR0_ALWAYS_ON \ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \ @@ -187,6 +191,7 @@ static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; module_param(ple_window_max, uint, 0444); extern const ulong vmx_return; +extern const ulong vmx_early_consistency_check_return; static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); @@ -827,14 +832,28 @@ struct nested_vmx { */ struct vmcs12 *cached_shadow_vmcs12; /* - * Indicates if the shadow vmcs must be updated with the - * data hold by vmcs12 + * Indicates if the shadow vmcs or enlightened vmcs must be updated + * with the data held by struct vmcs12. */ - bool sync_shadow_vmcs; + bool need_vmcs12_sync; bool dirty_vmcs12; + /* + * vmcs02 has been initialized, i.e. state that is constant for + * vmcs02 has been written to the backing VMCS. Initialization + * is delayed until L1 actually attempts to run a nested VM. + */ + bool vmcs02_initialized; + bool change_vmcs01_virtual_apic_mode; + /* + * Enlightened VMCS has been enabled. It does not mean that L1 has to + * use it. However, VMX features available to L1 will be limited based + * on what the enlightened VMCS supports. 
+ */ + bool enlightened_vmcs_enabled; + /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; @@ -870,6 +889,10 @@ struct nested_vmx { /* in guest mode on SMM entry? */ bool guest_mode; } smm; + + gpa_t hv_evmcs_vmptr; + struct page *hv_evmcs_page; + struct hv_enlightened_vmcs *hv_evmcs; }; #define POSTED_INTR_ON 0 @@ -1381,6 +1404,49 @@ DEFINE_STATIC_KEY_FALSE(enable_evmcs); #define KVM_EVMCS_VERSION 1 +/* + * Enlightened VMCSv1 doesn't support these: + * + * POSTED_INTR_NV = 0x00000002, + * GUEST_INTR_STATUS = 0x00000810, + * APIC_ACCESS_ADDR = 0x00002014, + * POSTED_INTR_DESC_ADDR = 0x00002016, + * EOI_EXIT_BITMAP0 = 0x0000201c, + * EOI_EXIT_BITMAP1 = 0x0000201e, + * EOI_EXIT_BITMAP2 = 0x00002020, + * EOI_EXIT_BITMAP3 = 0x00002022, + * GUEST_PML_INDEX = 0x00000812, + * PML_ADDRESS = 0x0000200e, + * VM_FUNCTION_CONTROL = 0x00002018, + * EPTP_LIST_ADDRESS = 0x00002024, + * VMREAD_BITMAP = 0x00002026, + * VMWRITE_BITMAP = 0x00002028, + * + * TSC_MULTIPLIER = 0x00002032, + * PLE_GAP = 0x00004020, + * PLE_WINDOW = 0x00004022, + * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, + * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, + * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, + * + * Currently unsupported in KVM: + * GUEST_IA32_RTIT_CTL = 0x00002814, + */ +#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \ + PIN_BASED_VMX_PREEMPTION_TIMER) +#define EVMCS1_UNSUPPORTED_2NDEXEC \ + (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \ + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \ + SECONDARY_EXEC_APIC_REGISTER_VIRT | \ + SECONDARY_EXEC_ENABLE_PML | \ + SECONDARY_EXEC_ENABLE_VMFUNC | \ + SECONDARY_EXEC_SHADOW_VMCS | \ + SECONDARY_EXEC_TSC_SCALING | \ + SECONDARY_EXEC_PAUSE_LOOP_EXITING) +#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) +#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) +#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING) + #if IS_ENABLED(CONFIG_HYPERV) static bool __read_mostly enlightened_vmcs = true; module_param(enlightened_vmcs, bool, 0444); @@ -1473,69 +1539,12 @@ static void evmcs_load(u64 phys_addr) static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) { - /* - * Enlightened VMCSv1 doesn't support these: - * - * POSTED_INTR_NV = 0x00000002, - * GUEST_INTR_STATUS = 0x00000810, - * APIC_ACCESS_ADDR = 0x00002014, - * POSTED_INTR_DESC_ADDR = 0x00002016, - * EOI_EXIT_BITMAP0 = 0x0000201c, - * EOI_EXIT_BITMAP1 = 0x0000201e, - * EOI_EXIT_BITMAP2 = 0x00002020, - * EOI_EXIT_BITMAP3 = 0x00002022, - */ - vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR; - vmcs_conf->cpu_based_2nd_exec_ctrl &= - ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; - vmcs_conf->cpu_based_2nd_exec_ctrl &= - ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - vmcs_conf->cpu_based_2nd_exec_ctrl &= - ~SECONDARY_EXEC_APIC_REGISTER_VIRT; - - /* - * GUEST_PML_INDEX = 0x00000812, - * PML_ADDRESS = 0x0000200e, - */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML; - - /* VM_FUNCTION_CONTROL = 0x00002018, */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC; - - /* - * EPTP_LIST_ADDRESS = 0x00002024, - * VMREAD_BITMAP = 0x00002026, - * VMWRITE_BITMAP = 0x00002028, - */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS; - - /* - * TSC_MULTIPLIER = 0x00002032, - */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING; + vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL; + vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC; - /* - * PLE_GAP = 
0x00004020, - * PLE_WINDOW = 0x00004022, - */ - vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; - - /* - * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, - */ - vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER; - - /* - * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808, - * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04, - */ - vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; - vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; + vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; + vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; - /* - * Currently unsupported in KVM: - * GUEST_IA32_RTIT_CTL = 0x00002814, - */ } /* check_ept_pointer() should be under protection of ept_pointer_lock. */ @@ -1560,26 +1569,27 @@ static void check_ept_pointer_match(struct kvm *kvm) static int vmx_hv_remote_flush_tlb(struct kvm *kvm) { - int ret; + struct kvm_vcpu *vcpu; + int ret = -ENOTSUPP, i; spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK) check_ept_pointer_match(kvm); - if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) { - ret = -ENOTSUPP; - goto out; - } - /* * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the * base of EPT PML4 table, strip off EPT configuration information. */ - ret = hyperv_flush_guest_mapping( - to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK); + if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) { + kvm_for_each_vcpu(i, vcpu, kvm) + ret |= hyperv_flush_guest_mapping( + to_vmx(kvm_get_vcpu(kvm, i))->ept_pointer & PAGE_MASK); + } else { + ret = hyperv_flush_guest_mapping( + to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK); + } -out: spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); return ret; } @@ -1595,6 +1605,35 @@ static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {} static inline void evmcs_touch_msr_bitmap(void) {} #endif /* IS_ENABLED(CONFIG_HYPERV) */ +static int nested_enable_evmcs(struct kvm_vcpu *vcpu, + uint16_t *vmcs_version) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* We don't support disabling the feature for simplicity. */ + if (vmx->nested.enlightened_vmcs_enabled) + return 0; + + vmx->nested.enlightened_vmcs_enabled = true; + + /* + * vmcs_version represents the range of supported Enlightened VMCS + * versions: lower 8 bits is the minimal version, higher 8 bits is the + * maximum supported version. KVM supports versions from 1 to + * KVM_EVMCS_VERSION. 
+ */ + if (vmcs_version) + *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1; + + vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; + vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; + vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; + vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC; + vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC; + + return 0; +} + static inline bool is_exception_n(u32 intr_info, u8 vector) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | @@ -1617,11 +1656,6 @@ static inline bool is_page_fault(u32 intr_info) return is_exception_n(intr_info, PF_VECTOR); } -static inline bool is_no_device(u32 intr_info) -{ - return is_exception_n(intr_info, NM_VECTOR); -} - static inline bool is_invalid_opcode(u32 intr_info) { return is_exception_n(intr_info, UD_VECTOR); @@ -1632,12 +1666,6 @@ static inline bool is_gp_fault(u32 intr_info) return is_exception_n(intr_info, GP_VECTOR); } -static inline bool is_external_interrupt(u32 intr_info) -{ - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) - == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); -} - static inline bool is_machine_check(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | @@ -2063,9 +2091,6 @@ static inline bool is_nmi(u32 intr_info) static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification); -static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12, - u32 reason, unsigned long qualification); static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) { @@ -2077,7 +2102,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) return -1; } -static inline void __invvpid(int ext, u16 vpid, gva_t gva) +static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva) { struct { u64 vpid : 16; @@ -2086,22 +2111,20 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva) } operand = { vpid, 0, gva }; bool error; - asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na) - : CC_OUT(na) (error) : "a"(&operand), "c"(ext) - : "memory"); + asm volatile (__ex("invvpid %2, %1") CC_SET(na) + : CC_OUT(na) (error) : "r"(ext), "m"(operand)); BUG_ON(error); } -static inline void __invept(int ext, u64 eptp, gpa_t gpa) +static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa) { struct { u64 eptp, gpa; } operand = {eptp, gpa}; bool error; - asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na) - : CC_OUT(na) (error) : "a" (&operand), "c" (ext) - : "memory"); + asm volatile (__ex("invept %2, %1") CC_SET(na) + : CC_OUT(na) (error) : "r"(ext), "m"(operand)); BUG_ON(error); } @@ -2120,9 +2143,8 @@ static void vmcs_clear(struct vmcs *vmcs) u64 phys_addr = __pa(vmcs); bool error; - asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na) - : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr) - : "memory"); + asm volatile (__ex("vmclear %1") CC_SET(na) + : CC_OUT(na) (error) : "m"(phys_addr)); if (unlikely(error)) printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", vmcs, phys_addr); @@ -2145,9 +2167,8 @@ static void vmcs_load(struct vmcs *vmcs) if (static_branch_unlikely(&enable_evmcs)) return evmcs_load(phys_addr); - asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na) - : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr) - : "memory"); + asm volatile (__ex("vmptrld %1") CC_SET(na) + : CC_OUT(na) (error) : "m"(phys_addr)); if (unlikely(error)) printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n", 
vmcs, phys_addr); @@ -2323,8 +2344,8 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) { unsigned long value; - asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0") - : "=a"(value) : "d"(field) : "cc"); + asm volatile (__ex_clear("vmread %1, %0", "%k0") + : "=r"(value) : "r"(field)); return value; } @@ -2375,8 +2396,8 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val { bool error; - asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na) - : CC_OUT(na) (error) : "a"(value), "d"(field)); + asm volatile (__ex("vmwrite %2, %1") CC_SET(na) + : CC_OUT(na) (error) : "r"(field), "rm"(value)); if (unlikely(error)) vmwrite_error(field, value); } @@ -2707,7 +2728,8 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, u64 guest_val, u64 host_val) { vmcs_write64(guest_val_vmcs, guest_val); - vmcs_write64(host_val_vmcs, host_val); + if (host_val_vmcs != HOST_IA32_EFER) + vmcs_write64(host_val_vmcs, host_val); vm_entry_controls_setbit(vmx, entry); vm_exit_controls_setbit(vmx, exit); } @@ -2805,8 +2827,6 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) ignore_bits &= ~(u64)EFER_SCE; #endif - clear_atomic_switch_msr(vmx, MSR_EFER); - /* * On EPT, we can't emulate NX, so we must switch EFER atomically. * On CPUs that support "load IA32_EFER", always switch EFER @@ -2819,8 +2839,12 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) if (guest_efer != host_efer) add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer, false); + else + clear_atomic_switch_msr(vmx, MSR_EFER); return false; } else { + clear_atomic_switch_msr(vmx, MSR_EFER); + guest_efer &= ~ignore_bits; guest_efer |= host_efer & ignore_bits; @@ -3272,34 +3296,30 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); unsigned int nr = vcpu->arch.exception.nr; + bool has_payload = vcpu->arch.exception.has_payload; + unsigned long payload = vcpu->arch.exception.payload; if (nr == PF_VECTOR) { if (vcpu->arch.exception.nested_apf) { *exit_qual = vcpu->arch.apf.nested_apf_token; return 1; } - /* - * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception. - * The fix is to add the ancillary datum (CR2 or DR6) to structs - * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 - * can be written only when inject_pending_event runs. This should be - * conditional on a new capability---if the capability is disabled, - * kvm_multiple_exception would write the ancillary information to - * CR2 or DR6, for backwards ABI-compatibility. - */ if (nested_vmx_is_page_fault_vmexit(vmcs12, vcpu->arch.exception.error_code)) { - *exit_qual = vcpu->arch.cr2; - return 1; - } - } else { - if (vmcs12->exception_bitmap & (1u << nr)) { - if (nr == DB_VECTOR) - *exit_qual = vcpu->arch.dr6; - else - *exit_qual = 0; + *exit_qual = has_payload ? 
payload : vcpu->arch.cr2; return 1; } + } else if (vmcs12->exception_bitmap & (1u << nr)) { + if (nr == DB_VECTOR) { + if (!has_payload) { + payload = vcpu->arch.dr6; + payload &= ~(DR6_FIXED_1 | DR6_BT); + payload ^= DR6_RTM; + } + *exit_qual = payload; + } else + *exit_qual = 0; + return 1; } return 0; @@ -3326,6 +3346,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu) u32 error_code = vcpu->arch.exception.error_code; u32 intr_info = nr | INTR_INFO_VALID_MASK; + kvm_deliver_exception_payload(vcpu); + if (has_error_code) { vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); intr_info |= INTR_INFO_DELIVER_CODE_MASK; @@ -4397,9 +4419,7 @@ static void kvm_cpu_vmxon(u64 addr) cr4_set_bits(X86_CR4_VMXE); intel_pt_handle_vmx(1); - asm volatile (ASM_VMX_VMXON_RAX - : : "a"(&addr), "m"(addr) - : "memory", "cc"); + asm volatile ("vmxon %0" : : "m"(addr)); } static int hardware_enable(void) @@ -4468,7 +4488,7 @@ static void vmclear_local_loaded_vmcss(void) */ static void kvm_cpu_vmxoff(void) { - asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); + asm volatile (__ex("vmxoff")); intel_pt_handle_vmx(0); cr4_clear_bits(X86_CR4_VMXE); @@ -5112,9 +5132,10 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid, bool invalidate_gpa) { if (enable_ept && (invalidate_gpa || !enable_vpid)) { - if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) return; - ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa)); + ept_sync_context(construct_eptp(vcpu, + vcpu->arch.mmu->root_hpa)); } else { vpid_sync_context(vpid); } @@ -5264,7 +5285,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long hw_cr0; - hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK); + hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); if (enable_unrestricted_guest) hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; else { @@ -6339,6 +6360,9 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) rdmsr(MSR_IA32_CR_PAT, low32, high32); vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32)); } + + if (cpu_has_load_ia32_efer) + vmcs_write64(HOST_IA32_EFER, host_efer); } static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) @@ -6666,7 +6690,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP); if (enable_pml) { - ASSERT(vmx->pml_pg); vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } @@ -8067,35 +8090,39 @@ static int handle_monitor(struct kvm_vcpu *vcpu) /* * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), - * set the success or error code of an emulated VMX instruction, as specified - * by Vol 2B, VMX Instruction Reference, "Conventions". + * set the success or error code of an emulated VMX instruction (as specified + * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated + * instruction. 
*/ -static void nested_vmx_succeed(struct kvm_vcpu *vcpu) +static int nested_vmx_succeed(struct kvm_vcpu *vcpu) { vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); + return kvm_skip_emulated_instruction(vcpu); } -static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu) +static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) { vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)) | X86_EFLAGS_CF); + return kvm_skip_emulated_instruction(vcpu); } -static void nested_vmx_failValid(struct kvm_vcpu *vcpu, - u32 vm_instruction_error) +static int nested_vmx_failValid(struct kvm_vcpu *vcpu, + u32 vm_instruction_error) { - if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { - /* - * failValid writes the error number to the current VMCS, which - * can't be done there isn't a current VMCS. - */ - nested_vmx_failInvalid(vcpu); - return; - } + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * failValid writes the error number to the current VMCS, which + * can't be done if there isn't a current VMCS. + */ + if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) + return nested_vmx_failInvalid(vcpu); + vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_SF | X86_EFLAGS_OF)) @@ -8105,6 +8132,7 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu, * We don't need to force a shadow sync because * VM_INSTRUCTION_ERROR is not shadowed */ + return kvm_skip_emulated_instruction(vcpu); } static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) @@ -8292,6 +8320,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) vmx->nested.vpid02 = allocate_vpid(); + vmx->nested.vmcs02_initialized = false; vmx->nested.vmxon = true; return 0; @@ -8345,10 +8374,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu) return 1; } - if (vmx->nested.vmxon) { - nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmx->nested.vmxon) + return nested_vmx_failValid(vcpu, + VMXERR_VMXON_IN_VMX_ROOT_OPERATION); if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) != VMXON_NEEDED_FEATURES) { @@ -8367,21 +8395,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu) * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; * which replaces physical address width with 32 */ - if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) + return nested_vmx_failInvalid(vcpu); page = kvm_vcpu_gpa_to_page(vcpu, vmptr); - if (is_error_page(page)) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (is_error_page(page)) + return nested_vmx_failInvalid(vcpu); + if (*(u32 *)kmap(page) != VMCS12_REVISION) { kunmap(page); kvm_release_page_clean(page); - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_failInvalid(vcpu); } kunmap(page); kvm_release_page_clean(page); @@ -8391,8 +8415,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu) if (ret) return ret; - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } /* @@ -8423,8 +8446,24 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) vmcs_write64(VMCS_LINK_POINTER, -1ull); } -static inline void 
nested_release_vmcs12(struct vcpu_vmx *vmx) +static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!vmx->nested.hv_evmcs) + return; + + kunmap(vmx->nested.hv_evmcs_page); + kvm_release_page_dirty(vmx->nested.hv_evmcs_page); + vmx->nested.hv_evmcs_vmptr = -1ull; + vmx->nested.hv_evmcs_page = NULL; + vmx->nested.hv_evmcs = NULL; +} + +static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) { + struct vcpu_vmx *vmx = to_vmx(vcpu); + if (vmx->nested.current_vmptr == -1ull) return; @@ -8432,16 +8471,18 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) /* copy to memory all shadowed fields in case they were modified */ copy_shadow_to_vmcs12(vmx); - vmx->nested.sync_shadow_vmcs = false; + vmx->nested.need_vmcs12_sync = false; vmx_disable_shadow_vmcs(vmx); } vmx->nested.posted_intr_nv = -1; /* Flush VMCS12 to guest memory */ - kvm_vcpu_write_guest_page(&vmx->vcpu, + kvm_vcpu_write_guest_page(vcpu, vmx->nested.current_vmptr >> PAGE_SHIFT, vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + vmx->nested.current_vmptr = -1ull; } @@ -8449,8 +8490,10 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) * Free whatever needs to be freed from vmx->nested when L1 goes down, or * just stops using VMX. */ -static void free_nested(struct vcpu_vmx *vmx) +static void free_nested(struct kvm_vcpu *vcpu) { + struct vcpu_vmx *vmx = to_vmx(vcpu); + if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) return; @@ -8483,6 +8526,10 @@ static void free_nested(struct vcpu_vmx *vmx) vmx->nested.pi_desc = NULL; } + kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); + + nested_release_evmcs(vcpu); + free_loaded_vmcs(&vmx->nested.vmcs02); } @@ -8491,9 +8538,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu) { if (!nested_vmx_check_permission(vcpu)) return 1; - free_nested(to_vmx(vcpu)); - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + free_nested(vcpu); + return nested_vmx_succeed(vcpu); } /* Emulate the VMCLEAR instruction */ @@ -8509,25 +8555,28 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; - if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { - nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); - return kvm_skip_emulated_instruction(vcpu); - } + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) + return nested_vmx_failValid(vcpu, + VMXERR_VMCLEAR_INVALID_ADDRESS); - if (vmptr == vmx->nested.vmxon_ptr) { - nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmptr == vmx->nested.vmxon_ptr) + return nested_vmx_failValid(vcpu, + VMXERR_VMCLEAR_VMXON_POINTER); - if (vmptr == vmx->nested.current_vmptr) - nested_release_vmcs12(vmx); + if (vmx->nested.hv_evmcs_page) { + if (vmptr == vmx->nested.hv_evmcs_vmptr) + nested_release_evmcs(vcpu); + } else { + if (vmptr == vmx->nested.current_vmptr) + nested_release_vmcs12(vcpu); - kvm_vcpu_write_guest(vcpu, - vmptr + offsetof(struct vmcs12, launch_state), - &zero, sizeof(zero)); + kvm_vcpu_write_guest(vcpu, + vmptr + offsetof(struct vmcs12, + launch_state), + &zero, sizeof(zero)); + } - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch); @@ -8610,6 +8659,395 @@ static inline int vmcs12_write_any(struct vmcs12 *vmcs12, } 
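[Editor's note - not part of the patch] The copy_enlightened_to_vmcs12() function added below pulls guest state from a Hyper-V Enlightened VMCS into the cached vmcs12. Judging from the code, hv_clean_fields is a bitmask maintained by L1: a set bit means the corresponding field group has not been touched since KVM last read it, so only groups whose clean bit is clear get copied. A hedged, self-contained sketch of that pattern with shortened stand-in types (the real flag corresponding to the one below is HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC):

	struct sketch_evmcs {
		unsigned int hv_clean_fields;
		unsigned long guest_rsp, guest_rflags;
	};

	struct sketch_vmcs12 {
		unsigned long guest_rsp, guest_rflags;
	};

	#define SKETCH_CLEAN_GUEST_BASIC	(1u << 0)	/* stand-in for the real HV_* flag */

	static void sync_guest_basic(struct sketch_vmcs12 *dst,
				     const struct sketch_evmcs *src)
	{
		/* A set clean bit means L1 left this group untouched: skip the copy. */
		if (src->hv_clean_fields & SKETCH_CLEAN_GUEST_BASIC)
			return;

		dst->guest_rsp    = src->guest_rsp;
		dst->guest_rflags = src->guest_rflags;
	}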
+static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) +{ + struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; + struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; + + vmcs12->hdr.revision_id = evmcs->revision_id; + + /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ + vmcs12->tpr_threshold = evmcs->tpr_threshold; + vmcs12->guest_rip = evmcs->guest_rip; + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) { + vmcs12->guest_rsp = evmcs->guest_rsp; + vmcs12->guest_rflags = evmcs->guest_rflags; + vmcs12->guest_interruptibility_info = + evmcs->guest_interruptibility_info; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { + vmcs12->cpu_based_vm_exec_control = + evmcs->cpu_based_vm_exec_control; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { + vmcs12->exception_bitmap = evmcs->exception_bitmap; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) { + vmcs12->vm_entry_controls = evmcs->vm_entry_controls; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) { + vmcs12->vm_entry_intr_info_field = + evmcs->vm_entry_intr_info_field; + vmcs12->vm_entry_exception_error_code = + evmcs->vm_entry_exception_error_code; + vmcs12->vm_entry_instruction_len = + evmcs->vm_entry_instruction_len; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { + vmcs12->host_ia32_pat = evmcs->host_ia32_pat; + vmcs12->host_ia32_efer = evmcs->host_ia32_efer; + vmcs12->host_cr0 = evmcs->host_cr0; + vmcs12->host_cr3 = evmcs->host_cr3; + vmcs12->host_cr4 = evmcs->host_cr4; + vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; + vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip; + vmcs12->host_rip = evmcs->host_rip; + vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; + vmcs12->host_es_selector = evmcs->host_es_selector; + vmcs12->host_cs_selector = evmcs->host_cs_selector; + vmcs12->host_ss_selector = evmcs->host_ss_selector; + vmcs12->host_ds_selector = evmcs->host_ds_selector; + vmcs12->host_fs_selector = evmcs->host_fs_selector; + vmcs12->host_gs_selector = evmcs->host_gs_selector; + vmcs12->host_tr_selector = evmcs->host_tr_selector; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { + vmcs12->pin_based_vm_exec_control = + evmcs->pin_based_vm_exec_control; + vmcs12->vm_exit_controls = evmcs->vm_exit_controls; + vmcs12->secondary_vm_exec_control = + evmcs->secondary_vm_exec_control; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) { + vmcs12->io_bitmap_a = evmcs->io_bitmap_a; + vmcs12->io_bitmap_b = evmcs->io_bitmap_b; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) { + vmcs12->msr_bitmap = evmcs->msr_bitmap; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) { + vmcs12->guest_es_base = evmcs->guest_es_base; + vmcs12->guest_cs_base = evmcs->guest_cs_base; + vmcs12->guest_ss_base = evmcs->guest_ss_base; + vmcs12->guest_ds_base = evmcs->guest_ds_base; + vmcs12->guest_fs_base = evmcs->guest_fs_base; + vmcs12->guest_gs_base = evmcs->guest_gs_base; + vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; + vmcs12->guest_tr_base = evmcs->guest_tr_base; + vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; + vmcs12->guest_idtr_base = 
evmcs->guest_idtr_base; + vmcs12->guest_es_limit = evmcs->guest_es_limit; + vmcs12->guest_cs_limit = evmcs->guest_cs_limit; + vmcs12->guest_ss_limit = evmcs->guest_ss_limit; + vmcs12->guest_ds_limit = evmcs->guest_ds_limit; + vmcs12->guest_fs_limit = evmcs->guest_fs_limit; + vmcs12->guest_gs_limit = evmcs->guest_gs_limit; + vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; + vmcs12->guest_tr_limit = evmcs->guest_tr_limit; + vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; + vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; + vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; + vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; + vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; + vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; + vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; + vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; + vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; + vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; + vmcs12->guest_es_selector = evmcs->guest_es_selector; + vmcs12->guest_cs_selector = evmcs->guest_cs_selector; + vmcs12->guest_ss_selector = evmcs->guest_ss_selector; + vmcs12->guest_ds_selector = evmcs->guest_ds_selector; + vmcs12->guest_fs_selector = evmcs->guest_fs_selector; + vmcs12->guest_gs_selector = evmcs->guest_gs_selector; + vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; + vmcs12->guest_tr_selector = evmcs->guest_tr_selector; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { + vmcs12->tsc_offset = evmcs->tsc_offset; + vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; + vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { + vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; + vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; + vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; + vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; + vmcs12->guest_cr0 = evmcs->guest_cr0; + vmcs12->guest_cr3 = evmcs->guest_cr3; + vmcs12->guest_cr4 = evmcs->guest_cr4; + vmcs12->guest_dr7 = evmcs->guest_dr7; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { + vmcs12->host_fs_base = evmcs->host_fs_base; + vmcs12->host_gs_base = evmcs->host_gs_base; + vmcs12->host_tr_base = evmcs->host_tr_base; + vmcs12->host_gdtr_base = evmcs->host_gdtr_base; + vmcs12->host_idtr_base = evmcs->host_idtr_base; + vmcs12->host_rsp = evmcs->host_rsp; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { + vmcs12->ept_pointer = evmcs->ept_pointer; + vmcs12->virtual_processor_id = evmcs->virtual_processor_id; + } + + if (unlikely(!(evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { + vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; + vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; + vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; + vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; + vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; + vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; + vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; + vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; + vmcs12->guest_pending_dbg_exceptions = + evmcs->guest_pending_dbg_exceptions; + vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; + vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; + vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; + vmcs12->guest_activity_state = 
evmcs->guest_activity_state; + vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; + } + + /* + * Not used? + * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; + * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; + * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; + * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0; + * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1; + * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2; + * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3; + * vmcs12->page_fault_error_code_mask = + * evmcs->page_fault_error_code_mask; + * vmcs12->page_fault_error_code_match = + * evmcs->page_fault_error_code_match; + * vmcs12->cr3_target_count = evmcs->cr3_target_count; + * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; + * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; + * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; + */ + + /* + * Read only fields: + * vmcs12->guest_physical_address = evmcs->guest_physical_address; + * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; + * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; + * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; + * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; + * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; + * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; + * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; + * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; + * vmcs12->exit_qualification = evmcs->exit_qualification; + * vmcs12->guest_linear_address = evmcs->guest_linear_address; + * + * Not present in struct vmcs12: + * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; + * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; + * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; + * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; + */ + + return 0; +} + +static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) +{ + struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; + struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; + + /* + * Should not be changed by KVM: + * + * evmcs->host_es_selector = vmcs12->host_es_selector; + * evmcs->host_cs_selector = vmcs12->host_cs_selector; + * evmcs->host_ss_selector = vmcs12->host_ss_selector; + * evmcs->host_ds_selector = vmcs12->host_ds_selector; + * evmcs->host_fs_selector = vmcs12->host_fs_selector; + * evmcs->host_gs_selector = vmcs12->host_gs_selector; + * evmcs->host_tr_selector = vmcs12->host_tr_selector; + * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; + * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; + * evmcs->host_cr0 = vmcs12->host_cr0; + * evmcs->host_cr3 = vmcs12->host_cr3; + * evmcs->host_cr4 = vmcs12->host_cr4; + * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; + * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; + * evmcs->host_rip = vmcs12->host_rip; + * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; + * evmcs->host_fs_base = vmcs12->host_fs_base; + * evmcs->host_gs_base = vmcs12->host_gs_base; + * evmcs->host_tr_base = vmcs12->host_tr_base; + * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; + * evmcs->host_idtr_base = vmcs12->host_idtr_base; + * evmcs->host_rsp = vmcs12->host_rsp; + * sync_vmcs12() doesn't read these: + * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; + * evmcs->io_bitmap_b = 
vmcs12->io_bitmap_b; + * evmcs->msr_bitmap = vmcs12->msr_bitmap; + * evmcs->ept_pointer = vmcs12->ept_pointer; + * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; + * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; + * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; + * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; + * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0; + * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1; + * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2; + * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3; + * evmcs->tpr_threshold = vmcs12->tpr_threshold; + * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; + * evmcs->exception_bitmap = vmcs12->exception_bitmap; + * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; + * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; + * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; + * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; + * evmcs->page_fault_error_code_mask = + * vmcs12->page_fault_error_code_mask; + * evmcs->page_fault_error_code_match = + * vmcs12->page_fault_error_code_match; + * evmcs->cr3_target_count = vmcs12->cr3_target_count; + * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; + * evmcs->tsc_offset = vmcs12->tsc_offset; + * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; + * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; + * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; + * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; + * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; + * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; + * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; + * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; + * + * Not present in struct vmcs12: + * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; + * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; + * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; + * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; + */ + + evmcs->guest_es_selector = vmcs12->guest_es_selector; + evmcs->guest_cs_selector = vmcs12->guest_cs_selector; + evmcs->guest_ss_selector = vmcs12->guest_ss_selector; + evmcs->guest_ds_selector = vmcs12->guest_ds_selector; + evmcs->guest_fs_selector = vmcs12->guest_fs_selector; + evmcs->guest_gs_selector = vmcs12->guest_gs_selector; + evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; + evmcs->guest_tr_selector = vmcs12->guest_tr_selector; + + evmcs->guest_es_limit = vmcs12->guest_es_limit; + evmcs->guest_cs_limit = vmcs12->guest_cs_limit; + evmcs->guest_ss_limit = vmcs12->guest_ss_limit; + evmcs->guest_ds_limit = vmcs12->guest_ds_limit; + evmcs->guest_fs_limit = vmcs12->guest_fs_limit; + evmcs->guest_gs_limit = vmcs12->guest_gs_limit; + evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; + evmcs->guest_tr_limit = vmcs12->guest_tr_limit; + evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; + evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; + + evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; + evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; + evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; + evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; + evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; + evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; + evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; + 
evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; + + evmcs->guest_es_base = vmcs12->guest_es_base; + evmcs->guest_cs_base = vmcs12->guest_cs_base; + evmcs->guest_ss_base = vmcs12->guest_ss_base; + evmcs->guest_ds_base = vmcs12->guest_ds_base; + evmcs->guest_fs_base = vmcs12->guest_fs_base; + evmcs->guest_gs_base = vmcs12->guest_gs_base; + evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; + evmcs->guest_tr_base = vmcs12->guest_tr_base; + evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; + evmcs->guest_idtr_base = vmcs12->guest_idtr_base; + + evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; + evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; + + evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; + evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; + evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; + evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; + + evmcs->guest_pending_dbg_exceptions = + vmcs12->guest_pending_dbg_exceptions; + evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; + evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; + + evmcs->guest_activity_state = vmcs12->guest_activity_state; + evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; + + evmcs->guest_cr0 = vmcs12->guest_cr0; + evmcs->guest_cr3 = vmcs12->guest_cr3; + evmcs->guest_cr4 = vmcs12->guest_cr4; + evmcs->guest_dr7 = vmcs12->guest_dr7; + + evmcs->guest_physical_address = vmcs12->guest_physical_address; + + evmcs->vm_instruction_error = vmcs12->vm_instruction_error; + evmcs->vm_exit_reason = vmcs12->vm_exit_reason; + evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; + evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; + evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; + evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; + evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; + evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; + + evmcs->exit_qualification = vmcs12->exit_qualification; + + evmcs->guest_linear_address = vmcs12->guest_linear_address; + evmcs->guest_rsp = vmcs12->guest_rsp; + evmcs->guest_rflags = vmcs12->guest_rflags; + + evmcs->guest_interruptibility_info = + vmcs12->guest_interruptibility_info; + evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; + evmcs->vm_entry_controls = vmcs12->vm_entry_controls; + evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; + evmcs->vm_entry_exception_error_code = + vmcs12->vm_entry_exception_error_code; + evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; + + evmcs->guest_rip = vmcs12->guest_rip; + + evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; + + return 0; +} + /* * Copy the writable VMCS shadow fields back to the VMCS12, in case * they have been modified by the L1 guest. Note that the "read-only" @@ -8683,20 +9121,6 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) vmcs_load(vmx->loaded_vmcs->vmcs); } -/* - * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was - * used before) all generate the same failure when it is missing. 
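The copy_enlightened_to_vmcs12() routine above copies a field group only when the corresponding bit in hv_clean_fields is clear, i.e. when L1 has marked that group dirty since the last sync. Below is a minimal standalone sketch of that gating pattern; the struct layouts, bit values and field names are simplified stand-ins, not the Hyper-V TLFS definitions.

#include <stdint.h>
#include <stdio.h>

#define CLEAN_FIELD_GUEST_BASIC	(1u << 10)	/* stand-in for the TLFS bit */
#define CLEAN_FIELD_CRDR	(1u << 8)

struct demo_evmcs  { uint64_t guest_rsp, guest_cr0; uint32_t clean_fields; };
struct demo_vmcs12 { uint64_t guest_rsp, guest_cr0; };

/* Copy a group only when its "clean" bit is NOT set, i.e. L1 touched it. */
static void sync_dirty_groups(struct demo_vmcs12 *dst, const struct demo_evmcs *src)
{
	if (!(src->clean_fields & CLEAN_FIELD_GUEST_BASIC))
		dst->guest_rsp = src->guest_rsp;
	if (!(src->clean_fields & CLEAN_FIELD_CRDR))
		dst->guest_cr0 = src->guest_cr0;
}

int main(void)
{
	struct demo_evmcs e = { .guest_rsp = 0x1000, .guest_cr0 = 0x80000033,
				.clean_fields = CLEAN_FIELD_CRDR };
	struct demo_vmcs12 v = { 0 };

	sync_dirty_groups(&v, &e);
	/* guest_rsp is copied (dirty group), guest_cr0 is skipped (marked clean) */
	printf("rsp=%#llx cr0=%#llx\n",
	       (unsigned long long)v.guest_rsp, (unsigned long long)v.guest_cr0);
	return 0;
}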
- */ -static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu) -{ - struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vmx->nested.current_vmptr == -1ull) { - nested_vmx_failInvalid(vcpu); - return 0; - } - return 1; -} - static int handle_vmread(struct kvm_vcpu *vcpu) { unsigned long field; @@ -8709,8 +9133,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu) if (!nested_vmx_check_permission(vcpu)) return 1; - if (!nested_vmx_check_vmcs12(vcpu)) - return kvm_skip_emulated_instruction(vcpu); + if (to_vmx(vcpu)->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); if (!is_guest_mode(vcpu)) vmcs12 = get_vmcs12(vcpu); @@ -8719,20 +9143,18 @@ static int handle_vmread(struct kvm_vcpu *vcpu) * When vmcs->vmcs_link_pointer is -1ull, any VMREAD * to shadowed-field sets the ALU flags for VMfailInvalid. */ - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) + return nested_vmx_failInvalid(vcpu); vmcs12 = get_shadow_vmcs12(vcpu); } /* Decode instruction info and find the field to read */ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); /* Read the field, zero-extended to a u64 field_value */ - if (vmcs12_read_any(vmcs12, field, &field_value) < 0) { - nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmcs12_read_any(vmcs12, field, &field_value) < 0) + return nested_vmx_failValid(vcpu, + VMXERR_UNSUPPORTED_VMCS_COMPONENT); + /* * Now copy part of this value to register or memory, as requested. * Note that the number of bits actually copied is 32 or 64 depending @@ -8750,8 +9172,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) (is_long_mode(vcpu) ? 8 : 4), NULL); } - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } @@ -8776,8 +9197,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) if (!nested_vmx_check_permission(vcpu)) return 1; - if (!nested_vmx_check_vmcs12(vcpu)) - return kvm_skip_emulated_instruction(vcpu); + if (vmx->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); if (vmx_instruction_info & (1u << 10)) field_value = kvm_register_readl(vcpu, @@ -8800,11 +9221,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) * VMCS," then the "read-only" fields are actually read/write. */ if (vmcs_field_readonly(field) && - !nested_cpu_has_vmwrite_any_field(vcpu)) { - nested_vmx_failValid(vcpu, + !nested_cpu_has_vmwrite_any_field(vcpu)) + return nested_vmx_failValid(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); - return kvm_skip_emulated_instruction(vcpu); - } if (!is_guest_mode(vcpu)) vmcs12 = get_vmcs12(vcpu); @@ -8813,18 +9232,14 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE * to shadowed-field sets the ALU flags for VMfailInvalid. 
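The handle_vmread()/handle_vmwrite() hunks collapse the old two-step "report error, then skip the instruction" sequence into a single return, which assumes the nested_vmx_failInvalid()/failValid()/succeed() helpers were changed elsewhere in this patch to return the value of kvm_skip_emulated_instruction(). A self-contained sketch of that refactoring pattern, with purely illustrative names rather than KVM's:

#include <stdio.h>

struct vcpu { int rip_advanced; };

static int skip_emulated_instruction(struct vcpu *v)
{
	v->rip_advanced = 1;	/* advance guest RIP past the emulated insn */
	return 1;		/* "continue running the guest" */
}

/*
 * After the refactor the failure helper both records the error state and
 * skips the instruction, so call sites can simply return its result.
 */
static int fail_valid(struct vcpu *v, int error)
{
	printf("VMfailValid, error %d\n", error);
	return skip_emulated_instruction(v);
}

static int handle_op(struct vcpu *v, int bad_operand)
{
	if (bad_operand)
		return fail_valid(v, 12);	/* one line instead of two */
	/* ... emulate the instruction ... */
	return skip_emulated_instruction(v);
}

int main(void)
{
	struct vcpu v = { 0 };
	return handle_op(&v, 1) == 1 ? 0 : 1;
}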
*/ - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) + return nested_vmx_failInvalid(vcpu); vmcs12 = get_shadow_vmcs12(vcpu); - } - if (vmcs12_write_any(vmcs12, field, field_value) < 0) { - nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmcs12_write_any(vmcs12, field, field_value) < 0) + return nested_vmx_failValid(vcpu, + VMXERR_UNSUPPORTED_VMCS_COMPONENT); /* * Do not track vmcs12 dirty-state if in guest-mode @@ -8846,8 +9261,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) } } - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) @@ -8858,7 +9272,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) SECONDARY_EXEC_SHADOW_VMCS); vmcs_write64(VMCS_LINK_POINTER, __pa(vmx->vmcs01.shadow_vmcs)); - vmx->nested.sync_shadow_vmcs = true; + vmx->nested.need_vmcs12_sync = true; } vmx->nested.dirty_vmcs12 = true; } @@ -8875,36 +9289,37 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) if (nested_vmx_get_vmptr(vcpu, &vmptr)) return 1; - if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { - nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); - return kvm_skip_emulated_instruction(vcpu); - } + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) + return nested_vmx_failValid(vcpu, + VMXERR_VMPTRLD_INVALID_ADDRESS); - if (vmptr == vmx->nested.vmxon_ptr) { - nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); - return kvm_skip_emulated_instruction(vcpu); - } + if (vmptr == vmx->nested.vmxon_ptr) + return nested_vmx_failValid(vcpu, + VMXERR_VMPTRLD_VMXON_POINTER); + + /* Forbid normal VMPTRLD if Enlightened version was used */ + if (vmx->nested.hv_evmcs) + return 1; if (vmx->nested.current_vmptr != vmptr) { struct vmcs12 *new_vmcs12; struct page *page; page = kvm_vcpu_gpa_to_page(vcpu, vmptr); - if (is_error_page(page)) { - nested_vmx_failInvalid(vcpu); - return kvm_skip_emulated_instruction(vcpu); - } + if (is_error_page(page)) + return nested_vmx_failInvalid(vcpu); + new_vmcs12 = kmap(page); if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || (new_vmcs12->hdr.shadow_vmcs && !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { kunmap(page); kvm_release_page_clean(page); - nested_vmx_failValid(vcpu, + return nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); - return kvm_skip_emulated_instruction(vcpu); } - nested_release_vmcs12(vmx); + nested_release_vmcs12(vcpu); + /* * Load VMCS12 from guest memory since it is not already * cached. @@ -8916,8 +9331,71 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) set_current_vmptr(vmx, vmptr); } - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); +} + +/* + * This is an equivalent of the nested hypervisor executing the vmptrld + * instruction. 
+ */ +static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, + bool from_launch) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct hv_vp_assist_page assist_page; + + if (likely(!vmx->nested.enlightened_vmcs_enabled)) + return 1; + + if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page))) + return 1; + + if (unlikely(!assist_page.enlighten_vmentry)) + return 1; + + if (unlikely(assist_page.current_nested_vmcs != + vmx->nested.hv_evmcs_vmptr)) { + + if (!vmx->nested.hv_evmcs) + vmx->nested.current_vmptr = -1ull; + + nested_release_evmcs(vcpu); + + vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page( + vcpu, assist_page.current_nested_vmcs); + + if (unlikely(is_error_page(vmx->nested.hv_evmcs_page))) + return 0; + + vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page); + + if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) { + nested_release_evmcs(vcpu); + return 0; + } + + vmx->nested.dirty_vmcs12 = true; + /* + * As we keep L2 state for one guest only 'hv_clean_fields' mask + * can't be used when we switch between them. Reset it here for + * simplicity. + */ + vmx->nested.hv_evmcs->hv_clean_fields &= + ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; + vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs; + + /* + * Unlike normal vmcs12, enlightened vmcs12 is not fully + * reloaded from guest's memory (read only fields, fields not + * present in struct hv_enlightened_vmcs, ...). Make sure there + * are no leftovers. + */ + if (from_launch) + memset(vmx->nested.cached_vmcs12, 0, + sizeof(*vmx->nested.cached_vmcs12)); + + } + return 1; } /* Emulate the VMPTRST instruction */ @@ -8932,6 +9410,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) if (!nested_vmx_check_permission(vcpu)) return 1; + if (unlikely(to_vmx(vcpu)->nested.hv_evmcs)) + return 1; + if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva)) return 1; /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ @@ -8940,8 +9421,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu) kvm_inject_page_fault(vcpu, &e); return 1; } - nested_vmx_succeed(vcpu); - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } /* Emulate the INVEPT instruction */ @@ -8971,11 +9451,9 @@ static int handle_invept(struct kvm_vcpu *vcpu) types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; - if (type >= 32 || !(types & (1 << type))) { - nested_vmx_failValid(vcpu, + if (type >= 32 || !(types & (1 << type))) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } /* According to the Intel VMX instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) @@ -8997,14 +9475,20 @@ static int handle_invept(struct kvm_vcpu *vcpu) case VMX_EPT_EXTENT_CONTEXT: kvm_mmu_sync_roots(vcpu); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); - nested_vmx_succeed(vcpu); break; default: BUG_ON(1); break; } - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); +} + +static u16 nested_get_vpid02(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + return vmx->nested.vpid02 ? 
vmx->nested.vpid02 : vmx->vpid; } static int handle_invvpid(struct kvm_vcpu *vcpu) @@ -9018,6 +9502,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) u64 vpid; u64 gla; } operand; + u16 vpid02; if (!(vmx->nested.msrs.secondary_ctls_high & SECONDARY_EXEC_ENABLE_VPID) || @@ -9035,11 +9520,9 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) types = (vmx->nested.msrs.vpid_caps & VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; - if (type >= 32 || !(types & (1 << type))) { - nested_vmx_failValid(vcpu, + if (type >= 32 || !(types & (1 << type))) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } /* according to the intel vmx instruction reference, the memory * operand is read even if it isn't needed (e.g., for type==global) @@ -9051,47 +9534,39 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) kvm_inject_page_fault(vcpu, &e); return 1; } - if (operand.vpid >> 16) { - nested_vmx_failValid(vcpu, + if (operand.vpid >> 16) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } + vpid02 = nested_get_vpid02(vcpu); switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: if (!operand.vpid || - is_noncanonical_address(operand.gla, vcpu)) { - nested_vmx_failValid(vcpu, + is_noncanonical_address(operand.gla, vcpu)) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } - if (cpu_has_vmx_invvpid_individual_addr() && - vmx->nested.vpid02) { + if (cpu_has_vmx_invvpid_individual_addr()) { __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, - vmx->nested.vpid02, operand.gla); + vpid02, operand.gla); } else - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); + __vmx_flush_tlb(vcpu, vpid02, false); break; case VMX_VPID_EXTENT_SINGLE_CONTEXT: case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: - if (!operand.vpid) { - nested_vmx_failValid(vcpu, + if (!operand.vpid) + return nested_vmx_failValid(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); - return kvm_skip_emulated_instruction(vcpu); - } - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); + __vmx_flush_tlb(vcpu, vpid02, false); break; case VMX_VPID_EXTENT_ALL_CONTEXT: - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); + __vmx_flush_tlb(vcpu, vpid02, false); break; default: WARN_ON_ONCE(1); return kvm_skip_emulated_instruction(vcpu); } - nested_vmx_succeed(vcpu); - - return kvm_skip_emulated_instruction(vcpu); + return nested_vmx_succeed(vcpu); } static int handle_invpcid(struct kvm_vcpu *vcpu) @@ -9162,11 +9637,11 @@ static int handle_invpcid(struct kvm_vcpu *vcpu) } for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_roots[i].cr3) + if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3) == operand.pcid) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); - kvm_mmu_free_roots(vcpu, roots_to_free); + kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); /* * If neither the current cr3 nor any of the prev_roots use the * given PCID, then nothing needs to be done here because a @@ -9293,7 +9768,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, kvm_mmu_unload(vcpu); mmu->ept_ad = accessed_dirty; - mmu->base_role.ad_disabled = !accessed_dirty; + mmu->mmu_role.base.ad_disabled = !accessed_dirty; vmcs12->ept_pointer = address; /* * TODO: Check what's the correct approach in case @@ -9652,9 +10127,6 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) return false; else if 
(is_page_fault(intr_info)) return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; - else if (is_no_device(intr_info) && - !(vmcs12->guest_cr0 & X86_CR0_TS)) - return false; else if (is_debug(intr_info) && vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) @@ -10676,9 +11148,25 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmcs_write32(PLE_WINDOW, vmx->ple_window); } - if (vmx->nested.sync_shadow_vmcs) { - copy_vmcs12_to_shadow(vmx); - vmx->nested.sync_shadow_vmcs = false; + if (vmx->nested.need_vmcs12_sync) { + /* + * hv_evmcs may end up being not mapped after migration (when + * L2 was running), map it here to make sure vmcs12 changes are + * properly reflected. + */ + if (vmx->nested.enlightened_vmcs_enabled && + !vmx->nested.hv_evmcs) + nested_vmx_handle_enlightened_vmptrld(vcpu, false); + + if (vmx->nested.hv_evmcs) { + copy_vmcs12_to_enlightened(vmx); + /* All fields are clean */ + vmx->nested.hv_evmcs->hv_clean_fields |= + HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; + } else { + copy_vmcs12_to_shadow(vmx); + } + vmx->nested.need_vmcs12_sync = false; } if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) @@ -10745,7 +11233,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t" "jmp 1f \n\t" "2: \n\t" - __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" + __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t" "1: \n\t" /* Reload cr2 if changed */ "mov %c[cr2](%0), %%" _ASM_AX " \n\t" @@ -10777,9 +11265,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) /* Enter guest mode */ "jne 1f \n\t" - __ex(ASM_VMX_VMLAUNCH) "\n\t" + __ex("vmlaunch") "\n\t" "jmp 2f \n\t" - "1: " __ex(ASM_VMX_VMRESUME) "\n\t" + "1: " __ex("vmresume") "\n\t" "2: " /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" @@ -10801,6 +11289,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" + /* + * Clear host registers marked as clobbered to prevent + * speculative use. 
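The vmx_vcpu_run() hunk above replaces the old sync_shadow_vmcs flag with need_vmcs12_sync and chooses the sync mechanism at run time: the enlightened VMCS if one is mapped, the shadow VMCS otherwise. A simplified sketch of that dispatch, with stand-in types and names:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct nested_state {
	bool need_vmcs12_sync;
	void *hv_evmcs;		/* non-NULL when an enlightened VMCS is mapped */
};

static void copy_vmcs12_to_enlightened(void) { puts("sync via enlightened VMCS"); }
static void copy_vmcs12_to_shadow(void)      { puts("sync via shadow VMCS"); }

static void maybe_sync_vmcs12(struct nested_state *n)
{
	if (!n->need_vmcs12_sync)
		return;
	if (n->hv_evmcs)
		copy_vmcs12_to_enlightened();
	else
		copy_vmcs12_to_shadow();
	n->need_vmcs12_sync = false;	/* one-shot, rearmed when vmcs12 changes */
}

int main(void)
{
	struct nested_state n = { .need_vmcs12_sync = true, .hv_evmcs = NULL };
	maybe_sync_vmcs12(&n);		/* falls back to the shadow VMCS path */
	return 0;
}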
+ */ "xor %%r8d, %%r8d \n\t" "xor %%r9d, %%r9d \n\t" "xor %%r10d, %%r10d \n\t" @@ -10958,6 +11450,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) vmx->loaded_vmcs = vmcs; vmx_vcpu_load(vcpu, cpu); put_cpu(); + + vm_entry_controls_reset_shadow(vmx); + vm_exit_controls_reset_shadow(vmx); + vmx_segment_cache_clear(vmx); } /* @@ -10966,12 +11462,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) */ static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - - vcpu_load(vcpu); - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - free_nested(vmx); - vcpu_put(vcpu); + vcpu_load(vcpu); + vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01); + free_nested(vcpu); + vcpu_put(vcpu); } static void vmx_free_vcpu(struct kvm_vcpu *vcpu) @@ -11334,28 +11828,28 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) return get_vmcs12(vcpu)->ept_pointer; } -static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) +static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { WARN_ON(mmu_is_nested(vcpu)); - if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu))) - return 1; + vcpu->arch.mmu = &vcpu->arch.guest_mmu; kvm_init_shadow_ept_mmu(vcpu, to_vmx(vcpu)->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT, nested_ept_ad_enabled(vcpu), nested_ept_get_cr3(vcpu)); - vcpu->arch.mmu.set_cr3 = vmx_set_cr3; - vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; - vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; + vcpu->arch.mmu->set_cr3 = vmx_set_cr3; + vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3; + vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; + vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; - return 0; } static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) { - vcpu->arch.walk_mmu = &vcpu->arch.mmu; + vcpu->arch.mmu = &vcpu->arch.root_mmu; + vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; } static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, @@ -11716,7 +12210,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, !nested_exit_intr_ack_set(vcpu) || (vmcs12->posted_intr_nv & 0xff00) || (vmcs12->posted_intr_desc_addr & 0x3f) || - (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr)))) + (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))) return -EINVAL; /* tpr shadow is needed by all apicv features. */ @@ -11772,15 +12266,12 @@ static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu, static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { - u64 address = vmcs12->pml_address; - int maxphyaddr = cpuid_maxphyaddr(vcpu); + if (!nested_cpu_has_pml(vmcs12)) + return 0; - if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) { - if (!nested_cpu_has_ept(vmcs12) || - !IS_ALIGNED(address, 4096) || - address >> maxphyaddr) - return -EINVAL; - } + if (!nested_cpu_has_ept(vmcs12) || + !page_address_valid(vcpu, vmcs12->pml_address)) + return -EINVAL; return 0; } @@ -11960,112 +12451,87 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne return 0; } -static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +/* + * Returns if KVM is able to config CPU to tag TLB entries + * populated by L2 differently than TLB entries populated + * by L1. + * + * If L1 uses EPT, then TLB entries are tagged with different EPTP. 
+ * + * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged + * with different VPID (L1 entries are tagged with vmx->vpid + * while L2 entries are tagged with vmx->nested.vpid02). + */ +static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu) { - struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); - vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); - vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); - vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); - vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); - vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); - vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); - vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); - vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); - vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); - vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); - vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); - vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); - vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); - vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); - vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); - vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); - vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); - vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); - vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); - vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); - vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); - vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); - vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); - vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); - vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); - vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); - vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); - vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); - vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); - vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); - - vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); - vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, - vmcs12->guest_pending_dbg_exceptions); - vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); - vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); + return nested_cpu_has_ept(vmcs12) || + (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); +} - if (nested_cpu_has_xsaves(vmcs12)) - vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); - vmcs_write64(VMCS_LINK_POINTER, -1ull); +static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) +{ + if (vmx->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) + return vmcs12->guest_ia32_efer; + else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) + return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); + else + return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); +} - if (cpu_has_vmx_posted_intr()) - vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); +static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) +{ + /* + * If vmcs02 hasn't been initialized, set the constant vmcs02 state + * according to L0's settings (vmcs12 is irrelevant here). Host + * fields that come from L0 and are not constant, e.g. HOST_CR3, + * will be set as needed prior to VMLAUNCH/VMRESUME. 
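nested_has_guest_tlb_tag(), introduced above, boils down to "EPT is enabled, or VPID is enabled and a vpid02 has been allocated". A compact standalone sketch of that predicate; the struct and field names are simplified, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_vmcs12 { bool ept_enabled; bool vpid_enabled; };
struct demo_vmx    { uint16_t vpid, nested_vpid02; };

/*
 * L2 TLB entries are distinguishable from L1's if they are tagged with a
 * different EPTP (EPT in use) or a different VPID (a vpid02 was allocated).
 */
static bool has_guest_tlb_tag(const struct demo_vmcs12 *vmcs12,
			      const struct demo_vmx *vmx)
{
	return vmcs12->ept_enabled ||
	       (vmcs12->vpid_enabled && vmx->nested_vpid02 != 0);
}

int main(void)
{
	struct demo_vmcs12 cfg = { .ept_enabled = false, .vpid_enabled = true };
	struct demo_vmx vmx = { .vpid = 1, .nested_vpid02 = 2 };

	printf("guest TLB tag: %s\n", has_guest_tlb_tag(&cfg, &vmx) ? "yes" : "no");
	return 0;
}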
+ */ + if (vmx->nested.vmcs02_initialized) + return; + vmx->nested.vmcs02_initialized = true; /* - * Whether page-faults are trapped is determined by a combination of - * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. - * If enable_ept, L0 doesn't care about page faults and we should - * set all of these to L1's desires. However, if !enable_ept, L0 does - * care about (at least some) page faults, and because it is not easy - * (if at all possible?) to merge L0 and L1's desires, we simply ask - * to exit on each and every L2 page fault. This is done by setting - * MASK=MATCH=0 and (see below) EB.PF=1. - * Note that below we don't need special code to set EB.PF beyond the - * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, - * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when - * !enable_ept, EB.PF is 1, so the "or" will always be 1. + * We don't care what the EPTP value is we just need to guarantee + * it's valid so we don't get a false positive when doing early + * consistency checks. */ - vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, - enable_ept ? vmcs12->page_fault_error_code_mask : 0); - vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, - enable_ept ? vmcs12->page_fault_error_code_match : 0); + if (enable_ept && nested_early_check) + vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0)); /* All VMFUNCs are currently emulated through L0 vmexits. */ if (cpu_has_vmx_vmfunc()) vmcs_write64(VM_FUNCTION_CONTROL, 0); - if (cpu_has_vmx_apicv()) { - vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); - vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); - vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); - vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); - } + if (cpu_has_vmx_posted_intr()) + vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); - /* - * Set host-state according to L0's settings (vmcs12 is irrelevant here) - * Some constant fields are set here by vmx_set_constant_host_state(). - * Other fields are different per CPU, and will be set later when - * vmx_vcpu_load() is called, and when vmx_prepare_switch_to_guest() - * is called. - */ - vmx_set_constant_host_state(vmx); + if (cpu_has_vmx_msr_bitmap()) + vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); + + if (enable_pml) + vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); /* - * Set the MSR load/store lists to match L0's settings. + * Set the MSR load/store lists to match L0's settings. Only the + * addresses are constant (for vmcs02), the counts can change based + * on L2's behavior, e.g. switching to/from long mode. 
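nested_vmx_calc_efer(), added a little earlier in this hunk, derives the EFER value L2 will run with either directly from vmcs12 or from the "IA-32e mode guest" entry control. A standalone sketch of that decision follows; the bit positions match the Intel SDM, while the function and parameter names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFER_LME	(1ull << 8)
#define EFER_LMA	(1ull << 10)

#define VM_ENTRY_IA32E_MODE	(1u << 9)
#define VM_ENTRY_LOAD_IA32_EFER	(1u << 15)

/*
 * Mirrors the decision in nested_vmx_calc_efer(): take EFER verbatim from
 * vmcs12 when L1 asked for it to be loaded on entry, otherwise derive the
 * long-mode bits from the "IA-32e mode guest" entry control.
 */
static uint64_t calc_l2_efer(bool run_pending, uint32_t entry_controls,
			     uint64_t vmcs12_guest_efer, uint64_t current_efer)
{
	if (run_pending && (entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		return vmcs12_guest_efer;
	if (entry_controls & VM_ENTRY_IA32E_MODE)
		return current_efer | EFER_LMA | EFER_LME;
	return current_efer & ~(EFER_LMA | EFER_LME);
}

int main(void)
{
	uint64_t efer = calc_l2_efer(true, VM_ENTRY_IA32E_MODE, 0, EFER_LME);
	printf("L2 EFER = %#llx\n", (unsigned long long)efer);
	return 0;
}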
*/ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); - set_cr4_guest_host_mask(vmx); + vmx_set_constant_host_state(vmx); +} - if (kvm_mpx_supported()) { - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) - vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); - else - vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); - } +static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx, + struct vmcs12 *vmcs12) +{ + prepare_vmcs02_constant_state(vmx); + + vmcs_write64(VMCS_LINK_POINTER, -1ull); if (enable_vpid) { if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) @@ -12073,78 +12539,30 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) else vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); } - - /* - * L1 may access the L2's PDPTR, so save them to construct vmcs12 - */ - if (enable_ept) { - vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); - vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); - vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); - vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); - } - - if (cpu_has_vmx_msr_bitmap()) - vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); } -/* - * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested - * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it - * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 - * guest in a way that will both be appropriate to L1's requests, and our - * needs. In addition to modifying the active vmcs (which is vmcs02), this - * function also has additional necessary side-effects, like setting various - * vcpu->arch fields. - * Returns 0 on success, 1 on failure. Invalid state exit qualification code - * is assigned to entry_failure_code on failure. - */ -static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - u32 *entry_failure_code) +static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) { - struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control, vmcs12_exec_ctrl; + u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); - if (vmx->nested.dirty_vmcs12) { - prepare_vmcs02_full(vcpu, vmcs12); - vmx->nested.dirty_vmcs12 = false; - } + if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) + prepare_vmcs02_early_full(vmx, vmcs12); /* - * First, the fields that are shadowed. This must be kept in sync - * with vmx_shadow_fields.h. + * HOST_RSP is normally set correctly in vmx_vcpu_run() just before + * entry, but only if the current (host) sp changed from the value + * we wrote last (vmx->host_rsp). This cache is no longer relevant + * if we switch vmcs, and rather than hold a separate cache per vmcs, + * here we just force the write to happen on entry. host_rsp will + * also be written unconditionally by nested_vmx_check_vmentry_hw() + * if we are doing early consistency checks via hardware. 
*/ + vmx->host_rsp = 0; - vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); - vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); - vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); - vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); - vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); - - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { - kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); - vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); - } else { - kvm_set_dr(vcpu, 7, vcpu->arch.dr7); - vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); - } - if (vmx->nested.nested_run_pending) { - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, - vmcs12->vm_entry_intr_info_field); - vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, - vmcs12->vm_entry_exception_error_code); - vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, - vmcs12->vm_entry_instruction_len); - vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, - vmcs12->guest_interruptibility_info); - vmx->loaded_vmcs->nmi_known_unmasked = - !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); - } else { - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); - } - vmx_set_rflags(vcpu, vmcs12->guest_rflags); - + /* + * PIN CONTROLS + */ exec_control = vmcs12->pin_based_vm_exec_control; /* Preemption timer setting is computed directly in vmx_vcpu_run. */ @@ -12159,13 +12577,43 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } else { exec_control &= ~PIN_BASED_POSTED_INTR; } - vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); - vmx->nested.preemption_timer_expired = false; - if (nested_cpu_has_preemption_timer(vmcs12)) - vmx_start_preemption_timer(vcpu); + /* + * EXEC CONTROLS + */ + exec_control = vmx_exec_control(vmx); /* L0's desires */ + exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; + exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; + exec_control &= ~CPU_BASED_TPR_SHADOW; + exec_control |= vmcs12->cpu_based_vm_exec_control; + /* + * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if + * nested_get_vmcs12_pages can't fix it up, the illegal value + * will result in a VM entry failure. + */ + if (exec_control & CPU_BASED_TPR_SHADOW) { + vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); + vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); + } else { +#ifdef CONFIG_X86_64 + exec_control |= CPU_BASED_CR8_LOAD_EXITING | + CPU_BASED_CR8_STORE_EXITING; +#endif + } + + /* + * A vmexit (to either L1 hypervisor or L0 userspace) is always needed + * for I/O port accesses. + */ + exec_control &= ~CPU_BASED_USE_IO_BITMAPS; + exec_control |= CPU_BASED_UNCOND_IO_EXITING; + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); + + /* + * SECONDARY EXEC CONTROLS + */ if (cpu_has_secondary_exec_ctrls()) { exec_control = vmx->secondary_exec_control; @@ -12206,43 +12654,214 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } /* - * HOST_RSP is normally set correctly in vmx_vcpu_run() just before - * entry, but only if the current (host) sp changed from the value - * we wrote last (vmx->host_rsp). This cache is no longer relevant - * if we switch vmcs, and rather than hold a separate cache per vmcs, - * here we just force the write to happen on entry. + * ENTRY CONTROLS + * + * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE + * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate + * on the related bits (if supported by the CPU) in the hope that + * we can avoid VMWrites during vmx_set_efer(). 
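The entry-control handling described above strips the EFER-related bits from the combined L0/L1 controls and then speculatively re-adds them from the precomputed guest EFER, so that vmx_set_efer() usually finds them already correct. A small sketch of that computation; the merged-controls parameter and names are simplifying assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFER_LMA		(1ull << 10)
#define VM_ENTRY_IA32E_MODE	(1u << 9)
#define VM_ENTRY_LOAD_IA32_EFER	(1u << 15)

/*
 * Start from the merged controls with the EFER-related bits cleared, then
 * set them from the already-computed guest EFER (only if the CPU supports
 * loading EFER on entry), in the hope of avoiding later VMWrites.
 */
static uint32_t build_entry_controls(uint32_t merged_ctls, bool cpu_has_load_efer,
				     uint64_t guest_efer, uint64_t host_efer)
{
	uint32_t ctls = merged_ctls &
			~(uint32_t)(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);

	if (cpu_has_load_efer) {
		if (guest_efer & EFER_LMA)
			ctls |= VM_ENTRY_IA32E_MODE;
		if (guest_efer != host_efer)
			ctls |= VM_ENTRY_LOAD_IA32_EFER;
	}
	return ctls;
}

int main(void)
{
	uint32_t ctls = build_entry_controls(VM_ENTRY_IA32E_MODE, true, EFER_LMA, 0);
	printf("entry controls = %#x\n", ctls);
	return 0;
}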
+ */ + exec_control = (vmcs12->vm_entry_controls | vmcs_config.vmentry_ctrl) & + ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; + if (cpu_has_load_ia32_efer) { + if (guest_efer & EFER_LMA) + exec_control |= VM_ENTRY_IA32E_MODE; + if (guest_efer != host_efer) + exec_control |= VM_ENTRY_LOAD_IA32_EFER; + } + vm_entry_controls_init(vmx, exec_control); + + /* + * EXIT CONTROLS + * + * L2->L1 exit controls are emulated - the hardware exit is to L0 so + * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER + * bits may be modified by vmx_set_efer() in prepare_vmcs02(). */ - vmx->host_rsp = 0; + exec_control = vmcs_config.vmexit_ctrl; + if (cpu_has_load_ia32_efer && guest_efer != host_efer) + exec_control |= VM_EXIT_LOAD_IA32_EFER; + vm_exit_controls_init(vmx, exec_control); - exec_control = vmx_exec_control(vmx); /* L0's desires */ - exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; - exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; - exec_control &= ~CPU_BASED_TPR_SHADOW; - exec_control |= vmcs12->cpu_based_vm_exec_control; + /* + * Conceptually we want to copy the PML address and index from + * vmcs01 here, and then back to vmcs01 on nested vmexit. But, + * since we always flush the log on each vmexit and never change + * the PML address (once set), this happens to be equivalent to + * simply resetting the index in vmcs02. + */ + if (enable_pml) + vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); /* - * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if - * nested_get_vmcs12_pages can't fix it up, the illegal value - * will result in a VM entry failure. + * Interrupt/Exception Fields */ - if (exec_control & CPU_BASED_TPR_SHADOW) { - vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); - vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); + if (vmx->nested.nested_run_pending) { + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, + vmcs12->vm_entry_intr_info_field); + vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, + vmcs12->vm_entry_exception_error_code); + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, + vmcs12->vm_entry_instruction_len); + vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, + vmcs12->guest_interruptibility_info); + vmx->loaded_vmcs->nmi_known_unmasked = + !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); } else { -#ifdef CONFIG_X86_64 - exec_control |= CPU_BASED_CR8_LOAD_EXITING | - CPU_BASED_CR8_STORE_EXITING; -#endif + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } +} + +static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) +{ + struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; + + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { + vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); + vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); + vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); + vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); + vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); + vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); + vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); + vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); + vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); + vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); + vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); + vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); + vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); + vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); + vmcs_write32(GUEST_LDTR_LIMIT, 
vmcs12->guest_ldtr_limit); + vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); + vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); + vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); + vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); + vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); + vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); + vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); + vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); + vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); + vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); + vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); + vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); + vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); + vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); + vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); + vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); + vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); + vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); + vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); + } + + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { + vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); + vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, + vmcs12->guest_pending_dbg_exceptions); + vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); + + /* + * L1 may access the L2's PDPTR, so save them to construct + * vmcs12 + */ + if (enable_ept) { + vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); + vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); + vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); + vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); + } + } + + if (nested_cpu_has_xsaves(vmcs12)) + vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); /* - * A vmexit (to either L1 hypervisor or L0 userspace) is always needed - * for I/O port accesses. + * Whether page-faults are trapped is determined by a combination of + * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. + * If enable_ept, L0 doesn't care about page faults and we should + * set all of these to L1's desires. However, if !enable_ept, L0 does + * care about (at least some) page faults, and because it is not easy + * (if at all possible?) to merge L0 and L1's desires, we simply ask + * to exit on each and every L2 page fault. This is done by setting + * MASK=MATCH=0 and (see below) EB.PF=1. + * Note that below we don't need special code to set EB.PF beyond the + * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, + * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when + * !enable_ept, EB.PF is 1, so the "or" will always be 1. */ - exec_control &= ~CPU_BASED_USE_IO_BITMAPS; - exec_control |= CPU_BASED_UNCOND_IO_EXITING; + vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, + enable_ept ? vmcs12->page_fault_error_code_mask : 0); + vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, + enable_ept ? 
vmcs12->page_fault_error_code_match : 0); - vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); + if (cpu_has_vmx_apicv()) { + vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); + vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); + vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); + vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); + } + + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); + + set_cr4_guest_host_mask(vmx); + + if (kvm_mpx_supported()) { + if (vmx->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) + vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); + else + vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); + } +} + +/* + * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested + * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it + * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 + * guest in a way that will both be appropriate to L1's requests, and our + * needs. In addition to modifying the active vmcs (which is vmcs02), this + * function also has additional necessary side-effects, like setting various + * vcpu->arch fields. + * Returns 0 on success, 1 on failure. Invalid state exit qualification code + * is assigned to entry_failure_code on failure. + */ +static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, + u32 *entry_failure_code) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; + + if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) { + prepare_vmcs02_full(vmx, vmcs12); + vmx->nested.dirty_vmcs12 = false; + } + + /* + * First, the fields that are shadowed. This must be kept in sync + * with vmx_shadow_fields.h. + */ + if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & + HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { + vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); + vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); + } + + if (vmx->nested.nested_run_pending && + (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { + kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); + vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); + } else { + kvm_set_dr(vcpu, 7, vcpu->arch.dr7); + vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); + } + vmx_set_rflags(vcpu, vmcs12->guest_rflags); + + vmx->nested.preemption_timer_expired = false; + if (nested_cpu_has_preemption_timer(vmcs12)) + vmx_start_preemption_timer(vcpu); /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the * bitwise-or of what L1 wants to trap for L2, and what we want to @@ -12252,20 +12871,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); - /* L2->L1 exit controls are emulated - the hardware exit is to L0 so - * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER - * bits are further modified by vmx_set_efer() below. - */ - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); - - /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are - * emulated by vmx_set_efer(), below. 
- */ - vm_entry_controls_init(vmx, - (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & - ~VM_ENTRY_IA32E_MODE) | - (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); - if (vmx->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); @@ -12288,37 +12893,29 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * influence global bitmap(for vpid01 and vpid02 allocation) * even if spawn a lot of nested vCPUs. */ - if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) { + if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) { if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { vmx->nested.last_vpid = vmcs12->virtual_processor_id; - __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); + __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false); } } else { - vmx_flush_tlb(vcpu, true); + /* + * If L1 use EPT, then L0 needs to execute INVEPT on + * EPTP02 instead of EPTP01. Therefore, delay TLB + * flush until vmcs02->eptp is fully updated by + * KVM_REQ_LOAD_CR3. Note that this assumes + * KVM_REQ_TLB_FLUSH is evaluated after + * KVM_REQ_LOAD_CR3 in vcpu_enter_guest(). + */ + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } } - if (enable_pml) { - /* - * Conceptually we want to copy the PML address and index from - * vmcs01 here, and then back to vmcs01 on nested vmexit. But, - * since we always flush the log on each vmexit, this happens - * to be equivalent to simply resetting the fields in vmcs02. - */ - ASSERT(vmx->pml_pg); - vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); - vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); - } - - if (nested_cpu_has_ept(vmcs12)) { - if (nested_ept_init_mmu_context(vcpu)) { - *entry_failure_code = ENTRY_FAIL_DEFAULT; - return 1; - } - } else if (nested_cpu_has2(vmcs12, - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { + if (nested_cpu_has_ept(vmcs12)) + nested_ept_init_mmu_context(vcpu); + else if (nested_cpu_has2(vmcs12, + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) vmx_flush_tlb(vcpu, true); - } /* * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those @@ -12334,14 +12931,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmx_set_cr4(vcpu, vmcs12->guest_cr4); vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); - if (vmx->nested.nested_run_pending && - (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) - vcpu->arch.efer = vmcs12->guest_ia32_efer; - else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) - vcpu->arch.efer |= (EFER_LMA | EFER_LME); - else - vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); - /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ + vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); + /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ vmx_set_efer(vcpu, vcpu->arch.efer); /* @@ -12383,6 +12974,7 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); + bool ia32e; if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) @@ -12457,6 +13049,21 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; /* + * If the load IA32_EFER VM-exit control is 1, bits reserved in the + * IA32_EFER MSR must be 0 in the field for that register. 
In addition, + * the values of the LMA and LME bits in the field must each be that of + * the host address-space size VM-exit control. + */ + if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { + ia32e = (vmcs12->vm_exit_controls & + VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; + if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || + ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || + ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) + return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD; + } + + /* * From the Intel SDM, volume 3: * Fields relevant to VM-entry event injection must be set properly. * These fields are the VM-entry interruption-information field, the @@ -12512,6 +13119,10 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) } } + if (nested_cpu_has_ept(vmcs12) && + !valid_ept_address(vcpu, vmcs12->ept_pointer)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + return 0; } @@ -12577,21 +13188,6 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, return 1; } - /* - * If the load IA32_EFER VM-exit control is 1, bits reserved in the - * IA32_EFER MSR must be 0 in the field for that register. In addition, - * the values of the LMA and LME bits in the field must each be that of - * the host address-space size VM-exit control. - */ - if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { - ia32e = (vmcs12->vm_exit_controls & - VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0; - if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) || - ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) || - ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) - return 1; - } - if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) || (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))) @@ -12600,26 +13196,139 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, return 0; } +static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long cr3, cr4; + + if (!nested_early_check) + return 0; + + if (vmx->msr_autoload.host.nr) + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); + if (vmx->msr_autoload.guest.nr) + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); + + preempt_disable(); + + vmx_prepare_switch_to_guest(vcpu); + + /* + * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, + * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to + * be written (by preparve_vmcs02()) before the "real" VMEnter, i.e. + * there is no need to preserve other bits or save/restore the field. 
+ */ + vmcs_writel(GUEST_RFLAGS, 0); + + vmcs_writel(HOST_RIP, vmx_early_consistency_check_return); + + cr3 = __get_current_cr3_fast(); + if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { + vmcs_writel(HOST_CR3, cr3); + vmx->loaded_vmcs->host_state.cr3 = cr3; + } + + cr4 = cr4_read_shadow(); + if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { + vmcs_writel(HOST_CR4, cr4); + vmx->loaded_vmcs->host_state.cr4 = cr4; + } + + vmx->__launched = vmx->loaded_vmcs->launched; + + asm( + /* Set HOST_RSP */ + __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t" + "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t" + + /* Check if vmlaunch of vmresume is needed */ + "cmpl $0, %c[launched](%0)\n\t" + "je 1f\n\t" + __ex("vmresume") "\n\t" + "jmp 2f\n\t" + "1: " __ex("vmlaunch") "\n\t" + "jmp 2f\n\t" + "2: " + + /* Set vmx->fail accordingly */ + "setbe %c[fail](%0)\n\t" + + ".pushsection .rodata\n\t" + ".global vmx_early_consistency_check_return\n\t" + "vmx_early_consistency_check_return: " _ASM_PTR " 2b\n\t" + ".popsection" + : + : "c"(vmx), "d"((unsigned long)HOST_RSP), + [launched]"i"(offsetof(struct vcpu_vmx, __launched)), + [fail]"i"(offsetof(struct vcpu_vmx, fail)), + [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)) + : "rax", "cc", "memory" + ); + + vmcs_writel(HOST_RIP, vmx_return); + + preempt_enable(); + + if (vmx->msr_autoload.host.nr) + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); + if (vmx->msr_autoload.guest.nr) + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); + + if (vmx->fail) { + WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != + VMXERR_ENTRY_INVALID_CONTROL_FIELD); + vmx->fail = 0; + return 1; + } + + /* + * VMExit clears RFLAGS.IF and DR7, even on a consistency check. + */ + local_irq_enable(); + if (hw_breakpoint_active()) + set_debugreg(__this_cpu_read(cpu_dr7), 7); + + /* + * A non-failing VMEntry means we somehow entered guest mode with + * an illegal RIP, and that's just the tip of the iceberg. There + * is no telling what memory has been modified or what state has + * been exposed to unknown code. Hitting this all but guarantees + * a (very critical) hardware issue. + */ + WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & + VMX_EXIT_REASONS_FAILED_VMENTRY)); + + return 0; +} +STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw); + +static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12); + /* - * If exit_qual is NULL, this is being called from state restore (either RSM + * If from_vmentry is false, this is being called from state restore (either RSM * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. ++ * ++ * Returns: ++ * 0 - success, i.e. 
proceed with actual VMEnter ++ * 1 - consistency check VMExit ++ * -1 - consistency check VMFail */ -static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) +static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, + bool from_vmentry) { struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - bool from_vmentry = !!exit_qual; - u32 dummy_exit_qual; bool evaluate_pending_interrupts; - int r = 0; + u32 exit_reason = EXIT_REASON_INVALID_STATE; + u32 exit_qual; evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING); if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); - enter_guest_mode(vcpu); - if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); if (kvm_mpx_supported() && @@ -12627,24 +13336,35 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); - vmx_segment_cache_clear(vmx); + prepare_vmcs02_early(vmx, vmcs12); + + if (from_vmentry) { + nested_get_vmcs12_pages(vcpu); + + if (nested_vmx_check_vmentry_hw(vcpu)) { + vmx_switch_vmcs(vcpu, &vmx->vmcs01); + return -1; + } + + if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) + goto vmentry_fail_vmexit; + } + + enter_guest_mode(vcpu); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vcpu->arch.tsc_offset += vmcs12->tsc_offset; - r = EXIT_REASON_INVALID_STATE; - if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual)) - goto fail; + if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) + goto vmentry_fail_vmexit_guest_mode; if (from_vmentry) { - nested_get_vmcs12_pages(vcpu); - - r = EXIT_REASON_MSR_LOAD_FAIL; - *exit_qual = nested_vmx_load_msr(vcpu, - vmcs12->vm_entry_msr_load_addr, - vmcs12->vm_entry_msr_load_count); - if (*exit_qual) - goto fail; + exit_reason = EXIT_REASON_MSR_LOAD_FAIL; + exit_qual = nested_vmx_load_msr(vcpu, + vmcs12->vm_entry_msr_load_addr, + vmcs12->vm_entry_msr_load_count); + if (exit_qual) + goto vmentry_fail_vmexit_guest_mode; } else { /* * The MMU is not initialized to point at the right entities yet and @@ -12681,12 +13401,28 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual) */ return 0; -fail: + /* + * A failed consistency check that leads to a VMExit during L1's + * VMEnter to L2 is a variation of a normal VMexit, as explained in + * 26.7 "VM-entry failures during or after loading guest state". 
+ */ +vmentry_fail_vmexit_guest_mode: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vcpu->arch.tsc_offset -= vmcs12->tsc_offset; leave_guest_mode(vcpu); + +vmentry_fail_vmexit: vmx_switch_vmcs(vcpu, &vmx->vmcs01); - return r; + + if (!from_vmentry) + return 1; + + load_vmcs12_host_state(vcpu, vmcs12); + vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; + vmcs12->exit_qualification = exit_qual; + if (enable_shadow_vmcs || vmx->nested.hv_evmcs) + vmx->nested.need_vmcs12_sync = true; + return 1; } /* @@ -12698,14 +13434,16 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); - u32 exit_qual; int ret; if (!nested_vmx_check_permission(vcpu)) return 1; - if (!nested_vmx_check_vmcs12(vcpu)) - goto out; + if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true)) + return 1; + + if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull) + return nested_vmx_failInvalid(vcpu); vmcs12 = get_vmcs12(vcpu); @@ -12715,13 +13453,16 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) * rather than RFLAGS.ZF, and no error number is stored to the * VM-instruction error field. */ - if (vmcs12->hdr.shadow_vmcs) { - nested_vmx_failInvalid(vcpu); - goto out; - } + if (vmcs12->hdr.shadow_vmcs) + return nested_vmx_failInvalid(vcpu); - if (enable_shadow_vmcs) + if (vmx->nested.hv_evmcs) { + copy_enlightened_to_vmcs12(vmx); + /* Enlightened VMCS doesn't have launch state */ + vmcs12->launch_state = !launch; + } else if (enable_shadow_vmcs) { copy_shadow_to_vmcs12(vmx); + } /* * The nested entry process starts with enforcing various prerequisites @@ -12733,59 +13474,37 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) * for misconfigurations which will anyway be caught by the processor * when using the merged vmcs02. */ - if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) { - nested_vmx_failValid(vcpu, - VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); - goto out; - } + if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) + return nested_vmx_failValid(vcpu, + VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); - if (vmcs12->launch_state == launch) { - nested_vmx_failValid(vcpu, + if (vmcs12->launch_state == launch) + return nested_vmx_failValid(vcpu, launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS : VMXERR_VMRESUME_NONLAUNCHED_VMCS); - goto out; - } ret = check_vmentry_prereqs(vcpu, vmcs12); - if (ret) { - nested_vmx_failValid(vcpu, ret); - goto out; - } - - /* - * After this point, the trap flag no longer triggers a singlestep trap - * on the vm entry instructions; don't call kvm_skip_emulated_instruction. - * This is not 100% correct; for performance reasons, we delegate most - * of the checks on host state to the processor. If those fail, - * the singlestep trap is missed. - */ - skip_emulated_instruction(vcpu); - - ret = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual); - if (ret) { - nested_vmx_entry_failure(vcpu, vmcs12, - EXIT_REASON_INVALID_STATE, exit_qual); - return 1; - } + if (ret) + return nested_vmx_failValid(vcpu, ret); /* * We're finally done with prerequisite checking, and can start with * the nested entry. 
*/ - vmx->nested.nested_run_pending = 1; - ret = enter_vmx_non_root_mode(vcpu, &exit_qual); - if (ret) { - nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual); - vmx->nested.nested_run_pending = 0; + ret = nested_vmx_enter_non_root_mode(vcpu, true); + vmx->nested.nested_run_pending = !ret; + if (ret > 0) return 1; - } + else if (ret) + return nested_vmx_failValid(vcpu, + VMXERR_ENTRY_INVALID_CONTROL_FIELD); /* Hide L1D cache contents from the nested guest. */ vmx->vcpu.arch.l1tf_flush_l1d = true; /* - * Must happen outside of enter_vmx_non_root_mode() as it will + * Must happen outside of nested_vmx_enter_non_root_mode() as it will * also be used as part of restoring nVMX state for * snapshot restore (migration). * @@ -12806,9 +13525,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) return kvm_vcpu_halt(vcpu); } return 1; - -out: - return kvm_skip_emulated_instruction(vcpu); } /* @@ -13122,24 +13838,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, kvm_clear_interrupt_queue(vcpu); } -static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) -{ - u32 entry_failure_code; - - nested_ept_uninit_mmu_context(vcpu); - - /* - * Only PDPTE load can fail as the value of cr3 was checked on entry and - * couldn't have changed. - */ - if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) - nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); - - if (!enable_ept) - vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; -} - /* * A part of what we need to when the nested L2 guest exits and we want to * run its L1 parent, is to reset L1's guest state to the host state specified @@ -13153,6 +13851,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct kvm_segment seg; + u32 entry_failure_code; if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) vcpu->arch.efer = vmcs12->host_ia32_efer; @@ -13165,6 +13864,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip); vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); + vmx_set_interrupt_shadow(vcpu, 0); + /* * Note that calling vmx_set_cr0 is important, even if cr0 hasn't * actually changed, because vmx_set_cr0 refers to efer set above. @@ -13179,23 +13880,35 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); vmx_set_cr4(vcpu, vmcs12->host_cr4); - load_vmcs12_mmu_host_state(vcpu, vmcs12); + nested_ept_uninit_mmu_context(vcpu); /* - * If vmcs01 don't use VPID, CPU flushes TLB on every + * Only PDPTE load can fail as the value of cr3 was checked on entry and + * couldn't have changed. + */ + if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); + + if (!enable_ept) + vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; + + /* + * If vmcs01 doesn't use VPID, CPU flushes TLB on every * VMEntry/VMExit. Thus, no need to flush TLB. * - * If vmcs12 uses VPID, TLB entries populated by L2 are - * tagged with vmx->nested.vpid02 while L1 entries are tagged - * with vmx->vpid. Thus, no need to flush TLB. + * If vmcs12 doesn't use VPID, L1 expects TLB to be + * flushed on every VMEntry/VMExit. + * + * Otherwise, we can preserve TLB entries as long as we are + * able to tag L1 TLB entries differently than L2 TLB entries. 
* - * Therefore, flush TLB only in case vmcs01 uses VPID and - * vmcs12 don't use VPID as in this case L1 & L2 TLB entries - * are both tagged with vmx->vpid. + * If vmcs12 uses EPT, we need to execute this flush on EPTP01 + * and therefore we request the TLB flush to happen only after VMCS EPTP + * has been set by KVM_REQ_LOAD_CR3. */ if (enable_vpid && - !(nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02)) { - vmx_flush_tlb(vcpu, true); + (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) { + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); @@ -13275,6 +13988,140 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); } +static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) +{ + struct shared_msr_entry *efer_msr; + unsigned int i; + + if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) + return vmcs_read64(GUEST_IA32_EFER); + + if (cpu_has_load_ia32_efer) + return host_efer; + + for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { + if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) + return vmx->msr_autoload.guest.val[i].value; + } + + efer_msr = find_msr_entry(vmx, MSR_EFER); + if (efer_msr) + return efer_msr->data; + + return host_efer; +} + +static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + struct vcpu_vmx *vmx = to_vmx(vcpu); + struct vmx_msr_entry g, h; + struct msr_data msr; + gpa_t gpa; + u32 i, j; + + vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); + + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { + /* + * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set + * as vmcs01.GUEST_DR7 contains a userspace defined value + * and vcpu->arch.dr7 is not squirreled away before the + * nested VMENTER (not worth adding a variable in nested_vmx). + */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) + kvm_set_dr(vcpu, 7, DR7_FIXED_1); + else + WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); + } + + /* + * Note that calling vmx_set_{efer,cr0,cr4} is important as they + * handle a variety of side effects to KVM's software model. + */ + vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); + + vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; + vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); + + vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); + vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); + + nested_ept_uninit_mmu_context(vcpu); + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); + + /* + * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs + * from vmcs01 (if necessary). The PDPTRs are not loaded on + * VMFail, like everything else we just need to ensure our + * software model is up-to-date. + */ + ept_save_pdptrs(vcpu); + + kvm_mmu_reset_context(vcpu); + + if (cpu_has_vmx_msr_bitmap()) + vmx_update_msr_bitmap(vcpu); + + /* + * This nasty bit of open coding is a compromise between blindly + * loading L1's MSRs using the exit load lists (incorrect emulation + * of VMFail), leaving the nested VM's MSRs in the software model + * (incorrect behavior) and snapshotting the modified MSRs (too + * expensive since the lists are unbound by hardware). For each + * MSR that was (prematurely) loaded from the nested VMEntry load + * list, reload it from the exit load list if it exists and differs + * from the guest value. 
The intent is to stuff host state as + * silently as possible, not to fully process the exit load list. + */ + msr.host_initiated = false; + for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { + gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); + if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { + pr_debug_ratelimited( + "%s read MSR index failed (%u, 0x%08llx)\n", + __func__, i, gpa); + goto vmabort; + } + + for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { + gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); + if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { + pr_debug_ratelimited( + "%s read MSR failed (%u, 0x%08llx)\n", + __func__, j, gpa); + goto vmabort; + } + if (h.index != g.index) + continue; + if (h.value == g.value) + break; + + if (nested_vmx_load_msr_check(vcpu, &h)) { + pr_debug_ratelimited( + "%s check failed (%u, 0x%x, 0x%x)\n", + __func__, j, h.index, h.reserved); + goto vmabort; + } + + msr.index = h.index; + msr.data = h.value; + if (kvm_set_msr(vcpu, &msr)) { + pr_debug_ratelimited( + "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", + __func__, j, h.index, h.value); + goto vmabort; + } + } + } + + return; + +vmabort: + nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); +} + /* * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 * and modify vmcs12 to make it see what it would expect to see there if @@ -13290,14 +14137,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, /* trying to cancel vmlaunch/vmresume is a bug */ WARN_ON_ONCE(vmx->nested.nested_run_pending); - /* - * The only expected VM-instruction error is "VM entry with - * invalid control field(s)." Anything else indicates a - * problem with L0. - */ - WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) != - VMXERR_ENTRY_INVALID_CONTROL_FIELD)); - leave_guest_mode(vcpu); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) @@ -13324,12 +14163,19 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr, vmcs12->vm_exit_msr_store_count)) nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL); + } else { + /* + * The only expected VM-instruction error is "VM entry with + * invalid control field(s)." Anything else indicates a + * problem with L0. And we should never get here with a + * VMFail of any type if early consistency checks are enabled. 
+ */ + WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != + VMXERR_ENTRY_INVALID_CONTROL_FIELD); + WARN_ON_ONCE(nested_early_check); } vmx_switch_vmcs(vcpu, &vmx->vmcs01); - vm_entry_controls_reset_shadow(vmx); - vm_exit_controls_reset_shadow(vmx); - vmx_segment_cache_clear(vmx); /* Update any VMCS fields that might have changed while L2 ran */ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); @@ -13373,8 +14219,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, */ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); - if (enable_shadow_vmcs && exit_reason != -1) - vmx->nested.sync_shadow_vmcs = true; + if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs)) + vmx->nested.need_vmcs12_sync = true; /* in case we halted in L2 */ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; @@ -13409,24 +14255,24 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, return; } - + /* * After an early L2 VM-entry failure, we're now back * in L1 which thinks it just finished a VMLAUNCH or * VMRESUME instruction, so we need to set the failure * flag and the VM-instruction error field of the VMCS - * accordingly. + * accordingly, and skip the emulated instruction. */ - nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); - - load_vmcs12_mmu_host_state(vcpu, vmcs12); + (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); /* - * The emulated instruction was already skipped in - * nested_vmx_run, but the updated RIP was never - * written back to the vmcs01. + * Restore L1's host state to KVM's software model. We're here + * because a consistency check was caught by hardware, which + * means some amount of guest state has been propagated to KVM's + * model and needs to be unwound to the host's state. */ - skip_emulated_instruction(vcpu); + nested_vmx_restore_host_state(vcpu); + vmx->fail = 0; } @@ -13439,26 +14285,7 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu) to_vmx(vcpu)->nested.nested_run_pending = 0; nested_vmx_vmexit(vcpu, -1, 0, 0); } - free_nested(to_vmx(vcpu)); -} - -/* - * L1's failure to enter L2 is a subset of a normal exit, as explained in - * 23.7 "VM-entry failures during or after loading guest state" (this also - * lists the acceptable exit-reason and exit-qualification parameters). - * It should only be called before L2 actually succeeded to run, and when - * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss). 
- */ -static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12, - u32 reason, unsigned long qualification) -{ - load_vmcs12_host_state(vcpu, vmcs12); - vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY; - vmcs12->exit_qualification = qualification; - nested_vmx_succeed(vcpu); - if (enable_shadow_vmcs) - to_vmx(vcpu)->nested.sync_shadow_vmcs = true; + free_nested(vcpu); } static int vmx_check_intercept(struct kvm_vcpu *vcpu, @@ -13884,7 +14711,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase) if (vmx->nested.smm.guest_mode) { vcpu->arch.hflags &= ~HF_SMM_MASK; - ret = enter_vmx_non_root_mode(vcpu, NULL); + ret = nested_vmx_enter_non_root_mode(vcpu, false); vcpu->arch.hflags |= HF_SMM_MASK; if (ret) return ret; @@ -13899,6 +14726,20 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) return 0; } +static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + /* + * In case we do two consecutive get/set_nested_state()s while L2 was + * running hv_evmcs may end up not being mapped (we map it from + * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always + * have vmcs12 if it is true. + */ + return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull || + vmx->nested.hv_evmcs; +} + static int vmx_get_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, u32 user_data_size) @@ -13918,12 +14759,16 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, vmx = to_vmx(vcpu); vmcs12 = get_vmcs12(vcpu); + + if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled) + kvm_state.flags |= KVM_STATE_NESTED_EVMCS; + if (nested_vmx_allowed(vcpu) && (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr; kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr; - if (vmx->nested.current_vmptr != -1ull) { + if (vmx_has_valid_vmcs12(vcpu)) { kvm_state.size += VMCS12_SIZE; if (is_guest_mode(vcpu) && @@ -13952,20 +14797,24 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) return -EFAULT; - if (vmx->nested.current_vmptr == -1ull) + if (!vmx_has_valid_vmcs12(vcpu)) goto out; /* * When running L2, the authoritative vmcs12 state is in the * vmcs02. When running L1, the authoritative vmcs12 state is - * in the shadow vmcs linked to vmcs01, unless - * sync_shadow_vmcs is set, in which case, the authoritative + * in the shadow or enlightened vmcs linked to vmcs01, unless + * need_vmcs12_sync is set, in which case, the authoritative * vmcs12 state is in the vmcs12 already. */ - if (is_guest_mode(vcpu)) + if (is_guest_mode(vcpu)) { sync_vmcs12(vcpu, vmcs12); - else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs) - copy_shadow_to_vmcs12(vmx); + } else if (!vmx->nested.need_vmcs12_sync) { + if (vmx->nested.hv_evmcs) + copy_enlightened_to_vmcs12(vmx); + else if (enable_shadow_vmcs) + copy_shadow_to_vmcs12(vmx); + } if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) return -EFAULT; @@ -13993,6 +14842,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (kvm_state->format != 0) return -EINVAL; + if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) + nested_enable_evmcs(vcpu, NULL); + if (!nested_vmx_allowed(vcpu)) return kvm_state->vmx.vmxon_pa == -1ull ? 
0 : -EINVAL; @@ -14010,13 +14862,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa)) return -EINVAL; - if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) - return -EINVAL; - - if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || - !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) - return -EINVAL; - if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) return -EINVAL; @@ -14046,7 +14891,25 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (ret) return ret; - set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); + /* Empty 'VMXON' state is permitted */ + if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) + return 0; + + if (kvm_state->vmx.vmcs_pa != -1ull) { + if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || + !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) + return -EINVAL; + + set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); + } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { + /* + * Sync eVMCS upon entry as we may not have + * HV_X64_MSR_VP_ASSIST_PAGE set up yet. + */ + vmx->nested.need_vmcs12_sync = true; + } else { + return -EINVAL; + } if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { vmx->nested.smm.vmxon = true; @@ -14090,7 +14953,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; vmx->nested.dirty_vmcs12 = true; - ret = enter_vmx_non_root_mode(vcpu, NULL); + ret = nested_vmx_enter_non_root_mode(vcpu, false); if (ret) return -EINVAL; @@ -14242,6 +15105,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .pre_enter_smm = vmx_pre_enter_smm, .pre_leave_smm = vmx_pre_leave_smm, .enable_smi_window = enable_smi_window, + + .nested_enable_evmcs = nested_enable_evmcs, }; static void vmx_cleanup_l1d_flush(void) diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx_shadow_fields.h index cd0c75f6d037..132432f375c2 100644 --- a/arch/x86/kvm/vmx_shadow_fields.h +++ b/arch/x86/kvm/vmx_shadow_fields.h @@ -28,7 +28,6 @@ */ /* 16-bits */ -SHADOW_FIELD_RW(GUEST_CS_SELECTOR) SHADOW_FIELD_RW(GUEST_INTR_STATUS) SHADOW_FIELD_RW(GUEST_PML_INDEX) SHADOW_FIELD_RW(HOST_FS_SELECTOR) @@ -47,8 +46,8 @@ SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE) SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD) SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN) SHADOW_FIELD_RW(TPR_THRESHOLD) -SHADOW_FIELD_RW(GUEST_CS_LIMIT) SHADOW_FIELD_RW(GUEST_CS_AR_BYTES) +SHADOW_FIELD_RW(GUEST_SS_AR_BYTES) SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO) SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE) @@ -61,8 +60,6 @@ SHADOW_FIELD_RW(GUEST_CR0) SHADOW_FIELD_RW(GUEST_CR3) SHADOW_FIELD_RW(GUEST_CR4) SHADOW_FIELD_RW(GUEST_RFLAGS) -SHADOW_FIELD_RW(GUEST_CS_BASE) -SHADOW_FIELD_RW(GUEST_ES_BASE) SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK) SHADOW_FIELD_RW(CR0_READ_SHADOW) SHADOW_FIELD_RW(CR4_READ_SHADOW) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ca717737347e..66d66d77caee 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -136,7 +136,7 @@ static u32 __read_mostly tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); /* lapic timer advance (tscdeadline mode only) in nanoseconds */ -unsigned int __read_mostly lapic_timer_advance_ns = 0; +unsigned int __read_mostly lapic_timer_advance_ns = 1000; module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR); EXPORT_SYMBOL_GPL(lapic_timer_advance_ns); @@ -400,9 +400,51 @@ static int exception_type(int vector) return EXCPT_FAULT; } +void 
kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) +{ + unsigned nr = vcpu->arch.exception.nr; + bool has_payload = vcpu->arch.exception.has_payload; + unsigned long payload = vcpu->arch.exception.payload; + + if (!has_payload) + return; + + switch (nr) { + case DB_VECTOR: + /* + * "Certain debug exceptions may clear bit 0-3. The + * remaining contents of the DR6 register are never + * cleared by the processor". + */ + vcpu->arch.dr6 &= ~DR_TRAP_BITS; + /* + * DR6.RTM is set by all #DB exceptions that don't clear it. + */ + vcpu->arch.dr6 |= DR6_RTM; + vcpu->arch.dr6 |= payload; + /* + * Bit 16 should be set in the payload whenever the #DB + * exception should clear DR6.RTM. This makes the payload + * compatible with the pending debug exceptions under VMX. + * Though not currently documented in the SDM, this also + * makes the payload compatible with the exit qualification + * for #DB exceptions under VMX. + */ + vcpu->arch.dr6 ^= payload & DR6_RTM; + break; + case PF_VECTOR: + vcpu->arch.cr2 = payload; + break; + } + + vcpu->arch.exception.has_payload = false; + vcpu->arch.exception.payload = 0; +} +EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload); + static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, - bool reinject) + bool has_payload, unsigned long payload, bool reinject) { u32 prev_nr; int class1, class2; @@ -424,6 +466,14 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, */ WARN_ON_ONCE(vcpu->arch.exception.pending); vcpu->arch.exception.injected = true; + if (WARN_ON_ONCE(has_payload)) { + /* + * A reinjected event has already + * delivered its payload. + */ + has_payload = false; + payload = 0; + } } else { vcpu->arch.exception.pending = true; vcpu->arch.exception.injected = false; @@ -431,6 +481,22 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; + vcpu->arch.exception.has_payload = has_payload; + vcpu->arch.exception.payload = payload; + /* + * In guest mode, payload delivery should be deferred, + * so that the L1 hypervisor can intercept #PF before + * CR2 is modified (or intercept #DB before DR6 is + * modified under nVMX). However, for ABI + * compatibility with KVM_GET_VCPU_EVENTS and + * KVM_SET_VCPU_EVENTS, we can't delay payload + * delivery unless userspace has enabled this + * functionality via the per-VM capability, + * KVM_CAP_EXCEPTION_PAYLOAD. 
+ */ + if (!vcpu->kvm->arch.exception_payload_enabled || + !is_guest_mode(vcpu)) + kvm_deliver_exception_payload(vcpu); return; } @@ -455,6 +521,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; + vcpu->arch.exception.has_payload = false; + vcpu->arch.exception.payload = 0; } else /* replace previous exception with a new one in a hope that instruction re-execution will regenerate lost @@ -464,16 +532,29 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu, void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { - kvm_multiple_exception(vcpu, nr, false, 0, false); + kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { - kvm_multiple_exception(vcpu, nr, false, 0, true); + kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); +static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, + unsigned long payload) +{ + kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false); +} + +static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, + u32 error_code, unsigned long payload) +{ + kvm_multiple_exception(vcpu, nr, true, error_code, + true, payload, false); +} + int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) { if (err) @@ -490,11 +571,13 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) ++vcpu->stat.pf_guest; vcpu->arch.exception.nested_apf = is_guest_mode(vcpu) && fault->async_page_fault; - if (vcpu->arch.exception.nested_apf) + if (vcpu->arch.exception.nested_apf) { vcpu->arch.apf.nested_apf_token = fault->address; - else - vcpu->arch.cr2 = fault->address; - kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); + kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); + } else { + kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, + fault->address); + } } EXPORT_SYMBOL_GPL(kvm_inject_page_fault); @@ -503,7 +586,7 @@ static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fau if (mmu_is_nested(vcpu) && !fault->nested_page_fault) vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); else - vcpu->arch.mmu.inject_page_fault(vcpu, fault); + vcpu->arch.mmu->inject_page_fault(vcpu, fault); return fault->nested_page_fault; } @@ -517,13 +600,13 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi); void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { - kvm_multiple_exception(vcpu, nr, true, error_code, false); + kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception_e); void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) { - kvm_multiple_exception(vcpu, nr, true, error_code, true); + kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception_e); @@ -602,7 +685,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if ((pdpte[i] & PT_PRESENT_MASK) && (pdpte[i] & - vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) { + vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) { ret = 0; goto out; } @@ -2477,7 +2560,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_KVM_PV_EOI_EN: - if (kvm_lapic_enable_pv_eoi(vcpu, data)) + if 
(kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8))) return 1; break; @@ -2912,6 +2995,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_HYPERV_VP_INDEX: case KVM_CAP_HYPERV_EVENTFD: case KVM_CAP_HYPERV_TLBFLUSH: + case KVM_CAP_HYPERV_SEND_IPI: + case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: @@ -2930,6 +3015,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_IMMEDIATE_EXIT: case KVM_CAP_GET_MSR_FEATURES: case KVM_CAP_MSR_PLATFORM_INFO: + case KVM_CAP_EXCEPTION_PAYLOAD: r = 1; break; case KVM_CAP_SYNC_REGS: @@ -3362,19 +3448,33 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); + /* - * FIXME: pass injected and pending separately. This is only - * needed for nested virtualization, whose state cannot be - * migrated yet. For now we can combine them. + * The API doesn't provide the instruction length for software + * exceptions, so don't report them. As long as the guest RIP + * isn't advanced, we should expect to encounter the exception + * again. */ - events->exception.injected = - (vcpu->arch.exception.pending || - vcpu->arch.exception.injected) && - !kvm_exception_is_soft(vcpu->arch.exception.nr); + if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { + events->exception.injected = 0; + events->exception.pending = 0; + } else { + events->exception.injected = vcpu->arch.exception.injected; + events->exception.pending = vcpu->arch.exception.pending; + /* + * For ABI compatibility, deliberately conflate + * pending and injected exceptions when + * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled. + */ + if (!vcpu->kvm->arch.exception_payload_enabled) + events->exception.injected |= + vcpu->arch.exception.pending; + } events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; - events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; + events->exception_has_payload = vcpu->arch.exception.has_payload; + events->exception_payload = vcpu->arch.exception.payload; events->interrupt.injected = vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; @@ -3398,6 +3498,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SHADOW | KVM_VCPUEVENT_VALID_SMM); + if (vcpu->kvm->arch.exception_payload_enabled) + events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; + memset(&events->reserved, 0, sizeof(events->reserved)); } @@ -3409,12 +3512,24 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW - | KVM_VCPUEVENT_VALID_SMM)) + | KVM_VCPUEVENT_VALID_SMM + | KVM_VCPUEVENT_VALID_PAYLOAD)) return -EINVAL; - if (events->exception.injected && - (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR || - is_guest_mode(vcpu))) + if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { + if (!vcpu->kvm->arch.exception_payload_enabled) + return -EINVAL; + if (events->exception.pending) + events->exception.injected = 0; + else + events->exception_has_payload = 0; + } else { + events->exception.pending = 0; + events->exception_has_payload = 0; + } + + if ((events->exception.injected || events->exception.pending) && + (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) return -EINVAL; /* INITs 
are latched while in SMM */ @@ -3424,11 +3539,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, return -EINVAL; process_nmi(vcpu); - vcpu->arch.exception.injected = false; - vcpu->arch.exception.pending = events->exception.injected; + vcpu->arch.exception.injected = events->exception.injected; + vcpu->arch.exception.pending = events->exception.pending; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; + vcpu->arch.exception.has_payload = events->exception_has_payload; + vcpu->arch.exception.payload = events->exception_payload; vcpu->arch.interrupt.injected = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; @@ -3694,6 +3811,10 @@ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, struct kvm_enable_cap *cap) { + int r; + uint16_t vmcs_version; + void __user *user_ptr; + if (cap->flags) return -EINVAL; @@ -3706,6 +3827,16 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, return -EINVAL; return kvm_hv_activate_synic(vcpu, cap->cap == KVM_CAP_HYPERV_SYNIC2); + case KVM_CAP_HYPERV_ENLIGHTENED_VMCS: + r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version); + if (!r) { + user_ptr = (void __user *)(uintptr_t)cap->args[0]; + if (copy_to_user(user_ptr, &vmcs_version, + sizeof(vmcs_version))) + r = -EFAULT; + } + return r; + default: return -EINVAL; } @@ -4047,11 +4178,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp, break; if (kvm_state.flags & - ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE)) + ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE + | KVM_STATE_NESTED_EVMCS)) break; /* nested_run_pending implies guest_mode. */ - if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING) + if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING) + && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE)) break; r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); @@ -4363,6 +4496,10 @@ split_irqchip_unlock: kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; r = 0; break; + case KVM_CAP_EXCEPTION_PAYLOAD: + kvm->arch.exception_payload_enabled = cap->args[0]; + r = 0; + break; default: r = -EINVAL; break; @@ -4803,7 +4940,7 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, /* NPT walks are always user-walks */ access |= PFERR_USER_MASK; - t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); + t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); return t_gpa; } @@ -5889,7 +6026,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, if (WARN_ON_ONCE(is_guest_mode(vcpu))) return false; - if (!vcpu->arch.mmu.direct_map) { + if (!vcpu->arch.mmu->direct_map) { /* * Write permission should be allowed since only * write access need to be emulated. @@ -5922,7 +6059,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, kvm_release_pfn_clean(pfn); /* The instructions are well-emulated on direct mmu. 
*/ - if (vcpu->arch.mmu.direct_map) { + if (vcpu->arch.mmu->direct_map) { unsigned int indirect_shadow_pages; spin_lock(&vcpu->kvm->mmu_lock); @@ -5989,7 +6126,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, vcpu->arch.last_retry_eip = ctxt->eip; vcpu->arch.last_retry_addr = cr2; - if (!vcpu->arch.mmu.direct_map) + if (!vcpu->arch.mmu->direct_map) gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); @@ -6049,14 +6186,7 @@ static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r) kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; } else { - /* - * "Certain debug exceptions may clear bit 0-3. The - * remaining contents of the DR6 register are never - * cleared by the processor". - */ - vcpu->arch.dr6 &= ~15; - vcpu->arch.dr6 |= DR6_BS | DR6_RTM; - kvm_queue_exception(vcpu, DB_VECTOR); + kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); } } @@ -6995,10 +7125,22 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | X86_EFLAGS_RF); - if (vcpu->arch.exception.nr == DB_VECTOR && - (vcpu->arch.dr7 & DR7_GD)) { - vcpu->arch.dr7 &= ~DR7_GD; - kvm_update_dr7(vcpu); + if (vcpu->arch.exception.nr == DB_VECTOR) { + /* + * This code assumes that nSVM doesn't use + * check_nested_events(). If it does, the + * DR6/DR7 changes should happen before L1 + * gets a #VMEXIT for an intercepted #DB in + * L2. (Under VMX, on the other hand, the + * DR6/DR7 changes should not happen in the + * event of a VM-exit to L1 for an intercepted + * #DB in L2.) + */ + kvm_deliver_exception_payload(vcpu); + if (vcpu->arch.dr7 & DR7_GD) { + vcpu->arch.dr7 &= ~DR7_GD; + kvm_update_dr7(vcpu); + } } kvm_x86_ops->queue_exception(vcpu); @@ -8478,7 +8620,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) kvm_vcpu_mtrr_init(vcpu); vcpu_load(vcpu); kvm_vcpu_reset(vcpu, false); - kvm_mmu_setup(vcpu); + kvm_init_mmu(vcpu, false); vcpu_put(vcpu); return 0; } @@ -9327,7 +9469,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) { int r; - if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || + if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || work->wakeup_all) return; @@ -9335,11 +9477,11 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) if (unlikely(r)) return; - if (!vcpu->arch.mmu.direct_map && - work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) + if (!vcpu->arch.mmu->direct_map && + work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu)) return; - vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); + vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true); } static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) @@ -9463,6 +9605,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, vcpu->arch.exception.nr = 0; vcpu->arch.exception.has_error_code = false; vcpu->arch.exception.error_code = 0; + vcpu->arch.exception.has_payload = false; + vcpu->arch.exception.payload = 0; } else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { fault.vector = PF_VECTOR; fault.error_code_valid = true; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 67b9568613f3..224cd0a47568 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -266,6 +266,8 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, int handle_ud(struct kvm_vcpu *vcpu); +void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu); + void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu); u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn); 
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2b1519bc5381..b24eb4eb9984 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -157,79 +157,6 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) return prefetch; } -/* - * A protection key fault means that the PKRU value did not allow - * access to some PTE. Userspace can figure out what PKRU was - * from the XSAVE state, and this function fills out a field in - * siginfo so userspace can discover which protection key was set - * on the PTE. - * - * If we get here, we know that the hardware signaled a X86_PF_PK - * fault and that there was a VMA once we got in the fault - * handler. It does *not* guarantee that the VMA we find here - * was the one that we faulted on. - * - * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); - * 2. T1 : set PKRU to deny access to pkey=4, touches page - * 3. T1 : faults... - * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); - * 5. T1 : enters fault handler, takes mmap_sem, etc... - * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really - * faulted on a pte with its pkey=4. - */ -static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info, - u32 *pkey) -{ - /* This is effectively an #ifdef */ - if (!boot_cpu_has(X86_FEATURE_OSPKE)) - return; - - /* Fault not from Protection Keys: nothing to do */ - if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV)) - return; - /* - * force_sig_info_fault() is called from a number of - * contexts, some of which have a VMA and some of which - * do not. The X86_PF_PK handing happens after we have a - * valid VMA, so we should never reach this without a - * valid VMA. - */ - if (!pkey) { - WARN_ONCE(1, "PKU fault with no VMA passed in"); - info->si_pkey = 0; - return; - } - /* - * si_pkey should be thought of as a strong hint, but not - * absolutely guranteed to be 100% accurate because of - * the race explained above. - */ - info->si_pkey = *pkey; -} - -static void -force_sig_info_fault(int si_signo, int si_code, unsigned long address, - struct task_struct *tsk, u32 *pkey, int fault) -{ - unsigned lsb = 0; - siginfo_t info; - - clear_siginfo(&info); - info.si_signo = si_signo; - info.si_errno = 0; - info.si_code = si_code; - info.si_addr = (void __user *)address; - if (fault & VM_FAULT_HWPOISON_LARGE) - lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); - if (fault & VM_FAULT_HWPOISON) - lsb = PAGE_SHIFT; - info.si_addr_lsb = lsb; - - fill_sig_info_pkey(si_signo, si_code, &info, pkey); - - force_sig_info(si_signo, &info, tsk); -} - DEFINE_SPINLOCK(pgd_lock); LIST_HEAD(pgd_list); @@ -734,8 +661,8 @@ no_context(struct pt_regs *regs, unsigned long error_code, tsk->thread.cr2 = address; /* XXX: hwpoison faults will set the wrong code. 
*/ - force_sig_info_fault(signal, si_code, address, - tsk, NULL, 0); + force_sig_fault(signal, si_code, (void __user *)address, + tsk); } /* @@ -862,7 +789,7 @@ static bool is_vsyscall_vaddr(unsigned long vaddr) static void __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address, u32 *pkey, int si_code) + unsigned long address, u32 pkey, int si_code) { struct task_struct *tsk = current; @@ -898,7 +825,10 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_PF; - force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0); + if (si_code == SEGV_PKUERR) + force_sig_pkuerr((void __user *)address, pkey); + + force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk); return; } @@ -911,35 +841,29 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, static noinline void bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, - unsigned long address, u32 *pkey) + unsigned long address) { - __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR); + __bad_area_nosemaphore(regs, error_code, address, 0, SEGV_MAPERR); } static void __bad_area(struct pt_regs *regs, unsigned long error_code, - unsigned long address, struct vm_area_struct *vma, int si_code) + unsigned long address, u32 pkey, int si_code) { struct mm_struct *mm = current->mm; - u32 pkey; - - if (vma) - pkey = vma_pkey(vma); - /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ up_read(&mm->mmap_sem); - __bad_area_nosemaphore(regs, error_code, address, - (vma) ? &pkey : NULL, si_code); + __bad_area_nosemaphore(regs, error_code, address, pkey, si_code); } static noinline void bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) { - __bad_area(regs, error_code, address, NULL, SEGV_MAPERR); + __bad_area(regs, error_code, address, 0, SEGV_MAPERR); } static inline bool bad_area_access_from_pkeys(unsigned long error_code, @@ -968,18 +892,40 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code, * But, doing it this way allows compiler optimizations * if pkeys are compiled out. */ - if (bad_area_access_from_pkeys(error_code, vma)) - __bad_area(regs, error_code, address, vma, SEGV_PKUERR); - else - __bad_area(regs, error_code, address, vma, SEGV_ACCERR); + if (bad_area_access_from_pkeys(error_code, vma)) { + /* + * A protection key fault means that the PKRU value did not allow + * access to some PTE. Userspace can figure out what PKRU was + * from the XSAVE state. This function captures the pkey from + * the vma and passes it to userspace so userspace can discover + * which protection key was set on the PTE. + * + * If we get here, we know that the hardware signaled a X86_PF_PK + * fault and that there was a VMA once we got in the fault + * handler. It does *not* guarantee that the VMA we find here + * was the one that we faulted on. + * + * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4); + * 2. T1 : set PKRU to deny access to pkey=4, touches page + * 3. T1 : faults... + * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5); + * 5. T1 : enters fault handler, takes mmap_sem, etc... + * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really + * faulted on a pte with its pkey=4. 
+ */ + u32 pkey = vma_pkey(vma); + + __bad_area(regs, error_code, address, pkey, SEGV_PKUERR); + } else { + __bad_area(regs, error_code, address, 0, SEGV_ACCERR); + } } static void do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, - u32 *pkey, unsigned int fault) + unsigned int fault) { struct task_struct *tsk = current; - int code = BUS_ADRERR; /* Kernel mode? Handle exceptions or die: */ if (!(error_code & X86_PF_USER)) { @@ -997,18 +943,25 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, #ifdef CONFIG_MEMORY_FAILURE if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { - printk(KERN_ERR + unsigned lsb = 0; + + pr_err( "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", tsk->comm, tsk->pid, address); - code = BUS_MCEERR_AR; + if (fault & VM_FAULT_HWPOISON_LARGE) + lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); + if (fault & VM_FAULT_HWPOISON) + lsb = PAGE_SHIFT; + force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, tsk); + return; } #endif - force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault); + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk); } static noinline void mm_fault_error(struct pt_regs *regs, unsigned long error_code, - unsigned long address, u32 *pkey, vm_fault_t fault) + unsigned long address, vm_fault_t fault) { if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) { no_context(regs, error_code, address, 0, 0); @@ -1032,9 +985,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| VM_FAULT_HWPOISON_LARGE)) - do_sigbus(regs, error_code, address, pkey, fault); + do_sigbus(regs, error_code, address, fault); else if (fault & VM_FAULT_SIGSEGV) - bad_area_nosemaphore(regs, error_code, address, pkey); + bad_area_nosemaphore(regs, error_code, address); else BUG(); } @@ -1267,7 +1220,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code, * Don't take the mm semaphore here. If we fixup a prefetch * fault we could otherwise deadlock: */ - bad_area_nosemaphore(regs, hw_error_code, address, NULL); + bad_area_nosemaphore(regs, hw_error_code, address); } NOKPROBE_SYMBOL(do_kern_addr_fault); @@ -1283,7 +1236,6 @@ void do_user_addr_fault(struct pt_regs *regs, struct mm_struct *mm; vm_fault_t fault, major = 0; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; - u32 pkey; tsk = current; mm = tsk->mm; @@ -1304,7 +1256,7 @@ void do_user_addr_fault(struct pt_regs *regs, * pages in the user address space. */ if (unlikely(smap_violation(hw_error_code, regs))) { - bad_area_nosemaphore(regs, hw_error_code, address, NULL); + bad_area_nosemaphore(regs, hw_error_code, address); return; } @@ -1313,7 +1265,7 @@ void do_user_addr_fault(struct pt_regs *regs, * in a region with pagefaults disabled then we must not take the fault */ if (unlikely(faulthandler_disabled() || !mm)) { - bad_area_nosemaphore(regs, hw_error_code, address, NULL); + bad_area_nosemaphore(regs, hw_error_code, address); return; } @@ -1403,7 +1355,7 @@ void do_user_addr_fault(struct pt_regs *regs, * Fault from code in kernel from * which we do not expect faults. */ - bad_area_nosemaphore(regs, sw_error_code, address, NULL); + bad_area_nosemaphore(regs, sw_error_code, address); return; } retry: @@ -1467,10 +1419,7 @@ good_area: * (potentially after handling any pending signal during the return to * userland). 
The return to userland is identified whenever * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags. - * Thus we have to be careful about not touching vma after handling the - * fault, so we read the pkey beforehand. */ - pkey = vma_pkey(vma); fault = handle_mm_fault(vma, address, flags); major |= fault & VM_FAULT_MAJOR; @@ -1499,7 +1448,7 @@ good_area: up_read(&mm->mmap_sem); if (unlikely(fault & VM_FAULT_ERROR)) { - mm_fault_error(regs, sw_error_code, address, &pkey, fault); + mm_fault_error(regs, sw_error_code, address, fault); return; } diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index e500949bae24..2385538e8065 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -118,14 +118,11 @@ bad_opcode: * anything it wants in to the instructions. We can not * trust anything about it. They might not be valid * instructions or might encode invalid registers, etc... - * - * The caller is expected to kfree() the returned siginfo_t. */ -siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) +int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs) { const struct mpx_bndreg_state *bndregs; const struct mpx_bndreg *bndreg; - siginfo_t *info = NULL; struct insn insn; uint8_t bndregno; int err; @@ -153,11 +150,6 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) /* now go select the individual register in the set of 4 */ bndreg = &bndregs->bndreg[bndregno]; - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) { - err = -ENOMEM; - goto err_out; - } /* * The registers are always 64-bit, but the upper 32 * bits are ignored in 32-bit mode. Also, note that the @@ -168,27 +160,23 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) * complains when casting from integers to different-size * pointers. */ - info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound; - info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound; - info->si_addr_lsb = 0; - info->si_signo = SIGSEGV; - info->si_errno = 0; - info->si_code = SEGV_BNDERR; - info->si_addr = insn_get_addr_ref(&insn, regs); + info->lower = (void __user *)(unsigned long)bndreg->lower_bound; + info->upper = (void __user *)(unsigned long)~bndreg->upper_bound; + info->addr = insn_get_addr_ref(&insn, regs); + /* * We were not able to extract an address from the instruction, * probably because there was something invalid in it. 
*/ - if (info->si_addr == (void __user *)-1) { + if (info->addr == (void __user *)-1) { err = -EINVAL; goto err_out; } - trace_mpx_bounds_register_exception(info->si_addr, bndreg); - return info; + trace_mpx_bounds_register_exception(info->addr, bndreg); + return 0; err_out: /* info might be NULL, but kfree() handles that */ - kfree(info); - return ERR_PTR(err); + return err; } static __user void *mpx_get_bounds_dir(void) diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 5559dcaddd5e..948656069cdd 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -356,7 +356,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) } else { struct pci_root_info *info; - info = kzalloc_node(sizeof(*info), GFP_KERNEL, node); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) dev_err(&root->device->dev, "pci_bus %04x:%02x: ignored (out of memory)\n", diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 13f4485ca388..30a5111ae5fd 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -629,17 +629,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff); static void quirk_no_aersid(struct pci_dev *pdev) { /* VMD Domain */ - if (is_vmd(pdev->bus)) + if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus)) pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID; } -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, + PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid); #ifdef CONFIG_PHYS_ADDR_T_64BIT diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index d67e30faff9c..be060dfb1cc3 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -80,28 +80,18 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) head-y := arch/xtensa/kernel/head.o core-y += arch/xtensa/kernel/ arch/xtensa/mm/ core-y += $(buildvar) $(buildplf) +core-y += arch/xtensa/boot/dts/ libs-y += arch/xtensa/lib/ $(LIBGCC) drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/ -ifneq ($(CONFIG_BUILTIN_DTB),"") -core-$(CONFIG_OF) += arch/xtensa/boot/dts/ -endif - boot := arch/xtensa/boot all Image zImage uImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $@ -%.dtb: - $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ - -dtbs: scripts - $(Q)$(MAKE) $(build)=$(boot)/dts - define archhelp @echo '* Image - Kernel ELF image with reset vector' @echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)' @echo '* uImage - U-Boot wrapped image' - @echo ' dtbs - Build device tree blobs for enabled boards' endef diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 82c756431b49..3310adecafb0 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -26,5 +26,6 @@ generic-y += rwsem.h generic-y += sections.h generic-y += topology.h generic-y += trace_clock.h +generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h index 
ed66db3bc9bb..574e5520968c 100644 --- a/arch/xtensa/include/asm/unistd.h +++ b/arch/xtensa/include/asm/unistd.h @@ -5,9 +5,9 @@ #define __ARCH_WANT_SYS_CLONE #include <uapi/asm/unistd.h> +#define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_UTIME -#define __ARCH_WANT_SYS_LLSEEK #define __ARCH_WANT_SYS_GETPGRP /* diff --git a/arch/xtensa/include/asm/vga.h b/arch/xtensa/include/asm/vga.h deleted file mode 100644 index 1fd8cab3a297..000000000000 --- a/arch/xtensa/include/asm/vga.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * include/asm-xtensa/vga.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 - 2005 Tensilica Inc. - */ - -#ifndef _XTENSA_VGA_H -#define _XTENSA_VGA_H - -#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x) - -#define vga_readb(x) (*(x)) -#define vga_writeb(x,y) (*(y) = (x)) - -#endif diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index a1c3edb8ad56..b727b18a68ac 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -197,7 +197,6 @@ SECTIONS INIT_SETUP(XCHAL_ICACHE_LINESIZE) INIT_CALLS CON_INITCALL - SECURITY_INITCALL INIT_RAM_FS } diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 42285f35d313..820e8738af11 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c @@ -94,7 +94,7 @@ static void __init xtfpga_clk_setup(struct device_node *np) u32 freq; if (!base) { - pr_err("%s: invalid address\n", np->name); + pr_err("%pOFn: invalid address\n", np); return; } @@ -103,12 +103,12 @@ static void __init xtfpga_clk_setup(struct device_node *np) clk = clk_register_fixed_rate(NULL, np->name, NULL, 0, freq); if (IS_ERR(clk)) { - pr_err("%s: clk registration failed\n", np->name); + pr_err("%pOFn: clk registration failed\n", np); return; } if (of_clk_add_provider(np, of_clk_src_simple_get, clk)) { - pr_err("%s: clk provider registration failed\n", np->name); + pr_err("%pOFn: clk provider registration failed\n", np); return; } } |
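
One of the checks this merge moves from check_vmentry_postreqs() into check_vmentry_prereqs() is the host IA32_EFER consistency test: when the "load IA32_EFER" VM-exit control is set, EFER.LMA and EFER.LME in the vmcs12 host-state area must both match the "host address-space size" VM-exit control. The standalone predicate below is an illustrative sketch only, not part of the patch; the function name is made up here, the numeric constants are the values the kernel uses for these bits, and the kvm_valid_efer() reserved-bit test is left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VM_EXIT_HOST_ADDR_SPACE_SIZE	0x00000200	/* "host address-space size" exit control */
#define VM_EXIT_LOAD_IA32_EFER		0x00200000	/* "load IA32_EFER" exit control */
#define EFER_LME			(1ULL << 8)
#define EFER_LMA			(1ULL << 10)

/* true if vmcs12's host EFER agrees with the host address-space size control */
static bool nested_host_efer_consistent(uint32_t vm_exit_controls, uint64_t host_efer)
{
	bool ia32e;

	if (!(vm_exit_controls & VM_EXIT_LOAD_IA32_EFER))
		return true;	/* EFER is not loaded on exit, nothing to check */

	ia32e = (vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
	return ia32e == !!(host_efer & EFER_LMA) &&
	       ia32e == !!(host_efer & EFER_LME);
}

int main(void)
{
	uint32_t ctls = VM_EXIT_LOAD_IA32_EFER | VM_EXIT_HOST_ADDR_SPACE_SIZE;

	/* 64-bit host with LMA+LME set is consistent; LME-only must fail VM-entry. */
	printf("%d %d\n",
	       nested_host_efer_consistent(ctls, EFER_LMA | EFER_LME),
	       nested_host_efer_consistent(ctls, EFER_LME));
	return 0;
}

Doing this check in check_vmentry_prereqs() lets a bad host EFER surface as a VMX instruction error before any guest state is loaded, instead of as a failed VM-entry afterwards.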
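
The DB_VECTOR case in kvm_deliver_exception_payload() above folds a #DB payload, encoded like the VMX pending-debug-exceptions field, into DR6, with payload bit 16 inverted relative to DR6.RTM. The sketch below mirrors that update outside the kernel so the bit manipulation can be run by hand; it is not part of the patch, DR6_RESET is only the architectural reset value used for the demonstration, and the other constants carry the values the kernel uses.

#include <stdio.h>
#include <stdint.h>

#define DR_TRAP_BITS	0xfULL		/* DR6 B0-B3 hit bits */
#define DR6_BS		(1ULL << 14)	/* single-step */
#define DR6_RTM		(1ULL << 16)	/* clear means "#DB hit inside an RTM region" */
#define DR6_RESET	0xffff0ff0ULL	/* architectural DR6 reset value */

/* Mirror of the DB_VECTOR case in kvm_deliver_exception_payload(). */
static uint64_t deliver_db_payload(uint64_t dr6, uint64_t payload)
{
	dr6 &= ~DR_TRAP_BITS;		/* B0-B3 come from the payload, if at all */
	dr6 |= DR6_RTM;			/* a #DB sets DR6.RTM by default */
	dr6 |= payload;
	dr6 ^= payload & DR6_RTM;	/* payload bit 16 set => clear DR6.RTM */
	return dr6;
}

int main(void)
{
	/* Single-step #DB outside RTM: prints 0xffff4ff0 (BS set, RTM still set). */
	printf("%#llx\n", (unsigned long long)deliver_db_payload(DR6_RESET, DR6_BS));
	/* Same #DB inside an RTM region: prints 0xfffe4ff0 (RTM cleared). */
	printf("%#llx\n", (unsigned long long)deliver_db_payload(DR6_RESET, DR6_BS | DR6_RTM));
	return 0;
}

This is also the path taken by the reworked kvm_vcpu_do_singlestep(), which now queues the #DB with a DR6_BS payload instead of writing DR6 directly.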
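
The x86.c hunks above also add a per-VM opt-in, KVM_CAP_EXCEPTION_PAYLOAD, which userspace must enable before KVM_GET/SET_VCPU_EVENTS will report pending and injected exceptions separately along with a payload. A rough userspace sketch of that opt-in follows; it assumes a kernel and <linux/kvm.h> that already carry this series, and it omits error checking of the file descriptors and any further VM setup beyond KVM_CREATE_VM.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm  = ioctl(kvm, KVM_CREATE_VM, 0);
	struct kvm_enable_cap cap = {
		.cap	 = KVM_CAP_EXCEPTION_PAYLOAD,
		.args[0] = 1,	/* defer #PF/#DB payload delivery while in guest mode */
	};

	if (ioctl(vm, KVM_ENABLE_CAP, &cap))
		perror("KVM_ENABLE_CAP(KVM_CAP_EXCEPTION_PAYLOAD)");
	else
		printf("exception payloads enabled\n");
	return 0;
}

Without this capability the kernel keeps the old ABI, deliberately conflating pending and injected exceptions in KVM_GET_VCPU_EVENTS and delivering payloads immediately even in guest mode.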