| author | David S. Miller <davem@davemloft.net> | 2017-06-30 12:43:08 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2017-06-30 12:43:08 -0400 |
| commit | b07911593719828cac023bdcf6bf4da1c9ba546f (patch) | |
| tree | c4d2f66e6f2506e5b5439ffca0449e646f677b33 | |
| parent | 52a623bd6189b6ea8f06a0d7594c7604deaab24a (diff) | |
| parent | 4d8a991d460d4fa4829beaffdcba45a217ca0fa7 (diff) | |
| download | linux-b07911593719828cac023bdcf6bf4da1c9ba546f.tar.gz linux-b07911593719828cac023bdcf6bf4da1c9ba546f.tar.bz2 linux-b07911593719828cac023bdcf6bf4da1c9ba546f.zip | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
A set of overlapping changes in macvlan and the rocker
driver, nothing serious.
Signed-off-by: David S. Miller <davem@davemloft.net>
200 files changed, 1392 insertions, 881 deletions
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 42c3bb2d53e8..01e331a5f3e7 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -41,9 +41,9 @@ Required properties:
 Optional properties:
 
 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
+properties are required.
 
-- compatible: Must contain "marvell,armada-370-xp-gpio"
+- compatible: Must contain "marvell,armada-370-gpio"
 
 - reg: an additional register set is needed, for the GPIO Blink
   Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
 		};
 
 	gpio1: gpio@18140 {
-		compatible = "marvell,armada-370-xp-gpio";
+		compatible = "marvell,armada-370-gpio";
 		reg = <0x18140 0x40>, <0x181c8 0x08>;
 		reg-names = "gpio", "pwm";
 		ngpios = <17>;
diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
index bbd083f5600a..1db6e0057a63 100644
--- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt
+++ b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
@@ -31,7 +31,7 @@ Example:
 		compatible = "st,stm32-timers";
 		reg = <0x40010000 0x400>;
 		clocks = <&rcc 0 160>;
-		clock-names = "clk_int";
+		clock-names = "int";
 
 		pwm {
 			compatible = "st,stm32-pwm";
diff --git a/MAINTAINERS b/MAINTAINERS
index c324460d5042..4c913c81cb49 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2964,7 +2964,7 @@ F:	sound/pci/oxygen/
 
 C6X ARCHITECTURE
 M:	Mark Salter <msalter@redhat.com>
-M:	Aurelien Jacquiot <a-jacquiot@ti.com>
+M:	Aurelien Jacquiot <jacquiot.aurelien@gmail.com>
L:	linux-c6x-dev@linux-c6x.org
 W:	http://www.linux-c6x.org/wiki/index.php/Main_Page
 S:	Maintained
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -1437,7 +1437,7 @@ help:
	@echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
	@echo  '  make V=2   [targets] 2 => give reason for rebuild of target'
	@echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
-	@echo  '  make C=1   [targets] Check all c source with $$CHECK (sparse by default)'
+	@echo  '  make C=1   [targets] Check re-compiled c source with $$CHECK (sparse by default)'
	@echo  '  make C=2   [targets] Force check of all c source with $$CHECK'
	@echo  '  make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
	@echo  '  make W=n   [targets] Enable extra gcc checks, n=1,2,3 where'
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 6e1242da0159..4104a0839214 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -86,8 +86,6 @@ struct task_struct;
 #define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
 #define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)
 
-#define thread_saved_pc(tsk)	TSK_K_BLINK(tsk)
-
 extern void start_thread(struct pt_regs * regs, unsigned long pc,
			 unsigned long usp);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c1a35f15838..c0fcab6a5504 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1416,6 +1416,7 @@ choice
	config VMSPLIT_3G
		bool "3G/1G user/kernel split"
	config VMSPLIT_3G_OPT
+		depends on !ARM_LPAE
		bool "3G/1G user/kernel split (for full 1G low memory)"
	config VMSPLIT_2G
		bool "2G/2G user/kernel split"
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 3f7d1b74c5e0..a17ca8d78656 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -17,7 +17,8 @@
		@ there.
		.inst	'M' | ('Z' << 8) | (0x1310 << 16)   @ tstne r0, #0x4d000
 #else
-		W(mov)	r0, r0
+ AR_CLASS(	mov	r0, r0		)
+  M_CLASS(	nop.w			)
 #endif
		.endm
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 32e1a9513dc7..4e80bf7420d4 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -315,7 +315,7 @@ static void __init cacheid_init(void)
	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
 
-		if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
+		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 41b6e31f8f55..d0cb007fa482 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
	/* tkr_mono.cycle_last == tkr_raw.cycle_last */
	vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
	vdso_data->raw_time_sec		= tk->raw_time.tv_sec;
-	vdso_data->raw_time_nsec	= tk->raw_time.tv_nsec;
+	vdso_data->raw_time_nsec	= (tk->raw_time.tv_nsec <<
+					   tk->tkr_raw.shift) +
+					  tk->tkr_raw.xtime_nsec;
	vdso_data->xtime_clock_sec	= tk->xtime_sec;
	vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
-	/* tkr_raw.xtime_nsec == 0 */
	vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
	vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
	/* tkr_mono.shift == tkr_raw.shift */
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b4671bd7c..76320e920965 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@ monotonic_raw:
	seqcnt_check fail=monotonic_raw
 
	/* All computations are done with left-shifted nsecs. */
-	lsl	x14, x14, x12
	get_nsec_per_sec res=x9
	lsl	x9, x9, x12
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index 85d4af97c986..dbdbb8a558df 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -75,11 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk)	(tsk->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)							\
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index b9eb3da7f278..7c87b5be53b5 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -96,11 +96,6 @@ static inline void release_thread(struct task_struct *dead_task)
 #define release_segments(mm)		do { } while (0)
 
 /*
- * saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)
-
-/*
  * saved kernel SP and DP of a blocked thread.
  */
 #ifdef _BIG_ENDIAN
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index e299d30105b5..a2cdb1521aca 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -69,14 +69,6 @@ void hard_reset_now (void)
	while(1) /* waiting for RETRIBUTION! */ ;
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return task_pt_regs(t)->irp;
-}
-
 /* setup the child's kernel stack with a pt_regs and switch_stack on it.
  * it will be un-nested during _resume and _ret_from_sys_call when the
  * new thread is scheduled.
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index c530a8fa87ce..fe87b383fbf3 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -85,14 +85,6 @@ hard_reset_now(void)
 }
 
 /*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return task_pt_regs(t)->erp;
-}
-
-/*
  * Setup the child's kernel stack with a pt_regs and call switch_stack() on it.
  * It will be unnested during _resume and _ret_from_sys_call when the new thread
  * is scheduled.
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 15b815df29c1..bc2729e4b2c9 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -52,8 +52,6 @@ unsigned long get_wchan(struct task_struct *p);
 
 #define KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 /* Free all resources held by a thread. */
 static inline void release_thread(struct task_struct *dead_task)
 {
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index ddaeb9cc9143..e4d08d74ed9f 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -96,11 +96,6 @@ extern asmlinkage void *restore_user_regs(const struct user_context *target, ...
 #define release_segments(mm)		do { } while (0)
 #define forget_segments()		do { } while (0)
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	((tsk)->thread.frame0->pc)
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 5a4c92abc99e..a957b374e3a6 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -198,15 +198,6 @@ unsigned long get_wchan(struct task_struct *p)
	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(tsk->thread.pc))
-		return ((unsigned long *)tsk->thread.fp)[2];
-	else
-		return tsk->thread.pc;
-}
-
 int elf_check_arch(const struct elf32_hdr *hdr)
 {
	unsigned long hsr0 = __get_HSR(0);
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
index 65132d7ae9e5..afa53147e66a 100644
--- a/arch/h8300/include/asm/processor.h
+++ b/arch/h8300/include/asm/processor.h
@@ -110,10 +110,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 0f5db5bb561b..d1ddcabbbe83 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -129,11 +129,6 @@ int copy_thread(unsigned long clone_flags,
	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
 unsigned long get_wchan(struct task_struct *p)
 {
	unsigned long fp, pc;
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index 45a825402f63..ce67940860a5 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -33,9 +33,6 @@
 /*  task_struct, defined elsewhere, is the "process descriptor" */
 struct task_struct;
 
-/* this is defined in arch/process.c */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 
 /*
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index de715bab7956..656050c2e6a0 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -61,14 +61,6 @@ void arch_cpu_idle(void)
 }
 
 /*
- * Return saved PC of a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return 0;
-}
-
-/*
  * Copy architecture-specific thread state
  */
 int copy_thread(unsigned long clone_flags, unsigned long usp,
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 26a63d69c599..ab982f07ea68 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -602,23 +602,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
 }
 
 /*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
-	struct unw_frame_info info;
-	unsigned long ip;
-
-	unw_init_from_blocked_task(&info, t);
-	if (unw_unwind(&info) < 0)
-		return 0;
-	unw_get_ip(&info, &ip);
-	return ip;
-}
-
-/*
  * Get the current instruction/program counter value.
  */
 #define current_text_addr() \
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 5767367550c6..657874eeeccc 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -122,8 +122,6 @@ extern void release_thread(struct task_struct *);
 extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
 extern void release_segments(struct mm_struct * mm);
 
-extern unsigned long thread_saved_pc(struct task_struct *);
-
 /* Copy and release all segment info associated with a VM */
 #define copy_segments(p, mm)  do { } while (0)
 #define release_segments(mm)  do { } while (0)
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index d8ffcfec599c..8cd7e03f4370 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -39,14 +39,6 @@
 
 #include <linux/err.h>
 
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return tsk->thread.lr;
-}
-
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 77239e81379b..94c36030440c 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -130,8 +130,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define	KSTK_EIP(tsk)	\
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index e475c945c8b2..7df92f8b0781 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -40,20 +40,6 @@
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
-
-/*
- * Return saved PC from a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(sw->retpc))
-		return ((unsigned long *)sw->a6)[1];
-	else
-		return sw->retpc;
-}
-
 void arch_cpu_idle(void)
 {
 #if defined(MACH_ATARI_ONLY)
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 37ef196e4519..330d556860ba 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -69,8 +69,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 # define KSTK_EIP(tsk)	(0)
@@ -121,10 +119,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved (kernel) PC of a blocked thread. */
-# define thread_saved_pc(tsk)	\
-	((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
-
 unsigned long get_wchan(struct task_struct *p);
 
 /* The size allocated for kernel stacks. This _must_ be a power of two! */
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index e92a817e645f..6527ec22f158 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -119,23 +119,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
	return 0;
 }
 
-#ifndef CONFIG_MMU
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct cpu_context *ctx =
-		&(((struct thread_info *)(tsk->stack))->cpu_context);
-
-	/* Check whether the thread is blocked in resume() */
-	if (in_sched_functions(ctx->r15))
-		return (unsigned long)ctx->r15;
-	else
-		return ctx->r14;
-}
-#endif
-
 unsigned long get_wchan(struct task_struct *p)
 {
 /* TBD (used by procfs) */
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 7c6336dd2638..7cd92166a0b9 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
			  bool user, bool kernel)
 {
-	int idx_user, idx_kernel;
+	/*
+	 * Initialize idx_user and idx_kernel to workaround bogus
+	 * maybe-initialized warning when using GCC 6.
+	 */
+	int idx_user = 0, idx_kernel = 0;
	unsigned long flags, old_entryhi;
 
	local_irq_save(flags);
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index 18e17abf7664..3ae479117b42 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -132,11 +132,6 @@ static inline void start_thread(struct pt_regs *regs,
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(task) ((task)->thread.uregs)
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index c9fa42619c6a..89e8027e07fb 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -40,14 +40,6 @@
 #include "internal.h"
 
 /*
- * return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return ((unsigned long *) tsk->thread.sp)[3];
-}
-
-/*
  * power off function, if any
  */
 void (*pm_power_off)(void);
diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h
index 3bbbc3d798e5..4944e2e1d8b0 100644
--- a/arch/nios2/include/asm/processor.h
+++ b/arch/nios2/include/asm/processor.h
@@ -75,9 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
-/* Return saved PC of a blocked thread. */
-#define thread_saved_pc(tsk)	((tsk)->thread.kregs->ea)
-
 extern unsigned long get_wchan(struct task_struct *p);
 
 #define task_pt_regs(p) \
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index a908e6c30a00..396d8f306c21 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -84,11 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
 void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 106859ae27ff..f9b77003f113 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -110,11 +110,6 @@ void show_regs(struct pt_regs *regs)
	show_registers(regs);
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return (unsigned long)user_regs(t->stack)->pc;
-}
-
 void release_thread(struct task_struct *dead_task)
 {
 }
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index a3661ee6b060..4c6694b4e77e 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -163,12 +163,7 @@ struct thread_struct {
	.flags		= 0 \
	}
 
-/*
- * Return saved PC of a blocked thread.  This is used by ps mostly.
- */
-
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *t);
 void show_trace(struct task_struct *task, unsigned long *stack);
 
 /*
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 4516a5b53f38..b64d7d21646e 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -239,11 +239,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
	return 0;
 }
 
-unsigned long thread_saved_pc(struct task_struct *t)
-{
-	return t->thread.regs.kpc;
-}
-
 unsigned long
 get_wchan(struct task_struct *p)
 {
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a83821f33ea3..8814a7249ceb 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
			   struct kprobe_ctlblk *kcb);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index bb99b651085a..1189d04f3bd1 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -378,12 +378,6 @@ struct thread_struct {
 }
 #endif
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk)    \
-	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)
 
 unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ae418b85c17c..b886795060fd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
	.balign	IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-	andis.	r0,r4,0xa410		/* weird error? */
+	andis.	r0,r4,0xa450		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
-	andis.	r0,r4,DSISR_DABRMATCH@h
-	bne-	handle_dabr_fault
	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:
 
	/* Error */
	blt-	13f
+
+	/* Reload DSISR into r4 for the DABR check below */
+	ld	r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11:	ld	r4,_DAR(r1)
+11:	andis.	r0,r4,DSISR_DABRMATCH@h
+	bne-	handle_dabr_fault
+	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index fc4343514bed..01addfb0ed0a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
 
+int is_current_kprobe_addr(unsigned long addr)
+{
+	struct kprobe *p = kprobe_running();
+	return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
	return  (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
 
+	/*
+	 * jprobes use jprobe_return() which skips the normal return
+	 * path of the function, and this messes up the accounting of the
+	 * function graph tracer.
+	 *
+	 * Pause function graph tracing while performing the jprobe function.
+	 */
+	pause_graph_tracing();
+
	return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	/* It's OK to start function graph tracing again */
+	unpause_graph_tracing();
	preempt_enable_no_resched();
	return 1;
 }
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a8c1f99e9607..4640f6d64f8b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -616,6 +616,24 @@ void __init exc_lvl_early_init(void)
 #endif
 
 /*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+	ti->task = NULL;
+	ti->cpu = cpu;
+	ti->preempt_count = 0;
+	ti->local_flags = 0;
+	ti->flags = 0;
+	klp_init_thread_info(ti);
+}
+
+/*
  * Stack space used when we detect a bad kernel stack pointer, and
  * early in SMP boots before relocation is enabled. Exclusive emergency
  * stack for machine checks.
@@ -633,24 +651,31 @@ void __init emergency_stack_init(void)
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
+	 *
+	 * The IRQ stacks allocated elsewhere in this file are zeroed and
+	 * initialized in kernel/irq.c. These are initialized here in order
+	 * to have emergency stacks available as early as possible.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);
 
	for_each_possible_cpu(i) {
		struct thread_info *ti;
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
		paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
		/* emergency stack for machine check exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-		klp_init_thread_info(ti);
+		memset(ti, 0, THREAD_SIZE);
+		emerg_stack_init_thread_info(ti, i);
		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
	}
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 7c933a99f5d5..c98e90b4ea7b 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
 
	/* Save all gprs to pt_regs */
-	SAVE_8GPRS(0,r1)
-	SAVE_8GPRS(8,r1)
-	SAVE_8GPRS(16,r1)
-	SAVE_8GPRS(24,r1)
+	SAVE_GPR(0, r1)
+	SAVE_10GPRS(2, r1)
+	SAVE_10GPRS(12, r1)
+	SAVE_10GPRS(22, r1)
+
+	/* Save previous stack pointer (r1) */
+	addi	r8, r1, SWITCH_FRAME_SIZE
+	std	r8, GPR1(r1)
 
	/* Load special regs for save below */
	mfmsr	r8
@@ -95,18 +99,44 @@ ftrace_call:
	bl	ftrace_stub
	nop
 
-	/* Load ctr with the possibly modified NIP */
-	ld	r3, _NIP(r1)
-	mtctr	r3
+	/* Load the possibly modified NIP */
+	ld	r15, _NIP(r1)
+
 #ifdef CONFIG_LIVEPATCH
-	cmpd	r14,r3	/* has NIP been altered? */
+	cmpd	r14, r15	/* has NIP been altered? */
+#endif
+
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+	/* NIP has not been altered, skip over further checks */
+	beq	1f
+
+	/* Check if there is an active kprobe on us */
+	subi	r3, r14, 4
+	bl	is_current_kprobe_addr
+	nop
+
+	/*
+	 * If r3 == 1, then this is a kprobe/jprobe.
+	 * else, this is livepatched function.
+	 *
+	 * The conditional branch for livepatch_handler below will use the
+	 * result of this comparison. For kprobe/jprobe, we just need to branch to
+	 * the new NIP, not call livepatch_handler. The branch below is bne, so we
+	 * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+	 * CR0[EQ] = (r3 == 1).
+	 */
+	cmpdi	r3, 1
+1:
 #endif
 
+	/* Load CTR with the possibly modified NIP */
+	mtctr	r15
+
	/* Restore gprs */
-	REST_8GPRS(0,r1)
-	REST_8GPRS(8,r1)
-	REST_8GPRS(16,r1)
-	REST_8GPRS(24,r1)
+	REST_GPR(0,r1)
+	REST_10GPRS(2,r1)
+	REST_10GPRS(12,r1)
+	REST_10GPRS(22,r1)
 
	/* Restore possibly modified LR */
	ld	r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
	addi r1, r1, SWITCH_FRAME_SIZE
 
 #ifdef CONFIG_LIVEPATCH
-        /* Based on the cmpd above, if the NIP was altered handle livepatch */
+	/*
+	 * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+	 * not on a kprobe/jprobe, then handle livepatch.
+	 */
	bne-	livepatch_handler
 #endif
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 42b7a4fd57d9..8d1a365b8edc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	case KVM_REG_PPC_TB_OFFSET:
+		/*
+		 * POWER9 DD1 has an erratum where writing TBU40 causes
+		 * the timebase to lose ticks. So we don't let the
+		 * timebase offset be changed on P9 DD1. (It is
+		 * initialized to zero.)
+		 */
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			break;
		/* round up to multiple of 2^24 */
		vcpu->arch.vcore->tb_offset =
			ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
	int r;
	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
+	unsigned long user_tar = 0;
+	unsigned int user_vrsave;
 
	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
	kvmppc_core_prepare_to_enter(vcpu);
 
	/* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
	flush_all_to_thread(current);
 
+	/* Save userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+		user_tar = mfspr(SPRN_TAR);
+	}
+	user_vrsave = mfspr(SPRN_VRSAVE);
+
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
		}
	} while (is_kvmppc_resume_guest(r));
 
+	/* Restore userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+		mtspr(SPRN_TAR, user_tar);
+		mtspr(SPRN_FSCR, current->thread.fscr);
+	}
+	mtspr(SPRN_VRSAVE, user_vrsave);
+
 out:
	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 0fdc4a28970b..404deb512844 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	 * Put whatever is in the decrementer into the
	 * hypervisor decrementer.
	 */
+BEGIN_FTR_SECTION
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	ld	r6, VCORE_KVM(r5)
+	ld	r9, KVM_HOST_LPCR(r6)
+	andis.	r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mfspr	r8,SPRN_DEC
	mftb	r7
-	mtspr	SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+	/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+	bne	32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r8,r8
+32:	mtspr	SPRN_HDEC,r8
	add	r8,r8,r7
	std	r8,HSTATE_DECEXP(r13)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bdb3f76ceb6b..4888dd494604 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)			\
+BEGIN_FTR_SECTION;				\
+	extsw	reg, reg;			\
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS			144
+#define STACK_SLOT_TRAP		(SFS-4)
+#define STACK_SLOT_TID		(SFS-16)
+#define STACK_SLOT_PSSCR	(SFS-24)
+#define STACK_SLOT_PID		(SFS-32)
+#define STACK_SLOT_IAMR		(SFS-40)
+#define STACK_SLOT_CIABR	(SFS-48)
+#define STACK_SLOT_DAWR		(SFS-56)
+#define STACK_SLOT_DAWRX	(SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
+	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 
	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
+	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-	cmpwi	r0, 0
+	cmpdi	r0, 0
	blt	kvm_novcpu_exit
 
	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
	bl	kvmhv_accumulate_time
 #endif
 13:	mr	r3, r12
-	stw	r12, 112-4(r1)
+	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
-	lwz	r12, 112-4(r1)
+	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host
 
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
-	lis	r6, 0x7fff
-	ori	r6, r6, 0xffff
+	LOAD_REG_ADDR(r6, decrementer_max)
+	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  *                                                                            *
  *****************************************************************************/
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID		(112-16)
-#define STACK_SLOT_PSSCR	(112-24)
-#define STACK_SLOT_PID		(112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -112(r1)
+	stdu	r1, -SFS(r1)
 
	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
+	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_CIABR
+	mfspr	r6, SPRN_DAWR
+	mfspr	r7, SPRN_DAWRX
+	std	r5, STACK_SLOT_CIABR(r1)
+	std	r6, STACK_SLOT_DAWR(r1)
+	std	r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 BEGIN_FTR_SECTION
	/* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 
	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
-	cmpwi	r3, 512		/* 1 microsecond */
+	EXTEND_HDEC(r3)
+	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon
 
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
-	mtspr	SPRN_IAMR, r0
-	mtspr	SPRN_CIABR, r0
-	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
 BEGIN_FTR_SECTION
+	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
+	mtspr	SPRN_UAMOR, r6
 
	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
@@ -1670,12 +1696,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
	/* Restore host values of some registers */
 BEGIN_FTR_SECTION
+	ld	r5, STACK_SLOT_CIABR(r1)
+	ld	r6, STACK_SLOT_DAWR(r1)
+	ld	r7, STACK_SLOT_DAWRX(r1)
+	mtspr	SPRN_CIABR, r5
+	mtspr	SPRN_DAWR, r6
+	mtspr	SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
+	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
+	mtspr	SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
	PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
 
-	ld	r0, 112+PPC_LR_STKOFF(r1)
-	addi	r1, r1, 112
+	ld	r0, SFS+PPC_LR_STKOFF(r1)
+	addi	r1, r1, SFS
	mtlr	r0
	blr
 
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
-	cmpw	r3, r4
+	extsw	r3, r3
+	EXTEND_HDEC(r4)
+	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
 67:
	/* save expiry time of guest decrementer */
-	extsw	r3, r3
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index cbd82fde5770..09ceea6175ba 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs_user_copy)
 {
	regs_user->regs = task_pt_regs(current);
-	regs_user->abi  = perf_reg_abi(current);
+	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+			 PERF_SAMPLE_REGS_ABI_NONE;
 }
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index e6f444b46207..b5d960d6db3d 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -449,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
	return mmio_atsd_reg;
 }
 
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
 {
	unsigned long launch;
 
@@ -465,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
	/* Invalidating the entire process doesn't use a va */
	return mmio_launch_invalidate(npu, launch, 0);
 }
 
 static int mmio_invalidate_va(struct npu *npu, unsigned long va,
-			unsigned long pid)
+			unsigned long pid, bool flush)
 {
	unsigned long launch;
 
@@ -486,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
	/* PID */
	launch |= pid << PPC_BITLSHIFT(38);
 
+	/* No flush */
+	launch |= !flush << PPC_BITLSHIFT(39);
+
	return mmio_launch_invalidate(npu, launch, va);
 }
 
 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
 
+struct mmio_atsd_reg {
+	struct npu *npu;
+	int reg;
+};
+
+static void mmio_invalidate_wait(
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+	struct npu *npu;
+	int i, reg;
+
+	/* Wait for all invalidations to complete */
+	for (i = 0; i <= max_npu2_index; i++) {
+		if (mmio_atsd_reg[i].reg < 0)
+			continue;
+
+		/* Wait for completion */
+		npu = mmio_atsd_reg[i].npu;
+		reg = mmio_atsd_reg[i].reg;
+		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+			cpu_relax();
+
+		put_mmio_atsd_reg(npu, reg);
+
+		/*
+		 * The GPU requires two flush ATSDs to ensure all entries have
+		 * been flushed. We use PID 0 as it will never be used for a
+		 * process on the GPU.
+		 */
+		if (flush)
+			mmio_invalidate_pid(npu, 0, true);
+	}
+}
+
 /*
  * Invalidate either a single address or an entire PID depending on
  * the value of va.
  */
 static void mmio_invalidate(struct npu_context *npu_context, int va,
-			unsigned long address)
+			unsigned long address, bool flush)
 {
-	int i, j, reg;
+	int i, j;
	struct npu *npu;
	struct pnv_phb *nphb;
	struct pci_dev *npdev;
-	struct {
-		struct npu *npu;
-		int reg;
-	} mmio_atsd_reg[NV_MAX_NPUS];
+	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
	unsigned long pid = npu_context->mm->context.id;
 
	/*
@@ -525,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
 
		if (va)
			mmio_atsd_reg[i].reg =
-				mmio_invalidate_va(npu, address, pid);
+				mmio_invalidate_va(npu, address, pid,
+						flush);
		else
			mmio_atsd_reg[i].reg =
-				mmio_invalidate_pid(npu, pid);
+				mmio_invalidate_pid(npu, pid, flush);
 
		/*
		 * The NPU hardware forwards the shootdown to all GPUs
@@ -544,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
	 */
	flush_tlb_mm(npu_context->mm);
 
-	/* Wait for all invalidations to complete */
-	for (i = 0; i <= max_npu2_index; i++) {
-		if (mmio_atsd_reg[i].reg < 0)
-			continue;
-
-		/* Wait for completion */
-		npu = mmio_atsd_reg[i].npu;
-		reg = mmio_atsd_reg[i].reg;
-		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
-			cpu_relax();
-		put_mmio_atsd_reg(npu, reg);
-	}
+	mmio_invalidate_wait(mmio_atsd_reg, flush);
+	if (flush)
+		/* Wait for the flush to complete */
+		mmio_invalidate_wait(mmio_atsd_reg, false);
 }
 
 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -571,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
	 * There should be no more translation requests for this PID, but we
	 * need to ensure any entries for it are removed from the TLB.
	 */
-	mmio_invalidate(npu_context, 0, 0);
+	mmio_invalidate(npu_context, 0, 0, true);
 }
 
 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -581,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
 {
	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -590,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
 {
	struct npu_context *npu_context = mn_to_npu_context(mn);
 
-	mmio_invalidate(npu_context, 1, address);
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -600,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
	struct npu_context *npu_context = mn_to_npu_context(mn);
	unsigned long address;
 
-	for (address = start; address <= end; address += PAGE_SIZE)
-		mmio_invalidate(npu_context, 1, address);
+	for (address = start; address < end; address += PAGE_SIZE)
+		mmio_invalidate(npu_context, 1, address, false);
+
+	/* Do the flush only on the final addess == end */
+	mmio_invalidate(npu_context, 1, address, true);
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -651,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
		/* No nvlink associated with this GPU device */
		return ERR_PTR(-ENODEV);
 
-	if (!mm) {
-		/* kernel thread contexts are not supported */
+	if (!mm || mm->context.id == 0) {
+		/*
+		 * Kernel thread contexts are not supported and context id 0 is
+		 * reserved on the GPU.
+		 */
		return ERR_PTR(-EINVAL);
	}
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 60d395fdc864..aeac013968f2 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -221,11 +221,6 @@ extern void release_thread(struct task_struct *);
 /* Free guarded storage control block for current */
 void exit_thread_gs(void);
 
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
        (task_stack_page(tsk) + THREAD_SIZE) - 1)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e545ffe5155a..8e622bb52f7a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -564,8 +564,6 @@ static struct kset *ipl_kset;
 
 static void __ipl_run(void *unused)
 {
-	if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
-		diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
	diag308(DIAG308_LOAD_CLEAR, NULL);
	if (MACHINE_IS_VM)
		__cpcmd("IPL", NULL, 0, NULL);
@@ -1088,10 +1086,7 @@ static void __reipl_run(void *unused)
		break;
	case REIPL_METHOD_CCW_DIAG:
		diag308(DIAG308_SET, reipl_block_ccw);
-		if (MACHINE_IS_LPAR)
-			diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
-		else
-			diag308(DIAG308_LOAD_CLEAR, NULL);
+		diag308(DIAG308_LOAD_CLEAR, NULL);
		break;
	case REIPL_METHOD_FCP_RW_DIAG:
		diag308(DIAG308_SET, reipl_block_fcp);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 999d7154bbdc..bb32b8618bf6 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -41,31 +41,6 @@
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
-/*
- * Return saved PC of a blocked thread. used in kernel/sched.
- * resume in entry.S does not create a new stack frame, it
- * just stores the registers %r6-%r15 to the frame given by
- * schedule. We want to return the address of the caller of
- * schedule, so we have to walk the backchain one time to
- * find the frame schedule() store its return address.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct stack_frame *sf, *low, *high;
-
-	if (!tsk || !task_stack_page(tsk))
-		return 0;
-	low = task_stack_page(tsk);
-	high = (struct stack_frame *) task_pt_regs(tsk);
-	sf = (struct stack_frame *) tsk->thread.ksp;
-	if (sf <= low || sf > high)
-		return 0;
-	sf = (struct stack_frame *) sf->back_chain;
-	if (sf <= low || sf > high)
-		return 0;
-	return sf->gprs[8];
-}
-
 extern void kernel_thread_starter(void);
 
 /*
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9da243d94cc3..3b297fa3aa67 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
	ptr = asce.origin * 4096;
	if (asce.r) {
		*fake = 1;
+		ptr = 0;
		asce.dt = ASCE_TYPE_REGION1;
	}
	switch (asce.dt) {
	case ASCE_TYPE_REGION1:
-		if (vaddr.rfx01 > asce.tl && !asce.r)
+		if (vaddr.rfx01 > asce.tl && !*fake)
			return PGM_REGION_FIRST_TRANS;
		break;
	case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
		union region1_table_entry rfte;
 
		if (*fake) {
-			/* offset in 16EB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+			ptr += (unsigned long) vaddr.rfx << 53;
			rfte.val = ptr;
			goto shadow_r2t;
		}
@@ -1036,8 +1036,7 @@ shadow_r2t:
		union region2_table_entry rste;
 
		if (*fake) {
-			/* offset in 8PB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+			ptr += (unsigned long) vaddr.rsx << 42;
			rste.val = ptr;
			goto shadow_r3t;
		}
@@ -1064,8 +1063,7 @@ shadow_r3t:
		union region3_table_entry rtte;
 
		if (*fake) {
-			/* offset in 4TB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+			ptr += (unsigned long) vaddr.rtx << 31;
			rtte.val = ptr;
			goto shadow_sgt;
		}
@@ -1101,8 +1099,7 @@ shadow_sgt:
		union segment_table_entry ste;
 
		if (*fake) {
-			/* offset in 2G guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+			ptr += (unsigned long) vaddr.sx << 20;
			ste.val = ptr;
			goto shadow_pgt;
		}
diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h
index d9a922d8711b..299274581968 100644
--- a/arch/score/include/asm/processor.h
+++ b/arch/score/include/asm/processor.h
@@ -13,7 +13,6 @@ struct task_struct;
  */
 extern void (*cpu_wait)(void);
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
 extern void start_thread(struct pt_regs *regs,
			unsigned long pc, unsigned long sp);
 extern unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index eb64d7a677cb..6e20241a1ed4 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -101,11 +101,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
	return 1;
 }
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return task_pt_regs(tsk)->cp0_epc;
-}
-
 unsigned long get_wchan(struct task_struct *task)
 {
	if (!task || task == current || task->state == TASK_RUNNING)
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index dd27159819eb..b395e5620c0b 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -67,9 +67,6 @@ struct thread_struct {
	.current_ds = KERNEL_DS, \
 }
 
-/* Return saved PC of a blocked thread. */
-unsigned long thread_saved_pc(struct task_struct *t);
-
 /* Do necessary setup to start up a newly executed thread. */
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
				 unsigned long sp)
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index b58ee9018433..f04dc5a43062 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -89,9 +89,7 @@ struct thread_struct {
 #include <linux/types.h>
 #include <asm/fpumacro.h>
 
-/* Return saved PC of a blocked thread. */
 struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *);
 
 /* On Uniprocessor, even in RMO processes see TSO semantics */
 #ifdef CONFIG_SMP
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index b6dac8e980f0..9245f93398c7 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -177,14 +177,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 }
 
 /*
- * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	return task_thread_info(tsk)->kpc;
-}
-
-/*
  * Free current thread data structures etc..
  */
 void exit_thread(struct task_struct *tsk)
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 1badc493e62e..b96104da5bd6 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -400,25 +400,6 @@ core_initcall(sparc_sysrq_init);
 
 #endif
 
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct thread_info *ti = task_thread_info(tsk);
-	unsigned long ret = 0xdeadbeefUL;
-
-	if (ti && ti->ksp) {
-		unsigned long *sp;
-		sp = (unsigned long *)(ti->ksp + STACK_BIAS);
-		if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
-		    sp[14]) {
-			unsigned long *fp;
-			fp = (unsigned long *)(sp[14] + STACK_BIAS);
-			if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
-				ret = fp[15];
-		}
-	}
-	return ret;
-}
-
 /* Free current thread data structures etc.. */
 void exit_thread(struct task_struct *tsk)
 {
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 0bc9968b97a1..f71e5206650b 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -214,13 +214,6 @@ static inline void release_thread(struct task_struct *dead_task)
 
 extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
 
-
-/*
- * Return saved (kernel) PC of a blocked thread.
- * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
- */
-#define thread_saved_pc(t)   ((t)->thread.pc)
-
 unsigned long get_wchan(struct task_struct *p);
 
 /* Return initial ksp value for given task. */
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 2d1e0dd5bb0b..f6d1a3f747a9 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -58,8 +58,6 @@ static inline void release_thread(struct task_struct *task)
 {
 }
 
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
 static inline void mm_copy_segments(struct mm_struct *from_mm,
				    struct mm_struct *new_mm)
 {
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 64a1fd06f3fd..7b5640117325 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -56,12 +56,6 @@ union thread_union cpu0_irqstack
	__attribute__((__section__(".data..init_irqstack"))) =
		{ INIT_THREAD_INFO(init_task) };
 
-unsigned long thread_saved_pc(struct task_struct *task)
-{
-	/* FIXME: Need to look up userspace_pid by cpu */
-	return os_process_pc(userspace_pid[0]);
-}
-
 /* Changed in setup_arch, which is called in early boot */
 static char host_info[(__NEW_UTS_LEN + 1) * 5];
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a6d91d4e37a1..110ce8238466 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
-		[ C(RESULT_MISS)   ] = 0x608,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe08,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
-		[ C(RESULT_MISS)   ] = 0x649,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
+		[ C(RESULT_MISS)   ] = 0xe49,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 055962615779..722d0e568863 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
 
	bool perm_ok; /* do not check permissions if true */
	bool ud;	/* inject an #UD if host doesn't support insn */
+	bool tf;	/* TF value before instruction (after for syscall/sysret) */
 
	bool have_exception;
	struct x86_exception exception;
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index fba100713924..d5acc27ed1cc 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -2,8 +2,7 @@
 #define _ASM_X86_MSHYPER_H
 
 #include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
+#include <linux/atomic.h>
 #include <asm/hyperv.h>
 
 /*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3cada998a402..a28b671f1549 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -860,8 +860,6 @@ extern unsigned long KSTK_ESP(struct task_struct *task);
 
 #endif /* CONFIG_X86_64 */
 
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0bb88428cbf2..3ca198080ea9 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -545,17 +545,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 }
 
 /*
- * Return saved PC of a blocked thread.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-	struct inactive_task_frame *frame =
-		(struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
-	return READ_ONCE_NOCHECK(frame->ret_addr);
-}
-
-/*
  * Called from fs/proc with a reference on @p to find the function
  * which called into schedule(). This needs to be done carefully
  * because the task might wake up and we might look at a stack
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0816ab2e8adc..80890dee66ce 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}
 
+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	return X86EMUL_CONTINUE;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 87d3cb901935..0e846f0cb83b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
	ctxt->eflags = kvm_get_rflags(vcpu);
+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
	return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 {
	struct kvm_run *kvm_run = vcpu->run;
 
-	/*
-	 * rflags is the old, "raw" value of the flags.  The new value has
-	 * not been saved yet.
-	 *
-	 * This is correct even for TF set by the guest, because "the
-	 * processor will not generate this exception after the instruction
-	 * that sets the TF flag".
-	 */
-	if (unlikely(rflags & X86_EFLAGS_TF)) {
-		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-						  DR6_RTM;
-			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
-			kvm_run->debug.arch.exception = DB_VECTOR;
-			kvm_run->exit_reason = KVM_EXIT_DEBUG;
-			*r = EMULATE_USER_EXIT;
-		} else {
-			/*
-			 * "Certain debug exceptions may clear bit 0-3.  The
-			 * remaining contents of the DR6 register are never
-			 * cleared by the processor".
-			 */
-			vcpu->arch.dr6 &= ~15;
-			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-			kvm_queue_exception(vcpu, DB_VECTOR);
-		}
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+		kvm_run->debug.arch.exception = DB_VECTOR;
+		kvm_run->exit_reason = KVM_EXIT_DEBUG;
+		*r = EMULATE_USER_EXIT;
+	} else {
+		/*
+		 * "Certain debug exceptions may clear bit 0-3.  The
+		 * remaining contents of the DR6 register are never
+		 * cleared by the processor".
+		 */
+		vcpu->arch.dr6 &= ~15;
+		vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+		kvm_queue_exception(vcpu, DB_VECTOR);
	}
 }
 
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
	int r = EMULATE_DONE;
 
	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+	/*
+	 * rflags is the old, "raw" value of the flags.  The new value has
+	 * not been saved yet.
+	 *
+	 * This is correct even for TF set by the guest, because "the
+	 * processor will not generate this exception after the instruction
+	 * that sets the TF flag".
+ */ + if (unlikely(rflags & X86_EFLAGS_TF)) + kvm_vcpu_do_singlestep(vcpu, &r); return r == EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); @@ -5726,8 +5727,9 @@ restart: toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); - if (r == EMULATE_DONE) - kvm_vcpu_check_singlestep(vcpu, rflags, &r); + if (r == EMULATE_DONE && + (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) + kvm_vcpu_do_singlestep(vcpu, &r); if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) __kvm_set_rflags(vcpu, ctxt->eflags); diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 003eeee3fbc6..30ee8c608853 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -213,8 +213,6 @@ struct mm_struct; #define release_segments(mm) do { } while(0) #define forget_segments() do { } while (0) -#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc) - extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) diff --git a/block/bio.c b/block/bio.c index 888e7801c638..26b0810fb8ea 100644 --- a/block/bio.c +++ b/block/bio.c @@ -240,20 +240,21 @@ fallback: return bvl; } -static void __bio_free(struct bio *bio) +void bio_uninit(struct bio *bio) { bio_disassociate_task(bio); if (bio_integrity(bio)) bio_integrity_free(bio); } +EXPORT_SYMBOL(bio_uninit); static void bio_free(struct bio *bio) { struct bio_set *bs = bio->bi_pool; void *p; - __bio_free(bio); + bio_uninit(bio); if (bs) { bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio)); @@ -271,6 +272,11 @@ static void bio_free(struct bio *bio) } } +/* + * Users of this function have their own bio allocation. Subsequently, + * they must remember to pair any call to bio_init() with bio_uninit() + * when IO has completed, or when the bio is released. + */ void bio_init(struct bio *bio, struct bio_vec *table, unsigned short max_vecs) { @@ -297,7 +303,7 @@ void bio_reset(struct bio *bio) { unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); - __bio_free(bio); + bio_uninit(bio); memset(bio, 0, BIO_RESET_BYTES); bio->bi_flags = flags; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 1f5b692526ae..0ded5e846335 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q, __blk_mq_sched_assign_ioc(q, rq, bio, ioc); } +/* + * Mark a hardware queue as needing a restart. For shared queues, maintain + * a count of how many hardware queues are marked for restart. 
+ */ +static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) +{ + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + return; + + if (hctx->flags & BLK_MQ_F_TAG_SHARED) { + struct request_queue *q = hctx->queue; + + if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_inc(&q->shared_hctx_restart); + } else + set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); +} + +static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) +{ + if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + return false; + + if (hctx->flags & BLK_MQ_F_TAG_SHARED) { + struct request_queue *q = hctx->queue; + + if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_dec(&q->shared_hctx_restart); + } else + clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + + if (blk_mq_hctx_has_pending(hctx)) { + blk_mq_run_hw_queue(hctx, true); + return true; + } + + return false; +} + struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, @@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, return true; } -static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) -{ - if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { - clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (blk_mq_hctx_has_pending(hctx)) { - blk_mq_run_hw_queue(hctx, true); - return true; - } - } - return false; -} - /** * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list * @pos: loop cursor. @@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx) unsigned int i, j; if (set->flags & BLK_MQ_F_TAG_SHARED) { + /* + * If this is 0, then we know that no hardware queues + * have RESTART marked. We're done. + */ + if (!atomic_read(&queue->shared_hctx_restart)) + return; + rcu_read_lock(); list_for_each_entry_rcu_rr(q, queue, &set->tag_list, tag_set_list) { diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index edafb5383b7b..5007edece51a 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) return false; } -/* - * Mark a hardware queue as needing a restart. - */ -static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) -{ - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) - set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); -} - static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) { return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); diff --git a/block/blk-mq.c b/block/blk-mq.c index bb66c96850b1..958cedaff8b8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q, } } +/* + * Caller needs to ensure that we're either frozen/quiesced, or that + * the queue isn't live yet. 
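The new shared_hctx_restart counter pays for itself in blk_mq_sched_restart(): on a shared tag set that function otherwise round-robins over every queue and hardware context in the set, so a single atomic read can short-circuit the walk when nothing is marked. The mark side condenses to the following (using the kernel's bitops and atomic_t, as in the hunk above):

    static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
    {
            if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                    return;                         /* already marked */

            if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
                    /* only the 0 -> 1 transition may bump the counter,
                     * hence the atomic test-and-set */
                    if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                            atomic_inc(&hctx->queue->shared_hctx_restart);
            } else {
                    set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
            }
    }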
+ */ static void queue_set_hctx_shared(struct request_queue *q, bool shared) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) { - if (shared) + if (shared) { + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_inc(&q->shared_hctx_restart); hctx->flags |= BLK_MQ_F_TAG_SHARED; - else + } else { + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + atomic_dec(&q->shared_hctx_restart); hctx->flags &= ~BLK_MQ_F_TAG_SHARED; + } } } -static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared) +static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, + bool shared) { struct request_queue *q; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 3a10d7573477..d53162997f32 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev) adev->flags.coherent_dma = cca; } +static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data) +{ + bool *is_spi_i2c_slave_p = data; + + if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) + return 1; + + /* + * devices that are connected to UART still need to be enumerated to + * platform bus + */ + if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART) + *is_spi_i2c_slave_p = true; + + /* no need to do more checking */ + return -1; +} + +static bool acpi_is_spi_i2c_slave(struct acpi_device *device) +{ + struct list_head resource_list; + bool is_spi_i2c_slave = false; + + INIT_LIST_HEAD(&resource_list); + acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, + &is_spi_i2c_slave); + acpi_dev_free_resource_list(&resource_list); + + return is_spi_i2c_slave; +} + void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, int type, unsigned long long sta) { @@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, acpi_bus_get_flags(device); device->flags.match_driver = false; device->flags.initialized = true; + device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device); acpi_device_clear_enumerated(device); device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); @@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used, return AE_OK; } -static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data) -{ - bool *is_spi_i2c_slave_p = data; - - if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) - return 1; - - /* - * devices that are connected to UART still need to be enumerated to - * platform bus - */ - if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART) - *is_spi_i2c_slave_p = true; - - /* no need to do more checking */ - return -1; -} - static void acpi_default_enumeration(struct acpi_device *device) { - struct list_head resource_list; - bool is_spi_i2c_slave = false; - /* * Do not enumerate SPI/I2C slaves as they will be enumerated by their * respective parents. 
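With the check hoisted into acpi_init_device_object(), the _CRS walk runs once per device and the verdict is cached in device->flags.spi_i2c_slave; acpi_default_enumeration() and acpi_bus_attach() then test the cached bit instead of re-walking the resource list. The consumer side reduces to:

    /* Enumerate to the platform bus only when no SPI/I2C serial-bus
     * resource was found at init time; UART slaves still go to the
     * platform bus, as noted in the callback above. */
    if (!device->flags.spi_i2c_slave) {
            acpi_create_platform_device(device, NULL);
            acpi_device_set_enumerated(device);
    }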
*/ - INIT_LIST_HEAD(&resource_list); - acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, - &is_spi_i2c_slave); - acpi_dev_free_resource_list(&resource_list); - if (!is_spi_i2c_slave) { + if (!device->flags.spi_i2c_slave) { acpi_create_platform_device(device, NULL); acpi_device_set_enumerated(device); } else { @@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device) return; device->flags.match_driver = true; - if (ret > 0) { + if (ret > 0 && !device->flags.spi_i2c_slave) { acpi_device_set_enumerated(device); goto ok; } @@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device) if (ret < 0) return; - if (device->pnp.type.platform_id) - acpi_default_enumeration(device); - else + if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave) acpi_device_set_enumerated(device); + else + acpi_default_enumeration(device); ok: list_for_each_entry(child, &device->children, node) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 726c32e35db9..0e824091a12f 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg) unsigned long timeout; int ret; - xen_blkif_get(blkif); - set_freezable(); while (!kthread_should_stop()) { if (try_to_freeze()) @@ -665,7 +663,6 @@ purge_gnt_list: print_stats(ring); ring->xenblkd = NULL; - xen_blkif_put(blkif); return 0; } @@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, static void make_response(struct xen_blkif_ring *ring, u64 id, unsigned short op, int st) { - struct blkif_response resp; + struct blkif_response *resp; unsigned long flags; union blkif_back_rings *blk_rings; int notify; - resp.id = id; - resp.operation = op; - resp.status = st; - spin_lock_irqsave(&ring->blk_ring_lock, flags); blk_rings = &ring->blk_rings; /* Place on the response ring for the relevant domain. 
*/ switch (ring->blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: - memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->native, + blk_rings->native.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_32: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_32, + blk_rings->x86_32.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_64: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_64, + blk_rings->x86_64.rsp_prod_pvt); break; default: BUG(); } + + resp->id = id; + resp->operation = op; + resp->status = st; + blk_rings->common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); spin_unlock_irqrestore(&ring->blk_ring_lock, flags); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index dea61f6ab8cb..ecb35fe8ca8d 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues; struct blkif_common_request { char dummy; }; -struct blkif_common_response { - char dummy; -}; + +/* i386 protocol version */ struct blkif_x86_32_request_rw { uint8_t nr_segments; /* number of segments */ @@ -129,14 +128,6 @@ struct blkif_x86_32_request { } u; } __attribute__((__packed__)); -/* i386 protocol version */ -#pragma pack(push, 4) -struct blkif_x86_32_response { - uint64_t id; /* copied from request */ - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? */ -}; -#pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request_rw { @@ -193,18 +184,12 @@ struct blkif_x86_64_request { } u; } __attribute__((__packed__)); -struct blkif_x86_64_response { - uint64_t __attribute__((__aligned__(8))) id; - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? */ -}; - DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, - struct blkif_common_response); + struct blkif_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, - struct blkif_x86_32_response); + struct blkif_response __packed); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, - struct blkif_x86_64_response); + struct blkif_response); union blkif_back_rings { struct blkif_back_ring native; @@ -281,6 +266,7 @@ struct xen_blkif_ring { wait_queue_head_t wq; atomic_t inflight; + bool active; /* One thread per blkif ring. 
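The make_response() rework above trades a build-on-stack-plus-memcpy for in-place construction: RING_GET_RESPONSE() returns a pointer into the shared ring slot for the negotiated protocol and the fields are written straight through it, which is also what lets the three per-protocol response structs collapse into the single struct blkif_response seen in common.h below. The native-protocol arm, condensed:

    resp = RING_GET_RESPONSE(&blk_rings->native,
                             blk_rings->native.rsp_prod_pvt);
    resp->id = id;                  /* written directly into the shared page */
    resp->operation = op;
    resp->status = st;

    blk_rings->common.rsp_prod_pvt++;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);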
*/ struct task_struct *xenblkd; unsigned int waiting_reqs; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 1f3dfaa54d87..792da683e70d 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif) init_waitqueue_head(&ring->shutdown_wq); ring->blkif = blkif; ring->st_print = jiffies; - xen_blkif_get(blkif); + ring->active = true; } return 0; @@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) struct xen_blkif_ring *ring = &blkif->rings[r]; unsigned int i = 0; + if (!ring->active) + continue; + if (ring->xenblkd) { kthread_stop(ring->xenblkd); wake_up(&ring->shutdown_wq); - ring->xenblkd = NULL; } /* The above kthread_stop() guarantees that at this point we @@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) BUG_ON(ring->free_pages_num != 0); BUG_ON(ring->persistent_gnt_c != 0); WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); - xen_blkif_put(blkif); + ring->active = false; } blkif->nr_ring_pages = 0; /* @@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) static void xen_blkif_free(struct xen_blkif *blkif) { - - xen_blkif_disconnect(blkif); + WARN_ON(xen_blkif_disconnect(blkif)); xen_vbd_free(&blkif->vbd); + kfree(blkif->be->mode); + kfree(blkif->be); /* Make sure everything is drained before shutting down */ kmem_cache_free(xen_blkif_cachep, blkif); @@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev) xen_blkif_put(be->blkif); } - kfree(be->mode); - kfree(be); return 0; } diff --git a/drivers/char/random.c b/drivers/char/random.c index e870f329db88..01a260f67437 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len) p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp; cp++; crng_init_cnt++; len--; } + spin_unlock_irqrestore(&primary_crng.lock, flags); if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { invalidate_batched_entropy(); crng_init = 1; wake_up_interruptible(&crng_init_wait); pr_notice("random: fast init done\n"); } - spin_unlock_irqrestore(&primary_crng.lock, flags); return 1; } @@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } memzero_explicit(&buf, sizeof(buf)); crng->init_time = jiffies; + spin_unlock_irqrestore(&primary_crng.lock, flags); if (crng == &primary_crng && crng_init < 2) { invalidate_batched_entropy(); crng_init = 2; @@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) wake_up_interruptible(&crng_init_wait); pr_notice("random: crng init done\n"); } - spin_unlock_irqrestore(&primary_crng.lock, flags); } static inline void crng_wait_ready(void) @@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); u64 get_random_u64(void) { u64 ret; - bool use_lock = crng_init < 2; - unsigned long flags; + bool use_lock = READ_ONCE(crng_init) < 2; + unsigned long flags = 0; struct batched_entropy *batch; #if BITS_PER_LONG == 64 @@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); u32 get_random_u32(void) { u32 ret; - bool use_lock = crng_init < 2; - unsigned long flags; + bool use_lock = READ_ONCE(crng_init) < 2; + unsigned long flags = 0; struct batched_entropy *batch; if (arch_get_random_int(&ret)) diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 
4bed671e490e..8b5c30062d99 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame) return 0; } - rate = readl_relaxed(frame + CNTFRQ); + rate = readl_relaxed(base + CNTFRQ); - iounmap(frame); + iounmap(base); return rate; } diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index 44e5e951583b..8e64b8460f11 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -18,6 +18,7 @@ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/clockchips.h> +#include <linux/clocksource.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/slab.h> diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 2e9c830ae1cd..c4656c4d44a6 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -12,6 +12,7 @@ #include <linux/clk.h> #include <linux/clockchips.h> +#include <linux/clocksource.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 5104b6398139..c83ea68be792 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -721,7 +721,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev, u32 set; if (!of_device_is_compatible(mvchip->chip.of_node, - "marvell,armada-370-xp-gpio")) + "marvell,armada-370-gpio")) return 0; if (IS_ERR(mvchip->clk)) @@ -852,7 +852,7 @@ static const struct of_device_id mvebu_gpio_of_match[] = { .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, }, { - .compatible = "marvell,armada-370-xp-gpio", + .compatible = "marvell,armada-370-gpio", .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, }, { @@ -1128,7 +1128,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) mvchip); } - /* Armada 370/XP has simple PWM support for GPIO lines */ + /* Some MVEBU SoCs have simple PWM support for GPIO lines */ if (IS_ENABLED(CONFIG_PWM)) return mvebu_pwm_probe(pdev, mvchip, id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1cf78f4dd339..1e8e1123ddf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", adev->clock.default_dispclk / 100); adev->clock.default_dispclk = 60000; + } else if (adev->clock.default_dispclk <= 60000) { + DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 62500; } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2d705e6a75a..ab6b0d0febab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -449,6 +449,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, /* Vega 10 */ {0x1002, 0x6860, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 8c9bc75a9c2d..8a0818b23ea4 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); @@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) { int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 9f847615ac74..48ca2457df8c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1229,21 +1229,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (!connector) return -ENOENT; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - encoder = drm_connector_get_encoder(connector); - if (encoder) - out_resp->encoder_id = encoder->base.id; - else - out_resp->encoder_id = 0; - - ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, - (uint32_t __user *)(unsigned long)(out_resp->props_ptr), - (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), - &out_resp->count_props); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - if (ret) - goto out_unref; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) if (connector->encoder_ids[i] != 0) encoders_count++; @@ -1256,7 +1241,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (put_user(connector->encoder_ids[i], encoder_ptr + copied)) { ret = -EFAULT; - goto out_unref; + goto out; } copied++; } @@ -1300,15 +1285,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (copy_to_user(mode_ptr + copied, &u_mode, sizeof(u_mode))) { ret = -EFAULT; + mutex_unlock(&dev->mode_config.mutex); + goto out; } copied++; } } out_resp->count_modes = mode_count; -out: mutex_unlock(&dev->mode_config.mutex); -out_unref: + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + encoder = drm_connector_get_encoder(connector); + if (encoder) + out_resp->encoder_id = encoder->base.id; + else + out_resp->encoder_id = 0; + + /* Only grab properties after probing, to make sure EDID and other + * properties reflect the latest status. 
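The drm_mode_getconnector() change in this hunk is an ordering fix: the probe and the mode-list copy-out happen first, under mode_config.mutex, and only afterwards is connection_mutex taken to read the encoder id and properties, so EDID-derived properties are consistent with the modes just returned to userspace. The resulting locking skeleton:

    mutex_lock(&dev->mode_config.mutex);
    /* probe the connector, copy the mode list out to userspace */
    mutex_unlock(&dev->mode_config.mutex);

    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
    /* now read encoder_id and the properties, fresh from the probe */
    drm_modeset_unlock(&dev->mode_config.connection_mutex);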
*/ + ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, + (uint32_t __user *)(unsigned long)(out_resp->props_ptr), + (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), + &out_resp->count_props); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + +out: drm_connector_put(connector); return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index c4a091e87426..e437fba1209d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -106,9 +106,10 @@ struct etnaviv_gem_submit { struct etnaviv_gpu *gpu; struct ww_acquire_ctx ticket; struct dma_fence *fence; + u32 flags; unsigned int nr_bos; struct etnaviv_gem_submit_bo bos[0]; - u32 flags; + /* No new members here, the previous one is variable-length! */ }; int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index de80ee1b71df..1013765274da 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; - bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); + bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write, explicit); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d689e511744e..4bd1467c17b1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data) struct file_stats *stats = data; struct i915_vma *vma; + lockdep_assert_held(&obj->base.dev->struct_mutex); + stats->count++; stats->total += obj->base.size; if (!obj->bind_count) @@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) struct drm_i915_gem_request *request; struct task_struct *task; + mutex_lock(&dev->struct_mutex); + memset(&stats, 0, sizeof(stats)); stats.file_priv = file->driver_priv; spin_lock(&file->table_lock); @@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data) * still alive (e.g. get_pid(current) => fork() => exit()). * Therefore, we need to protect this ->comm access using RCU. */ - mutex_lock(&dev->struct_mutex); request = list_first_entry_or_null(&file_priv->mm.request_list, struct drm_i915_gem_request, client_link); @@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) PIDTYPE_PID); print_file_stats(m, task ? task->comm : "<unknown>", stats); rcu_read_unlock(); + mutex_unlock(&dev->struct_mutex); } mutex_unlock(&dev->filelist_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 462031cbd77f..615f0a855222 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2285,8 +2285,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ unsigned int max_segment; + gfp_t noreclaim; int ret; - gfp_t gfp; /* Assert that the object is not currently in any GPU domain. 
As it * wasn't in the GTT, there shouldn't be any way it could have been in @@ -2315,22 +2315,31 @@ rebuild_st: * Fail silently without starting the shrinker */ mapping = obj->base.filp->f_mapping; - gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); - gfp |= __GFP_NORETRY | __GFP_NOWARN; + noreclaim = mapping_gfp_constraint(mapping, + ~(__GFP_IO | __GFP_RECLAIM)); + noreclaim |= __GFP_NORETRY | __GFP_NOWARN; + sg = st->sgl; st->nents = 0; for (i = 0; i < page_count; i++) { - page = shmem_read_mapping_page_gfp(mapping, i, gfp); - if (unlikely(IS_ERR(page))) { - i915_gem_shrink(dev_priv, - page_count, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_PURGEABLE); + const unsigned int shrink[] = { + I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, + 0, + }, *s = shrink; + gfp_t gfp = noreclaim; + + do { page = shmem_read_mapping_page_gfp(mapping, i, gfp); - } - if (unlikely(IS_ERR(page))) { - gfp_t reclaim; + if (likely(!IS_ERR(page))) + break; + + if (!*s) { + ret = PTR_ERR(page); + goto err_sg; + } + + i915_gem_shrink(dev_priv, 2 * page_count, *s++); + cond_resched(); /* We've tried hard to allocate the memory by reaping * our own buffer, now let the real VM do its job and @@ -2340,15 +2349,26 @@ rebuild_st: * defer the oom here by reporting the ENOMEM back * to userspace. */ - reclaim = mapping_gfp_mask(mapping); - reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ - - page = shmem_read_mapping_page_gfp(mapping, i, reclaim); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto err_sg; + if (!*s) { + /* reclaim and warn, but no oom */ + gfp = mapping_gfp_mask(mapping); + + /* Our bo are always dirty and so we require + * kswapd to reclaim our pages (direct reclaim + * does not effectively begin pageout of our + * buffers on its own). However, direct reclaim + * only waits for kswapd when under allocation + * congestion. So as a result __GFP_RECLAIM is + * unreliable and fails to actually reclaim our + * dirty pages -- unless you try over and over + * again with !__GFP_NORETRY. However, we still + * want to fail this allocation rather than + * trigger the out-of-memory killer and for + * this we want the future __GFP_MAYFAIL. 
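Condensed shape of the allocation loop these i915_gem_object_get_pages_gtt() hunks introduce: each page is first requested with a no-reclaim, no-warn mask; on failure the driver reaps its own buffers through i915_gem_shrink() in escalating passes, and only once the pass list is exhausted does it retry with the mapping's full, reclaim-capable mask before failing cleanly (error path simplified here):

    const unsigned int shrink[] = {
            I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
            0,
    }, *s = shrink;
    gfp_t gfp = noreclaim;

    do {
            page = shmem_read_mapping_page_gfp(mapping, i, gfp);
            if (likely(!IS_ERR(page)))
                    break;
            if (!*s)
                    return PTR_ERR(page);   /* ladder exhausted, fail */
            i915_gem_shrink(dev_priv, 2 * page_count, *s++);
            cond_resched();
            if (!*s)                        /* last rung: allow reclaim */
                    gfp = mapping_gfp_mask(mapping);
    } while (1);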
+ */ } - } + } while (1); + if (!i || sg->length >= max_segment || page_to_pfn(page) != last_pfn + 1) { @@ -4222,6 +4242,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) mapping = obj->base.filp->f_mapping; mapping_set_gfp_mask(mapping, mask); + GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); i915_gem_object_init(obj, &i915_gem_object_ops); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a3e59c8ef27b..9ad13eeed904 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -546,11 +546,12 @@ repeat: } static int -i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, +i915_gem_execbuffer_relocate_entry(struct i915_vma *vma, struct eb_vmas *eb, struct drm_i915_gem_relocation_entry *reloc, struct reloc_cache *cache) { + struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_gem_object *target_obj; struct drm_i915_gem_object *target_i915_obj; @@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return -EINVAL; } + /* + * If we write into the object, we need to force the synchronisation + * barrier, either with an asynchronous clflush or if we executed the + * patching using the GPU (though that should be serialised by the + * timeline). To be completely sure, and since we are required to + * do relocations we are already stalling, disable the user's opt + * of our synchronisation. + */ + vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC; + ret = relocate_entry(obj, reloc, cache, target_offset); if (ret) return ret; @@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, do { u64 offset = r->presumed_offset; - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache); + ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache); if (ret) goto out; @@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, reloc_cache_init(&cache, eb->i915); for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache); + ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache); if (ret) break; } diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 5ddbc9499775..a74d0ac737cb 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -623,7 +623,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine, * GPU processing the request, we never over-estimate the * position of the head. */ - req->head = req->ring->tail; + req->head = req->ring->emit; /* Check that we didn't interrupt ourselves with a new request */ GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 1642fff9cf13..ab5140ba108d 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client, GEM_BUG_ON(freespace < wqi_size); /* The GuC firmware wants the tail index in QWords, not bytes */ - tail = rq->tail; - assert_ring_tail_valid(rq->ring, rq->tail); - tail >>= 3; + tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3; GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. 
So we diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 1aba47024656..f066e2d785f5 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma) break; } + if (!ret) { + ret = i915_gem_active_retire(&vma->last_fence, + &vma->vm->i915->drm.struct_mutex); + } + __i915_vma_unpin(vma); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 96b0b01677e2..9106ea32b048 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -120,7 +120,8 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, static void skylake_pfit_enable(struct intel_crtc *crtc); static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_enable(struct intel_crtc *crtc); -static void intel_modeset_setup_hw_state(struct drm_device *dev); +static void intel_modeset_setup_hw_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); struct intel_limit { @@ -3449,7 +3450,7 @@ __intel_display_resume(struct drm_device *dev, struct drm_crtc *crtc; int i, ret; - intel_modeset_setup_hw_state(dev); + intel_modeset_setup_hw_state(dev, ctx); i915_redisable_vga(to_i915(dev)); if (!state) @@ -5825,7 +5826,8 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_update_watermarks(intel_crtc); } -static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) +static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) { struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5855,7 +5857,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) return; } - state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; + state->acquire_ctx = ctx; /* Everything's already locked, -EDEADLK can't happen. 
*/ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); @@ -15030,7 +15032,7 @@ int intel_modeset_init(struct drm_device *dev) intel_setup_outputs(dev_priv); drm_modeset_lock_all(dev); - intel_modeset_setup_hw_state(dev); + intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); drm_modeset_unlock_all(dev); for_each_intel_crtc(dev, crtc) { @@ -15067,13 +15069,13 @@ int intel_modeset_init(struct drm_device *dev) return 0; } -static void intel_enable_pipe_a(struct drm_device *dev) +static void intel_enable_pipe_a(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_connector *crt = NULL; struct intel_load_detect_pipe load_detect_temp; - struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; int ret; /* We can't just switch on the pipe A, we need to set things up with a @@ -15145,7 +15147,8 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv, (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); } -static void intel_sanitize_crtc(struct intel_crtc *crtc) +static void intel_sanitize_crtc(struct intel_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -15191,7 +15194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) plane = crtc->plane; crtc->base.primary->state->visible = true; crtc->plane = !plane; - intel_crtc_disable_noatomic(&crtc->base); + intel_crtc_disable_noatomic(&crtc->base, ctx); crtc->plane = plane; } @@ -15201,13 +15204,13 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * resume. Force-enable the pipe to fix this, the update_dpms * call below we restore the pipe to the right state, but leave * the required bits on. */ - intel_enable_pipe_a(dev); + intel_enable_pipe_a(dev, ctx); } /* Adjust the state of the output pipe according to whether we * have active connectors/encoders. 
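A pattern running through these display hunks: the drm_modeset_acquire_ctx is now passed explicitly down the sanitize path instead of each helper fishing it out of dev->mode_config.acquire_ctx, which is only valid while drm_modeset_lock_all() is held; __intel_display_resume() supplies its own context. The signatures carry the change:

    static void intel_modeset_setup_hw_state(struct drm_device *dev,
                                             struct drm_modeset_acquire_ctx *ctx);
    static void intel_sanitize_crtc(struct intel_crtc *crtc,
                                    struct drm_modeset_acquire_ctx *ctx);
    static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
                                            struct drm_modeset_acquire_ctx *ctx);

    /* init path, under drm_modeset_lock_all() */
    intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
    /* resume path, with its own ctx */
    intel_modeset_setup_hw_state(dev, ctx);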
*/ if (crtc->active && !intel_crtc_has_encoders(crtc)) - intel_crtc_disable_noatomic(&crtc->base); + intel_crtc_disable_noatomic(&crtc->base, ctx); if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { /* @@ -15505,7 +15508,8 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) * and sanitizes it to the current state */ static void -intel_modeset_setup_hw_state(struct drm_device *dev) +intel_modeset_setup_hw_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; @@ -15525,7 +15529,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev) for_each_pipe(dev_priv, pipe) { crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - intel_sanitize_crtc(crtc); + intel_sanitize_crtc(crtc, ctx); intel_dump_pipe_config(crtc, crtc->config, "[setup_hw_state]"); } diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index 6532e226db29..40ba3134545e 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -119,8 +119,6 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector, struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); struct intel_panel *panel = &connector->panel; - intel_dp_aux_enable_backlight(connector); - if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) panel->backlight.max = 0xFFFF; else diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index dac4e003c1f3..62f44d3e7c43 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -326,8 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq) rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; u32 *reg_state = ce->lrc_reg_state; - assert_ring_tail_valid(rq->ring, rq->tail); - reg_state[CTX_RING_TAIL+1] = rq->tail; + reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); /* True 32b PPGTT with dynamic page allocation: update PDP * registers and point the unallocated PDPs to scratch page. 
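Note how every hardware-visible tail update in this series now funnels through one helper, intel_ring_set_tail() (defined in intel_ringbuffer.h further down), so the bounds assertion cannot be bypassed and its return value keeps the call sites one-liners. The three submission paths side by side:

    /* execlists */
    reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);

    /* GuC, which wants the tail in QWords */
    tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;

    /* legacy rings */
    I915_WRITE_TAIL(request->engine,
                    intel_ring_set_tail(request->ring, request->tail));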
@@ -2036,8 +2035,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv) ce->state->obj->mm.dirty = true; i915_gem_object_unpin_map(ce->state->obj); - ce->ring->head = ce->ring->tail = 0; - intel_ring_update_space(ce->ring); + intel_ring_reset(ce->ring, 0); } } } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 66a2b8b83972..513a0f4b469b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -49,7 +49,7 @@ static int __intel_ring_space(int head, int tail, int size) void intel_ring_update_space(struct intel_ring *ring) { - ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); + ring->space = __intel_ring_space(ring->head, ring->emit, ring->size); } static int @@ -774,8 +774,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request) i915_gem_request_submit(request); - assert_ring_tail_valid(request->ring, request->tail); - I915_WRITE_TAIL(request->engine, request->tail); + I915_WRITE_TAIL(request->engine, + intel_ring_set_tail(request->ring, request->tail)); } static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) @@ -1316,11 +1316,23 @@ err: return PTR_ERR(addr); } +void intel_ring_reset(struct intel_ring *ring, u32 tail) +{ + GEM_BUG_ON(!list_empty(&ring->request_list)); + ring->tail = tail; + ring->head = tail; + ring->emit = tail; + intel_ring_update_space(ring); +} + void intel_ring_unpin(struct intel_ring *ring) { GEM_BUG_ON(!ring->vma); GEM_BUG_ON(!ring->vaddr); + /* Discard any unused bytes beyond that submitted to hw. */ + intel_ring_reset(ring, ring->tail); + if (i915_vma_is_map_and_fenceable(ring->vma)) i915_vma_unpin_iomap(ring->vma); else @@ -1562,8 +1574,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) struct intel_engine_cs *engine; enum intel_engine_id id; + /* Restart from the beginning of the rings for convenience */ for_each_engine(engine, dev_priv, id) - engine->buffer->head = engine->buffer->tail; + intel_ring_reset(engine->buffer, 0); } static int ring_request_alloc(struct drm_i915_gem_request *request) @@ -1616,7 +1629,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) unsigned space; /* Would completion of this request free enough space? 
*/ - space = __intel_ring_space(target->postfix, ring->tail, + space = __intel_ring_space(target->postfix, ring->emit, ring->size); if (space >= bytes) break; @@ -1641,8 +1654,8 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) { struct intel_ring *ring = req->ring; - int remain_actual = ring->size - ring->tail; - int remain_usable = ring->effective_size - ring->tail; + int remain_actual = ring->size - ring->emit; + int remain_usable = ring->effective_size - ring->emit; int bytes = num_dwords * sizeof(u32); int total_bytes, wait_bytes; bool need_wrap = false; @@ -1678,17 +1691,17 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) if (unlikely(need_wrap)) { GEM_BUG_ON(remain_actual > ring->space); - GEM_BUG_ON(ring->tail + remain_actual > ring->size); + GEM_BUG_ON(ring->emit + remain_actual > ring->size); /* Fill the tail with MI_NOOP */ - memset(ring->vaddr + ring->tail, 0, remain_actual); - ring->tail = 0; + memset(ring->vaddr + ring->emit, 0, remain_actual); + ring->emit = 0; ring->space -= remain_actual; } - GEM_BUG_ON(ring->tail > ring->size - bytes); - cs = ring->vaddr + ring->tail; - ring->tail += bytes; + GEM_BUG_ON(ring->emit > ring->size - bytes); + cs = ring->vaddr + ring->emit; + ring->emit += bytes; ring->space -= bytes; GEM_BUG_ON(ring->space < 0); @@ -1699,7 +1712,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) int intel_ring_cacheline_align(struct drm_i915_gem_request *req) { int num_dwords = - (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); + (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); u32 *cs; if (num_dwords == 0) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index a82a0807f64d..f7144fe09613 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -145,6 +145,7 @@ struct intel_ring { u32 head; u32 tail; + u32 emit; int space; int size; @@ -488,6 +489,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, int size); int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); +void intel_ring_reset(struct intel_ring *ring, u32 tail); +void intel_ring_update_space(struct intel_ring *ring); void intel_ring_unpin(struct intel_ring *ring); void intel_ring_free(struct intel_ring *ring); @@ -511,7 +514,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs) * reserved for the command packet (i.e. the value passed to * intel_ring_begin()). */ - GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs); + GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs); } static inline u32 @@ -540,7 +543,19 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) GEM_BUG_ON(tail >= ring->size); } -void intel_ring_update_space(struct intel_ring *ring); +static inline unsigned int +intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) +{ + /* Whilst writes to the tail are strictly ordered, there is no + * serialisation between readers and the writers. The tail may be + * read by i915_gem_request_retire() just as it is being updated + * by execlists, as although the breadcrumb is complete, the context * switch hasn't been seen.
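The new ring->emit field in intel_ringbuffer.h splits two cursors that previously shared ring->tail: emit is the CPU-side write position advanced by intel_ring_begin(), while tail now only records what was last submitted to the hardware through intel_ring_set_tail(). Free-space accounting has to follow the writer, hence:

    void intel_ring_update_space(struct intel_ring *ring)
    {
            /* measured against the CPU write cursor, not the last
             * hardware-submitted tail */
            ring->space = __intel_ring_space(ring->head, ring->emit,
                                             ring->size);
    }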
+ */ + assert_ring_tail_valid(ring, tail); + ring->tail = tail; + return tail; +} void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 432480ff9d22..3178ba0c537c 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev) rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_device == 0x280a) return; + /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume + * - it hangs on resume inside the dynclk 1 table. + */ + if (rdev->family == CHIP_RS400 && + rdev->pdev->subsystem_vendor == 0x1179 && + rdev->pdev->subsystem_device == 0xff31) + return; /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 6ecf42783d4b..0a6444d72000 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { * https://bugzilla.kernel.org/show_bug.cgi?id=51381 */ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, + /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU + * https://bugs.freedesktop.org/show_bug.cgi?id=101491 + */ + { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, /* macbook pro 8.2 */ { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 13db8a2851ed..1f013d45c9e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_free(man, entry); + drm_ht_remove(&man->resources); kfree(man); } diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 95ed17183e73..54a47b40546f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -734,9 +734,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, * the first read operation, otherwise the first read cost * one extra clock cycle. */ - temp = readb(i2c_imx->base + IMX_I2C_I2CR); + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_MTX; - writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); @@ -857,9 +857,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo * the first read operation, otherwise the first read cost * one extra clock cycle.
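The i2c-imx hunks above are not cosmetic: raw readb()/writeb() against i2c_imx->base ignores the register stride that differs between i.MX variants, which the driver's own accessors apply. Roughly, assuming the driver's usual hwdata->regshift scheme (names per the existing driver, reconstructed here for illustration):

    static inline void imx_i2c_write_reg(unsigned int val,
                                         struct imx_i2c_struct *i2c_imx,
                                         unsigned int reg)
    {
            writeb(val, i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
    }

    static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
                                                 unsigned int reg)
    {
            return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
    }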
*/ - temp = readb(i2c_imx->base + IMX_I2C_I2CR); + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); temp |= I2CR_MTX; - writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); } } else if (i == (msgs->len - 2)) { dev_dbg(&i2c_imx->adapter.dev, diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index e37d37273182..f600f3a7a3c6 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -248,7 +248,8 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev) if (!btns_desc) { dev_err(dev, "ACPI Button Descriptors not found\n"); - return ERR_PTR(-ENODEV); + button_info = ERR_PTR(-ENODEV); + goto out; } /* The first package describes the collection */ @@ -264,24 +265,31 @@ static struct soc_button_info *soc_button_get_button_info(struct device *dev) } if (collection_uid == -1) { dev_err(dev, "Invalid Button Collection Descriptor\n"); - return ERR_PTR(-ENODEV); + button_info = ERR_PTR(-ENODEV); + goto out; } /* There are package.count - 1 buttons + 1 terminating empty entry */ button_info = devm_kcalloc(dev, btns_desc->package.count, sizeof(*button_info), GFP_KERNEL); - if (!button_info) - return ERR_PTR(-ENOMEM); + if (!button_info) { + button_info = ERR_PTR(-ENOMEM); + goto out; + } /* Parse the button descriptors */ for (i = 1, btn = 0; i < btns_desc->package.count; i++, btn++) { if (soc_button_parse_btn_desc(dev, &btns_desc->package.elements[i], collection_uid, - &button_info[btn])) - return ERR_PTR(-ENODEV); + &button_info[btn])) { + button_info = ERR_PTR(-ENODEV); + goto out; + } } +out: + kfree(buf.pointer); return button_info; } diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index dea63e2db3e6..f5206e2c767e 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -31,9 +31,6 @@ #define F54_GET_REPORT 1 #define F54_FORCE_CAL 2 -/* Fixed sizes of reports */ -#define F54_QUERY_LEN 27 - /* F54 capabilities */ #define F54_CAP_BASELINE (1 << 2) #define F54_CAP_IMAGE8 (1 << 3) @@ -95,7 +92,6 @@ struct rmi_f54_reports { struct f54_data { struct rmi_function *fn; - u8 qry[F54_QUERY_LEN]; u8 num_rx_electrodes; u8 num_tx_electrodes; u8 capabilities; @@ -632,22 +628,23 @@ static int rmi_f54_detect(struct rmi_function *fn) { int error; struct f54_data *f54; + u8 buf[6]; f54 = dev_get_drvdata(&fn->dev); error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, - &f54->qry, sizeof(f54->qry)); + buf, sizeof(buf)); if (error) { dev_err(&fn->dev, "%s: Failed to query F54 properties\n", __func__); return error; } - f54->num_rx_electrodes = f54->qry[0]; - f54->num_tx_electrodes = f54->qry[1]; - f54->capabilities = f54->qry[2]; - f54->clock_rate = f54->qry[3] | (f54->qry[4] << 8); - f54->family = f54->qry[5]; + f54->num_rx_electrodes = buf[0]; + f54->num_tx_electrodes = buf[1]; + f54->capabilities = buf[2]; + f54->clock_rate = buf[3] | (buf[4] << 8); + f54->family = buf[5]; rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F54 num_rx_electrodes: %d\n", f54->num_rx_electrodes); diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 09720d950686..f932a83b4990 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -723,6 +723,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), }, }, + { + /* Fujitsu UH554 laptop */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + 
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), + }, + }, { } }; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index eb7fbe159963..929f8558bf1c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -140,7 +140,7 @@ static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe) } #ifdef CONFIG_CLKSRC_MIPS_GIC -u64 gic_read_count(void) +u64 notrace gic_read_count(void) { unsigned int hi, hi2, lo; @@ -167,7 +167,7 @@ unsigned int gic_get_count_width(void) return bits; } -void gic_write_compare(u64 cnt) +void notrace gic_write_compare(u64 cnt) { if (mips_cm_is64) { gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); @@ -179,7 +179,7 @@ void gic_write_compare(u64 cnt) } } -void gic_write_cpu_compare(u64 cnt, int cpu) +void notrace gic_write_cpu_compare(u64 cnt, int cpu) { unsigned long flags; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 7910bfe50da4..93b181088168 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1105,10 +1105,13 @@ static void schedule_autocommit(struct dm_integrity_c *ic) static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) { struct bio *bio; - spin_lock_irq(&ic->endio_wait.lock); + unsigned long flags; + + spin_lock_irqsave(&ic->endio_wait.lock, flags); bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); bio_list_add(&ic->flush_bio_list, bio); - spin_unlock_irq(&ic->endio_wait.lock); + spin_unlock_irqrestore(&ic->endio_wait.lock, flags); + queue_work(ic->commit_wq, &ic->commit_work); } @@ -3040,6 +3043,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->error = "The device is too small"; goto bad; } + if (ti->len > ic->provided_data_sectors) { + r = -EINVAL; + ti->error = "Not enough provided sectors for requested mapping size"; + goto bad; + } if (!buffer_sectors) buffer_sectors = 1; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 3702e502466d..8d5ca30f6551 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -317,8 +317,8 @@ static void do_region(int op, int op_flags, unsigned region, else if (op == REQ_OP_WRITE_SAME) special_cmd_max_sectors = q->limits.max_write_same_sectors; if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || - op == REQ_OP_WRITE_SAME) && - special_cmd_max_sectors == 0) { + op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) { + atomic_inc(&io->count); dec_count(io, region, -EOPNOTSUPP); return; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 7d893228c40f..b4b75dad816a 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1927,7 +1927,7 @@ struct dm_raid_superblock { /******************************************************************** * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! * - * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist + * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist */ __le32 flags; /* Flags defining array states for reshaping */ @@ -2092,6 +2092,11 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev) sb->layout = cpu_to_le32(mddev->layout); sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); + /******************************************************************** + * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! 
+ * + * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist + */ sb->new_level = cpu_to_le32(mddev->new_level); sb->new_layout = cpu_to_le32(mddev->new_layout); sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); @@ -2438,8 +2443,14 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev) mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; if (!test_and_clear_bit(FirstUse, &rdev->flags)) { - /* Retrieve device size stored in superblock to be prepared for shrink */ - rdev->sectors = le64_to_cpu(sb->sectors); + /* + * Retrieve rdev size stored in superblock to be prepared for shrink. + * Check extended superblock members are present otherwise the size + * will not be set! + */ + if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) + rdev->sectors = le64_to_cpu(sb->sectors); + rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); if (rdev->recovery_offset == MaxSector) set_bit(In_sync, &rdev->flags); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index e61c45047c25..4da8858856fb 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -145,6 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list) struct dm_raid1_bio_record { struct mirror *m; + /* if details->bi_bdev == NULL, details were not saved */ struct dm_bio_details details; region_t write_region; }; @@ -1198,6 +1199,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) struct dm_raid1_bio_record *bio_record = dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); + bio_record->details.bi_bdev = NULL; + if (rw == WRITE) { /* Save region for mirror_end_io() handler */ bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); @@ -1256,12 +1259,22 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) } if (error == -EOPNOTSUPP) - return error; + goto out; if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) - return error; + goto out; if (unlikely(error)) { + if (!bio_record->details.bi_bdev) { + /* + * There wasn't enough memory to record necessary + * information for a retry or there was no other + * mirror in-sync. + */ + DMERR_LIMIT("Mirror read failed."); + return -EIO; + } + m = bio_record->m; DMERR("Mirror read failed from %s. Trying alternative device.", @@ -1277,6 +1290,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) bd = &bio_record->details; dm_bio_restore(bd, bio); + bio_record->details.bi_bdev = NULL; bio->bi_error = 0; queue_bio(ms, bio, rw); @@ -1285,6 +1299,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) DMERR("All replicated volumes dead, failing I/O"); } +out: + bio_record->details.bi_bdev = NULL; + return error; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 17ad50daed08..28808e5ec0fd 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1094,6 +1094,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) return; } + /* + * Increment the unmapped blocks. This prevents a race between the + * passdown io and reallocation of freed blocks. 
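The dm-thin hunk that follows is purely an ordering fix: dm_pool_inc_data_range() must take its extra reference on the data blocks before the passdown discard is issued, otherwise a passdown that completes early can race with reallocation of the just-freed blocks. After the move the function opens with:

    /* pin the range first ... */
    r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
    if (r) {
            metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
            bio_io_error(m->bio);
            cell_defer_no_holder(tc, m->cell);
            mempool_free(m, pool->mapping_pool);
            return;
    }
    /* ... and only then allocate and chain discard_parent */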
+ */ + r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); + if (r) { + metadata_operation_failed(pool, "dm_pool_inc_data_range", r); + bio_io_error(m->bio); + cell_defer_no_holder(tc, m->cell); + mempool_free(m, pool->mapping_pool); + return; + } + discard_parent = bio_alloc(GFP_NOIO, 1); if (!discard_parent) { DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.", @@ -1114,19 +1127,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) end_discard(&op, r); } } - - /* - * Increment the unmapped blocks. This prevents a race between the - * passdown io and reallocation of freed blocks. - */ - r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); - if (r) { - metadata_operation_failed(pool, "dm_pool_inc_data_range", r); - bio_io_error(m->bio); - cell_defer_no_holder(tc, m->cell); - mempool_free(m, pool->mapping_pool); - return; - } } static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m) diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index 75488e65cd96..8d46e3ad9529 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -245,8 +245,7 @@ static int arizona_poll_reg(struct arizona *arizona, int ret; ret = regmap_read_poll_timeout(arizona->regmap, - ARIZONA_INTERRUPT_RAW_STATUS_5, val, - ((val & mask) == target), + reg, val, ((val & mask) == target), ARIZONA_REG_POLL_DELAY_US, timeout_ms * 1000); if (ret) diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index d87f4da29f11..fcfccbb3d9a2 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -809,6 +809,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) struct net_device *dev = dev_id; struct arcnet_local *lp; int recbuf, status, diagstatus, didsomething, boguscount; + unsigned long flags; int retval = IRQ_NONE; arc_printk(D_DURING, dev, "\n"); @@ -818,7 +819,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) lp = netdev_priv(dev); BUG_ON(!lp); - spin_lock(&lp->lock); + spin_lock_irqsave(&lp->lock, flags); /* RESET flag was enabled - if device is not running, we must * clear it right away (but nothing else). 
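The dm-integrity and arcnet hunks above make the same conversion: plain spin_lock()/spin_unlock() (or the _irq variants) become spin_lock_irqsave()/spin_unlock_irqrestore(), the standard form for code that can be reached both with and without interrupts already disabled, since the unlock then restores the caller's interrupt state instead of unconditionally re-enabling it. A minimal sketch of the pattern, using a hypothetical lock and list rather than code from either driver:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
	static LIST_HEAD(demo_list);		/* the data it protects */

	/* Callable from any context: the previous interrupt state is
	 * saved in 'flags' and put back on unlock, rather than being
	 * re-enabled unconditionally as spin_unlock_irq() would do.
	 */
	static void demo_add(struct list_head *item)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		list_add_tail(item, &demo_list);
		spin_unlock_irqrestore(&demo_lock, flags);
	}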
@@ -827,7 +828,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) if (lp->hw.status(dev) & RESETflag) lp->hw.command(dev, CFLAGScmd | RESETclear); lp->hw.intmask(dev, 0); - spin_unlock(&lp->lock); + spin_unlock_irqrestore(&lp->lock, flags); return retval; } @@ -1052,7 +1053,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) udelay(1); lp->hw.intmask(dev, lp->intmask); - spin_unlock(&lp->lock); + spin_unlock_irqrestore(&lp->lock, flags); return retval; } EXPORT_SYMBOL(arcnet_interrupt); diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c index a80f4eb9262d..b780be6f41ff 100644 --- a/drivers/net/arcnet/capmode.c +++ b/drivers/net/arcnet/capmode.c @@ -212,7 +212,7 @@ static int ack_tx(struct net_device *dev, int acked) ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ ackpkt->soft.cap.mes.ack = acked; - arc_printk(D_PROTO, dev, "Ackknowledge for cap packet %x.\n", + arc_printk(D_PROTO, dev, "Acknowledge for cap packet %x.\n", *((int *)&ackpkt->soft.cap.cookie[0])); ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 24deb88a37f0..2d956cb59d06 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -156,6 +156,7 @@ static int com20020pci_probe(struct pci_dev *pdev, for (i = 0; i < ci->devcount; i++) { struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i]; struct com20020_dev *card; + int dev_id_mask = 0xf; dev = alloc_arcdev(device); if (!dev) { @@ -187,6 +188,7 @@ static int com20020pci_probe(struct pci_dev *pdev, arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND); arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT); + SET_NETDEV_DEV(dev, &pdev->dev); dev->base_addr = ioaddr; dev->dev_addr[0] = node; dev->sysfs_groups[0] = &com20020_state_group; @@ -206,8 +208,8 @@ static int com20020pci_probe(struct pci_dev *pdev, /* Get the dev_id from the PLX rotary coder */ if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) - dev->dev_id = 0xc; - dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4; + dev_id_mask = 0x3; + dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask; snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c index 13d9ad4b3f5c..78043a9c5981 100644 --- a/drivers/net/arcnet/com20020.c +++ b/drivers/net/arcnet/com20020.c @@ -246,8 +246,6 @@ int com20020_found(struct net_device *dev, int shared) return -ENODEV; } - dev->base_addr = ioaddr; - arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n", lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 14c236e5bdb1..c12b4d3e946e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12729,7 +12729,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp) } else { /* If no mc addresses are required, flush the configuration */ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); - if (rc) + if (rc < 0) BNX2X_ERR("Failed to clear multicast configuration %d\n", rc); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 11e8a866a312..a19f68f5862d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1311,10 +1311,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, cp_cons = 
NEXT_CMP(cp_cons); } - if (unlikely(agg_bufs > MAX_SKB_FRAGS)) { + if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); - netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", - agg_bufs, (int)MAX_SKB_FRAGS); + if (agg_bufs > MAX_SKB_FRAGS) + netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", + agg_bufs, (int)MAX_SKB_FRAGS); return NULL; } @@ -1573,6 +1574,45 @@ next_rx_no_prod: return rc; } +/* In netpoll mode, if we are using a combined completion ring, we need to + * discard the rx packets and recycle the buffers. + */ +static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi, + u32 *raw_cons, u8 *event) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 tmp_raw_cons = *raw_cons; + struct rx_cmp_ext *rxcmp1; + struct rx_cmp *rxcmp; + u16 cp_cons; + u8 cmp_type; + + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp = (struct rx_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + return -EBUSY; + + cmp_type = RX_CMP_TYPE(rxcmp); + if (cmp_type == CMP_TYPE_RX_L2_CMP) { + rxcmp1->rx_cmp_cfa_code_errors_v2 |= + cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); + } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { + struct rx_tpa_end_cmp_ext *tpa_end1; + + tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; + tpa_end1->rx_tpa_end_cmp_errors_v2 |= + cpu_to_le32(RX_TPA_END_CMP_ERRORS); + } + return bnxt_rx_pkt(bp, bnapi, raw_cons, event); +} + #define BNXT_GET_EVENT_PORT(data) \ ((data) & \ ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) @@ -1755,7 +1795,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) if (unlikely(tx_pkts > bp->tx_wake_thresh)) rx_pkts = budget; } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); + if (likely(budget)) + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); + else + rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, + &event); if (likely(rc >= 0)) rx_pkts += rc; else if (rc == -EBUSY) /* partial completion */ @@ -6730,12 +6774,11 @@ static void bnxt_poll_controller(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); int i; - for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_irq *irq = &bp->irq_tbl[i]; + /* Only process tx rings/combined rings in netpoll mode. 
*/ + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; - disable_irq(irq->vector); - irq->handler(irq->vector, bp->bnapi[i]); - enable_irq(irq->vector); + napi_schedule(&txr->bnapi->napi); } } #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 5984423499e6..f872a7db2ca8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -374,12 +374,16 @@ struct rx_tpa_end_cmp_ext { __le32 rx_tpa_end_cmp_errors_v2; #define RX_TPA_END_CMP_V2 (0x1 << 0) - #define RX_TPA_END_CMP_ERRORS (0x7fff << 1) + #define RX_TPA_END_CMP_ERRORS (0x3 << 1) #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 u32 rx_tpa_end_cmp_start_opaque; }; +#define TPA_END_ERRORS(rx_tpa_end_ext) \ + ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ + cpu_to_le32(RX_TPA_END_CMP_ERRORS)) + #define DB_IDX_MASK 0xffffff #define DB_IDX_VALID (0x1 << 26) #define DB_IRQ_DIS (0x1 << 27) diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig index dc0850b3b517..8870a9a798ca 100644 --- a/drivers/net/ethernet/freescale/fman/Kconfig +++ b/drivers/net/ethernet/freescale/fman/Kconfig @@ -2,6 +2,7 @@ config FSL_FMAN tristate "FMan support" depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST select GENERIC_ALLOCATOR + depends on HAS_DMA select PHYLIB default n help diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 700cc8c6aa5b..192cb93e7669 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3301,6 +3301,9 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); u16 vid = vlan_dev_vlan_id(vlan_dev); + if (netif_is_bridge_port(vlan_dev)) + return 0; + if (mlxsw_sp_port_dev_check(real_dev)) return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev, event, vid); diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index bd0e3f157e9e..600e30e8f0be 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -1409,8 +1409,8 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port, *index = entry->index; resolved = false; } else if (removing) { - ofdpa_neigh_del(found); *index = found->index; + ofdpa_neigh_del(found); } else if (updating) { ofdpa_neigh_update(found, NULL, false); resolved = !is_zero_ether_addr(found->eth_dst); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index ad9c4ded2b90..761c518b2f92 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4172,7 +4172,7 @@ found: * recipients */ if (is_mc_recip) { - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); unsigned int depth, i; memset(inbuf, 0, sizeof(inbuf)); @@ -4320,7 +4320,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); } else { efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, - MC_CMD_FILTER_OP_IN_LEN, + MC_CMD_FILTER_OP_EXT_IN_LEN, NULL, 0, rc); } } @@ -4453,7 +4453,7 @@ static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, struct efx_filter_spec *spec) { struct efx_ef10_filter_table *table = efx->filter_state; - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + 
MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); struct efx_filter_spec *saved_spec; unsigned int hash, i, depth = 1; bool replacing = false; @@ -4940,7 +4940,7 @@ not_restored: static void efx_ef10_filter_table_remove(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); struct efx_filter_spec *spec; unsigned int filter_idx; int rc; @@ -5105,6 +5105,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, /* Insert/renew filters */ for (i = 0; i < addr_count; i++) { + EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); rc = efx_ef10_filter_insert(efx, &spec, true); @@ -5122,11 +5123,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, } return rc; } else { - /* mark as not inserted, and carry on */ - rc = EFX_EF10_FILTER_ID_INVALID; + /* keep invalid ID, and carry on */ } + } else { + ids[i] = efx_ef10_filter_get_unsafe_id(rc); } - ids[i] = efx_ef10_filter_get_unsafe_id(rc); } if (multicast && rollback) { diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index 1562ab4151e1..56ba411421f0 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c @@ -90,7 +90,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) if (of_device_is_compatible(dev->of_node, "ti,dm816-emac")) return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr); - if (of_machine_is_compatible("ti,am4372")) + if (of_machine_is_compatible("ti,am43")) return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); if (of_machine_is_compatible("ti,dra7")) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 991372150463..63c98bbbc596 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -752,7 +752,7 @@ static int netvsc_set_channels(struct net_device *net, channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; - if (count > net->num_tx_queues || count > net->num_rx_queues) + if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX) return -EINVAL; if (!nvdev || nvdev->destroy) @@ -1179,7 +1179,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, rndis_dev = ndev->extension; if (indir) { for (i = 0; i < ITAB_NUM; i++) - if (indir[i] >= dev->num_rx_queues) + if (indir[i] >= VRSS_CHANNEL_MAX) return -EINVAL; for (i = 0; i < ITAB_NUM; i++) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 9ffff0362a11..0f581ee74fe4 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -39,16 +39,20 @@ #define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS) #define MACVLAN_BC_QUEUE_LEN 1000 +#define MACVLAN_F_PASSTHRU 1 +#define MACVLAN_F_ADDRCHANGE 2 + struct macvlan_port { struct net_device *dev; struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; struct list_head vlans; struct sk_buff_head bc_queue; struct work_struct bc_work; - bool passthru; + u32 flags; int count; struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); + unsigned char perm_addr[ETH_ALEN]; }; struct macvlan_source_entry { @@ -66,6 +70,31 @@ struct macvlan_skb_cb { static void macvlan_port_destroy(struct net_device *dev); +static inline bool macvlan_passthru(const struct macvlan_port *port) +{ + return 
port->flags & MACVLAN_F_PASSTHRU; +} + +static inline void macvlan_set_passthru(struct macvlan_port *port) +{ + port->flags |= MACVLAN_F_PASSTHRU; +} + +static inline bool macvlan_addr_change(const struct macvlan_port *port) +{ + return port->flags & MACVLAN_F_ADDRCHANGE; +} + +static inline void macvlan_set_addr_change(struct macvlan_port *port) +{ + port->flags |= MACVLAN_F_ADDRCHANGE; +} + +static inline void macvlan_clear_addr_change(struct macvlan_port *port) +{ + port->flags &= ~MACVLAN_F_ADDRCHANGE; +} + /* Hash Ethernet address */ static u32 macvlan_eth_hash(const unsigned char *addr) { @@ -181,11 +210,12 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan, static bool macvlan_addr_busy(const struct macvlan_port *port, const unsigned char *addr) { - /* Test to see if the specified multicast address is + /* Test to see if the specified address is * currently in use by the underlying device or * another macvlan. */ - if (ether_addr_equal_64bits(port->dev->dev_addr, addr)) + if (!macvlan_passthru(port) && !macvlan_addr_change(port) && + ether_addr_equal_64bits(port->dev->dev_addr, addr)) return true; if (macvlan_hash_lookup(port, addr)) @@ -445,7 +475,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) } macvlan_forward_source(skb, port, eth->h_source); - if (port->passthru) + if (macvlan_passthru(port)) vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list); else @@ -574,7 +604,7 @@ static int macvlan_open(struct net_device *dev) struct net_device *lowerdev = vlan->lowerdev; int err; - if (vlan->port->passthru) { + if (macvlan_passthru(vlan->port)) { if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { err = dev_set_promiscuity(lowerdev, 1); if (err < 0) @@ -649,7 +679,7 @@ static int macvlan_stop(struct net_device *dev) dev_uc_unsync(lowerdev, dev); dev_mc_unsync(lowerdev, dev); - if (vlan->port->passthru) { + if (macvlan_passthru(vlan->port)) { if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) dev_set_promiscuity(lowerdev, -1); goto hash_del; @@ -672,6 +702,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; + struct macvlan_port *port = vlan->port; int err; if (!(dev->flags & IFF_UP)) { @@ -682,7 +713,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) if (macvlan_addr_busy(vlan->port, addr)) return -EBUSY; - if (!vlan->port->passthru) { + if (!macvlan_passthru(port)) { err = dev_uc_add(lowerdev, addr); if (err) return err; @@ -692,6 +723,15 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr) macvlan_hash_change_addr(vlan, addr); } + if (macvlan_passthru(port) && !macvlan_addr_change(port)) { + /* Since addr_change isn't set, we are here due to lower + * device change. Save the lower-dev address so we can + * restore it later. 
+ */ + ether_addr_copy(vlan->port->perm_addr, + lowerdev->dev_addr); + } + macvlan_clear_addr_change(port); return 0; } @@ -703,8 +743,14 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - if (vlan->mode == MACVLAN_MODE_PASSTHRU) + /* If the addresses are the same, this is a no-op */ + if (ether_addr_equal(dev->dev_addr, addr->sa_data)) + return 0; + + if (vlan->mode == MACVLAN_MODE_PASSTHRU) { + macvlan_set_addr_change(vlan->port); return dev_set_mac_address(vlan->lowerdev, addr); + } return macvlan_sync_address(dev, addr->sa_data); } @@ -926,7 +972,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /* Support unicast filter only on passthru devices. * Multicast filter should be allowed on all devices. */ - if (!vlan->port->passthru && is_unicast_ether_addr(addr)) + if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) return -EOPNOTSUPP; if (flags & NLM_F_REPLACE) @@ -950,7 +996,7 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], /* Support unicast filter only on passthru devices. * Multicast filter should be allowed on all devices. */ - if (!vlan->port->passthru && is_unicast_ether_addr(addr)) + if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) return -EOPNOTSUPP; if (is_unicast_ether_addr(addr)) @@ -1118,8 +1164,8 @@ static int macvlan_port_create(struct net_device *dev) if (port == NULL) return -ENOMEM; - port->passthru = false; port->dev = dev; + ether_addr_copy(port->perm_addr, dev->dev_addr); INIT_LIST_HEAD(&port->vlans); for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_hash[i]); @@ -1159,6 +1205,18 @@ static void macvlan_port_destroy(struct net_device *dev) kfree_skb(skb); } + /* If the lower device address has been changed by passthru + * macvlan, put it back. + */ + if (macvlan_passthru(port) && + !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) { + struct sockaddr sa; + + sa.sa_family = port->dev->type; + memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len); + dev_set_mac_address(port->dev, &sa); + } + kfree(port); } @@ -1325,7 +1383,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, port = macvlan_port_get_rtnl(lowerdev); /* Only 1 macvlan device can be created in passthru mode */ - if (port->passthru) { + if (macvlan_passthru(port)) { /* The macvlan port must be not created this time, * still goto destroy_macvlan_port for readability. 
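In the macvlan diff running through this stretch, the single bool passthru in struct macvlan_port becomes a u32 flags word with MACVLAN_F_PASSTHRU and MACVLAN_F_ADDRCHANGE bits plus small inline accessors, so a second piece of per-port state fits without widening the structure and every test goes through one helper. The shape of that idiom, reduced to a self-contained sketch (demo_* names are stand-ins, not the driver's):

	#include <linux/types.h>

	#define DEMO_F_PASSTHRU		1
	#define DEMO_F_ADDRCHANGE	2

	struct demo_port {		/* stand-in for struct macvlan_port */
		u32 flags;
	};

	static inline bool demo_passthru(const struct demo_port *port)
	{
		return port->flags & DEMO_F_PASSTHRU;
	}

	static inline void demo_set_passthru(struct demo_port *port)
	{
		port->flags |= DEMO_F_PASSTHRU;
	}

	static inline void demo_clear_addr_change(struct demo_port *port)
	{
		port->flags &= ~DEMO_F_ADDRCHANGE;
	}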
*/ @@ -1351,7 +1409,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, err = -EINVAL; goto destroy_macvlan_port; } - port->passthru = true; + macvlan_set_passthru(port); eth_hw_addr_inherit(dev, lowerdev); } @@ -1435,7 +1493,7 @@ static int macvlan_changelink(struct net_device *dev, if (data && data[IFLA_MACVLAN_FLAGS]) { __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; - if (vlan->port->passthru && promisc) { + if (macvlan_passthru(vlan->port) && promisc) { int err; if (flags & MACVLAN_FLAG_NOPROMISC) @@ -1598,7 +1656,7 @@ static int macvlan_device_event(struct notifier_block *unused, } break; case NETDEV_CHANGEADDR: - if (!port->passthru) + if (!macvlan_passthru(port)) return NOTIFY_DONE; vlan = list_first_entry_or_null(&port->vlans, diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index ed0d10f54f26..c3065236ffcc 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -908,7 +908,7 @@ static void decode_txts(struct dp83640_private *dp83640, if (overflow) { pr_debug("tx timestamp queue overflow, count %d\n", overflow); while (skb) { - skb_complete_tx_timestamp(skb, NULL); + kfree_skb(skb); skb = skb_dequeue(&dp83640->tx_queue); } return; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 9365b0792309..fdb43dd9b5cd 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -620,6 +620,8 @@ static int ksz9031_read_status(struct phy_device *phydev) if ((regval & 0xFF) == 0xFF) { phy_init_hw(phydev); phydev->link = 0; + if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) + phydev->drv->config_intr(phydev); } return 0; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 793ce900dffa..f32261ecd215 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1725,6 +1725,18 @@ static const struct driver_info lenovo_info = { .tx_fixup = ax88179_tx_fixup, }; +static const struct driver_info belkin_info = { + .description = "Belkin USB Ethernet Adapter", + .bind = ax88179_bind, + .unbind = ax88179_unbind, + .status = ax88179_status, + .link_reset = ax88179_link_reset, + .reset = ax88179_reset, + .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .rx_fixup = ax88179_rx_fixup, + .tx_fixup = ax88179_tx_fixup, +}; + static const struct usb_device_id products[] = { { /* ASIX AX88179 10/100/1000 */ @@ -1754,6 +1766,10 @@ static const struct usb_device_id products[] = { /* Lenovo OneLinkDock Gigabit LAN */ USB_DEVICE(0x17ef, 0x304b), .driver_info = (unsigned long)&lenovo_info, +}, { + /* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */ + USB_DEVICE(0x050d, 0x0128), + .driver_info = (unsigned long)&belkin_info, }, { }, }; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index b33553b1e19c..f5438d0978ca 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -385,7 +385,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, tbp = tb; } - if (tbp[IFLA_IFNAME]) { + if (ifmp && tbp[IFLA_IFNAME]) { nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { @@ -404,7 +404,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, return PTR_ERR(peer); } - if (tbp[IFLA_ADDRESS] == NULL) + if (!ifmp || !tbp[IFLA_ADDRESS]) eth_hw_addr_random(peer); if (ifmp && (dev->ifindex != 0)) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5c6388fb7dd1..2e69bcdc5b07 100644 --- a/drivers/net/virtio_net.c +++ 
b/drivers/net/virtio_net.c @@ -1802,6 +1802,7 @@ static void virtnet_freeze_down(struct virtio_device *vdev) flush_work(&vi->config_work); netif_device_detach(vi->dev); + netif_tx_disable(vi->dev); cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) { diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 530586be05b4..5b1d2e8402d9 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ unsigned long remaining_credit; struct timer_list credit_timeout; u64 credit_window_start; + bool rate_limited; /* Statistics */ struct xenvif_stats stats; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 8397f6c92451..e322a862ddfe 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -106,7 +106,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); - xenvif_napi_schedule_or_enable_events(queue); + /* If the queue is rate-limited, it shall be + * rescheduled in the timer callback. + */ + if (likely(!queue->rate_limited)) + xenvif_napi_schedule_or_enable_events(queue); } return work_done; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 602d408fa25e..5042ff8d449a 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -180,6 +180,7 @@ static void tx_add_credit(struct xenvif_queue *queue) max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ queue->remaining_credit = min(max_credit, max_burst); + queue->rate_limited = false; } void xenvif_tx_credit_callback(unsigned long data) @@ -686,8 +687,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) msecs_to_jiffies(queue->credit_usec / 1000); /* Timer could already be pending in rare cases. */ - if (timer_pending(&queue->credit_timeout)) + if (timer_pending(&queue->credit_timeout)) { + queue->rate_limited = true; return true; + } /* Passed the point where we can replenish credit? 
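The xen-netback changes in this stretch add a rate_limited flag to struct xenvif_queue: tx_add_credit() clears it when credit is replenished, tx_credit_exceeded() sets it whenever the credit timer is armed or already pending, and xenvif_poll() skips re-enabling events on a queue that the timer callback will reschedule anyway. A condensed model of that hand-off, with a plain bool standing in for the real timer state:

	#include <linux/types.h>

	struct demo_queue {		/* stand-in for struct xenvif_queue */
		unsigned long remaining_credit;
		bool rate_limited;
		bool timer_armed;	/* models timer_pending(&credit_timeout) */
	};

	/* Timer callback path: credit replenished, throttle lifted. */
	static void demo_add_credit(struct demo_queue *queue, unsigned long credit)
	{
		queue->remaining_credit = credit;
		queue->rate_limited = false;
	}

	/* Transmit path: out of credit, defer rescheduling to the timer. */
	static bool demo_credit_exceeded(struct demo_queue *queue, unsigned long size)
	{
		if (size <= queue->remaining_credit)
			return false;
		queue->rate_limited = true;	/* poll loop must not resched itself */
		queue->timer_armed = true;
		return true;
	}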
*/ if (time_after_eq64(now, next_credit)) { @@ -702,6 +705,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) mod_timer(&queue->credit_timeout, next_credit); queue->credit_window_start = next_credit; + queue->rate_limited = true; return true; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 951042a375d6..40c7581caeb0 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1805,7 +1805,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) if (pci_is_enabled(pdev)) { u32 csts = readl(dev->bar + NVME_REG_CSTS); - if (dev->ctrl.state == NVME_CTRL_LIVE) + if (dev->ctrl.state == NVME_CTRL_LIVE || + dev->ctrl.state == NVME_CTRL_RESETTING) nvme_start_freeze(&dev->ctrl); dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || pdev->error_state != pci_channel_io_normal); diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 2ee92aa90fe9..e937490d5d97 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -870,7 +870,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, QEDI_ERR(&qedi->dbg_ctx, "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); - WARN_ON(1); } } diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index f46880315ba8..5f5a4ef2e529 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1499,11 +1499,9 @@ err_idx: void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) { - if (!test_and_clear_bit(idx, qedi->task_idx_map)) { + if (!test_and_clear_bit(idx, qedi->task_idx_map)) QEDI_ERR(&qedi->dbg_ctx, "FW task context, already cleared, tid=0x%x\n", idx); - WARN_ON(1); - } } void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 0d8f81591bed..3fdca2cdd8da 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1279,6 +1279,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, */ if (dump_payload) goto after_immediate_data; + /* + * Check for underflow case where both EDTL and immediate data payload + * exceeds what is presented by CDB's TRANSFER LENGTH, and what has + * already been set in target_cmd_size_check() as se_cmd->data_length. + * + * For this special case, fail the command and dump the immediate data + * payload. + */ + if (cmd->first_burst_len > cmd->se_cmd.data_length) { + cmd->sense_reason = TCM_INVALID_CDB_FIELD; + goto after_immediate_data; + } immed_ret = iscsit_handle_immediate_data(cmd, hdr, cmd->first_burst_len); @@ -4423,8 +4435,11 @@ static void iscsit_logout_post_handler_closesession( * always sleep waiting for RX/TX thread shutdown to complete * within iscsit_close_connection(). 
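The iscsi_target logout post-handlers below rely on cmpxchg() as an ownership claim: only the caller that actually flips tx_thread_active from true to false proceeds, and the added early return stops a second caller from completing conn_logout_comp for a thread that is already torn down. The claim idiom in isolation, with a hypothetical flag:

	#include <linux/types.h>
	#include <linux/atomic.h>

	static int demo_thread_active = 1;	/* hypothetical ownership flag */

	/* Exactly one caller observes the old value 1 and wins the right
	 * (and the duty) to perform the teardown; everyone else backs off.
	 */
	static bool demo_claim_teardown(void)
	{
		return cmpxchg(&demo_thread_active, 1, 0) == 1;
	}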
*/ - if (!conn->conn_transport->rdma_shutdown) + if (!conn->conn_transport->rdma_shutdown) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); @@ -4440,8 +4455,11 @@ static void iscsit_logout_post_handler_samecid( { int sleep = 1; - if (!conn->conn_transport->rdma_shutdown) + if (!conn->conn_transport->rdma_shutdown) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 9ab7090f7c83..0912de7c0cf8 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -136,7 +136,7 @@ int init_se_kmem_caches(void); void release_se_kmem_caches(void); u32 scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); -void transport_cmd_finish_abort(struct se_cmd *, int); +int transport_cmd_finish_abort(struct se_cmd *, int); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index dce1e1b47316..13f47bf4d16b 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr) kfree(tmr); } -static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) +static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) { unsigned long flags; bool remove = true, send_tas; @@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) transport_send_task_abort(cmd); } - transport_cmd_finish_abort(cmd, remove); + return transport_cmd_finish_abort(cmd, remove); } static int target_check_cdb_and_preempt(struct list_head *list, @@ -184,8 +184,8 @@ void core_tmr_abort_task( cancel_work_sync(&se_cmd->work); transport_wait_for_tasks(se_cmd); - transport_cmd_finish_abort(se_cmd, true); - target_put_sess_cmd(se_cmd); + if (!transport_cmd_finish_abort(se_cmd, true)) + target_put_sess_cmd(se_cmd); printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %llu\n", ref_tag); @@ -281,8 +281,8 @@ static void core_tmr_drain_tmr_list( cancel_work_sync(&cmd->work); transport_wait_for_tasks(cmd); - transport_cmd_finish_abort(cmd, 1); - target_put_sess_cmd(cmd); + if (!transport_cmd_finish_abort(cmd, 1)) + target_put_sess_cmd(cmd); } } @@ -380,8 +380,8 @@ static void core_tmr_drain_state_list( cancel_work_sync(&cmd->work); transport_wait_for_tasks(cmd); - core_tmr_handle_tas_abort(cmd, tas); - target_put_sess_cmd(cmd); + if (!core_tmr_handle_tas_abort(cmd, tas)) + target_put_sess_cmd(cmd); } } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 6025935036c9..f1b3a46bdcaf 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -651,9 +651,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) percpu_ref_put(&lun->lun_ref); } -void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) +int transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); + int ret = 0; if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) transport_lun_remove_cmd(cmd); @@ -665,9 +666,11 @@ void 
transport_cmd_finish_abort(struct se_cmd *cmd, int remove) cmd->se_tfo->aborted_task(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) - return; + return 1; if (remove && ack_kref) - transport_put_cmd(cmd); + ret = transport_put_cmd(cmd); + + return ret; } static void target_complete_failure_work(struct work_struct *work) diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index 734cbf8d9676..dd9f1bebb5a3 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -344,7 +344,7 @@ static int autofs_dev_ioctl_fail(struct file *fp, int status; token = (autofs_wqt_t) param->fail.token; - status = param->fail.status ? param->fail.status : -ENOENT; + status = param->fail.status < 0 ? param->fail.status : -ENOENT; return autofs4_wait_release(sbi, token, status); } diff --git a/fs/block_dev.c b/fs/block_dev.c index 519599dddd36..0a7404ef9335 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -263,7 +263,10 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, kfree(vecs); if (unlikely(bio.bi_error)) - return bio.bi_error; + ret = bio.bi_error; + + bio_uninit(&bio); + return ret; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 0fd081bd2a2f..fcef70602b27 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3271,7 +3271,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to) if (!is_sync_kiocb(iocb)) ctx->iocb = iocb; - if (to->type & ITER_IOVEC) + if (to->type == ITER_IOVEC) ctx->should_dirty = true; rc = setup_aio_ctx_iter(ctx, to, READ); diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index b08531977daa..3b147dc6af63 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -810,7 +810,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) if (!pages) { pages = vmalloc(max_pages * sizeof(struct page *)); - if (!bv) { + if (!pages) { kvfree(bv); return -ENOMEM; } diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 27bc360c7ffd..a723df3e0197 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { - return CIFSFindFirst(xid, tcon, path, cifs_sb, - &fid->netfid, search_flags, srch_inf, true); + int rc; + + rc = CIFSFindFirst(xid, tcon, path, cifs_sb, + &fid->netfid, search_flags, srch_inf, true); + if (rc) + cifs_dbg(FYI, "find first failed=%d\n", rc); + return rc; } static int diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index c58691834eb2..7e48561abd29 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -982,7 +982,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); kfree(utf16_path); if (rc) { - cifs_dbg(VFS, "open dir failed\n"); + cifs_dbg(FYI, "open dir failed rc=%d\n", rc); return rc; } @@ -992,7 +992,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, fid->volatile_fid, 0, srch_inf); if (rc) { - cifs_dbg(VFS, "query directory failed\n"); + cifs_dbg(FYI, "query directory failed rc=%d\n", rc); SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); } return rc; @@ -1809,7 +1809,8 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) sg = init_sg(rqst, sign); if (!sg) { - cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc); + cifs_dbg(VFS, "%s: Failed to init sg", __func__); + rc = -ENOMEM; goto free_req; } @@ -1817,6 +1818,7 @@ 
crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc) iv = kzalloc(iv_len, GFP_KERNEL); if (!iv) { cifs_dbg(VFS, "%s: Failed to alloc IV", __func__); + rc = -ENOMEM; goto free_sg; } iv[0] = 3; diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 3cb5c9e2d4e7..de50e749ff05 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -188,8 +188,6 @@ static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode, pcreatetime = (__u64 *)value; *pcreatetime = CIFS_I(inode)->createtime; return sizeof(__u64); - - return rc; } @@ -859,6 +859,7 @@ int dax_writeback_mapping_range(struct address_space *mapping, if (ret < 0) goto out; } + start_index = indices[pvec.nr - 1] + 1; } out: put_dax(dax_dev); diff --git a/fs/exec.c b/fs/exec.c index 72934df68471..904199086490 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -220,8 +220,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (write) { unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; + unsigned long ptr_size; struct rlimit *rlim; + /* + * Since the stack will hold pointers to the strings, we + * must account for them as well. + * + * The size calculation is the entire vma while each arg page is + * built, so each time we get here it's calculating how far it + * is currently (rather than each call being just the newly + * added size from the arg page). As a result, we need to + * always add the entire size of the pointers, so that on the + * last call to get_arg_page() we'll actually have the entire + * correct size. + */ + ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); + if (ptr_size > ULONG_MAX - size) + goto fail; + size += ptr_size; + acct_arg_size(bprm, size / PAGE_SIZE); /* @@ -239,13 +257,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, * to work from. 
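The crucial line in the get_arg_page() hunk here is the overflow guard: the (argc + envc) pointer bytes are added to the running stack size only after checking that the sum cannot wrap, since both counts are caller-controlled. The check-before-add idiom on its own, as a hypothetical helper:

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Add 'extra' to '*total' only if the sum cannot overflow;
	 * returns false (and leaves *total untouched) on would-be wrap.
	 */
	static bool demo_add_size_checked(unsigned long *total, unsigned long extra)
	{
		if (extra > ULONG_MAX - *total)
			return false;
		*total += extra;
		return true;
	}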
*/ rlim = current->signal->rlim; - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { - put_page(page); - return NULL; - } + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) + goto fail; } return page; + +fail: + put_page(page); + return NULL; } static void put_arg_page(struct page *page) diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index c14758e08d73..390ac9c39c59 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -753,7 +753,6 @@ static void nfs4_callback_free_slot(struct nfs4_session *session, * A single slot, so highest used slotid is either 0 or -1 */ nfs4_free_slot(tbl, slot); - nfs4_slot_tbl_drain_complete(tbl); spin_unlock(&tbl->slot_tbl_lock); } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 32ccd7754f8a..2ac00bf4ecf1 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1946,29 +1946,6 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) } EXPORT_SYMBOL_GPL(nfs_link); -static void -nfs_complete_rename(struct rpc_task *task, struct nfs_renamedata *data) -{ - struct dentry *old_dentry = data->old_dentry; - struct dentry *new_dentry = data->new_dentry; - struct inode *old_inode = d_inode(old_dentry); - struct inode *new_inode = d_inode(new_dentry); - - nfs_mark_for_revalidate(old_inode); - - switch (task->tk_status) { - case 0: - if (new_inode != NULL) - nfs_drop_nlink(new_inode); - d_move(old_dentry, new_dentry); - nfs_set_verifier(new_dentry, - nfs_save_change_attribute(data->new_dir)); - break; - case -ENOENT: - nfs_dentry_handle_enoent(old_dentry); - } -} - /* * RENAME * FIXME: Some nfsds, like the Linux user space nfsd, may generate a @@ -1999,7 +1976,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, { struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); - struct dentry *dentry = NULL; + struct dentry *dentry = NULL, *rehash = NULL; struct rpc_task *task; int error = -EBUSY; @@ -2022,8 +1999,10 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, * To prevent any new references to the target during the * rename, we unhash the dentry in advance. */ - if (!d_unhashed(new_dentry)) + if (!d_unhashed(new_dentry)) { d_drop(new_dentry); + rehash = new_dentry; + } if (d_count(new_dentry) > 2) { int err; @@ -2040,6 +2019,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, goto out; new_dentry = dentry; + rehash = NULL; new_inode = NULL; } } @@ -2048,8 +2028,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_inode != NULL) NFS_PROTO(new_inode)->return_delegation(new_inode); - task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, - nfs_complete_rename); + task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL); if (IS_ERR(task)) { error = PTR_ERR(task); goto out; @@ -2059,9 +2038,27 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (error == 0) error = task->tk_status; rpc_put_task(task); + nfs_mark_for_revalidate(old_inode); out: + if (rehash) + d_rehash(rehash); trace_nfs_rename_exit(old_dir, old_dentry, new_dir, new_dentry, error); + if (!error) { + if (new_inode != NULL) + nfs_drop_nlink(new_inode); + /* + * The d_move() should be here instead of in an async RPC completion + * handler because we need the proper locks to move the dentry. If + * we're interrupted by a signal, the async RPC completion handler + * should mark the directories for revalidation. 
+ */ + d_move(old_dentry, new_dentry); + nfs_set_verifier(new_dentry, + nfs_save_change_attribute(new_dir)); + } else if (error == -ENOENT) + nfs_dentry_handle_enoent(old_dentry); + /* new dentry created? */ if (dentry) dput(dentry); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c08c46a3b8cd..dbfa18900e25 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2589,7 +2589,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, /* Except MODE, it seems harmless of setting twice. */ if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE && - attrset[1] & FATTR4_WORD1_MODE) + (attrset[1] & FATTR4_WORD1_MODE || + attrset[2] & FATTR4_WORD2_MODE_UMASK)) sattr->ia_valid &= ~ATTR_MODE; if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) @@ -8416,6 +8417,7 @@ static void nfs4_layoutget_release(void *calldata) size_t max_pages = max_response_pages(server); dprintk("--> %s\n", __func__); + nfs4_sequence_free_slot(&lgp->res.seq_res); nfs4_free_pages(lgp->args.layout.pages, max_pages); pnfs_put_layout_hdr(NFS_I(inode)->layout); put_nfs_open_context(lgp->args.ctx); @@ -8490,7 +8492,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags) /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ if (status == 0 && lgp->res.layoutp->len) lseg = pnfs_layout_process(lgp); - nfs4_sequence_free_slot(&lgp->res.seq_res); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); if (status) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index b34de036501b..cbf82b0d4467 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2134,6 +2134,8 @@ again: put_rpccred(cred); switch (status) { case 0: + case -EINTR: + case -ERESTARTSYS: break; case -ETIMEDOUT: if (clnt->cl_softrtry) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 3b7c937a36b5..4689940a953c 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2591,6 +2591,10 @@ void ocfs2_inode_unlock_tracker(struct inode *inode, struct ocfs2_lock_res *lockres; lockres = &OCFS2_I(inode)->ip_inode_lockres; + /* had_lock means that the current process already took the cluster * lock previously. If had_lock is 1, we have nothing to do here, and * it will get unlocked where we got the lock.
+ */ if (!had_lock) { ocfs2_remove_holder(lockres, oh); ocfs2_inode_unlock(inode, ex); diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 3c5384d9b3a5..f70c3778d600 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1328,20 +1328,21 @@ static int ocfs2_xattr_get(struct inode *inode, void *buffer, size_t buffer_size) { - int ret; + int ret, had_lock; struct buffer_head *di_bh = NULL; + struct ocfs2_lock_holder oh; - ret = ocfs2_inode_lock(inode, &di_bh, 0); - if (ret < 0) { - mlog_errno(ret); - return ret; + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh); + if (had_lock < 0) { + mlog_errno(had_lock); + return had_lock; } down_read(&OCFS2_I(inode)->ip_xattr_sem); ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, name, buffer, buffer_size); up_read(&OCFS2_I(inode)->ip_xattr_sem); - ocfs2_inode_unlock(inode, 0); + ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock); brelse(di_bh); @@ -3537,11 +3538,12 @@ int ocfs2_xattr_set(struct inode *inode, { struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; - int ret, credits, ref_meta = 0, ref_credits = 0; + int ret, credits, had_lock, ref_meta = 0, ref_credits = 0; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; struct ocfs2_refcount_tree *ref_tree = NULL; + struct ocfs2_lock_holder oh; struct ocfs2_xattr_info xi = { .xi_name_index = name_index, @@ -3572,8 +3574,9 @@ int ocfs2_xattr_set(struct inode *inode, return -ENOMEM; } - ret = ocfs2_inode_lock(inode, &di_bh, 1); - if (ret < 0) { + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh); + if (had_lock < 0) { + ret = had_lock; mlog_errno(ret); goto cleanup_nolock; } @@ -3670,7 +3673,7 @@ cleanup: if (ret) mlog_errno(ret); } - ocfs2_inode_unlock(inode, 1); + ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); cleanup_nolock: brelse(di_bh); brelse(xbs.xattr_bh); diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 09af0f7cd55e..3b91faacc1ba 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -1316,9 +1316,12 @@ xfs_vm_bmap( * The swap code (ab-)uses ->bmap to get a block mapping and then * bypasses the file system for actual I/O. We really can't allow * that on reflinks inodes, so we have to skip out here. And yes, - * 0 is the magic code for a bmap error.. + * 0 is the magic code for a bmap error. + * + * Since we don't pass back blockdev info, we can't return bmap + * information for rt files either.
*/ - if (xfs_is_reflink_inode(ip)) + if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip)) return 0; filemap_write_and_wait(mapping); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 197f3fffc9a7..408c7820e200 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -210,7 +210,8 @@ struct acpi_device_flags { u32 of_compatible_ok:1; u32 coherent_dma:1; u32 cca_seen:1; - u32 reserved:20; + u32 spi_i2c_slave:1; + u32 reserved:19; }; /* File System */ diff --git a/include/linux/bio.h b/include/linux/bio.h index d1b04b0e99cf..a7e29fa0981f 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -426,6 +426,7 @@ extern void bio_advance(struct bio *, unsigned); extern void bio_init(struct bio *bio, struct bio_vec *table, unsigned short max_vecs); +extern void bio_uninit(struct bio *); extern void bio_reset(struct bio *); void bio_chain(struct bio *, struct bio *); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b74a3edcb3da..1ddd36bd2173 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -391,6 +391,8 @@ struct request_queue { int nr_rqs[2]; /* # allocated [a]sync rqs */ int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ + atomic_t shared_hctx_restart; + struct blk_queue_stats *stats; struct rq_wb *rq_wb; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 07ef550c6627..93315d6b21a8 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -84,6 +84,7 @@ struct kmem_cache { int red_left_pad; /* Left redzone padding size */ #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ + struct work_struct kobj_remove_work; #endif #ifdef CONFIG_MEMCG struct memcg_cache_params memcg_params; diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 110f4532188c..f7043ccca81c 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -29,7 +29,6 @@ */ struct tk_read_base { struct clocksource *clock; - u64 (*read)(struct clocksource *cs); u64 mask; u64 cycle_last; u32 mult; @@ -58,7 +57,7 @@ struct tk_read_base { * interval. * @xtime_remainder: Shifted nano seconds left over when rounding * @cycle_interval - * @raw_interval: Raw nano seconds accumulated per NTP interval. + * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. * @ntp_error: Difference between accumulated time and NTP time in ntp * shifted nano seconds. * @ntp_error_shift: Shift conversion between clock shifted nano seconds and @@ -100,7 +99,7 @@ struct timekeeper { u64 cycle_interval; u64 xtime_interval; s64 xtime_remainder; - u32 raw_interval; + u64 raw_interval; /* The ntp_tick_length() value currently being used. 
* This cached copy ensures we consistently apply the tick * length for an entire tick, as ntp_tick_length may change diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 01f5bc144ee5..01fa357e9a32 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1854,8 +1854,9 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) } #endif -#ifdef CONFIG_XFRM_OFFLOAD void __net_init xfrm_dev_init(void); + +#ifdef CONFIG_XFRM_OFFLOAD int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features); int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo); @@ -1881,10 +1882,6 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x) } } #else -static inline void __net_init xfrm_dev_init(void) -{ -} - static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) { return 0; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 74ea96ea391b..6ea2adcb233b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1016,6 +1016,11 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins if (err) return err; + if (is_pointer_value(env, insn->src_reg)) { + verbose("R%d leaks addr into mem\n", insn->src_reg); + return -EACCES; + } + /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1); diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 2831480c63a2..ee97196bb151 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -580,7 +580,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, int ret = -ENOMEM, max_order = 0; if (!has_aux(event)) - return -ENOTSUPP; + return -EOPNOTSUPP; if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) { /* diff --git a/kernel/signal.c b/kernel/signal.c index ca92bcfeb322..45b4c1ffe14e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -510,7 +510,8 @@ int unhandled_signal(struct task_struct *tsk, int sig) return !tsk->ptrace; } -static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) +static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, + bool *resched_timer) { struct sigqueue *q, *first = NULL; @@ -532,6 +533,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); + + *resched_timer = + (first->flags & SIGQUEUE_PREALLOC) && + (info->si_code == SI_TIMER) && + (info->si_sys_private); + __sigqueue_free(first); } else { /* @@ -548,12 +555,12 @@ still_pending: } static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, - siginfo_t *info) + siginfo_t *info, bool *resched_timer) { int sig = next_signal(pending, mask); if (sig) - collect_signal(sig, pending, info); + collect_signal(sig, pending, info, resched_timer); return sig; } @@ -565,15 +572,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, */ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) { + bool resched_timer = false; int signr; /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ - signr = __dequeue_signal(&tsk->pending, mask, info); + signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); if (!signr) { signr = __dequeue_signal(&tsk->signal->shared_pending, - mask, info); + mask, info, &resched_timer); #ifdef CONFIG_POSIX_TIMERS /* * itimer signal ? 
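In the signal.c hunk above, collect_signal() gains a bool out-parameter: whether the dequeued entry was a preallocated POSIX-timer signal is decided while the sigqueue entry is still valid, and dequeue_signal() (next hunk) rearms the timer from that flag instead of re-inspecting si_code after the entry has been freed. The predicate it computes, as a standalone sketch using the field names from the patch:

	#include <linux/signal.h>

	/* True when this queue entry came from a self-rearming POSIX
	 * interval timer, i.e. the timer needs to be rescheduled.
	 */
	static bool demo_needs_timer_resched(const struct sigqueue *q,
					     const siginfo_t *info)
	{
		return (q->flags & SIGQUEUE_PREALLOC) &&
		       (info->si_code == SI_TIMER) &&
		       (info->si_sys_private);
	}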
@@ -621,7 +629,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) current->jobctl |= JOBCTL_STOP_DEQUEUED; } #ifdef CONFIG_POSIX_TIMERS - if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { + if (resched_timer) { /* * Release the siglock to ensure proper locking order * of timer locks outside of siglocks. Note, we leave diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 9652bc57fd09..b602c48cb841 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -118,6 +118,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) tk->offs_boot = ktime_add(tk->offs_boot, delta); } +/* + * tk_clock_read - atomic clocksource read() helper + * + * This helper is necessary to use in the read paths because, while the + * seqlock ensures we don't return a bad value while structures are updated, + * it doesn't protect from potential crashes. There is the possibility that + * the tkr's clocksource may change between the read reference, and the + * clock reference passed to the read function. This can cause crashes if + * the wrong clocksource is passed to the wrong read function. + * This isn't necessary to use when holding the timekeeper_lock or doing + * a read of the fast-timekeeper tkrs (which is protected by its own locking + * and update logic). + */ +static inline u64 tk_clock_read(struct tk_read_base *tkr) +{ + struct clocksource *clock = READ_ONCE(tkr->clock); + + return clock->read(clock); +} + #ifdef CONFIG_DEBUG_TIMEKEEPING #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ @@ -175,7 +195,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) */ do { seq = read_seqcount_begin(&tk_core.seq); - now = tkr->read(tkr->clock); + now = tk_clock_read(tkr); last = tkr->cycle_last; mask = tkr->mask; max = tkr->clock->max_cycles; @@ -209,7 +229,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) u64 cycle_now, delta; /* read clocksource */ - cycle_now = tkr->read(tkr->clock); + cycle_now = tk_clock_read(tkr); /* calculate the delta since the last update_wall_time */ delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); @@ -238,12 +258,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) ++tk->cs_was_changed_seq; old_clock = tk->tkr_mono.clock; tk->tkr_mono.clock = clock; - tk->tkr_mono.read = clock->read; tk->tkr_mono.mask = clock->mask; - tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); + tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); tk->tkr_raw.clock = clock; - tk->tkr_raw.read = clock->read; tk->tkr_raw.mask = clock->mask; tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; @@ -262,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) /* Go back from cycles -> shifted ns */ tk->xtime_interval = interval * clock->mult; tk->xtime_remainder = ntpinterval - tk->xtime_interval; - tk->raw_interval = (interval * clock->mult) >> clock->shift; + tk->raw_interval = interval * clock->mult; /* if changing clocks, convert xtime_nsec shift units */ if (old_clock) { @@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) now += timekeeping_delta_to_ns(tkr, clocksource_delta( - tkr->read(tkr->clock), + tk_clock_read(tkr), tkr->cycle_last, tkr->mask)); } while (read_seqcount_retry(&tkf->seq, seq)); @@ -461,6 +479,10 @@ static u64 dummy_clock_read(struct clocksource *cs) return cycles_at_suspend; } +static struct clocksource dummy_clock = 
{ + .read = dummy_clock_read, +}; + /** * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. * @tk: Timekeeper to snapshot. @@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk) struct tk_read_base *tkr = &tk->tkr_mono; memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); - cycles_at_suspend = tkr->read(tkr->clock); - tkr_dummy.read = dummy_clock_read; + cycles_at_suspend = tk_clock_read(tkr); + tkr_dummy.clock = &dummy_clock; update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); tkr = &tk->tkr_raw; memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); - tkr_dummy.read = dummy_clock_read; + tkr_dummy.clock = &dummy_clock; update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); } @@ -649,11 +671,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) */ static void timekeeping_forward_now(struct timekeeper *tk) { - struct clocksource *clock = tk->tkr_mono.clock; u64 cycle_now, delta; u64 nsec; - cycle_now = tk->tkr_mono.read(clock); + cycle_now = tk_clock_read(&tk->tkr_mono); delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); tk->tkr_mono.cycle_last = cycle_now; tk->tkr_raw.cycle_last = cycle_now; @@ -929,8 +950,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot) do { seq = read_seqcount_begin(&tk_core.seq); - - now = tk->tkr_mono.read(tk->tkr_mono.clock); + now = tk_clock_read(&tk->tkr_mono); systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; base_real = ktime_add(tk->tkr_mono.base, @@ -1108,7 +1128,7 @@ int get_device_system_crosststamp(int (*get_time_fn) * Check whether the system counter value provided by the * device driver is on the current timekeeping interval. */ - now = tk->tkr_mono.read(tk->tkr_mono.clock); + now = tk_clock_read(&tk->tkr_mono); interval_start = tk->tkr_mono.cycle_last; if (!cycle_between(interval_start, cycles, now)) { clock_was_set_seq = tk->clock_was_set_seq; @@ -1629,7 +1649,7 @@ void timekeeping_resume(void) * The less preferred source will only be tried if there is no better * usable source. The rtc part is handled separately in rtc core code. 
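The substitution running through these timekeeping hunks is mechanical, but the reason is subtle, as the tk_clock_read() comment above explains: the old code loaded tkr->clock twice (once for ->read, once for the argument), so a concurrent clocksource switch could pair one clock's read function with another clock's state. Snapshotting the pointer once with READ_ONCE() closes that window; the before/after in miniature:

	#include <linux/timekeeper_internal.h>

	/* Racy: tkr->clock is dereferenced twice and may change between
	 * the two loads.
	 */
	static u64 demo_read_racy(struct tk_read_base *tkr)
	{
		return tkr->clock->read(tkr->clock);
	}

	/* Safe: one READ_ONCE() snapshot, then only the snapshot is used. */
	static u64 demo_read_once(struct tk_read_base *tkr)
	{
		struct clocksource *clock = READ_ONCE(tkr->clock);

		return clock->read(clock);
	}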
@@ -1629,7 +1649,7 @@ void timekeeping_resume(void)
 	 * The less preferred source will only be tried if there is no better
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
 		cycle_now > tk->tkr_mono.cycle_last) {
 		u64 nsec, cyc_delta;
@@ -1976,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 				    u32 shift, unsigned int *clock_set)
 {
 	u64 interval = tk->cycle_interval << shift;
-	u64 raw_nsecs;
+	u64 snsec_per_sec;

 	/* If the offset is smaller than a shifted interval, do nothing */
 	if (offset < interval)
@@ -1991,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 	*clock_set |= accumulate_nsecs_to_secs(tk);

 	/* Accumulate raw time */
-	raw_nsecs = (u64)tk->raw_interval << shift;
-	raw_nsecs += tk->raw_time.tv_nsec;
-	if (raw_nsecs >= NSEC_PER_SEC) {
-		u64 raw_secs = raw_nsecs;
-		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-		tk->raw_time.tv_sec += raw_secs;
+	tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+		tk->raw_time.tv_sec++;
 	}
-	tk->raw_time.tv_nsec = raw_nsecs;
+	tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+	tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;

 	/* Accumulate error between NTP and clock interval */
 	tk->ntp_error += tk->ntp_tick << shift;
@@ -2030,7 +2051,7 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
 #else
-	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
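The logarithmic_accumulation() rework above keeps raw time in shifted nanoseconds (nanoseconds scaled up by 2^shift) and carries whole seconds out with a subtraction loop, so no precision is discarded between accumulation steps. A small compilable model of that carry arithmetic, with made-up numbers in main():

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct raw_clock {
	uint32_t shift;		/* fixed-point scaling factor */
	uint64_t xtime_nsec;	/* nanoseconds << shift */
	uint64_t tv_sec;
};

static void accumulate_raw(struct raw_clock *c, uint64_t interval_snsec)
{
	uint64_t snsec_per_sec = NSEC_PER_SEC << c->shift;

	c->xtime_nsec += interval_snsec;
	while (c->xtime_nsec >= snsec_per_sec) {	/* carry whole seconds */
		c->xtime_nsec -= snsec_per_sec;
		c->tv_sec++;
	}
}

int main(void)
{
	struct raw_clock c = { .shift = 8 };

	/* 0.6 s of shifted nsec, twice: exactly one full second carries out */
	accumulate_raw(&c, (uint64_t)600000000 << c.shift);
	accumulate_raw(&c, (uint64_t)600000000 << c.shift);
	printf("%llus + %lluns\n", (unsigned long long)c.tv_sec,
	       (unsigned long long)(c.xtime_nsec >> c.shift));
	return 0;
}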
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 3c6432df7e63..4c0888c4a68d 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -23,14 +23,14 @@
  *	the values[M, M+1, ..., N] into the ints array in get_options.
  */

-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
 	int x, inc_counter, upper_range;

 	(*str)++;
 	upper_range = simple_strtol((*str), NULL, 0);
 	inc_counter = upper_range - *pint;
-	for (x = *pint; x < upper_range; x++)
+	for (x = *pint; n && x < upper_range; x++, n--)
 		*pint++ = x;
 	return inc_counter;
 }
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
 			break;
 		if (res == 3) {
 			int range_nums;
-			range_nums = get_range((char **)&str, ints + i);
+			range_nums = get_range((char **)&str, ints + i, nints - i);
 			if (range_nums < 0)
 				break;
 			/*
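The lib/cmdline.c change above threads a remaining-slots count into get_range(), so a range such as "1-1000000" can no longer write past the caller's array. A userspace analogue of the clamped expansion (parsing omitted, names invented):

#include <stdio.h>

/* Expand [start, stop) into out[], writing at most n entries. */
int expand_range(int start, int stop, int *out, int n)
{
	int x, written = 0;

	for (x = start; n && x < stop; x++, n--)
		out[written++] = x;
	return written;
}

int main(void)
{
	int buf[4];
	/* "1-100" against a 4-slot buffer: only 4 values are stored */
	int got = expand_range(1, 100, buf, 4);

	for (int i = 0; i < got; i++)
		printf("%d ", buf[i]);
	printf("(stopped after %d entries)\n", got);
	return 0;
}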
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 945fd1ca49b5..df4ebdb2b10a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -652,7 +652,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 			spin_unlock(ptl);
 			free_page_and_swap_cache(src_page);
 		}
-		cond_resched();
 	}
 }

diff --git a/mm/slub.c b/mm/slub.c
index 7449593fca72..8addc535bcdc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5625,6 +5625,28 @@ static char *create_unique_id(struct kmem_cache *s)
 	return name;
 }

+static void sysfs_slab_remove_workfn(struct work_struct *work)
+{
+	struct kmem_cache *s =
+		container_of(work, struct kmem_cache, kobj_remove_work);
+
+	if (!s->kobj.state_in_sysfs)
+		/*
+		 * For a memcg cache, this may be called during
+		 * deactivation and again on shutdown.  Remove only once.
+		 * A cache is never shut down before deactivation is
+		 * complete, so no need to worry about synchronization.
+		 */
+		return;
+
+#ifdef CONFIG_MEMCG
+	kset_unregister(s->memcg_kset);
+#endif
+	kobject_uevent(&s->kobj, KOBJ_REMOVE);
+	kobject_del(&s->kobj);
+	kobject_put(&s->kobj);
+}
+
 static int sysfs_slab_add(struct kmem_cache *s)
 {
 	int err;
@@ -5632,6 +5654,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 	struct kset *kset = cache_kset(s);
 	int unmergeable = slab_unmergeable(s);

+	INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn);
+
 	if (!kset) {
 		kobject_init(&s->kobj, &slab_ktype);
 		return 0;
@@ -5695,20 +5719,8 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 		 */
 		return;

-	if (!s->kobj.state_in_sysfs)
-		/*
-		 * For a memcg cache, this may be called during
-		 * deactivation and again on shutdown.  Remove only once.
-		 * A cache is never shut down before deactivation is
-		 * complete, so no need to worry about synchronization.
-		 */
-		return;
-
-#ifdef CONFIG_MEMCG
-	kset_unregister(s->memcg_kset);
-#endif
-	kobject_uevent(&s->kobj, KOBJ_REMOVE);
-	kobject_del(&s->kobj);
+	kobject_get(&s->kobj);
+	schedule_work(&s->kobj_remove_work);
 }

 void sysfs_slab_release(struct kmem_cache *s)
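The slub change above defers sysfs teardown to a workqueue item: sysfs_slab_remove() pins the kobject with a reference, schedules the work, and the worker removes the entry exactly once before dropping the reference. A userspace analogue of that pin/defer/release shape, with a thread standing in for the kworker (all names here are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct cache {
	atomic_int refs;
	atomic_bool in_sysfs;	/* mirrors kobj.state_in_sysfs */
};

static void put_cache(struct cache *c)
{
	if (atomic_fetch_sub(&c->refs, 1) == 1)
		printf("final put: cache freed\n");
}

static void *remove_workfn(void *arg)
{
	struct cache *c = arg;

	/* may be queued twice (deactivate + shutdown); tear down only once */
	if (atomic_exchange(&c->in_sysfs, 0))
		printf("worker: removed sysfs entry\n");
	put_cache(c);		/* drop the reference taken at schedule time */
	return NULL;
}

int main(void)
{
	struct cache c = { .refs = 1, .in_sysfs = 1 };
	pthread_t worker;

	atomic_fetch_add(&c.refs, 1);		/* like kobject_get() */
	pthread_create(&worker, NULL, remove_workfn, &c);
	pthread_join(worker, NULL);
	put_cache(&c);
	return 0;
}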
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 34a1c3e46ed7..ecc97f74ab18 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -287,10 +287,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	if (p4d_none(*p4d))
 		return NULL;
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+
+	/*
+	 * Don't dereference bad PUD or PMD (below) entries. This will also
+	 * identify huge mappings, which we may encounter on architectures
+	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
+	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
+	 * not [unambiguously] associated with a struct page, so there is
+	 * no correct value to return for them.
+	 */
+	WARN_ON_ONCE(pud_bad(*pud));
+	if (pud_none(*pud) || pud_bad(*pud))
 		return NULL;
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	WARN_ON_ONCE(pmd_bad(*pmd));
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
 		return NULL;

 	ptep = pte_offset_map(pmd, addr);
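The vmalloc_to_page() hunk above declines to walk through PUD/PMD entries that are not plain next-level table pointers, since dereferencing a huge or corrupt entry as a table would crash. A minimal sketch of that guard shape over a toy entry format (the flag encoding is invented for illustration):

#include <stdint.h>
#include <stddef.h>

#define ENTRY_PRESENT 0x1UL	/* toy flag: entry is populated */
#define ENTRY_TABLE   0x2UL	/* toy flag: entry points at a next-level table */

/* Return the next-level table, or NULL if the entry cannot be walked. */
uint64_t *walk_one_level(uint64_t entry)
{
	if (!(entry & ENTRY_PRESENT))	/* like pud_none()/pmd_none() */
		return NULL;
	if (!(entry & ENTRY_TABLE))	/* like pud_bad()/pmd_bad(): huge/bogus */
		return NULL;		/* never dereference it as a table */
	return (uint64_t *)(uintptr_t)(entry & ~0x3UL);
}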
diff --git a/net/core/dev.c b/net/core/dev.c
index a91572aa73d5..88927f1a3e4f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4844,6 +4844,13 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);

+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+	skb_dst_drop(skb);
+	secpath_reset(skb);
+	kmem_cache_free(skbuff_head_cache, skb);
+}
+
 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
 	switch (ret) {
@@ -4857,13 +4864,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 		break;

 	case GRO_MERGED_FREE:
-		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
-			skb_dst_drop(skb);
-			secpath_reset(skb);
-			kmem_cache_free(skbuff_head_cache, skb);
-		} else {
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
 			__kfree_skb(skb);
-		}
 		break;

 	case GRO_HELD:
@@ -4935,10 +4939,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
 		break;

 	case GRO_DROP:
-	case GRO_MERGED_FREE:
 		napi_reuse_skb(napi, skb);
 		break;

+	case GRO_MERGED_FREE:
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
+			napi_reuse_skb(napi, skb);
+		break;
+
 	case GRO_MERGED:
 	case GRO_CONSUMED:
 		break;
@@ -7867,9 +7877,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
 	}
-	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
-	storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
+	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
 	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7a3fd25e8913..532b36e9ce2a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -964,7 +964,8 @@ static int __ip_append_data(struct sock *sk,
 		csummode = CHECKSUM_PARTIAL;

 	cork->length += length;
-	if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
+	if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
+	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
 	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4c88d20d91d4..4793fb78d93b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2350,6 +2350,8 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tcp_init_send_head(sk);
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
+	dst_release(sk->sk_rx_dst);
+	sk->sk_rx_dst = NULL;
 	tcp_saved_syn_free(tp);

 	/* Clean up fastopen related fields */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a885ffcf0973..5bea601aa3f5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3384,6 +3384,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
+	struct net *net = dev_net(dev);
 	int run_pending = 0;
 	int err;

@@ -3399,7 +3400,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 	case NETDEV_CHANGEMTU:
 		/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
 		if (dev->mtu < IPV6_MIN_MTU) {
-			addrconf_ifdown(dev, 1);
+			addrconf_ifdown(dev, dev != net->loopback_dev);
 			break;
 		}

@@ -3515,7 +3516,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			 * IPV6_MIN_MTU stop IPv6 on this interface.
 			 */
 			if (dev->mtu < IPV6_MIN_MTU)
-				addrconf_ifdown(dev, 1);
+				addrconf_ifdown(dev, dev != net->loopback_dev);
 		}
 		break;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index e011122ebd43..5c786f5ab961 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -250,8 +250,14 @@ ipv4_connected:
 	 */

 	err = ip6_datagram_dst_update(sk, true);
-	if (err)
+	if (err) {
+		/* Reset daddr and dport so that udp_v6_early_demux()
+		 * fails to find this socket
+		 */
+		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
+		inet->inet_dport = 0;
 		goto out;
+	}

 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
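The __ip_append_data() hunk above bases the UFO decision on the bytes already queued on the tail skb rather than only the size of the new chunk, so a datagram that only grows past the MTU across several appends is still caught. A toy model of the changed test (struct and names invented):

#include <stdbool.h>
#include <stddef.h>

/* Stand-in for the tail sk_buff already queued on the socket, if any. */
struct pending {
	size_t len;	/* bytes already sitting in that skb */
};

/*
 * Old test: length + fragheaderlen > mtu -- ignores the queued bytes.
 * New test: count the tail skb's existing payload when one exists.
 */
bool over_mtu(const struct pending *skb, size_t length,
	      size_t fragheaderlen, size_t mtu)
{
	return length + (skb ? skb->len : fragheaderlen) > mtu;
}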
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index d950d43ba255..f02f131f6435 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -30,6 +30,25 @@
 #include <net/ipv6.h>
 #include <linux/icmpv6.h>

+static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
+{
+	int off = sizeof(struct ipv6hdr);
+	struct ipv6_opt_hdr *exthdr;
+
+	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
+		return offsetof(struct ipv6hdr, nexthdr);
+
+	while (off < nhlen) {
+		exthdr = (void *)ipv6_hdr + off;
+		if (exthdr->nexthdr == NEXTHDR_ESP)
+			return off;
+
+		off += ipv6_optlen(exthdr);
+	}
+
+	return 0;
+}
+
 static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 					 struct sk_buff *skb)
 {
@@ -38,6 +57,7 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
 	struct xfrm_state *x;
 	__be32 seq;
 	__be32 spi;
+	int nhoff;
 	int err;

 	skb_pull(skb, offset);
@@ -72,6 +92,11 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,

 	xo->flags |= XFRM_GRO;

+	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
+	if (!nhoff)
+		goto out;
+
+	IP6CB(skb)->nhoff = nhoff;
 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
 	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5baa6fab4b97..5f2657d98e82 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1385,7 +1385,7 @@ emsgsize:
 	 */

 	cork->length += length;
-	if ((((length + fragheaderlen) > mtu) ||
+	if ((((length + (skb ? skb->len : headersize)) > mtu) ||
 	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2e4490076061..0488a24c2a44 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3730,7 +3730,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
 #endif
-	} else if (event == NETDEV_UNREGISTER) {
+	} else if (event == NETDEV_UNREGISTER &&
+		   dev->reg_state != NETREG_UNREGISTERED) {
+		/* NETDEV_UNREGISTER could be fired for multiple times by
+		 * netdev_wait_allrefs(). Make sure we only call this once.
+		 */
 		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e9958b1398cb..ac912bb21747 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -305,7 +305,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
 	 * we try harder to allocate.
 	 */
 	kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
-		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
+		kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) :
 		NULL;

 	rcu_read_lock();
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 450829dd6384..319aa8ed9cf8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -885,7 +885,8 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
 	struct sock *sk;

 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-		if (INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
+		if (sk->sk_state == TCP_ESTABLISHED &&
+		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif))
 			return sk;
 		/* Only check first socket in chain */
 		break;
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 08a807b29298..3ef5d913e7a3 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -43,8 +43,8 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 		return 1;
 #endif

-	ipv6_hdr(skb)->payload_len = htons(skb->len);
 	__skb_push(skb, skb->data - skb_network_header(skb));
+	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));

 	if (xo && (xo->flags & XFRM_GRO)) {
 		skb_mac_header_rebuild(skb);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index ce9b8565d825..376fdcf7a6b9 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1150,6 +1150,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 			goto out;
 	}

+	err = -ENOBUFS;
 	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
 	if (sa->sadb_sa_auth) {
 		int keysize = 0;
@@ -1161,8 +1162,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		if (key)
 			keysize = (key->sadb_key_bits + 7) / 8;
 		x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
-		if (!x->aalg)
+		if (!x->aalg) {
+			err = -ENOMEM;
 			goto out;
+		}
 		strcpy(x->aalg->alg_name, a->name);
 		x->aalg->alg_key_len = 0;
 		if (key) {
@@ -1181,8 +1184,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 				goto out;
 			}
 			x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
-			if (!x->calg)
+			if (!x->calg) {
+				err = -ENOMEM;
 				goto out;
+			}
 			strcpy(x->calg->alg_name, a->name);
 			x->props.calgo = sa->sadb_sa_encrypt;
 		} else {
@@ -1196,8 +1201,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 			if (key)
 				keysize = (key->sadb_key_bits + 7) / 8;
 			x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
-			if (!x->ealg)
+			if (!x->ealg) {
+				err = -ENOMEM;
 				goto out;
+			}
 			strcpy(x->ealg->alg_name, a->name);
 			x->ealg->alg_key_len = 0;
 			if (key) {
@@ -1242,8 +1249,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		struct xfrm_encap_tmpl *natt;

 		x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
-		if (!x->encap)
+		if (!x->encap) {
+			err = -ENOMEM;
 			goto out;
+		}

 		natt = x->encap;
 		n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
@@ -2742,6 +2751,8 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
 	int err, err2;

 	err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
+	if (!err)
+		xfrm_garbage_collect(net);
 	err2 = unicast_flush_resp(sk, hdr);
 	if (err || err2) {
 		if (err == -ESRCH) /* empty table - old silent behavior */
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 5d95401bbc02..43b94c7b69bd 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1019,7 +1019,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 		return sch;
 	}
 	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
-	ops->destroy(sch);
+	if (ops->destroy)
+		ops->destroy(sch);
 err_out3:
 	dev_put(dev);
 	kfree((char *) sch - sch->padded);
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index abf81b329dc1..55b2ac300995 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -4,8 +4,7 @@

 obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
 		      xfrm_input.o xfrm_output.o \
-		      xfrm_sysctl.o xfrm_replay.o
-obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o
+		      xfrm_sysctl.o xfrm_replay.o xfrm_device.o
 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
 obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 6d4a60d1bf19..5f7e8bfa0c2d 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -22,6 +22,7 @@
 #include <net/xfrm.h>
 #include <linux/notifier.h>

+#ifdef CONFIG_XFRM_OFFLOAD
 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
 {
 	int err;
@@ -137,6 +138,7 @@ ok:
 	return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+#endif

 static int xfrm_dev_register(struct net_device *dev)
 {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index a3dc7ab0b7ed..4706df612170 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1006,10 +1006,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 	err = -ESRCH;
 out:
 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
-
-	if (cnt)
-		xfrm_garbage_collect(net);
-
 	return err;
 }
 EXPORT_SYMBOL(xfrm_policy_flush);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 6197c7231bc7..2be4c6af008a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2027,6 +2027,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 			return 0;
 		return err;
 	}
+	xfrm_garbage_collect(net);

 	c.data.type = type;
 	c.event = nlh->nlmsg_type;
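The af_key hunks above make every allocation-failure branch set err explicitly before jumping to the shared cleanup label, instead of relying on whatever value err last held. The idiom in miniature, as a standalone sketch (names invented, not the pfkey code):

#include <errno.h>
#include <stdlib.h>

struct state { void *aalg, *ealg; };

int build_state(struct state **out)
{
	struct state *x;
	int err = -ENOBUFS;	/* default for "message too short" style cases */

	x = calloc(1, sizeof(*x));
	if (!x)
		return -ENOMEM;

	x->aalg = malloc(16);
	if (!x->aalg) {
		err = -ENOMEM;	/* each failing branch picks its own errno */
		goto out;
	}
	x->ealg = malloc(16);
	if (!x->ealg) {
		err = -ENOMEM;
		goto out;
	}
	*out = x;
	return 0;
out:
	free(x->aalg);		/* free(NULL) is a no-op, so this is safe */
	free(x->ealg);
	free(x);
	return err;		/* never returns a stale value by accident */
}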
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index ce753a408c56..c583a1e1bd3c 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -14,7 +14,15 @@ __headers:
 include scripts/Kbuild.include

 srcdir        := $(srctree)/$(obj)
-subdirs       := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.))
+
+# When make is run under a fakechroot environment, the function
+# $(wildcard $(srcdir)/*/.) doesn't only return directories, but also regular
+# files. So, we are using a combination of sort/dir/wildcard which works
+# with fakechroot.
+subdirs       := $(patsubst $(srcdir)/%/,%,\
+		 $(filter-out $(srcdir)/,\
+		 $(sort $(dir $(wildcard $(srcdir)/*/)))))
+
 # caller may set destination dir (when installing to asm/)
 _dst          := $(if $(dst),$(dst),$(obj))
diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h
index 3bffdcaaa274..b724a0290c75 100644
--- a/scripts/genksyms/genksyms.h
+++ b/scripts/genksyms/genksyms.h
@@ -75,7 +75,7 @@ struct string_list *copy_list_range(struct string_list *start,
 int yylex(void);
 int yyparse(void);

-void error_with_pos(const char *, ...);
+void error_with_pos(const char *, ...) __attribute__ ((format(printf, 1, 2)));

 /*----------------------------------------------------------------------*/
 #define xmalloc(size) ({ void *__ptr = malloc(size);		\
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 90a091b6ae4d..eb8144643b78 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -196,7 +196,7 @@ clean-files += config.pot linux.pot

 # Check that we have the required ncurses stuff installed for lxdialog (menuconfig)
 PHONY += $(obj)/dochecklxdialog
-$(addprefix $(obj)/,$(lxdialog)): $(obj)/dochecklxdialog
+$(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/dochecklxdialog
 $(obj)/dochecklxdialog:
 	$(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf)
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index a9bc5334a478..003114779815 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -271,7 +271,7 @@ static struct mitem k_menu_items[MAX_MENU_ITEMS];
 static int items_num;
 static int global_exit;
 /* the currently selected button */
-const char *current_instructions = menu_instructions;
+static const char *current_instructions = menu_instructions;

 static char *dialog_input_result;
 static int dialog_input_result_len;
@@ -305,7 +305,7 @@ struct function_keys {
 };

 static const int function_keys_num = 9;
-struct function_keys function_keys[] = {
+static struct function_keys function_keys[] = {
 	{
 		.key_str = "F1",
 		.func = "Help",
@@ -508,7 +508,7 @@ static int get_mext_match(const char *match_str, match_f flag)
 	index = (index + items_num) % items_num;
 	while (true) {
 		char *str = k_menu_items[index].str;
-		if (strcasestr(str, match_str) != 0)
+		if (strcasestr(str, match_str) != NULL)
 			return index;
 		if (flag == FIND_NEXT_MATCH_UP || flag == MATCH_TINKER_PATTERN_UP)
@@ -1067,7 +1067,7 @@ static int do_match(int key, struct match_state *state, int *ans)

 static void conf(struct menu *menu)
 {
-	struct menu *submenu = 0;
+	struct menu *submenu = NULL;
 	const char *prompt = menu_get_prompt(menu);
 	struct symbol *sym;
 	int res;
@@ -1234,7 +1234,7 @@ static void show_help(struct menu *menu)
 static void conf_choice(struct menu *menu)
 {
 	const char *prompt = _(menu_get_prompt(menu));
-	struct menu *child = 0;
+	struct menu *child = NULL;
 	struct symbol *active;
 	int selected_index = 0;
 	int last_top_row = 0;
@@ -1456,7 +1456,7 @@ static void conf_save(void)
 	}
 }

-void setup_windows(void)
+static void setup_windows(void)
 {
 	int lines, columns;

diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 4b2f44c20caf..a64b1c31253e 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -129,7 +129,7 @@ static void no_colors_theme(void)
 	mkattrn(FUNCTION_TEXT, A_REVERSE);
 }

-void set_colors()
+void set_colors(void)
 {
 	start_color();
 	use_default_colors();
@@ -192,7 +192,7 @@ const char *get_line(const char *text, int line_no)
 	int lines = 0;

 	if (!text)
-		return 0;
+		return NULL;

 	for (i = 0; text[i] != '\0' && lines < line_no; i++)
 		if (text[i] == '\n')
diff --git a/scripts/tags.sh b/scripts/tags.sh
index d661f2f3ef61..d23dcbf17457 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -106,6 +106,7 @@ all_compiled_sources()
 		case "$i" in
 			*.[cS])
 				j=${i/\.[cS]/\.o}
+				j="${j#$tree}"
 				if [ -e $j ]; then
 					echo $i
 				fi
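The genksyms change above lets the compiler type-check error_with_pos() calls against their format string. A tiny compilable demonstration of what format(printf, ...) buys (the function body here is invented):

#include <stdarg.h>
#include <stdio.h>

static void error_at(const char *fmt, ...)
	__attribute__((format(printf, 1, 2)));

static void error_at(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	error_at("bad token %s at line %d\n", "foo", 42);	/* fine */
	/* error_at("bad token %s\n", 42); -- -Wformat now warns here */
	return 0;
}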
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 5088d4b8db22..009e6c98754e 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2492,7 +2492,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
 	struct snd_pcm_substream *substream;
 	const struct snd_pcm_chmap_elem *map;

-	if (snd_BUG_ON(!info->chmap))
+	if (!info->chmap)
 		return -EINVAL;
 	substream = snd_pcm_chmap_substream(info, idx);
 	if (!substream)
@@ -2524,7 +2524,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
 	unsigned int __user *dst;
 	int c, count = 0;

-	if (snd_BUG_ON(!info->chmap))
+	if (!info->chmap)
 		return -EINVAL;
 	if (size < 8)
 		return -ENOMEM;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 9e6f54f8c45d..1e26854b3425 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -682,7 +682,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
 		cycle = increment_cycle_count(cycle, 1);
 		if (s->handle_packet(s, 0, cycle, i) < 0) {
 			s->packet_index = -1;
-			amdtp_stream_pcm_abort(s);
+			if (in_interrupt())
+				amdtp_stream_pcm_abort(s);
+			WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 			return;
 		}
 	}
@@ -734,7 +736,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
 	/* Queueing error or detecting invalid payload. */
 	if (i < packets) {
 		s->packet_index = -1;
-		amdtp_stream_pcm_abort(s);
+		if (in_interrupt())
+			amdtp_stream_pcm_abort(s);
+		WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
 		return;
 	}

diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index 7e8831722821..ea1a91e99875 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -135,7 +135,7 @@ struct amdtp_stream {
 	/* For a PCM substream processing. */
 	struct snd_pcm_substream *pcm;
 	struct tasklet_struct period_tasklet;
-	unsigned int pcm_buffer_pointer;
+	snd_pcm_uframes_t pcm_buffer_pointer;
 	unsigned int pcm_period_pointer;

 	/* To wait for first packet. */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1770f085c2a6..01eb1dc7b5b3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -370,10 +370,12 @@ enum {
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
 #define IS_KBL_H(pci)	((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98)
 #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-			IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) ||	\
-			IS_GLK(pci)
+#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+			  IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \
+			  IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci))

 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
@@ -2378,6 +2380,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* Kabylake-H */
 	{ PCI_DEVICE(0x8086, 0xa2f0),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Coffelake */
+	{ PCI_DEVICE(0x8086, 0xa348),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE},
 	/* Broxton-P(Apollolake) */
 	{ PCI_DEVICE(0x8086, 0x5a98),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
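The amdtp-stream hunks above widen pcm_buffer_pointer so it can carry the SNDRV_PCM_POS_XRUN sentinel, published with WRITE_ONCE() from the packet callbacks instead of always running the full abort path there. A standalone sketch of that publish-a-sentinel shape, with C11 atomics in place of WRITE_ONCE() and an invented sentinel value:

#include <stdatomic.h>
#include <stdint.h>

#define POS_XRUN ((uintptr_t)-1)	/* stand-in for SNDRV_PCM_POS_XRUN */

static _Atomic uintptr_t buffer_pointer;

/* Producer (packet handler): flag the overrun without heavy teardown. */
void flag_xrun(void)
{
	atomic_store_explicit(&buffer_pointer, POS_XRUN,
			      memory_order_relaxed);	/* ~WRITE_ONCE() */
}

/* Consumer (pointer callback): sees either a position or the sentinel. */
int read_position(uintptr_t *pos)
{
	*pos = atomic_load_explicit(&buffer_pointer, memory_order_relaxed);
	return *pos == POS_XRUN ? -1 : 0;
}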
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 84e7e698411e..a2670e9d652d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -619,7 +619,7 @@ static int post_process_probe_trace_point(struct probe_trace_point *tp,
 					   struct map *map, unsigned long offs)
 {
 	struct symbol *sym;
-	u64 addr = tp->address + tp->offset - offs;
+	u64 addr = tp->address - offs;

 	sym = map__find_symbol(map, addr);
 	if (!sym)
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index c0af0195432f..26f1eefdc782 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -3767,6 +3767,72 @@ static struct bpf_test tests[] = {
 		.errstr = "invalid bpf_context access",
 	},
 	{
+		"leak pointer into ctx 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+				    offsetof(struct __sk_buff, cb[0])),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+				     offsetof(struct __sk_buff, cb[0])),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 2 },
+		.errstr_unpriv = "R2 leaks addr into mem",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"leak pointer into ctx 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+				    offsetof(struct __sk_buff, cb[0])),
+			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+				     offsetof(struct __sk_buff, cb[0])),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R10 leaks addr into mem",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"leak pointer into ctx 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
+				    offsetof(struct __sk_buff, cb[0])),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 1 },
+		.errstr_unpriv = "R2 leaks addr into ctx",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"leak pointer into map val",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_3, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr_unpriv = "R6 leaks addr into mem",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"helper access to map: full range",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),