diff options
author | David S. Miller <davem@davemloft.net> | 2017-09-01 17:42:05 -0700 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-09-01 17:42:05 -0700 |
commit | 6026e043d09012c6269f9a96a808d52d9c498224 (patch) | |
tree | a80578915557db98596821ff60d2ff37dafffb4f | |
parent | 4cc5b44b29a9de9b3f841efedaa3f769066c63cc (diff) | |
parent | 138e4ad67afd5c6c318b056b4d17c17f2c0ca5c0 (diff) | |
download | linux-stable-6026e043d09012c6269f9a96a808d52d9c498224.tar.gz linux-stable-6026e043d09012c6269f9a96a808d52d9c498224.tar.bz2 linux-stable-6026e043d09012c6269f9a96a808d52d9c498224.zip |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Three cases of simple overlapping changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
359 files changed, 2437 insertions, 1658 deletions
@@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 13 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Fearless Coyote # *DOCUMENTATION* @@ -396,7 +396,7 @@ LINUXINCLUDE := \ KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ - -fno-strict-aliasing -fno-common \ + -fno-strict-aliasing -fno-common -fshort-wchar \ -Werror-implicit-function-declaration \ -Wno-format-security \ -std=gnu89 $(call cc-option,-fno-PIE) @@ -442,7 +442,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \ # =========================================================================== # Rules shared between *config targets and build targets -# Basic helpers built in scripts/ +# Basic helpers built in scripts/basic/ PHONY += scripts_basic scripts_basic: $(Q)$(MAKE) $(build)=scripts/basic @@ -505,7 +505,7 @@ ifeq ($(KBUILD_EXTMOD),) endif endif endif -# install and module_install need also be processed one by one +# install and modules_install need also be processed one by one ifneq ($(filter install,$(MAKECMDGOALS)),) ifneq ($(filter modules_install,$(MAKECMDGOALS)),) mixed-targets := 1 @@ -964,7 +964,7 @@ export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt- export KBUILD_VMLINUX_LIBS := $(libs-y1) export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds export LDFLAGS_vmlinux -# used by scripts/pacmage/Makefile +# used by scripts/package/Makefile export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools) vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS) @@ -992,8 +992,8 @@ include/generated/autoksyms.h: FORCE ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink) # Final link of vmlinux with optional arch pass after final link - cmd_link-vmlinux = \ - $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \ +cmd_link-vmlinux = \ + $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \ $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE @@ -1184,6 +1184,7 @@ PHONY += kselftest kselftest: $(Q)$(MAKE) -C tools/testing/selftests run_tests +PHONY += kselftest-clean kselftest-clean: $(Q)$(MAKE) -C tools/testing/selftests clean diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index ff4049155c84..4d61d2a50c52 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -299,6 +299,7 @@ static inline void __iomem * ioremap_nocache(unsigned long offset, return ioremap(offset, size); } +#define ioremap_wc ioremap_nocache #define ioremap_uc ioremap_nocache static inline void iounmap(volatile void __iomem *addr) diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h index 4cb4b6d3452c..0bc66e1d3a7e 100644 --- a/arch/alpha/include/asm/types.h +++ b/arch/alpha/include/asm/types.h @@ -1,6 +1,6 @@ #ifndef _ALPHA_TYPES_H #define _ALPHA_TYPES_H -#include <asm-generic/int-ll64.h> +#include <uapi/asm/types.h> #endif /* _ALPHA_TYPES_H */ diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index b37153ecf2ac..db7fc0f511e2 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -3,7 +3,7 @@ #include <uapi/asm/unistd.h> -#define NR_SYSCALLS 514 +#define NR_SYSCALLS 523 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/arch/alpha/include/uapi/asm/types.h 
b/arch/alpha/include/uapi/asm/types.h index 9fd3cd459777..8d1024d7be05 100644 --- a/arch/alpha/include/uapi/asm/types.h +++ b/arch/alpha/include/uapi/asm/types.h @@ -9,8 +9,18 @@ * need to be careful to avoid a name clashes. */ -#ifndef __KERNEL__ +/* + * This is here because we used to use l64 for alpha + * and we don't want to impact user mode with our change to ll64 + * in the kernel. + * + * However, some user programs are fine with this. They can + * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here. + */ +#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__) #include <asm-generic/int-l64.h> +#else +#include <asm-generic/int-ll64.h> #endif #endif /* _UAPI_ALPHA_TYPES_H */ diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h index aa33bf5aacb6..a2945fea6c86 100644 --- a/arch/alpha/include/uapi/asm/unistd.h +++ b/arch/alpha/include/uapi/asm/unistd.h @@ -475,5 +475,19 @@ #define __NR_getrandom 511 #define __NR_memfd_create 512 #define __NR_execveat 513 +#define __NR_seccomp 514 +#define __NR_bpf 515 +#define __NR_userfaultfd 516 +#define __NR_membarrier 517 +#define __NR_mlock2 518 +#define __NR_copy_file_range 519 +#define __NR_preadv2 520 +#define __NR_pwritev2 521 +#define __NR_statx 522 + +/* Alpha doesn't have protection keys. */ +#define __IGNORE_pkey_mprotect +#define __IGNORE_pkey_alloc +#define __IGNORE_pkey_free #endif /* _UAPI_ALPHA_UNISTD_H */ diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index d5f0580746a5..03ff832b1cb4 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -351,7 +351,7 @@ marvel_init_io7(struct io7 *io7) } } -void +void __init marvel_io7_present(gct6_node *node) { int pe; @@ -369,6 +369,7 @@ marvel_io7_present(gct6_node *node) static void __init marvel_find_console_vga_hose(void) { +#ifdef CONFIG_VGA_HOSE u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset); if (pu64[7] == 3) { /* TERM_TYPE == graphics */ @@ -402,9 +403,10 @@ marvel_find_console_vga_hose(void) pci_vga_hose = hose; } } +#endif } -gct6_search_struct gct_wanted_node_list[] = { +gct6_search_struct gct_wanted_node_list[] __initdata = { { GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present }, { 0, 0, NULL } }; diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c index 219bf271c0ba..b532d925443d 100644 --- a/arch/alpha/kernel/core_titan.c +++ b/arch/alpha/kernel/core_titan.c @@ -461,6 +461,7 @@ titan_ioremap(unsigned long addr, unsigned long size) unsigned long *ptes; unsigned long pfn; +#ifdef CONFIG_VGA_HOSE /* * Adjust the address and hose, if necessary. */ @@ -468,6 +469,7 @@ titan_ioremap(unsigned long addr, unsigned long size) h = pci_vga_hose->index; addr += pci_vga_hose->mem_space->start; } +#endif /* * Find the hose. diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index 936bc8f89a67..47632fa8c24e 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -181,6 +181,9 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, switch (r_type) { case R_ALPHA_NONE: break; + case R_ALPHA_REFLONG: + *(u32 *)location = value; + break; case R_ALPHA_REFQUAD: /* BUG() can produce misaligned relocations. */ ((u32 *)location)[0] = value; diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 9fc560459ebd..f6726a746427 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -115,7 +115,7 @@ wait_boot_cpu_to_stop(int cpuid) /* * Where secondaries begin a life of C. 
*/ -void +void __init smp_callin(void) { int cpuid = hard_smp_processor_id(); diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 9b62e3fd4f03..5b4514abb234 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -532,6 +532,15 @@ sys_call_table: .quad sys_getrandom .quad sys_memfd_create .quad sys_execveat + .quad sys_seccomp + .quad sys_bpf /* 515 */ + .quad sys_userfaultfd + .quad sys_membarrier + .quad sys_mlock2 + .quad sys_copy_file_range + .quad sys_preadv2 /* 520 */ + .quad sys_pwritev2 + .quad sys_statx .size sys_call_table, . - sys_call_table .type sys_call_table, @object diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile index 7083434dd241..a80815960364 100644 --- a/arch/alpha/lib/Makefile +++ b/arch/alpha/lib/Makefile @@ -20,12 +20,8 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ checksum.o \ csum_partial_copy.o \ $(ev67-y)strlen.o \ - $(ev67-y)strcat.o \ - strcpy.o \ - $(ev67-y)strncat.o \ - strncpy.o \ - $(ev6-y)stxcpy.o \ - $(ev6-y)stxncpy.o \ + stycpy.o \ + styncpy.o \ $(ev67-y)strchr.o \ $(ev67-y)strrchr.o \ $(ev6-y)memchr.o \ @@ -49,3 +45,17 @@ AFLAGS___remlu.o = -DREM -DINTSIZE $(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \ $(src)/$(ev6-y)divide.S FORCE $(call if_changed_rule,as_o_S) + +# There are direct branches between {str*cpy,str*cat} and stx*cpy. +# Ensure the branches are within range by merging these objects. + +LDFLAGS_stycpy.o := -r +LDFLAGS_styncpy.o := -r + +$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \ + $(obj)/$(ev6-y)stxcpy.o FORCE + $(call if_changed,ld) + +$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \ + $(obj)/$(ev6-y)stxncpy.o FORCE + $(call if_changed,ld) diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S index 159f1b7e6e49..c277a1a4383e 100644 --- a/arch/alpha/lib/copy_user.S +++ b/arch/alpha/lib/copy_user.S @@ -34,7 +34,7 @@ .ent __copy_user __copy_user: .prologue 0 - and $18,$18,$0 + mov $18,$0 and $16,7,$3 beq $0,$35 beq $3,$36 diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S index 35e6710d0700..954ca03ebebe 100644 --- a/arch/alpha/lib/ev6-copy_user.S +++ b/arch/alpha/lib/ev6-copy_user.S @@ -45,9 +45,10 @@ # Pipeline info: Slotting & Comments __copy_user: .prologue 0 - andq $18, $18, $0 - subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy? - beq $0, $zerolength # U .. .. .. : U L U L + mov $18, $0 # .. .. .. E + subq $18, 32, $1 # .. .. E. .. : Is this going to be a small copy? + nop # .. E .. .. + beq $18, $zerolength # U .. .. .. : U L U L and $16,7,$3 # .. .. .. E : is leading dest misalignment ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index cf90714a676d..067ea362fb3e 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -75,13 +75,20 @@ void arc_init_IRQ(void) * Set a default priority for all available interrupts to prevent * switching of register banks if Fast IRQ and multiple register banks * are supported by CPU. - * Also disable all IRQ lines so faulty external hardware won't + * Also disable private-per-core IRQ lines so faulty external HW won't * trigger interrupt that kernel is not ready to handle. */ for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) { write_aux_reg(AUX_IRQ_SELECT, i); write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO); - write_aux_reg(AUX_IRQ_ENABLE, 0); + + /* + * Only mask cpu private IRQs here. 
+ * "common" interrupts are masked at IDU, otherwise it would + * need to be unmasked at each cpu, with IPIs + */ + if (i < FIRST_EXT_IRQ) + write_aux_reg(AUX_IRQ_ENABLE, 0); } /* setup status32, don't enable intr yet as kernel doesn't want */ diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index cef388025adf..47b421fa0147 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c @@ -27,7 +27,7 @@ */ void arc_init_IRQ(void) { - int level_mask = 0, i; + unsigned int level_mask = 0, i; /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */ level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi index f92f95741207..a183b56283f8 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi @@ -266,6 +266,7 @@ &hdmicec { status = "okay"; + needs-hpd; }; &hsi2c_4 { diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 127e2dd2e21c..4a879f6ff13b 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -225,12 +225,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -/* We do not have shadow page tables, hence the empty hooks */ -static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, - unsigned long address) -{ -} - struct kvm_vcpu *kvm_arm_get_running_vcpu(void); struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); void kvm_arm_halt_guest(struct kvm *kvm); diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index d735e5fc4772..195da38cb9a2 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -1,7 +1,7 @@ menuconfig ARCH_AT91 bool "Atmel SoCs" depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 || ARM_SINGLE_ARMV7M - select ARM_CPU_SUSPEND if PM + select ARM_CPU_SUSPEND if PM && ARCH_MULTI_V7 select COMMON_CLK_AT91 select GPIOLIB select PINCTRL diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 667fddac3856..5036f996e694 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -608,6 +608,9 @@ static void __init at91_pm_init(void (*pm_idle)(void)) void __init at91rm9200_pm_init(void) { + if (!IS_ENABLED(CONFIG_SOC_AT91RM9200)) + return; + at91_dt_ramc(); /* @@ -620,18 +623,27 @@ void __init at91rm9200_pm_init(void) void __init at91sam9_pm_init(void) { + if (!IS_ENABLED(CONFIG_SOC_AT91SAM9)) + return; + at91_dt_ramc(); at91_pm_init(at91sam9_idle); } void __init sama5_pm_init(void) { + if (!IS_ENABLED(CONFIG_SOC_SAMA5)) + return; + at91_dt_ramc(); at91_pm_init(NULL); } void __init sama5d2_pm_init(void) { + if (!IS_ENABLED(CONFIG_SOC_SAMA5D2)) + return; + at91_pm_backup_init(); sama5_pm_init(); } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d68630007b14..e923b58606e2 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -326,12 +326,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -/* We do not have shadow page tables, hence the empty hooks */ -static inline void 
kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, - unsigned long address) -{ -} - struct kvm_vcpu *kvm_arm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); void kvm_arm_halt_guest(struct kvm *kvm); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 06da8ea16bbe..c7b4995868e1 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -161,9 +161,11 @@ void fpsimd_flush_thread(void) { if (!system_supports_fpsimd()) return; + preempt_disable(); memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); fpsimd_flush_task_state(current); set_thread_flag(TIF_FOREIGN_FPSTATE); + preempt_enable(); } /* diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 973df7de7bf8..adb0910b88f5 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -354,7 +354,6 @@ __primary_switched: tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized? b.ne 0f mov x0, x21 // pass FDT address in x0 - mov x1, x23 // pass modulo offset in x1 bl kaslr_early_init // parse FDT for KASLR options cbz x0, 0f // KASLR disabled? just proceed orr x23, x23, x0 // record KASLR offset diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index a9710efb8c01..47080c49cc7e 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -75,7 +75,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, * containing function pointers) to be reinitialized, and zero-initialized * .bss variables will be reset to 0. */ -u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) +u64 __init kaslr_early_init(u64 dt_phys) { void *fdt; u64 seed, offset, mask, module_range; @@ -131,15 +131,17 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) /* * The kernel Image should not extend across a 1GB/32MB/512MB alignment * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this - * happens, increase the KASLR offset by the size of the kernel image - * rounded up by SWAPPER_BLOCK_SIZE. + * happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT). + * + * NOTE: The references to _text and _end below will already take the + * modulo offset (the physical displacement modulo 2 MB) into + * account, given that the physical placement is controlled by + * the loader, and will not change as a result of the virtual + * mapping we choose. */ - if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) != - (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) { - u64 kimg_sz = _end - _text; - offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE)) - & mask; - } + if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) != + (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT)) + offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT); if (IS_ENABLED(CONFIG_KASAN)) /* diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2509e4fe6992..1f22a41565a3 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -435,8 +435,11 @@ retry: * the mmap_sem because it would already be released * in __lock_page_or_retry in mm/filemap.c. 
*/ - if (fatal_signal_pending(current)) + if (fatal_signal_pending(current)) { + if (!user_mode(regs)) + goto no_context; return 0; + } /* * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of diff --git a/arch/c6x/configs/dsk6455_defconfig b/arch/c6x/configs/dsk6455_defconfig index 4663487c67a1..d764ea4cce7f 100644 --- a/arch/c6x/configs/dsk6455_defconfig +++ b/arch/c6x/configs/dsk6455_defconfig @@ -1,5 +1,4 @@ CONFIG_SOC_TMS320C6455=y -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_SPARSE_IRQ=y @@ -25,7 +24,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_BLK_DEV_RAM_SIZE=17000 -CONFIG_MISC_DEVICES=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set diff --git a/arch/c6x/configs/evmc6457_defconfig b/arch/c6x/configs/evmc6457_defconfig index bba40e195ec4..05d0b4a25ab1 100644 --- a/arch/c6x/configs/evmc6457_defconfig +++ b/arch/c6x/configs/evmc6457_defconfig @@ -1,5 +1,4 @@ CONFIG_SOC_TMS320C6457=y -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_SPARSE_IRQ=y @@ -26,7 +25,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_BLK_DEV_RAM_SIZE=17000 -CONFIG_MISC_DEVICES=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set diff --git a/arch/c6x/configs/evmc6472_defconfig b/arch/c6x/configs/evmc6472_defconfig index 8c46155f6d31..8d81fcf86b0e 100644 --- a/arch/c6x/configs/evmc6472_defconfig +++ b/arch/c6x/configs/evmc6472_defconfig @@ -1,5 +1,4 @@ CONFIG_SOC_TMS320C6472=y -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_SPARSE_IRQ=y @@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_BLK_DEV_RAM_SIZE=17000 -CONFIG_MISC_DEVICES=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set diff --git a/arch/c6x/configs/evmc6474_defconfig b/arch/c6x/configs/evmc6474_defconfig index 15533f632313..8156a98f3958 100644 --- a/arch/c6x/configs/evmc6474_defconfig +++ b/arch/c6x/configs/evmc6474_defconfig @@ -1,5 +1,4 @@ CONFIG_SOC_TMS320C6474=y -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_SPARSE_IRQ=y @@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_BLK_DEV_RAM_SIZE=17000 -CONFIG_MISC_DEVICES=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set diff --git a/arch/c6x/configs/evmc6678_defconfig b/arch/c6x/configs/evmc6678_defconfig index 5f126d4905b1..c4f433c25b69 100644 --- a/arch/c6x/configs/evmc6678_defconfig +++ b/arch/c6x/configs/evmc6678_defconfig @@ -1,5 +1,4 @@ CONFIG_SOC_TMS320C6678=y -CONFIG_EXPERIMENTAL=y # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_SPARSE_IRQ=y @@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_BLK_DEV_RAM_SIZE=17000 -CONFIG_MISC_DEVICES=y # CONFIG_INPUT is not set # CONFIG_SERIO is not set # CONFIG_VT is not set diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c index 43afc03e4125..9519fa5f97d0 100644 --- a/arch/c6x/platforms/megamod-pic.c +++ b/arch/c6x/platforms/megamod-pic.c @@ -208,14 +208,14 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np) pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL); if (!pic) { - pr_err("%s: Could not alloc PIC structure.\n", np->full_name); + pr_err("%pOF: Could not alloc PIC structure.\n", np); return NULL; } pic->irqhost = 
irq_domain_add_linear(np, NR_COMBINERS * 32, &megamod_domain_ops, pic); if (!pic->irqhost) { - pr_err("%s: Could not alloc host.\n", np->full_name); + pr_err("%pOF: Could not alloc host.\n", np); goto error_free; } @@ -225,7 +225,7 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np) pic->regs = of_iomap(np, 0); if (!pic->regs) { - pr_err("%s: Could not map registers.\n", np->full_name); + pr_err("%pOF: Could not map registers.\n", np); goto error_free; } @@ -253,8 +253,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np) irq_data = irq_get_irq_data(irq); if (!irq_data) { - pr_err("%s: combiner-%d no irq_data for virq %d!\n", - np->full_name, i, irq); + pr_err("%pOF: combiner-%d no irq_data for virq %d!\n", + np, i, irq); continue; } @@ -265,16 +265,16 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np) * of the core priority interrupts (4 - 15). */ if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) { - pr_err("%s: combiner-%d core irq %ld out of range!\n", - np->full_name, i, hwirq); + pr_err("%pOF: combiner-%d core irq %ld out of range!\n", + np, i, hwirq); continue; } /* record the mapping */ mapping[hwirq - 4] = i; - pr_debug("%s: combiner-%d cascading to hwirq %ld\n", - np->full_name, i, hwirq); + pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n", + np, i, hwirq); cascade_data[i].pic = pic; cascade_data[i].index = i; @@ -290,8 +290,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np) /* Finally, set up the MUX registers */ for (i = 0; i < NR_MUX_OUTPUTS; i++) { if (mapping[i] != IRQ_UNMAPPED) { - pr_debug("%s: setting mux %d to priority %d\n", - np->full_name, mapping[i], i + 4); + pr_debug("%pOF: setting mux %d to priority %d\n", + np, mapping[i], i + 4); set_megamod_mux(pic, mapping[i], i); } } diff --git a/arch/c6x/platforms/plldata.c b/arch/c6x/platforms/plldata.c index 755359eb6286..e8b6cc6a7b5a 100644 --- a/arch/c6x/platforms/plldata.c +++ b/arch/c6x/platforms/plldata.c @@ -436,8 +436,8 @@ void __init c64x_setup_clocks(void) err = of_property_read_u32(node, "clock-frequency", &val); if (err || val == 0) { - pr_err("%s: no clock-frequency found! Using %dMHz\n", - node->full_name, (int)val / 1000000); + pr_err("%pOF: no clock-frequency found! 
Using %dMHz\n", + node, (int)val / 1000000); val = 25000000; } clkin1.rate = val; diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c index 0bd0452ded80..241a9a607193 100644 --- a/arch/c6x/platforms/timer64.c +++ b/arch/c6x/platforms/timer64.c @@ -204,14 +204,14 @@ void __init timer64_init(void) timer = of_iomap(np, 0); if (!timer) { - pr_debug("%s: Cannot map timer registers.\n", np->full_name); + pr_debug("%pOF: Cannot map timer registers.\n", np); goto out; } - pr_debug("%s: Timer registers=%p.\n", np->full_name, timer); + pr_debug("%pOF: Timer registers=%p.\n", np, timer); cd->irq = irq_of_parse_and_map(np, 0); if (cd->irq == NO_IRQ) { - pr_debug("%s: Cannot find interrupt.\n", np->full_name); + pr_debug("%pOF: Cannot find interrupt.\n", np); iounmap(timer); goto out; } @@ -229,7 +229,7 @@ void __init timer64_init(void) dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED); } - pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq); + pr_debug("%pOF: Timer irq=%d.\n", np, cd->irq); clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5); diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 2998479fd4e8..a9af1d2dcd69 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -938,11 +938,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); -static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, - unsigned long address) -{ -} - /* Emulation */ int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 8b3f1238d07f..e372ed871c51 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -67,11 +67,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); -static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, - unsigned long address) -{ -} - #define HPTEG_CACHE_NUM (1 << 15) #define HPTEG_HASH_BITS_PTE 13 #define HPTEG_HASH_BITS_PTE_LONG 12 diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 0c76675394c5..35bec1c5bd5a 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -90,6 +90,24 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, /* Mark this context has been used on the new CPU */ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) { cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); + + /* + * This full barrier orders the store to the cpumask above vs + * a subsequent operation which allows this CPU to begin loading + * translations for next. + * + * When using the radix MMU that operation is the load of the + * MMU context id, which is then moved to SPRN_PID. + * + * For the hash MMU it is either the first load from slb_cache + * in switch_slb(), and/or the store of paca->mm_ctx_id in + * copy_mm_to_paca(). + * + * On the read side the barrier is in pte_xchg(), which orders + * the store to the PTE vs the load of mm_cpumask. 
+ */ + smp_mb(); + new_on_cpu = true; } diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h index 9c0f5db5cf46..67e7e3d990f4 100644 --- a/arch/powerpc/include/asm/pgtable-be-types.h +++ b/arch/powerpc/include/asm/pgtable-be-types.h @@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) unsigned long *p = (unsigned long *)ptep; __be64 prev; + /* See comment in switch_mm_irqs_off() */ prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old), (__force unsigned long)pte_raw(new)); diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h index 8bd3b13fe2fb..369a164b545c 100644 --- a/arch/powerpc/include/asm/pgtable-types.h +++ b/arch/powerpc/include/asm/pgtable-types.h @@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) { unsigned long *p = (unsigned long *)ptep; + /* See comment in switch_mm_irqs_off() */ return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new)); } #endif diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index a160c14304eb..53766e2bc029 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -294,32 +294,26 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce_64 *args) { struct kvmppc_spapr_tce_table *stt = NULL; + struct kvmppc_spapr_tce_table *siter; unsigned long npages, size; int ret = -ENOMEM; int i; + int fd = -1; if (!args->size) return -EINVAL; - /* Check this LIOBN hasn't been previously allocated */ - list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { - if (stt->liobn == args->liobn) - return -EBUSY; - } - size = _ALIGN_UP(args->size, PAGE_SIZE >> 3); npages = kvmppc_tce_pages(size); ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true); - if (ret) { - stt = NULL; - goto fail; - } + if (ret) + return ret; ret = -ENOMEM; stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), GFP_KERNEL); if (!stt) - goto fail; + goto fail_acct; stt->liobn = args->liobn; stt->page_shift = args->page_shift; @@ -334,24 +328,42 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, goto fail; } - kvm_get_kvm(kvm); + ret = fd = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, + stt, O_RDWR | O_CLOEXEC); + if (ret < 0) + goto fail; mutex_lock(&kvm->lock); - list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables); + + /* Check this LIOBN hasn't been previously allocated */ + ret = 0; + list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) { + if (siter->liobn == args->liobn) { + ret = -EBUSY; + break; + } + } + + if (!ret) { + list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables); + kvm_get_kvm(kvm); + } mutex_unlock(&kvm->lock); - return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, - stt, O_RDWR | O_CLOEXEC); + if (!ret) + return fd; -fail: - if (stt) { - for (i = 0; i < npages; i++) - if (stt->pages[i]) - __free_page(stt->pages[i]); + put_unused_fd(fd); - kfree(stt); - } + fail: + for (i = 0; i < npages; i++) + if (stt->pages[i]) + __free_page(stt->pages[i]); + + kfree(stt); + fail_acct: + kvmppc_account_memlimit(kvmppc_stt_pages(npages), false); return ret; } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index c52184a8efdf..9c9c983b864f 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1291,6 +1291,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* Hypervisor doorbell - exit only if 
host IPI flag set */ cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL bne 3f +BEGIN_FTR_SECTION + PPC_MSGSYNC +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) lbz r0, HSTATE_HOST_IPI(r13) cmpwi r0, 0 beq 4f diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c index 4636ca6e7d38..d1ed2c41b5d2 100644 --- a/arch/powerpc/kvm/book3s_xive_template.c +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -16,7 +16,22 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) u8 cppr; u16 ack; - /* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */ + /* + * Ensure any previous store to CPPR is ordered vs. + * the subsequent loads from PIPR or ACK. + */ + eieio(); + + /* + * DD1 bug workaround: If PIPR is less favored than CPPR + * ignore the interrupt or we might incorrectly lose an IPB + * bit. + */ + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { + u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR); + if (pipr >= xc->hw_cppr) + return; + } /* Perform the acknowledge OS to register cycle. */ ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG)); @@ -235,6 +250,11 @@ skip_ipi: /* * If we found an interrupt, adjust what the guest CPPR should * be as if we had just fetched that interrupt from HW. + * + * Note: This can only make xc->cppr smaller as the previous + * loop will only exit with hirq != 0 if prio is lower than + * the current xc->cppr. Thus we don't need to re-check xc->mfrr + * for pending IPIs. */ if (hirq) xc->cppr = prio; @@ -381,6 +401,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) xc->cppr = cppr; /* + * Order the above update of xc->cppr with the subsequent + * read of xc->mfrr inside push_pending_to_hw() + */ + smp_mb(); + + /* * We are masking less, we need to look for pending things * to deliver and set VP pending bits accordingly to trigger * a new interrupt otherwise we might miss MFRR changes for @@ -420,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr) * used to signal MFRR changes is EOId when fetched from * the queue. */ - if (irq == XICS_IPI || irq == 0) + if (irq == XICS_IPI || irq == 0) { + /* + * This barrier orders the setting of xc->cppr vs. + * subsquent test of xc->mfrr done inside + * scan_interrupts and push_pending_to_hw + */ + smp_mb(); goto bail; + } /* Find interrupt source */ sb = kvmppc_xive_find_source(xive, irq, &src); if (!sb) { pr_devel(" source not found !\n"); rc = H_PARAMETER; + /* Same as above */ + smp_mb(); goto bail; } state = &sb->irq_state[src]; kvmppc_xive_select_irq(state, &hw_num, &xd); state->in_eoi = true; - mb(); + + /* + * This barrier orders both setting of in_eoi above vs, + * subsequent test of guest_priority, and the setting + * of xc->cppr vs. subsquent test of xc->mfrr done inside + * scan_interrupts and push_pending_to_hw + */ + smp_mb(); again: if (state->guest_priority == MASKED) { @@ -461,6 +503,14 @@ again: } + /* + * This barrier orders the above guest_priority check + * and spin_lock/unlock with clearing in_eoi below. + * + * It also has to be a full mb() as it must ensure + * the MMIOs done in source_eoi() are completed before + * state->in_eoi is visible. 
+ */ mb(); state->in_eoi = false; bail: @@ -495,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, /* Locklessly write over MFRR */ xc->mfrr = mfrr; + /* + * The load of xc->cppr below and the subsequent MMIO store + * to the IPI must happen after the above mfrr update is + * globally visible so that: + * + * - Synchronize with another CPU doing an H_EOI or a H_CPPR + * updating xc->cppr then reading xc->mfrr. + * + * - The target of the IPI sees the xc->mfrr update + */ + mb(); + /* Shoot the IPI if most favored than target cppr */ if (mfrr < xc->cppr) __x_writeq(0, __x_trig_page(&xc->vp_ipi_data)); diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index b5d960d6db3d..4c7b8591f737 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -614,15 +614,6 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, mmio_invalidate(npu_context, 1, address, true); } -static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address) -{ - struct npu_context *npu_context = mn_to_npu_context(mn); - - mmio_invalidate(npu_context, 1, address, true); -} - static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) @@ -640,7 +631,6 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { .release = pnv_npu2_mn_release, .change_pte = pnv_npu2_mn_change_pte, - .invalidate_page = pnv_npu2_mn_invalidate_page, .invalidate_range = pnv_npu2_mn_invalidate_range, }; diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 4541ac44b35f..24bc41622a98 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -44,6 +44,11 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; break; + case -PAGE_SIZE: + /* forked 5-level task, set new asce with new_mm->pgd */ + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | + _ASCE_USER_BITS | _ASCE_TYPE_REGION1; + break; case 1UL << 53: /* forked 4-level task, set new asce with new mm->pgd */ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c index 926b5244263e..a2e5c24f47a7 100644 --- a/arch/s390/kvm/sthyi.c +++ b/arch/s390/kvm/sthyi.c @@ -394,7 +394,7 @@ static int sthyi(u64 vaddr) "srl %[cc],28\n" : [cc] "=d" (cc) : [code] "d" (code), [addr] "a" (addr) - : "memory", "cc"); + : "3", "memory", "cc"); return cc; } @@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr); trace_kvm_s390_handle_sthyi(vcpu, code, addr); - if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK) + if (reg1 == reg2 || reg1 & 1 || reg2 & 1) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (code & 0xffff) { @@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu) goto out; } + if (addr & ~PAGE_MASK) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + /* * If the page has not yet been faulted in, we want to do that * now and not after all the expensive calculations. 
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 2e10d2b8ad35..5bea139517a2 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -119,7 +119,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, return addr; check_asce_limit: - if (addr + len > current->mm->context.asce_limit) { + if (addr + len > current->mm->context.asce_limit && + addr + len <= TASK_SIZE) { rc = crst_table_upgrade(mm, addr + len); if (rc) return (unsigned long) rc; @@ -183,7 +184,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, } check_asce_limit: - if (addr + len > current->mm->context.asce_limit) { + if (addr + len > current->mm->context.asce_limit && + addr + len <= TASK_SIZE) { rc = crst_table_upgrade(mm, addr + len); if (rc) return (unsigned long) rc; diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 255645f60ca2..554cdb205d17 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu) return 0; } -static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate) +static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask) { if (use_xsave()) { - copy_kernel_to_xregs(&fpstate->xsave, -1); + copy_kernel_to_xregs(&fpstate->xsave, mask); } else { if (use_fxsr()) copy_kernel_to_fxregs(&fpstate->fxsave); @@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate) : : [addr] "m" (fpstate)); } - __copy_kernel_to_fpregs(fpstate); + __copy_kernel_to_fpregs(fpstate, -1); } extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 87ac4fba6d8e..92c9032502d8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -492,6 +492,7 @@ struct kvm_vcpu_arch { unsigned long cr4; unsigned long cr4_guest_owned_bits; unsigned long cr8; + u32 pkru; u32 hflags; u64 efer; u64 apic_base; @@ -1374,8 +1375,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); int kvm_cpu_get_interrupt(struct kvm_vcpu *v); void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); -void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, - unsigned long address); void kvm_define_shared_msr(unsigned index, u32 msr); int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 265c907d7d4c..7a234be7e298 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -140,9 +140,7 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.execute_only_pkey = -1; } #endif - init_new_context_ldt(tsk, mm); - - return 0; + return init_new_context_ldt(tsk, mm); } static inline void destroy_context(struct mm_struct *mm) { diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 59ca2eea522c..19adbb418443 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; cpuid_mask(&entry->ecx, CPUID_7_ECX); /* PKU is not yet implemented for shadow paging. 
*/ - if (!tdp_enabled) + if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) entry->ecx &= ~F(PKU); entry->edx &= kvm_cpuid_7_0_edx_x86_features; entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX); diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 762cdf2595f9..e1e89ee4af75 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu) | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32); } -static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu) -{ - return kvm_x86_ops->get_pkru(vcpu); -} - static inline void enter_guest_mode(struct kvm_vcpu *vcpu) { vcpu->arch.hflags |= HF_GUEST_MASK; diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index d7d248a000dd..4b9a3ae6b725 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -185,7 +185,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, * index of the protection domain, so pte_pkey * 2 is * is the index of the first bit for the domain. */ - pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3; + pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3; /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */ offset = (pfec & ~1) + diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 56ba05312759..af256b786a70 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1777,11 +1777,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) to_svm(vcpu)->vmcb->save.rflags = rflags; } -static u32 svm_get_pkru(struct kvm_vcpu *vcpu) -{ - return 0; -} - static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { switch (reg) { @@ -5413,8 +5408,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .get_rflags = svm_get_rflags, .set_rflags = svm_set_rflags, - .get_pkru = svm_get_pkru, - .tlb_flush = svm_flush_tlb, .run = svm_vcpu_run, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 9b21b1223035..c6ef2940119b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -636,8 +636,6 @@ struct vcpu_vmx { u64 current_tsc_ratio; - bool guest_pkru_valid; - u32 guest_pkru; u32 host_pkru; /* @@ -2383,11 +2381,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) to_vmx(vcpu)->emulation_required = emulation_required(vcpu); } -static u32 vmx_get_pkru(struct kvm_vcpu *vcpu) -{ - return to_vmx(vcpu)->guest_pkru; -} - static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) { u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); @@ -9020,8 +9013,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vmx_set_interrupt_shadow(vcpu, 0); - if (vmx->guest_pkru_valid) - __write_pkru(vmx->guest_pkru); + if (static_cpu_has(X86_FEATURE_PKU) && + kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && + vcpu->arch.pkru != vmx->host_pkru) + __write_pkru(vcpu->arch.pkru); atomic_switch_perf_msrs(vmx); debugctlmsr = get_debugctlmsr(); @@ -9169,13 +9164,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) * back on host, so it is safe to read guest PKRU from current * XSAVE. 
*/ - if (boot_cpu_has(X86_FEATURE_OSPKE)) { - vmx->guest_pkru = __read_pkru(); - if (vmx->guest_pkru != vmx->host_pkru) { - vmx->guest_pkru_valid = true; + if (static_cpu_has(X86_FEATURE_PKU) && + kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { + vcpu->arch.pkru = __read_pkru(); + if (vcpu->arch.pkru != vmx->host_pkru) __write_pkru(vmx->host_pkru); - } else - vmx->guest_pkru_valid = false; } /* @@ -11682,8 +11675,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, - .get_pkru = vmx_get_pkru, - .tlb_flush = vmx_flush_tlb, .run = vmx_vcpu_run, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d734aa8c5b4f..272320eb328c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3245,7 +3245,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) u32 size, offset, ecx, edx; cpuid_count(XSTATE_CPUID, index, &size, &offset, &ecx, &edx); - memcpy(dest + offset, src, size); + if (feature == XFEATURE_MASK_PKRU) + memcpy(dest + offset, &vcpu->arch.pkru, + sizeof(vcpu->arch.pkru)); + else + memcpy(dest + offset, src, size); + } valid -= feature; @@ -3283,7 +3288,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) u32 size, offset, ecx, edx; cpuid_count(XSTATE_CPUID, index, &size, &offset, &ecx, &edx); - memcpy(dest, src + offset, size); + if (feature == XFEATURE_MASK_PKRU) + memcpy(&vcpu->arch.pkru, src + offset, + sizeof(vcpu->arch.pkru)); + else + memcpy(dest, src + offset, size); } valid -= feature; @@ -6725,17 +6734,6 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); -void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, - unsigned long address) -{ - /* - * The physical address of apic access page is stored in the VMCS. - * Update it when it becomes invalid. - */ - if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT)) - kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); -} - /* * Returns 1 to let vcpu_run() continue the guest execution loop without * exiting to the userspace. Otherwise, the value will be returned to the @@ -7633,7 +7631,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) */ vcpu->guest_fpu_loaded = 1; __kernel_fpu_begin(); - __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state); + /* PKRU is separately restored in kvm_x86_ops->run. 
*/ + __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, + ~XFEATURE_MASK_PKRU); trace_kvm_fpu(1); } diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c index ae4cd58c0c7a..02250b2633b8 100644 --- a/arch/x86/um/user-offsets.c +++ b/arch/x86/um/user-offsets.c @@ -50,7 +50,7 @@ void foo(void) DEFINE(HOST_GS, GS); DEFINE(HOST_ORIG_AX, ORIG_EAX); #else -#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET) +#ifdef FP_XSTATE_MAGIC1 DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long)); #else DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long)); diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 9ebc2945f991..4f927a58dff8 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(STATS), QUEUE_FLAG_NAME(POLL_STATS), QUEUE_FLAG_NAME(REGISTERED), + QUEUE_FLAG_NAME(SCSI_PASSTHROUGH), + QUEUE_FLAG_NAME(QUIESCED), }; #undef QUEUE_FLAG_NAME @@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = { CMD_FLAG_NAME(RAHEAD), CMD_FLAG_NAME(BACKGROUND), CMD_FLAG_NAME(NOUNMAP), + CMD_FLAG_NAME(NOWAIT), }; #undef CMD_FLAG_NAME diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a7285bf2831c..80f5481fe9f6 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) } \ } while (0) +static inline unsigned int throtl_bio_data_size(struct bio *bio) +{ + /* assume it's one sector */ + if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) + return 512; + return bio->bi_iter.bi_size; +} + static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) { INIT_LIST_HEAD(&qn->node); @@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, bool rw = bio_data_dir(bio); u64 bytes_allowed, extra_bytes, tmp; unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; + unsigned int bio_size = throtl_bio_data_size(bio); jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; @@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, do_div(tmp, HZ); bytes_allowed = tmp; - if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { + if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { if (wait) *wait = 0; return true; } /* Calc approx time to dispatch */ - extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; + extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw)); if (!jiffy_wait) @@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) { bool rw = bio_data_dir(bio); + unsigned int bio_size = throtl_bio_data_size(bio); /* Charge the bio to the group */ - tg->bytes_disp[rw] += bio->bi_iter.bi_size; + tg->bytes_disp[rw] += bio_size; tg->io_disp[rw]++; - tg->last_bytes_disp[rw] += bio->bi_iter.bi_size; + tg->last_bytes_disp[rw] += bio_size; tg->last_io_disp[rw]++; /* diff --git a/block/bsg-lib.c b/block/bsg-lib.c index c4513b23f57a..dd56d7460cb9 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -29,26 +29,25 @@ #include <scsi/scsi_cmnd.h> /** - * bsg_destroy_job - routine to teardown/delete a bsg job + * bsg_teardown_job - routine to teardown a bsg job * @job: bsg_job that is to be torn down */ -static void bsg_destroy_job(struct kref *kref) +static void bsg_teardown_job(struct 
kref *kref) { struct bsg_job *job = container_of(kref, struct bsg_job, kref); struct request *rq = job->req; - blk_end_request_all(rq, BLK_STS_OK); - put_device(job->dev); /* release reference for the request */ kfree(job->request_payload.sg_list); kfree(job->reply_payload.sg_list); - kfree(job); + + blk_end_request_all(rq, BLK_STS_OK); } void bsg_job_put(struct bsg_job *job) { - kref_put(&job->kref, bsg_destroy_job); + kref_put(&job->kref, bsg_teardown_job); } EXPORT_SYMBOL_GPL(bsg_job_put); @@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done); */ static void bsg_softirq_done(struct request *rq) { - struct bsg_job *job = rq->special; + struct bsg_job *job = blk_mq_rq_to_pdu(rq); bsg_job_put(job); } @@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) } /** - * bsg_create_job - create the bsg_job structure for the bsg request + * bsg_prepare_job - create the bsg_job structure for the bsg request * @dev: device that is being sent the bsg request * @req: BSG request that needs a job structure */ -static int bsg_create_job(struct device *dev, struct request *req) +static int bsg_prepare_job(struct device *dev, struct request *req) { struct request *rsp = req->next_rq; - struct request_queue *q = req->q; struct scsi_request *rq = scsi_req(req); - struct bsg_job *job; + struct bsg_job *job = blk_mq_rq_to_pdu(req); int ret; - BUG_ON(req->special); - - job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL); - if (!job) - return -ENOMEM; - - req->special = job; - job->req = req; - if (q->bsg_job_size) - job->dd_data = (void *)&job[1]; job->request = rq->cmd; job->request_len = rq->cmd_len; - job->reply = rq->sense; - job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer - * allocated */ + if (req->bio) { ret = bsg_map_buffer(&job->request_payload, req); if (ret) @@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q) { struct device *dev = q->queuedata; struct request *req; - struct bsg_job *job; int ret; if (!get_device(dev)) @@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q) break; spin_unlock_irq(q->queue_lock); - ret = bsg_create_job(dev, req); + ret = bsg_prepare_job(dev, req); if (ret) { scsi_req(req)->result = ret; blk_end_request_all(req, BLK_STS_OK); @@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q) continue; } - job = req->special; - ret = q->bsg_job_fn(job); + ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req)); spin_lock_irq(q->queue_lock); if (ret) break; @@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q) spin_lock_irq(q->queue_lock); } +static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp) +{ + struct bsg_job *job = blk_mq_rq_to_pdu(req); + struct scsi_request *sreq = &job->sreq; + + memset(job, 0, sizeof(*job)); + + scsi_req_init(sreq); + sreq->sense_len = SCSI_SENSE_BUFFERSIZE; + sreq->sense = kzalloc(sreq->sense_len, gfp); + if (!sreq->sense) + return -ENOMEM; + + job->req = req; + job->reply = sreq->sense; + job->reply_len = sreq->sense_len; + job->dd_data = job + 1; + + return 0; +} + +static void bsg_exit_rq(struct request_queue *q, struct request *req) +{ + struct bsg_job *job = blk_mq_rq_to_pdu(req); + struct scsi_request *sreq = &job->sreq; + + kfree(sreq->sense); +} + /** * bsg_setup_queue - Create and add the bsg hooks so we can receive requests * @dev: device to attach bsg device to @@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name, q = blk_alloc_queue(GFP_KERNEL); if (!q) 
return ERR_PTR(-ENOMEM); - q->cmd_size = sizeof(struct scsi_request); + q->cmd_size = sizeof(struct bsg_job) + dd_job_size; + q->init_rq_fn = bsg_init_rq; + q->exit_rq_fn = bsg_exit_rq; q->request_fn = bsg_request_fn; ret = blk_init_allocated_queue(q); @@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name, goto out_cleanup_queue; q->queuedata = dev; - q->bsg_job_size = dd_job_size; q->bsg_job_fn = job_fn; queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 43839b00fe6c..903605dbc1a5 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -87,8 +87,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) } sgl = sreq->tsg; n = sg_nents(sgl); - for_each_sg(sgl, sg, n, i) - put_page(sg_page(sg)); + for_each_sg(sgl, sg, n, i) { + struct page *page = sg_page(sg); + + /* some SGs may not have a page mapped */ + if (page && page_ref_count(page)) + put_page(page); + } kfree(sreq->tsg); } diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c index 8b3c04d625c3..4a45fa4890c0 100644 --- a/crypto/chacha20_generic.c +++ b/crypto/chacha20_generic.c @@ -91,9 +91,14 @@ int crypto_chacha20_crypt(struct skcipher_request *req) crypto_chacha20_init(state, ctx, walk.iv); while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); + chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); + nbytes); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 6ceb0e2758bb..d54971d2d1c8 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -32675,6 +32675,10 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = { "\x5b\x86\x2f\x37\x30\xe3\x7c\xfd" "\xc4\xfd\x80\x6c\x22\xf2\x21", .rlen = 375, + .also_non_np = 1, + .np = 3, + .tap = { 375 - 20, 4, 16 }, + }, { /* RFC7539 A.2. Test Vector #3 */ .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" @@ -33049,6 +33053,9 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = { "\xa1\xed\xad\xd5\x76\xfa\x24\x8f" "\x98", .rlen = 1281, + .also_non_np = 1, + .np = 3, + .tap = { 1200, 1, 80 }, }, }; diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index 538c61677c10..783f4c838aee 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c @@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle, free_buffer_on_error = TRUE; } - status = acpi_get_handle(handle, pathname, &target_handle); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); + if (pathname) { + status = acpi_get_handle(handle, pathname, &target_handle); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } else { + target_handle = handle; } full_pathname = acpi_ns_get_external_pathname(target_handle); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 62068a5e814f..ae3d6d152633 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1741,7 +1741,7 @@ error: * functioning ECDT EC first in order to handle the events. 
* https://bugzilla.kernel.org/show_bug.cgi?id=115021 */ -int __init acpi_ec_ecdt_start(void) +static int __init acpi_ec_ecdt_start(void) { acpi_handle handle; @@ -2003,20 +2003,17 @@ static inline void acpi_ec_query_exit(void) int __init acpi_ec_init(void) { int result; + int ecdt_fail, dsdt_fail; /* register workqueue for _Qxx evaluations */ result = acpi_ec_query_init(); if (result) - goto err_exit; - /* Now register the driver for the EC */ - result = acpi_bus_register_driver(&acpi_ec_driver); - if (result) - goto err_exit; + return result; -err_exit: - if (result) - acpi_ec_query_exit(); - return result; + /* Drivers must be started after acpi_ec_query_init() */ + ecdt_fail = acpi_ec_ecdt_start(); + dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); + return ecdt_fail && dsdt_fail ? -ENODEV : 0; } /* EC driver currently not unloadable */ diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 58dd7ab3c653..3f5af4d7a739 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data); int acpi_ec_init(void); int acpi_ec_ecdt_probe(void); int acpi_ec_dsdt_probe(void); -int acpi_ec_ecdt_start(void); void acpi_ec_block_transactions(void); void acpi_ec_unblock_transactions(void); int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 917c789f953d..476a52c60cf3 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1047,7 +1047,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value( fwnode_for_each_child_node(fwnode, child) { u32 nr; - if (!fwnode_property_read_u32(fwnode, prop_name, &nr)) + if (fwnode_property_read_u32(child, prop_name, &nr)) continue; if (val == nr) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 33897298f03e..70fd5502c284 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2084,7 +2084,6 @@ int __init acpi_scan_init(void) acpi_gpe_apply_masked_gpes(); acpi_update_all_gpes(); - acpi_ec_ecdt_start(); acpi_scan_initialized = true; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index f7665c31feca..831cdd7d197d 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) const char *failure_string; struct binder_buffer *buffer; - if (proc->tsk != current) + if (proc->tsk != current->group_leader) return -EINVAL; if ((vma->vm_end - vma->vm_start) > SZ_4M) diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c index 1a50cd3b4233..9b34dff64536 100644 --- a/drivers/ata/ahci_da850.c +++ b/drivers/ata/ahci_da850.c @@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev) return rc; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) + if (!res) { + rc = -ENODEV; goto disable_resources; + } pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res)); - if (!pwrdn_reg) + if (!pwrdn_reg) { + rc = -ENOMEM; goto disable_resources; + } da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index fa7dd4394c02..1945a8ea2099 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev) u64 trusted_cap; unsigned int err; + if (!ata_id_has_trusted(dev->id)) + return; + if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) { ata_dev_warn(dev, "Security Log not 
supported\n"); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ef8334949b42..f321b96405f5 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) } static int -figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit, - loff_t logical_blocksize) +figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) { loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); sector_t x = (sector_t)size; @@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit, lo->lo_offset = offset; if (lo->lo_sizelimit != sizelimit) lo->lo_sizelimit = sizelimit; - if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) { - lo->lo_logical_blocksize = logical_blocksize; - blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize); - blk_queue_logical_block_size(lo->lo_queue, - lo->lo_logical_blocksize); - } set_capacity(lo->lo_disk, x); bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); /* let user-space know about the new size */ @@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo) struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; - int lo_bits = 9; /* * We use punch hole to reclaim the free space used by the @@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo) q->limits.discard_granularity = inode->i_sb->s_blocksize; q->limits.discard_alignment = 0; - if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) - lo_bits = blksize_bits(lo->lo_logical_blocksize); - blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits); - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits); + blk_queue_max_discard_sectors(q, UINT_MAX >> 9); + blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); } @@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, lo->use_dio = false; lo->lo_blocksize = lo_blocksize; - lo->lo_logical_blocksize = 512; lo->lo_device = bdev; lo->lo_flags = lo_flags; lo->lo_backing_file = file; @@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) int err; struct loop_func_table *xfer; kuid_t uid = current_uid(); - int lo_flags = lo->lo_flags; if (lo->lo_encrypt_key_size && !uid_eq(lo->lo_key_owner, uid) && @@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (err) goto exit; - if (info->lo_flags & LO_FLAGS_BLOCKSIZE) { - if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE)) - lo->lo_logical_blocksize = 512; - lo->lo_flags |= LO_FLAGS_BLOCKSIZE; - if (LO_INFO_BLOCKSIZE(info) != 512 && - LO_INFO_BLOCKSIZE(info) != 1024 && - LO_INFO_BLOCKSIZE(info) != 2048 && - LO_INFO_BLOCKSIZE(info) != 4096) - return -EINVAL; - if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize) - return -EINVAL; - } - if (lo->lo_offset != info->lo_offset || - lo->lo_sizelimit != info->lo_sizelimit || - lo->lo_flags != lo_flags || - ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) && - lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) { - if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit, - LO_INFO_BLOCKSIZE(info))) { + lo->lo_sizelimit != info->lo_sizelimit) { + if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { err = -EFBIG; goto exit; } @@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo) if (unlikely(lo->lo_state != Lo_bound)) return -ENXIO; - return figure_loop_size(lo, lo->lo_offset, 
lo->lo_sizelimit, - lo->lo_logical_blocksize); + return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); } static int loop_set_dio(struct loop_device *lo, unsigned long arg) diff --git a/drivers/block/loop.h b/drivers/block/loop.h index 2c096b9a17b8..fecd3f97ef8c 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h @@ -49,7 +49,6 @@ struct loop_device { struct file * lo_backing_file; struct block_device *lo_device; unsigned lo_blocksize; - unsigned lo_logical_blocksize; void *key_data; gfp_t old_gfp_mask; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 1498b899a593..d3d5523862c2 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work) struct request_queue *q = vblk->disk->queue; char cap_str_2[10], cap_str_10[10]; char *envp[] = { "RESIZE=1", NULL }; + unsigned long long nblocks; u64 capacity; /* Host must always specify the capacity. */ @@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work) capacity = (sector_t)-1; } - string_get_size(capacity, queue_logical_block_size(q), + nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9); + + string_get_size(nblocks, queue_logical_block_size(q), STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); - string_get_size(capacity, queue_logical_block_size(q), + string_get_size(nblocks, queue_logical_block_size(q), STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); dev_notice(&vdev->dev, - "new size: %llu %d-byte logical blocks (%s/%s)\n", - (unsigned long long)capacity, - queue_logical_block_size(q), - cap_str_10, cap_str_2); + "new size: %llu %d-byte logical blocks (%s/%s)\n", + nblocks, + queue_logical_block_size(q), + cap_str_10, + cap_str_2); set_capacity(vblk->disk, capacity); revalidate_disk(vblk->disk); diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 792da683e70d..2adb8599be93 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) { struct pending_req *req, *n; unsigned int j, r; + bool busy = false; for (r = 0; r < blkif->nr_rings; r++) { struct xen_blkif_ring *ring = &blkif->rings[r]; @@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) * don't have any discard_io or other_io requests. So, checking * for inflight IO is enough. 
*/ - if (atomic_read(&ring->inflight) > 0) - return -EBUSY; + if (atomic_read(&ring->inflight) > 0) { + busy = true; + continue; + } if (ring->irq) { unbind_from_irqhandler(ring->irq, ring); @@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); ring->active = false; } + if (busy) + return -EBUSY; + blkif->nr_ring_pages = 0; /* * blkif->rings was allocated in connect_ring, so we should free it in diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index b10cbaa82ff5..b26256f23d67 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev) tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i); tdc->irq = of_irq_get(pdev->dev.of_node, i); - if (tdc->irq < 0) { - ret = tdc->irq; + if (tdc->irq <= 0) { + ret = tdc->irq ?: -ENXIO; goto irq_dispose; } diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index e338c3743562..45c65f805fd6 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc) edge_cause = mvebu_gpio_read_edge_cause(mvchip); edge_mask = mvebu_gpio_read_edge_mask(mvchip); - cause = (data_in ^ level_mask) | (edge_cause & edge_mask); + cause = (data_in & level_mask) | (edge_cause & edge_mask); for (i = 0; i < mvchip->chip.ngpio; i++) { int irq; diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 16fe9742597b..fc80add5fedb 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -2,6 +2,7 @@ #include <linux/mutex.h> #include <linux/device.h> #include <linux/sysfs.h> +#include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/gpio/driver.h> #include <linux/interrupt.h> @@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = { }; ATTRIBUTE_GROUPS(gpiochip); +static struct gpio_desc *gpio_to_valid_desc(int gpio) +{ + return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL; +} + /* * /sys/class/gpio/export ... write-only * integer N ... number of GPIO to export (full access) @@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class, if (status < 0) goto done; - desc = gpio_to_desc(gpio); + desc = gpio_to_valid_desc(gpio); /* reject invalid GPIOs */ if (!desc) { pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); @@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class, if (status < 0) goto done; - desc = gpio_to_desc(gpio); + desc = gpio_to_valid_desc(gpio); /* reject bogus commands (gpio_unexport ignores them) */ if (!desc) { pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 6558a3ed57a7..e1cde6b80027 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -147,36 +147,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, } /** - * amdgpu_mn_invalidate_page - callback to notify about mm change - * - * @mn: our notifier - * @mn: the mm this callback is about - * @address: address of invalidate page - * - * Invalidation of a single page. Blocks for all BOs mapping it - * and unmap them by move them into system domain again. 
- */ -static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address) -{ - struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); - struct interval_tree_node *it; - - mutex_lock(&rmn->lock); - - it = interval_tree_iter_first(&rmn->objects, address, address); - if (it) { - struct amdgpu_mn_node *node; - - node = container_of(it, struct amdgpu_mn_node, it); - amdgpu_mn_invalidate_node(node, address, address); - } - - mutex_unlock(&rmn->lock); -} - -/** * amdgpu_mn_invalidate_range_start - callback to notify about mm change * * @mn: our notifier @@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, static const struct mmu_notifier_ops amdgpu_mn_ops = { .release = amdgpu_mn_release, - .invalidate_page = amdgpu_mn_invalidate_page, .invalidate_range_start = amdgpu_mn_invalidate_range_start, }; diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 2d51a2269fc6..5131bfb94f06 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -597,9 +597,9 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap) static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx, struct sii8620_mt_msg *msg) { - u8 reg = msg->reg[0] & 0x7f; + u8 reg = msg->reg[1] & 0x7f; - if (msg->reg[0] & 0x80) + if (msg->reg[1] & 0x80) ctx->xdevcap[reg] = msg->ret; else ctx->devcap[reg] = msg->ret; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index c0f336d23f9c..aed25c4183bb 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1655,6 +1655,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) if (config->funcs->atomic_check) ret = config->funcs->atomic_check(state->dev, state); + if (ret) + return ret; + if (!state->allow_modeset) { for_each_new_crtc_in_state(state, crtc, crtc_state, i) { if (drm_atomic_crtc_needs_modeset(crtc_state)) { @@ -1665,7 +1668,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) } } - return ret; + return 0; } EXPORT_SYMBOL(drm_atomic_check_only); @@ -2167,10 +2170,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, struct drm_atomic_state *state; struct drm_modeset_acquire_ctx ctx; struct drm_plane *plane; - struct drm_out_fence_state *fence_state = NULL; + struct drm_out_fence_state *fence_state; unsigned plane_mask; int ret = 0; - unsigned int i, j, num_fences = 0; + unsigned int i, j, num_fences; /* disallow for drivers not supporting atomic: */ if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) @@ -2211,6 +2214,8 @@ retry: plane_mask = 0; copied_objs = 0; copied_props = 0; + fence_state = NULL; + num_fences = 0; for (i = 0; i < arg->count_objs; i++) { uint32_t obj_id, count_props; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 8dc11064253d..cdaac37907b1 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_gem_object *obj = ptr; struct drm_device *dev = obj->dev; + if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, file_priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_gem_remove_prime_handles(obj, file_priv); drm_vma_node_revoke(&obj->vma_node, file_priv); - if (dev->driver->gem_close_object) - dev->driver->gem_close_object(obj, file_priv); - drm_gem_object_handle_put_unlocked(obj); return 0; diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 
5dc8c4350602..e40c12fabbde 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data, crtc = drm_crtc_find(dev, plane_req->crtc_id); if (!crtc) { + drm_framebuffer_put(fb); DRM_DEBUG_KMS("Unknown crtc ID %d\n", plane_req->crtc_id); return -ENOENT; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 713848c36349..e556a46cd4c2 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) unmap_src: i915_gem_object_unpin_map(obj); put_obj: - i915_gem_object_put(wa_ctx->indirect_ctx.obj); + i915_gem_object_put(obj); return ret; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 639d45c1dd2e..7ea7fd1e8856 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; uint8_t aux_channel, ddc_pin; /* Each DDI port can have more than one value on the "DVO Port" field, - * so look for all the possible values for each port and abort if more - * than one is found. */ + * so look for all the possible values for each port. + */ int dvo_ports[][3] = { {DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, {DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, @@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, }; - /* Find the child device to use, abort if more than one found. */ + /* + * Find the first child device to reference the port, report if more + * than one found. + */ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { it = dev_priv->vbt.child_dev + i; @@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, if (it->common.dvo_port == dvo_ports[port][j]) { if (child) { - DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n", + DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", port_name(port)); - return; + } else { + child = it; } - child = it; } } } diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c index 6e09ceb71500..150a156f3b1e 100644 --- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c @@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector) struct intel_encoder *encoder = connector->encoder; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct mipi_dsi_device *dsi_device; - u8 data; + u8 data = 0; enum port port; /* FIXME: Need to take care of 16 bit brightness level */ diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index 7158c7ce9c09..91c07b0c8db9 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv, if (!gpio_desc) { gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, - "panel", gpio_index, + NULL, gpio_index, value ? 
GPIOD_OUT_LOW : GPIOD_OUT_HIGH); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7404cf2aac28..2afa4daa88e8 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return ret; } +static u8 gtiir[] = { + [RCS] = 0, + [BCS] = 0, + [VCS] = 1, + [VCS2] = 1, + [VECS] = 3, +}; + static int gen8_init_common_ring(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); - /* After a GPU reset, we may have requests to replay */ + GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir)); + + /* + * Clear any pending interrupt state. + * + * We do it twice out of paranoia that some of the IIR are double + * buffered, and if we only reset it once there may still be + * an interrupt pending. + */ + I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), + GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); + I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), + GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + /* After a GPU reset, we may have requests to replay */ submit = false; for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { if (!port_isset(&port[n])) diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5abef482eacf..beb9baaf2f2e 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - if (!IS_GEN9(dev_priv)) { - DRM_ERROR("LSPCON is supported on GEN9 only\n"); + if (!HAS_LSPCON(dev_priv)) { + DRM_ERROR("LSPCON is not supported on this platform\n"); return false; } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 6276bb834b4f..d3845989a29d 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -545,15 +545,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, return; } + ics = ipu_drm_fourcc_to_colorspace(fb->format->format); switch (ipu_plane->dp_flow) { case IPU_DP_FLOW_SYNC_BG: - ipu_dp_setup_channel(ipu_plane->dp, - IPUV3_COLORSPACE_RGB, - IPUV3_COLORSPACE_RGB); + ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB); ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); break; case IPU_DP_FLOW_SYNC_FG: - ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format); ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_UNKNOWN); /* Enable local alpha on partial plane */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index c6b1b7f3a2a3..c16bc0a7115b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -275,11 +275,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm) static int rockchip_drm_sys_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct rockchip_drm_private *priv = drm->dev_private; + struct rockchip_drm_private *priv; + + if (!drm) + return 0; drm_kms_helper_poll_disable(drm); rockchip_drm_fb_suspend(drm); + priv = drm->dev_private; priv->state = drm_atomic_helper_suspend(drm); if (IS_ERR(priv->state)) { 
rockchip_drm_fb_resume(drm); @@ -293,8 +297,12 @@ static int rockchip_drm_sys_suspend(struct device *dev) static int rockchip_drm_sys_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct rockchip_drm_private *priv = drm->dev_private; + struct rockchip_drm_private *priv; + + if (!drm) + return 0; + priv = drm->dev_private; drm_atomic_helper_resume(drm, priv->state); rockchip_drm_fb_resume(drm); drm_kms_helper_poll_enable(drm); diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index abc7d8fe06b4..a45a627283a1 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -25,12 +25,20 @@ #include "sun4i_framebuffer.h" #include "sun4i_tcon.h" +static void sun4i_drv_lastclose(struct drm_device *dev) +{ + struct sun4i_drv *drv = dev->dev_private; + + drm_fbdev_cma_restore_mode(drv->fbdev); +} + DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); static struct drm_driver sun4i_drv_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, /* Generic Operations */ + .lastclose = sun4i_drv_lastclose, .fops = &sun4i_drv_fops, .name = "sun4i-drm", .desc = "Allwinner sun4i Display Engine", diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 61e06f0e8cd3..625ba24f143f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1567,10 +1567,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev, } +/** + * vmw_kms_atomic_commit - Perform an atomic state commit + * + * @dev: DRM device + * @state: the driver state object + * @nonblock: Whether nonblocking behaviour is requested + * + * This is a simple wrapper around drm_atomic_helper_commit() for + * us to clear the nonblocking value. + * + * Nonblocking commits currently cause synchronization issues + * for vmwgfx. + * + * RETURNS + * Zero for success or negative error code on failure. + */ +int vmw_kms_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock) +{ + return drm_atomic_helper_commit(dev, state, false); +} + + static const struct drm_mode_config_funcs vmw_kms_funcs = { .fb_create = vmw_kms_fb_create, .atomic_check = vmw_kms_atomic_check_modeset, - .atomic_commit = drm_atomic_helper_commit, + .atomic_commit = vmw_kms_atomic_commit, }; static int vmw_kms_generic_present(struct vmw_private *dev_priv, diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig index 08766c6e7856..87a20b3dcf7a 100644 --- a/drivers/gpu/ipu-v3/Kconfig +++ b/drivers/gpu/ipu-v3/Kconfig @@ -1,6 +1,7 @@ config IMX_IPUV3_CORE tristate "IPUv3 core support" depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM + depends on DRM || !DRM # if DRM=m, this can't be 'y' select GENERIC_IRQ_CHIP help Choose this if you have a i.MX5/6 system and want to use the Image diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index f19348328a71..6fdf9231c23c 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus) } /* We are in an invalid state; reset bus to a known state. 
*/ - if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) { + if (!bus->msgs) { dev_err(bus->dev, "bus in unknown state"); bus->cmd_err = -EIO; - aspeed_i2c_do_stop(bus); + if (bus->master_state != ASPEED_I2C_MASTER_STOP) + aspeed_i2c_do_stop(bus); goto out_no_complete; } msg = &bus->msgs[bus->msgs_index]; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 143a8fd582b4..57248bccadbc 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev) dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | - DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED | - DW_IC_CON_SPEED_FAST; + DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED; dev->mode = DW_IC_SLAVE; @@ -430,7 +429,7 @@ static void dw_i2c_plat_complete(struct device *dev) #endif #ifdef CONFIG_PM -static int dw_i2c_plat_suspend(struct device *dev) +static int dw_i2c_plat_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); @@ -452,11 +451,21 @@ static int dw_i2c_plat_resume(struct device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int dw_i2c_plat_suspend(struct device *dev) +{ + pm_runtime_resume(dev); + return dw_i2c_plat_runtime_suspend(dev); +} +#endif + static const struct dev_pm_ops dw_i2c_dev_pm_ops = { .prepare = dw_i2c_plat_prepare, .complete = dw_i2c_plat_complete, SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) - SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) + SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, + dw_i2c_plat_resume, + NULL) }; #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 0548c7ea578c..78d8fb73927d 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave) return -EBUSY; if (slave->flags & I2C_CLIENT_TEN) return -EAFNOSUPPORT; + pm_runtime_get_sync(dev->dev); + /* * Set slave address in the IC_SAR register, * the address to which the DW_apb_i2c responds. 
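/*
 * Illustrative sketch, not part of the patch above: the designware
 * slave hunks follow the usual balanced runtime-PM pattern -- take a
 * usage reference for as long as the hardware must stay responsive
 * (here, while an I2C slave backend is registered) and drop it on
 * teardown. 'struct foo_dev', foo_register() and foo_unregister() are
 * hypothetical names used only to restate that pairing.
 */
#include <linux/pm_runtime.h>

struct foo_dev {
	struct device *dev;
};

static int foo_register(struct foo_dev *fd)
{
	/* Resume the device now and hold a usage reference. */
	int ret = pm_runtime_get_sync(fd->dev);

	if (ret < 0) {
		/* Balance the usage count taken by get_sync() on failure. */
		pm_runtime_put_noidle(fd->dev);
		return ret;
	}
	return 0;
}

static void foo_unregister(struct foo_dev *fd)
{
	/* Drop the reference from foo_register(); autosuspend may follow. */
	pm_runtime_put(fd->dev);
}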
@@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) dev->disable_int(dev); dev->disable(dev); dev->slave = NULL; + pm_runtime_put(dev->dev); return 0; } @@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) slave_activity = ((dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_SLAVE_ACTIVITY) >> 6); - if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY)) + if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave) return 0; dev_dbg(dev->dev, @@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev) ret = i2c_add_numbered_adapter(adap); if (ret) dev_err(dev->dev, "failure adding adapter: %d\n", ret); - pm_runtime_put_noidle(dev->dev); return ret; } diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c index b4685bb9b5d7..adca51a99487 100644 --- a/drivers/i2c/busses/i2c-simtec.c +++ b/drivers/i2c/busses/i2c-simtec.c @@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev) iounmap(pd->reg); err_res: - release_resource(pd->ioarea); - kfree(pd->ioarea); + release_mem_region(pd->ioarea->start, size); err: kfree(pd); @@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev) i2c_del_adapter(&pd->adap); iounmap(pd->reg); - release_resource(pd->ioarea); - kfree(pd->ioarea); + release_mem_region(pd->ioarea->start, resource_size(pd->ioarea)); kfree(pd); return 0; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 12822a4b8f8f..56e46581b84b 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -353,8 +353,8 @@ static int i2c_device_probe(struct device *dev) } /* - * An I2C ID table is not mandatory, if and only if, a suitable Device - * Tree match table entry is supplied for the probing device. + * An I2C ID table is not mandatory, if and only if, a suitable OF + * or ACPI ID table is supplied for the probing device. 
*/ if (!driver->id_table && !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index 232c0b80d658..c3f86138cb55 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -644,7 +644,7 @@ static int ina2xx_capture_thread(void *data) { struct iio_dev *indio_dev = data; struct ina2xx_chip_info *chip = iio_priv(indio_dev); - unsigned int sampling_us = SAMPLING_PERIOD(chip); + int sampling_us = SAMPLING_PERIOD(chip); int buffer_us; /* diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index e09233b03c05..609676384f5e 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -64,7 +64,7 @@ #define STM32H7_CKMODE_MASK GENMASK(17, 16) /* STM32 H7 maximum analog clock rate (from datasheet) */ -#define STM32H7_ADC_MAX_CLK_RATE 72000000 +#define STM32H7_ADC_MAX_CLK_RATE 36000000 /** * stm32_adc_common_regs - stm32 common registers, compatible dependent data @@ -148,14 +148,14 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev, return -EINVAL; } - priv->common.rate = rate; + priv->common.rate = rate / stm32f4_pclk_div[i]; val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR); val &= ~STM32F4_ADC_ADCPRE_MASK; val |= i << STM32F4_ADC_ADCPRE_SHIFT; writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR); dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n", - rate / (stm32f4_pclk_div[i] * 1000)); + priv->common.rate / 1000); return 0; } @@ -250,7 +250,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev, out: /* rate used later by each ADC instance to control BOOST mode */ - priv->common.rate = rate; + priv->common.rate = rate / div; /* Set common clock mode and prescaler */ val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR); @@ -260,7 +260,7 @@ out: writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR); dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n", - ckmode ? "bus" : "adc", div, rate / (div * 1000)); + ckmode ? 
"bus" : "adc", div, priv->common.rate / 1000); return 0; } diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index 16ade0a0327b..0e4b379ada45 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -111,8 +111,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) s32 poll_value = 0; if (state) { - if (!atomic_read(&st->user_requested_state)) - return 0; if (sensor_hub_device_open(st->hsdev)) return -EIO; @@ -161,6 +159,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) &report_val); } + pr_debug("HID_SENSOR %s set power_state %d report_state %d\n", + st->pdev->name, state_val, report_val); + sensor_hub_get_feature(st->hsdev, st->power_state.report_id, st->power_state.index, sizeof(state_val), &state_val); @@ -182,6 +183,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) ret = pm_runtime_get_sync(&st->pdev->dev); else { pm_runtime_mark_last_busy(&st->pdev->dev); + pm_runtime_use_autosuspend(&st->pdev->dev); ret = pm_runtime_put_autosuspend(&st->pdev->dev); } if (ret < 0) { @@ -285,8 +287,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, /* Default to 3 seconds, but can be changed from sysfs */ pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, 3000); - pm_runtime_use_autosuspend(&attrb->pdev->dev); - return ret; error_unreg_trigger: iio_trigger_unregister(trig); diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index 8cf84d3488b2..12898424d838 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .gyro_max_val = IIO_RAD_TO_DEGREE(22500), .gyro_max_scale = 450, .accel_max_val = IIO_M_S_2_TO_G(12500), - .accel_max_scale = 5, + .accel_max_scale = 10, }, [ADIS16485] = { .channels = adis16485_channels, diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 8e1b0861fbe4..c38563699984 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { .drdy_irq = { .addr = 0x62, .mask_int1 = 0x01, - .addr_ihl = 0x63, - .mask_ihl = 0x04, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .addr_stat_drdy = 0x67, }, .multi_read_bit = false, .bootime = 2, diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index d82b788374b6..0d2ea3ee371b 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -282,6 +282,11 @@ static int bmp280_read_temp(struct bmp280_data *data, } adc_temp = be32_to_cpu(tmp) >> 12; + if (adc_temp == BMP280_TEMP_SKIPPED) { + /* reading was skipped */ + dev_err(data->dev, "reading temperature skipped\n"); + return -EIO; + } comp_temp = bmp280_compensate_temp(data, adc_temp); /* @@ -317,6 +322,11 @@ static int bmp280_read_press(struct bmp280_data *data, } adc_press = be32_to_cpu(tmp) >> 12; + if (adc_press == BMP280_PRESS_SKIPPED) { + /* reading was skipped */ + dev_err(data->dev, "reading pressure skipped\n"); + return -EIO; + } comp_press = bmp280_compensate_press(data, adc_press); *val = comp_press; @@ -345,6 +355,11 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2) } adc_humidity = be16_to_cpu(tmp); + if (adc_humidity == BMP280_HUMIDITY_SKIPPED) { + 
/* reading was skipped */ + dev_err(data->dev, "reading humidity skipped\n"); + return -EIO; + } comp_humidity = bmp280_compensate_humidity(data, adc_humidity); *val = comp_humidity; @@ -597,14 +612,20 @@ static const struct bmp280_chip_info bmp280_chip_info = { static int bme280_chip_config(struct bmp280_data *data) { - int ret = bmp280_chip_config(data); + int ret; u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1); + /* + * Oversampling of humidity must be set before oversampling of + * temperature/pressure is set to become effective. + */ + ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY, + BMP280_OSRS_HUMIDITY_MASK, osrs); + if (ret < 0) return ret; - return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY, - BMP280_OSRS_HUMIDITY_MASK, osrs); + return bmp280_chip_config(data); } static const struct bmp280_chip_info bme280_chip_info = { diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h index 2c770e13be0e..61347438b779 100644 --- a/drivers/iio/pressure/bmp280.h +++ b/drivers/iio/pressure/bmp280.h @@ -96,6 +96,11 @@ #define BME280_CHIP_ID 0x60 #define BMP280_SOFT_RESET_VAL 0xB6 +/* BMP280 register skipped special values */ +#define BMP280_TEMP_SKIPPED 0x80000 +#define BMP280_PRESS_SKIPPED 0x80000 +#define BMP280_HUMIDITY_SKIPPED 0x8000 + /* Regmap configurations */ extern const struct regmap_config bmp180_regmap_config; extern const struct regmap_config bmp280_regmap_config; diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c index d22bc56dd9fc..25ad6abfee22 100644 --- a/drivers/iio/trigger/stm32-timer-trigger.c +++ b/drivers/iio/trigger/stm32-timer-trigger.c @@ -366,34 +366,32 @@ static int stm32_counter_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct stm32_timer_trigger *priv = iio_priv(indio_dev); + u32 dat; switch (mask) { case IIO_CHAN_INFO_RAW: - { - u32 cnt; - - regmap_read(priv->regmap, TIM_CNT, &cnt); - *val = cnt; + regmap_read(priv->regmap, TIM_CNT, &dat); + *val = dat; + return IIO_VAL_INT; + case IIO_CHAN_INFO_ENABLE: + regmap_read(priv->regmap, TIM_CR1, &dat); + *val = (dat & TIM_CR1_CEN) ? 1 : 0; return IIO_VAL_INT; - } - case IIO_CHAN_INFO_SCALE: - { - u32 smcr; - regmap_read(priv->regmap, TIM_SMCR, &smcr); - smcr &= TIM_SMCR_SMS; + case IIO_CHAN_INFO_SCALE: + regmap_read(priv->regmap, TIM_SMCR, &dat); + dat &= TIM_SMCR_SMS; *val = 1; *val2 = 0; /* in quadrature case scale = 0.25 */ - if (smcr == 3) + if (dat == 3) *val2 = 2; return IIO_VAL_FRACTIONAL_LOG2; } - } return -EINVAL; } @@ -403,15 +401,31 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct stm32_timer_trigger *priv = iio_priv(indio_dev); + u32 dat; switch (mask) { case IIO_CHAN_INFO_RAW: - regmap_write(priv->regmap, TIM_CNT, val); + return regmap_write(priv->regmap, TIM_CNT, val); - return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: /* fixed scale */ return -EINVAL; + + case IIO_CHAN_INFO_ENABLE: + if (val) { + regmap_read(priv->regmap, TIM_CR1, &dat); + if (!(dat & TIM_CR1_CEN)) + clk_enable(priv->clk); + regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, + TIM_CR1_CEN); + } else { + regmap_read(priv->regmap, TIM_CR1, &dat); + regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, + 0); + if (dat & TIM_CR1_CEN) + clk_disable(priv->clk); + } + return 0; } return -EINVAL; @@ -471,7 +485,7 @@ static int stm32_get_trigger_mode(struct iio_dev *indio_dev, regmap_read(priv->regmap, TIM_SMCR, &smcr); - return smcr == TIM_SMCR_SMS ? 
0 : -EINVAL; + return (smcr & TIM_SMCR_SMS) == TIM_SMCR_SMS ? 0 : -EINVAL; } static const struct iio_enum stm32_trigger_mode_enum = { @@ -507,9 +521,19 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev, { struct stm32_timer_trigger *priv = iio_priv(indio_dev); int sms = stm32_enable_mode2sms(mode); + u32 val; if (sms < 0) return sms; + /* + * Triggered mode sets CEN bit automatically by hardware. So, first + * enable counter clock, so it can use it. Keeps it in sync with CEN. + */ + if (sms == 6) { + regmap_read(priv->regmap, TIM_CR1, &val); + if (!(val & TIM_CR1_CEN)) + clk_enable(priv->clk); + } regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms); @@ -571,11 +595,14 @@ static int stm32_get_quadrature_mode(struct iio_dev *indio_dev, { struct stm32_timer_trigger *priv = iio_priv(indio_dev); u32 smcr; + int mode; regmap_read(priv->regmap, TIM_SMCR, &smcr); - smcr &= TIM_SMCR_SMS; + mode = (smcr & TIM_SMCR_SMS) - 1; + if ((mode < 0) || (mode > ARRAY_SIZE(stm32_quadrature_modes))) + return -EINVAL; - return smcr - 1; + return mode; } static const struct iio_enum stm32_quadrature_mode_enum = { @@ -592,13 +619,20 @@ static const char *const stm32_count_direction_states[] = { static int stm32_set_count_direction(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, - unsigned int mode) + unsigned int dir) { struct stm32_timer_trigger *priv = iio_priv(indio_dev); + u32 val; + int mode; - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, mode); + /* In encoder mode, direction is RO (given by TI1/TI2 signals) */ + regmap_read(priv->regmap, TIM_SMCR, &val); + mode = (val & TIM_SMCR_SMS) - 1; + if ((mode >= 0) || (mode < ARRAY_SIZE(stm32_quadrature_modes))) + return -EBUSY; - return 0; + return regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, + dir ? TIM_CR1_DIR : 0); } static int stm32_get_count_direction(struct iio_dev *indio_dev, @@ -609,7 +643,7 @@ static int stm32_get_count_direction(struct iio_dev *indio_dev, regmap_read(priv->regmap, TIM_CR1, &cr1); - return (cr1 & TIM_CR1_DIR); + return ((cr1 & TIM_CR1_DIR) ? 
1 : 0); } static const struct iio_enum stm32_count_direction_enum = { @@ -672,7 +706,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = { static const struct iio_chan_spec stm32_trigger_channel = { .type = IIO_COUNT, .channel = 0, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_ENABLE) | + BIT(IIO_CHAN_INFO_SCALE), .ext_info = stm32_trigger_count_info, .indexed = 1 }; diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 8c4ec564e495..55e8f5ed8b3c 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -166,24 +166,6 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start, return 0; } -static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address) -{ - struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); - - if (!context->invalidate_range) - return; - - ib_ucontext_notifier_start_account(context); - down_read(&context->umem_rwsem); - rbt_ib_umem_for_each_in_range(&context->umem_tree, address, - address + PAGE_SIZE, - invalidate_page_trampoline, NULL); - up_read(&context->umem_rwsem); - ib_ucontext_notifier_end_account(context); -} - static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start, u64 end, void *cookie) { @@ -237,7 +219,6 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn, static const struct mmu_notifier_ops ib_umem_notifiers = { .release = ib_umem_notifier_release, - .invalidate_page = ib_umem_notifier_invalidate_page, .invalidate_range_start = ib_umem_notifier_invalidate_range_start, .invalidate_range_end = ib_umem_notifier_invalidate_range_end, }; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index c551d2b275fd..739bd69ef1d4 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, cq->uobject = &obj->uobject; cq->comp_handler = ib_uverbs_comp_handler; cq->event_handler = ib_uverbs_cq_event_handler; - cq->cq_context = &ev_file->ev_queue; + cq->cq_context = ev_file ? 
&ev_file->ev_queue : NULL; atomic_set(&cq->usecnt, 0); obj->uobject.object = cq; @@ -1522,6 +1522,7 @@ static int create_qp(struct ib_uverbs_file *file, qp->qp_type = attr.qp_type; atomic_set(&qp->usecnt, 0); atomic_inc(&pd->usecnt); + qp->port = 0; if (attr.send_cq) atomic_inc(&attr.send_cq->usecnt); if (attr.recv_cq) @@ -1962,8 +1963,9 @@ static int modify_qp(struct ib_uverbs_file *file, attr->alt_timeout = cmd->base.alt_timeout; attr->rate_limit = cmd->rate_limit; - attr->ah_attr.type = rdma_ah_find_type(qp->device, - cmd->base.dest.port_num); + if (cmd->base.attr_mask & IB_QP_AV) + attr->ah_attr.type = rdma_ah_find_type(qp->device, + cmd->base.dest.port_num); if (cmd->base.dest.is_global) { rdma_ah_set_grh(&attr->ah_attr, NULL, cmd->base.dest.flow_label, @@ -1981,8 +1983,9 @@ static int modify_qp(struct ib_uverbs_file *file, rdma_ah_set_port_num(&attr->ah_attr, cmd->base.dest.port_num); - attr->alt_ah_attr.type = rdma_ah_find_type(qp->device, - cmd->base.dest.port_num); + if (cmd->base.attr_mask & IB_QP_ALT_PATH) + attr->alt_ah_attr.type = + rdma_ah_find_type(qp->device, cmd->base.dest.port_num); if (cmd->base.alt_dest.is_global) { rdma_ah_set_grh(&attr->alt_ah_attr, NULL, cmd->base.alt_dest.flow_label, diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 7f8fe443df46..b456e3ca1876 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -838,6 +838,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, spin_lock_init(&qp->mr_lock); INIT_LIST_HEAD(&qp->rdma_mrs); INIT_LIST_HEAD(&qp->sig_mrs); + qp->port = 0; if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) return ib_create_xrc_qp(qp, qp_init_attr); @@ -1297,7 +1298,11 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr, if (ret) return ret; } - return ib_security_modify_qp(qp, attr, attr_mask, udata); + ret = ib_security_modify_qp(qp, attr, attr_mask, udata); + if (!ret && (attr_mask & IB_QP_PORT)) + qp->port = attr->port_num; + + return ret; } EXPORT_SYMBOL(ib_modify_qp_with_udata); diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index ccbf52c8ff6f..e4b56a0dd6d0 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -67,8 +67,6 @@ struct mmu_rb_handler { static unsigned long mmu_node_start(struct mmu_rb_node *); static unsigned long mmu_node_last(struct mmu_rb_node *); -static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *, - unsigned long); static inline void mmu_notifier_range_start(struct mmu_notifier *, struct mm_struct *, unsigned long, unsigned long); @@ -82,7 +80,6 @@ static void do_remove(struct mmu_rb_handler *handler, static void handle_remove(struct work_struct *work); static const struct mmu_notifier_ops mn_opts = { - .invalidate_page = mmu_notifier_page, .invalidate_range_start = mmu_notifier_range_start, }; @@ -285,12 +282,6 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler, handler->ops->remove(handler->ops_arg, node); } -static inline void mmu_notifier_page(struct mmu_notifier *mn, - struct mm_struct *mm, unsigned long addr) -{ - mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE); -} - static inline void mmu_notifier_range_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a7f2e60085c4..f7fcde1ff0aa 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1085,6 +1085,12 @@ static int 
mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND); + /* CM layer calls ib_modify_port() regardless of the link layer. For + * Ethernet ports, qkey violation and Port capabilities are meaningless. + */ + if (!is_ib) + return 0; + if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; value = ~props->clr_port_cap_mask | props->set_port_cap_mask; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 0889ff367c86..f58f8f5f3ebe 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1238,6 +1238,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, goto err_destroy_tis; sq->base.container_mibqp = qp; + sq->base.mqp.event = mlx5_ib_qp_event; } if (qp->rq.wqe_cnt) { diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 298a6ba51411..ca0e19ae7a90 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -476,10 +476,21 @@ static const u8 xboxone_hori_init[] = { }; /* - * A rumble packet is required for some PowerA pads to start + * A specific rumble packet is required for some PowerA pads to start * sending input reports. One of those pads is (0x24c6:0x543a). */ -static const u8 xboxone_zerorumble_init[] = { +static const u8 xboxone_rumblebegin_init[] = { + 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00, + 0x1D, 0x1D, 0xFF, 0x00, 0x00 +}; + +/* + * A rumble packet with zero FF intensity will immediately + * terminate the rumbling required to init PowerA pads. + * This should happen fast enough that the motors don't + * spin up to enough speed to actually vibrate the gamepad. 
+ */ +static const u8 xboxone_rumbleend_init[] = { 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; @@ -494,9 +505,12 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), - XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_zerorumble_init), - XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_zerorumble_init), - XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_zerorumble_init), + XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), + XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), + XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), + XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init), + XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init), + XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init), }; struct xpad_output_packet { diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index f600f3a7a3c6..23520df7650f 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -331,7 +331,7 @@ static int soc_button_probe(struct platform_device *pdev) error = gpiod_count(dev, NULL); if (error < 0) { dev_dbg(dev, "no GPIO attached, ignoring...\n"); - return error; + return -ENODEV; } priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 262d1057c1da..850b00e3ad8e 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f, case SS4_PACKET_ID_TWO: if (priv->flags & ALPS_BUTTONPAD) { - f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); + f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + } else { + f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); + f->mt[1].x = SS4_BTL_MF_X_V2(p, 1); + } f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0); - f->mt[1].x = SS4_BTL_MF_X_V2(p, 1); f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1); } else { - f->mt[0].x = SS4_STD_MF_X_V2(p, 0); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); + f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); + } else { + f->mt[0].x = SS4_STD_MF_X_V2(p, 0); + f->mt[1].x = SS4_STD_MF_X_V2(p, 1); + } f->mt[0].y = SS4_STD_MF_Y_V2(p, 0); - f->mt[1].x = SS4_STD_MF_X_V2(p, 1); f->mt[1].y = SS4_STD_MF_Y_V2(p, 1); } f->pressure = SS4_MF_Z_V2(p, 0) ? 
0x30 : 0; @@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f, case SS4_PACKET_ID_MULTI: if (priv->flags & ALPS_BUTTONPAD) { - f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); + f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + } else { + f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); + f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); + } + f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); - f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); no_data_x = SS4_MFPACKET_NO_AX_BL; no_data_y = SS4_MFPACKET_NO_AY_BL; } else { - f->mt[2].x = SS4_STD_MF_X_V2(p, 0); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); + f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); + } else { + f->mt[0].x = SS4_STD_MF_X_V2(p, 0); + f->mt[1].x = SS4_STD_MF_X_V2(p, 1); + } f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); - f->mt[3].x = SS4_STD_MF_X_V2(p, 1); f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); no_data_x = SS4_MFPACKET_NO_AX; no_data_y = SS4_MFPACKET_NO_AY; @@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, memset(otp, 0, sizeof(otp)); - if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) || - alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0])) + if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) || + alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0])) return -1; alps_update_device_area_ss4_v2(otp, priv); diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index ed2d6879fa52..c80a7c76cb76 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -100,6 +100,10 @@ enum SS4_PACKET_ID { ((_b[1 + _i * 3] << 5) & 0x1F00) \ ) +#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \ + ((_b[1 + (_i) * 3] << 4) & 0x0F80) \ + ) + #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \ ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \ ((_b[2 + (_i) * 3] << 4) & 0x0E00) \ @@ -109,6 +113,10 @@ enum SS4_PACKET_ID { ((_b[0 + (_i) * 3] >> 3) & 0x0010) \ ) +#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \ + ((_b[0 + (_i) * 3] >> 4) & 0x0008) \ + ) + #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \ ((_b[0 + (_i) * 3] >> 3) & 0x0008) \ ) diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 714cf7f9b138..cfbc8ba4c96c 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1247,6 +1247,7 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0000", 0 }, { "ELAN0100", 0 }, { "ELAN0600", 0 }, + { "ELAN0602", 0 }, { "ELAN0605", 0 }, { "ELAN0608", 0 }, { "ELAN0605", 0 }, diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 16c30460ef04..5af0b7d200bc 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -535,16 +535,17 @@ static void synaptics_apply_quirks(struct psmouse *psmouse, } } +static bool synaptics_has_agm(struct synaptics_data *priv) +{ + return (SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) || + SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)); +} + static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse) { static u8 param = 0xc8; - struct synaptics_data *priv = psmouse->private; int error; - if (!(SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) || - SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c))) - return 0; - error = psmouse_sliced_command(psmouse, SYN_QUE_MODEL); if (error) return error; @@ -553,9 +554,6 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse 
*psmouse) if (error) return error; - /* Advanced gesture mode also sends multi finger data */ - priv->info.capabilities |= BIT(1); - return 0; } @@ -578,7 +576,7 @@ static int synaptics_set_mode(struct psmouse *psmouse) if (error) return error; - if (priv->absolute_mode) { + if (priv->absolute_mode && synaptics_has_agm(priv)) { error = synaptics_set_advanced_gesture_mode(psmouse); if (error) { psmouse_err(psmouse, @@ -766,9 +764,7 @@ static int synaptics_parse_hw_state(const u8 buf[], ((buf[0] & 0x04) >> 1) | ((buf[3] & 0x04) >> 2)); - if ((SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) || - SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)) && - hw->w == 2) { + if (synaptics_has_agm(priv) && hw->w == 2) { synaptics_parse_agm(buf, priv, hw); return 1; } @@ -1033,6 +1029,15 @@ static void synaptics_image_sensor_process(struct psmouse *psmouse, synaptics_report_mt_data(psmouse, sgm, num_fingers); } +static bool synaptics_has_multifinger(struct synaptics_data *priv) +{ + if (SYN_CAP_MULTIFINGER(priv->info.capabilities)) + return true; + + /* Advanced gesture mode also sends multi finger data */ + return synaptics_has_agm(priv); +} + /* * called for each full received packet from the touchpad */ @@ -1079,7 +1084,7 @@ static void synaptics_process_packet(struct psmouse *psmouse) if (SYN_CAP_EXTENDED(info->capabilities)) { switch (hw.w) { case 0 ... 1: - if (SYN_CAP_MULTIFINGER(info->capabilities)) + if (synaptics_has_multifinger(priv)) num_fingers = hw.w + 2; break; case 2: @@ -1123,7 +1128,7 @@ static void synaptics_process_packet(struct psmouse *psmouse) input_report_abs(dev, ABS_TOOL_WIDTH, finger_width); input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1); - if (SYN_CAP_MULTIFINGER(info->capabilities)) { + if (synaptics_has_multifinger(priv)) { input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2); input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3); } @@ -1283,7 +1288,7 @@ static void set_input_params(struct psmouse *psmouse, __set_bit(BTN_TOUCH, dev->keybit); __set_bit(BTN_TOOL_FINGER, dev->keybit); - if (SYN_CAP_MULTIFINGER(info->capabilities)) { + if (synaptics_has_multifinger(priv)) { __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit); __set_bit(BTN_TOOL_TRIPLETAP, dev->keybit); } diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 20b5b21c1bba..0871010f18d5 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) return -1; - if (param[0] != TP_MAGIC_IDENT) + /* add new TP ID. 
*/ + if (!(param[0] & TP_MAGIC_IDENT)) return -1; if (firmware_id) diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 5617ed3a7d7a..88055755f82e 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -21,8 +21,9 @@ #define TP_COMMAND 0xE2 /* Commands start with this */ #define TP_READ_ID 0xE1 /* Sent for device identification */ -#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ +#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ /* by the firmware ID */ + /* Firmware ID includes 0x1, 0x2, 0x3 */ /* diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 294a409e283b..d6b873b57054 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -574,7 +574,9 @@ struct amd_iommu { static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) { - return container_of(dev, struct amd_iommu, iommu.dev); + struct iommu_device *iommu = dev_to_iommu_device(dev); + + return container_of(iommu, struct amd_iommu, iommu); } #define ACPIHID_UID_LEN 256 diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 6629c472eafd..dccf5b76eff2 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn, return 0; } -static void mn_invalidate_page(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address) -{ - __mn_flush_page(mn, address); -} - static void mn_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) @@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) static const struct mmu_notifier_ops iommu_mn = { .release = mn_release, .clear_flush_young = mn_clear_flush_young, - .invalidate_page = mn_invalidate_page, .invalidate_range = mn_invalidate_range, }; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 687f18f65cea..3e8636f1220e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4736,7 +4736,9 @@ static void intel_disable_iommus(void) static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) { - return container_of(dev, struct intel_iommu, iommu.dev); + struct iommu_device *iommu_dev = dev_to_iommu_device(dev); + + return container_of(iommu_dev, struct intel_iommu, iommu); } static ssize_t intel_iommu_show_version(struct device *dev, diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index f167c0d84ebf..f620dccec8ee 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -223,14 +223,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm, intel_flush_svm_range(svm, address, 1, 1, 0); } -static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm, - unsigned long address) -{ - struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); - - intel_flush_svm_range(svm, address, 1, 1, 0); -} - /* Pages have been freed at this point */ static void intel_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm, @@ -285,7 +277,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) static const struct mmu_notifier_ops intel_mmuops = { .release = intel_mm_release, .change_pte = intel_change_pte, - .invalidate_page = intel_invalidate_page, .invalidate_range = intel_invalidate_range, }; diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c 
index c58351ed61c1..36d1a7ce7fc4 100644 --- a/drivers/iommu/iommu-sysfs.c +++ b/drivers/iommu/iommu-sysfs.c @@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu, va_list vargs; int ret; - device_initialize(&iommu->dev); + iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL); + if (!iommu->dev) + return -ENOMEM; - iommu->dev.class = &iommu_class; - iommu->dev.parent = parent; - iommu->dev.groups = groups; + device_initialize(iommu->dev); + + iommu->dev->class = &iommu_class; + iommu->dev->parent = parent; + iommu->dev->groups = groups; va_start(vargs, fmt); - ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs); + ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs); va_end(vargs); if (ret) goto error; - ret = device_add(&iommu->dev); + ret = device_add(iommu->dev); if (ret) goto error; + dev_set_drvdata(iommu->dev, iommu); + return 0; error: - put_device(&iommu->dev); + put_device(iommu->dev); return ret; } void iommu_device_sysfs_remove(struct iommu_device *iommu) { - device_unregister(&iommu->dev); + dev_set_drvdata(iommu->dev, NULL); + device_unregister(iommu->dev); + iommu->dev = NULL; } /* * IOMMU drivers can indicate a device is managed by a given IOMMU using @@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link) if (!iommu || IS_ERR(iommu)) return -ENODEV; - ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices", + ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices", &link->kobj, dev_name(link)); if (ret) return ret; - ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu"); + ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu"); if (ret) - sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", + sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link)); return ret; @@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link) return; sysfs_remove_link(&link->kobj, "iommu"); - sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link)); + sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link)); } diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 0e8ab5bb3575..d24e4b05f5da 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -504,7 +504,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, if (queue_dying) { atomic_inc(&m->pg_init_in_progress); activate_or_offline_path(pgpath); - return DM_MAPIO_REQUEUE; } return DM_MAPIO_DELAY_REQUEUE; } @@ -1458,7 +1457,6 @@ static int noretry_error(blk_status_t error) case BLK_STS_TARGET: case BLK_STS_NEXUS: case BLK_STS_MEDIUM: - case BLK_STS_RESOURCE: return 1; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2edbcc2d7d3f..d669fddd9290 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -27,16 +27,6 @@ #define DM_MSG_PREFIX "core" -#ifdef CONFIG_PRINTK -/* - * ratelimit state to be used in DMXXX_LIMIT(). - */ -DEFINE_RATELIMIT_STATE(dm_ratelimit_state, - DEFAULT_RATELIMIT_INTERVAL, - DEFAULT_RATELIMIT_BURST); -EXPORT_SYMBOL(dm_ratelimit_state); -#endif - /* * Cookies are numeric values sent with CHANGE and REMOVE * uevents while resuming, removing or renaming the device. 
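The iommu-sysfs hunks above stop embedding struct device inside struct iommu_device and allocate it separately. Once device_add() succeeds, the driver core owns the device's lifetime: the final put_device() frees it through the class release callback, which only works for memory allocated on its own, not for a member embedded in a larger driver structure. The owning structure is attached with dev_set_drvdata(), which is why the AMD and Intel getters above switch from container_of() on iommu.dev to dev_to_iommu_device(). A minimal kernel-style sketch of the same pattern (my_iommu and my_iommu_sysfs_add are illustrative names, not kernel API; error handling condensed):

#include <linux/device.h>
#include <linux/slab.h>

struct my_iommu {
	struct device *dev;	/* allocated separately; freed by the driver core */
};

static int my_iommu_sysfs_add(struct my_iommu *iommu, struct device *parent,
			      struct class *cls, const char *name)
{
	int ret;

	iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
	if (!iommu->dev)
		return -ENOMEM;

	device_initialize(iommu->dev);
	iommu->dev->class = cls;	/* class supplies the release callback */
	iommu->dev->parent = parent;

	ret = dev_set_name(iommu->dev, "%s", name);
	if (ret)
		goto error;

	ret = device_add(iommu->dev);
	if (ret)
		goto error;

	/* back-pointer replaces container_of() on an embedded device */
	dev_set_drvdata(iommu->dev, iommu);
	return 0;

error:
	put_device(iommu->dev);	/* drops the last reference; release frees it */
	iommu->dev = NULL;
	return ret;
}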
@@ -1523,7 +1513,7 @@ static void __split_and_process_bio(struct mapped_device *md, } /* drop the extra reference count */ - dec_pending(ci.io, error); + dec_pending(ci.io, errno_to_blk_status(error)); } /*----------------------------------------------------------------- * CRUD END diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index 99e644cda4d1..ebf69ff48ae2 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c @@ -72,7 +72,7 @@ struct atmel_smc_timing_xlate { { .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos} #define ATMEL_SMC_CYCLE_XLATE(nm, pos) \ - { .name = nm, .converter = atmel_smc_cs_conf_set_setup, .shift = pos} + { .name = nm, .converter = atmel_smc_cs_conf_set_cycle, .shift = pos} static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid, struct atmel_ebi_dev_config *conf) @@ -120,12 +120,14 @@ static int atmel_ebi_xslate_smc_timings(struct atmel_ebi_dev *ebid, if (!ret) { required = true; ncycles = DIV_ROUND_UP(val, clk_period_ns); - if (ncycles > ATMEL_SMC_MODE_TDF_MAX || - ncycles < ATMEL_SMC_MODE_TDF_MIN) { + if (ncycles > ATMEL_SMC_MODE_TDF_MAX) { ret = -EINVAL; goto out; } + if (ncycles < ATMEL_SMC_MODE_TDF_MIN) + ncycles = ATMEL_SMC_MODE_TDF_MIN; + smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles); } @@ -263,7 +265,7 @@ static int atmel_ebi_xslate_smc_config(struct atmel_ebi_dev *ebid, } ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf); - if (ret) + if (ret < 0) return -EINVAL; if ((ret > 0 && !required) || (!ret && required)) { diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c index 954cf0f66a31..20cc0ea470fa 100644 --- a/drivers/mfd/atmel-smc.c +++ b/drivers/mfd/atmel-smc.c @@ -206,7 +206,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_pulse); * parameter * * This function encodes the @ncycles value as described in the datasheet - * (section "SMC Pulse Register"), and then stores the result in the + * (section "SMC Cycle Register"), and then stores the result in the * @conf->setup field at @shift position. 
* * Returns -EINVAL if @shift is invalid, -ERANGE if @ncycles does not fit in diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c index fbe0f245ce8e..fe1811523e4a 100644 --- a/drivers/mfd/da9062-core.c +++ b/drivers/mfd/da9062-core.c @@ -645,6 +645,9 @@ static const struct regmap_range da9062_aa_readable_ranges[] = { .range_min = DA9062AA_VLDO1_B, .range_max = DA9062AA_VLDO4_B, }, { + .range_min = DA9062AA_BBAT_CONT, + .range_max = DA9062AA_BBAT_CONT, + }, { .range_min = DA9062AA_INTERFACE, .range_max = DA9062AA_CONFIG_E, }, { @@ -721,6 +724,9 @@ static const struct regmap_range da9062_aa_writeable_ranges[] = { .range_min = DA9062AA_VLDO1_B, .range_max = DA9062AA_VLDO4_B, }, { + .range_min = DA9062AA_BBAT_CONT, + .range_max = DA9062AA_BBAT_CONT, + }, { .range_min = DA9062AA_GP_ID_0, .range_max = DA9062AA_GP_ID_19, }, diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c index 64d5760d069a..63d6246d6dff 100644 --- a/drivers/misc/mic/scif/scif_dma.c +++ b/drivers/misc/mic/scif/scif_dma.c @@ -200,16 +200,6 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn, schedule_work(&scif_info.misc_work); } -static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address) -{ - struct scif_mmu_notif *mmn; - - mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier); - scif_rma_destroy_tcw(mmn, address, PAGE_SIZE); -} - static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, @@ -235,7 +225,6 @@ static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, static const struct mmu_notifier_ops scif_mmu_notifier_ops = { .release = scif_mmu_notifier_release, .clear_flush_young = NULL, - .invalidate_page = scif_mmu_notifier_invalidate_page, .invalidate_range_start = scif_mmu_notifier_invalidate_range_start, .invalidate_range_end = scif_mmu_notifier_invalidate_range_end}; diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c index e936d43895d2..9918eda0e05f 100644 --- a/drivers/misc/sgi-gru/grutlbpurge.c +++ b/drivers/misc/sgi-gru/grutlbpurge.c @@ -247,17 +247,6 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn, gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end); } -static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm, - unsigned long address) -{ - struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, - ms_notifier); - - STAT(mmu_invalidate_page); - gru_flush_tlb_range(gms, address, PAGE_SIZE); - gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address); -} - static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, @@ -269,7 +258,6 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) static const struct mmu_notifier_ops gru_mmuops = { - .invalidate_page = gru_invalidate_page, .invalidate_range_start = gru_invalidate_range_start, .invalidate_range_end = gru_invalidate_range_end, .release = gru_release, diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index f1bbfd389367..8bd7aba811e9 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1213,7 +1213,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) break; } mq_rq->drv_op_result = ret; - blk_end_request_all(req, ret); + blk_end_request_all(req, ret ? 
BLK_STS_IOERR : BLK_STS_OK); } static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) @@ -1371,12 +1371,46 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, R1_CC_ERROR | /* Card controller error */ \ R1_ERROR) /* General/unknown error */ -static bool mmc_blk_has_cmd_err(struct mmc_command *cmd) +static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) { - if (!cmd->error && cmd->resp[0] & CMD_ERRORS) - cmd->error = -EIO; + u32 val; + + /* + * Per the SD specification (physical layer version 4.10) [1], + * section 4.3.3, it explicitly states that "When the last + * block of user area is read using CMD18, the host should + * ignore OUT_OF_RANGE error that may occur even the sequence + * is correct". And JESD84-B51 for eMMC also has a similar + * statement on section 6.8.3. + * + * Multiple block read/write could be done by either the predefined + * method, namely CMD23, or open-ended mode. For open-ended mode, + * we should ignore the OUT_OF_RANGE error as it's normal behaviour. + * + * However the spec [1] doesn't tell us whether we should also + * ignore that for the predefined method. But per the spec [1], section + * 4.15 Set Block Count Command, it says "If illegal block count + * is set, out of range error will be indicated during read/write + * operation (For example, data transfer is stopped at user area + * boundary)." In other words, we can expect an out-of-range error + * in the response for the following CMD18/25. And if the argument of + * CMD23 + the argument of CMD18/25 exceeds the max number of blocks, + * we could also expect to get a -ETIMEDOUT or any error number from + * the host drivers due to a missing data response (for write)/data (for + * read), as the card will stop the data transfer by itself per the + * spec. So we only need to check R1_OUT_OF_RANGE for open-ended mode. + */ - return cmd->error; + if (!brq->stop.error) { + bool oor_with_open_end; + /* If there is no error yet, check R1 response */ + + val = brq->stop.resp[0] & CMD_ERRORS; + oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc; + + if (val && !oor_with_open_end) + brq->stop.error = -EIO; + } } static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, @@ -1400,8 +1434,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, * stop.error indicates a problem with the stop command. Data * may have been transferred, or may still be transferring. */
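The new mmc_blk_eval_resp_error() above reduces to one predicate: any CMD_ERRORS bit in the stop command's R1 response is fatal, except OUT_OF_RANGE on an open-ended transfer, i.e. one not preceded by CMD23 (brq->mrq.sbc is NULL). A standalone restatement in plain C, assuming only the R1 bit layout from the SD specification (R1_OUT_OF_RANGE is status bit 31; cmd_errors stands in for the driver's CMD_ERRORS mask):

#include <stdbool.h>
#include <stdint.h>

#define R1_OUT_OF_RANGE (1u << 31)	/* R1 status bit 31 */

static bool stop_resp_is_fatal(uint32_t r1_status, uint32_t cmd_errors,
			       bool have_cmd23)
{
	uint32_t val = r1_status & cmd_errors;
	/* open-ended reads may legally flag OUT_OF_RANGE on the last block */
	bool oor_with_open_end = (val & R1_OUT_OF_RANGE) && !have_cmd23;

	return val && !oor_with_open_end;
}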
- if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) || - brq->data.error) { + + mmc_blk_eval_resp_error(brq); + + if (brq->sbc.error || brq->cmd.error || + brq->stop.error || brq->data.error) { switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { case ERR_RETRY: return MMC_BLK_RETRY; @@ -1681,9 +1718,9 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, if (err) req_pending = old_req_pending; else - req_pending = blk_end_request(req, 0, blocks << 9); + req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9); } else { - req_pending = blk_end_request(req, 0, brq->data.bytes_xfered); + req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered); } return req_pending; } diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index bc1781bb070b..c580af05b033 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -210,8 +210,27 @@ static void xenon_set_uhs_signaling(struct sdhci_host *host, sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); } +static void xenon_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + u8 pwr = host->pwr; + + sdhci_set_power_noreg(host, mode, vdd); + + if (host->pwr == pwr) + return; + + if (host->pwr == 0) + vdd = 0; + + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); +} + static const struct sdhci_ops sdhci_xenon_ops = { .set_clock = sdhci_set_clock, + .set_power = xenon_set_power, .set_bus_width = sdhci_set_bus_width, .reset = xenon_reset, .set_uhs_signaling = xenon_set_uhs_signaling, diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index 2c8baa0c2c4e..ceec21bd30c4 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -1364,7 +1364,18 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, ret = atmel_smc_cs_conf_set_timing(smcconf, ATMEL_HSMC_TIMINGS_TADL_SHIFT, ncycles); - if (ret) + /* + * Version 4 of the ONFI spec mandates that tADL be at least 400 + * nanoseconds, but, depending on the master clock rate, 400 ns may not + * fit in the tADL field of the SMC reg. We need to relax the check and + * accept the -ERANGE return code. + * + * Note that previous versions of the ONFI spec had a lower tADL_min + * (100 or 200 ns). It's not clear why this timing constraint got + * increased but it seems most NANDs are fine with values lower than + * 400ns, so we should be safe.
+ */ + if (ret && ret != -ERANGE) return ret; ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps); diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 03a0d057bf2f..e4211c3cc49b 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -2373,6 +2373,7 @@ static int __init ns_init_module(void) return 0; err_exit: + nandsim_debugfs_remove(nand); free_nandsim(nand); nand_release(nsmtd); for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 8492c9d64004..554fe2df9365 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1034,6 +1034,7 @@ struct bcm_sf2_of_data { u32 type; const u16 *reg_offsets; unsigned int core_reg_align; + unsigned int num_cfp_rules; }; /* Register offsets for the SWITCH_REG_* block */ @@ -1057,6 +1058,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = { .type = BCM7445_DEVICE_ID, .core_reg_align = 0, .reg_offsets = bcm_sf2_7445_reg_offsets, + .num_cfp_rules = 256, }; static const u16 bcm_sf2_7278_reg_offsets[] = { @@ -1079,6 +1081,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = { .type = BCM7278_DEVICE_ID, .core_reg_align = 1, .reg_offsets = bcm_sf2_7278_reg_offsets, + .num_cfp_rules = 128, }; static const struct of_device_id bcm_sf2_of_match[] = { @@ -1135,6 +1138,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) priv->type = data->type; priv->reg_offsets = data->reg_offsets; priv->core_reg_align = data->core_reg_align; + priv->num_cfp_rules = data->num_cfp_rules; /* Auto-detection using standard registers will not work, so * provide an indication of what kind of device we are for diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index d9c96b281fc0..02c499f9c56b 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -72,6 +72,7 @@ struct bcm_sf2_priv { u32 type; const u16 *reg_offsets; unsigned int core_reg_align; + unsigned int num_cfp_rules; /* spinlock protecting access to the indirect registers */ spinlock_t indir_lock; diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 2fb32d67065f..8a1da7e67707 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv, { u32 reg; - WARN_ON(addr >= CFP_NUM_RULES); + WARN_ON(addr >= priv->num_cfp_rules); reg = core_readl(priv, CORE_CFP_ACC); reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); @@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv, static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) { /* Entry #0 is reserved */ - return CFP_NUM_RULES - 1; + return priv->num_cfp_rules - 1; } static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, @@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, if (!(reg & OP_STR_DONE)) break; - } while (index < CFP_NUM_RULES); + } while (index < priv->num_cfp_rules); /* Put the TCAM size here */ nfc->data = bcm_sf2_cfp_rule_size(priv); @@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, case ETHTOOL_GRXCLSRLCNT: /* Subtract the default, unusable rule */ nfc->rule_cnt = bitmap_weight(priv->cfp.used, - CFP_NUM_RULES) - 1; + priv->num_cfp_rules) - 1; /* We support specifying rule locations */ nfc->data |= RX_CLS_LOC_SPECIAL; break; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c 
b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 1d307f2def2d..6e253d913fe2 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1661,21 +1661,21 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) return 0; } -static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) +static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) { int ret; if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) - return 0; + return; if (!IS_ENABLED(CONFIG_MDIO_XGENE)) - return 0; + return; ret = xgene_enet_phy_connect(pdata->ndev); if (!ret) pdata->mdio_driver = true; - return 0; + return; } static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata) @@ -1779,10 +1779,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; - ret = xgene_enet_check_phy_handle(pdata); - if (ret) - return ret; - xgene_enet_gpiod_get(pdata); pdata->clk = devm_clk_get(&pdev->dev, NULL); @@ -2097,9 +2093,11 @@ static int xgene_enet_probe(struct platform_device *pdev) goto err; } + xgene_enet_check_phy_handle(pdata); + ret = xgene_enet_init_hw(pdata); if (ret) - goto err; + goto err2; link_state = pdata->mac_ops->link_state; if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { @@ -2117,29 +2115,30 @@ static int xgene_enet_probe(struct platform_device *pdev) spin_lock_init(&pdata->stats_lock); ret = xgene_extd_stats_init(pdata); if (ret) - goto err2; + goto err1; xgene_enet_napi_add(pdata); ret = register_netdev(ndev); if (ret) { netdev_err(ndev, "Failed to register netdev\n"); - goto err2; + goto err1; } return 0; -err2: +err1: /* * If necessary, free_netdev() will call netif_napi_del() and undo * the effects of xgene_enet_napi_add()'s calls to netif_napi_add(). 
*/ + xgene_enet_delete_desc_rings(pdata); + +err2: if (pdata->mdio_driver) xgene_enet_phy_disconnect(pdata); else if (phy_interface_mode_is_rgmii(pdata->phy_mode)) xgene_enet_mdio_remove(pdata); -err1: - xgene_enet_delete_desc_rings(pdata); err: free_netdev(ndev); return ret; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index fce0fd3f23ff..bf9b3f020e10 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -105,8 +105,7 @@ struct aq_hw_ops { int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); - int (*hw_get_link_status)(struct aq_hw_s *self, - struct aq_hw_link_status_s *link_status); + int (*hw_get_link_status)(struct aq_hw_s *self); int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 9ee1c5016784..6ac9e2602d6d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self) else cfg->vecs = 1U; + cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF); + cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || @@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param) struct net_device *ndev = aq_nic_get_ndev(self); int err = 0; unsigned int i = 0U; - struct aq_hw_link_status_s link_status; struct aq_ring_stats_rx_s stats_rx; struct aq_ring_stats_tx_s stats_tx; if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) goto err_exit; - err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status); + err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); if (err < 0) goto err_exit; - self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, - self->aq_nic_cfg.is_interrupt_moderation); - - if (memcmp(&link_status, &self->link_status, sizeof(link_status))) { - if (link_status.mbps) { - aq_utils_obj_set(&self->header.flags, - AQ_NIC_FLAG_STARTED); - aq_utils_obj_clear(&self->header.flags, - AQ_NIC_LINK_DOWN); - netif_carrier_on(self->ndev); - } else { - netif_carrier_off(self->ndev); - aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); - } + self->link_status = self->aq_hw->aq_link_status; - self->link_status = link_status; + self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, + self->aq_nic_cfg.is_interrupt_moderation); + + if (self->link_status.mbps) { + aq_utils_obj_set(&self->header.flags, + AQ_NIC_FLAG_STARTED); + aq_utils_obj_clear(&self->header.flags, + AQ_NIC_LINK_DOWN); + netif_carrier_on(self->ndev); + } else { + netif_carrier_off(self->ndev); + aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); } memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); @@ -597,14 +596,11 @@ exit: } int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) -__releases(&ring->lock) -__acquires(&ring->lock) { struct aq_ring_s *ring = NULL; unsigned int frags = 0U; unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; unsigned int tc = 0U; - unsigned int trys = AQ_CFG_LOCK_TRYS; int err = NETDEV_TX_OK; bool is_nic_in_bad_state; @@ -628,36 +624,21 @@ __acquires(&ring->lock) goto err_exit; } - do { - if (spin_trylock(&ring->header.lock)) { - frags = aq_nic_map_skb(self, skb, ring); - - if (likely(frags)) { - err = self->aq_hw_ops.hw_ring_tx_xmit( - self->aq_hw, - ring, frags); - if (err >= 0) { - if 
(aq_ring_avail_dx(ring) < - AQ_CFG_SKB_FRAGS_MAX + 1) - aq_nic_ndev_queue_stop( - self, - ring->idx); - - ++ring->stats.tx.packets; - ring->stats.tx.bytes += skb->len; - } - } else { - err = NETDEV_TX_BUSY; - } + frags = aq_nic_map_skb(self, skb, ring); - spin_unlock(&ring->header.lock); - break; - } - } while (--trys); + if (likely(frags)) { + err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw, + ring, + frags); + if (err >= 0) { + if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1) + aq_nic_ndev_queue_stop(self, ring->idx); - if (!trys) { + ++ring->stats.tx.packets; + ring->stats.tx.bytes += skb->len; + } + } else { err = NETDEV_TX_BUSY; - goto err_exit; } err_exit: @@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) netdev_for_each_mc_addr(ha, ndev) { ether_addr_copy(self->mc_list.ar[i++], ha->addr); ++self->mc_list.count; + + if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) + break; } - return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, + if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) { + /* The multicast list exceeds the number of filters atlantic + * supports. Fall back to forcing all-multicast mode: disable + * all UC filters and set up an "all pass" multicast mask. + */ + self->packet_filter |= IFF_ALLMULTI; + self->aq_hw->aq_nic_cfg->mc_list_count = 0; + return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, + self->packet_filter); + } else { + return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, self->mc_list.ar, self->mc_list.count); + } } int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 4b445750b93e..4eee1996a825 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self) self->hw_head = 0; self->sw_head = 0; self->sw_tail = 0; - spin_lock_init(&self->header.lock); return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h index f6012b34abe6..e12bcdfb874a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h @@ -17,7 +17,6 @@ #define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_) struct aq_obj_s { - spinlock_t lock; /* spinlock for nic/rings processing */ atomic_t flags; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index ec390c5eed35..ebf588004c46 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -34,8 +34,6 @@ struct aq_vec_s { #define AQ_VEC_RX_ID 1 static int aq_vec_poll(struct napi_struct *napi, int budget) -__releases(&self->lock) -__acquires(&self->lock) { struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); struct aq_ring_s *ring = NULL; @@ -47,7 +45,7 @@ __acquires(&self->lock) if (!self) { err = -EINVAL; - } else if (spin_trylock(&self->header.lock)) { + } else { for (i = 0U, ring = self->ring[0]; self->tx_rings > i; ++i, ring = self->ring[i]) { if (self->aq_hw_ops->hw_ring_tx_head_update) { @@ -106,11 +104,8 @@ __acquires(&self->lock) self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U << self->aq_ring_param.vec_idx); } - -err_exit: - spin_unlock(&self->header.lock); } - +err_exit: return work_done; } @@ -186,8 +181,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, self->aq_hw_ops = aq_hw_ops;
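/*
 * A condensed sketch of the fallback added in aq_nic_set_multicast_list()
 * above: copy at most the hardware's filter capacity and report overflow so
 * the caller can set IFF_ALLMULTI and program an "all pass" multicast mask
 * instead of silently truncating the list. MC_FILTERS_MAX, MAC_LEN and
 * collect_mc_filters() are illustrative stand-ins (the driver uses
 * AQ_CFG_MULTICAST_ADDRESS_MAX), not driver API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MC_FILTERS_MAX 32
#define MAC_LEN 6

/* returns true when the list overflows and all-multicast must be forced */
static bool collect_mc_filters(const uint8_t (*src)[MAC_LEN], size_t n,
			       uint8_t (*dst)[MAC_LEN], size_t *out_count)
{
	size_t i;

	for (i = 0; i < n && i < MC_FILTERS_MAX; i++)
		memcpy(dst[i], src[i], MAC_LEN);
	*out_count = i;

	return n > MC_FILTERS_MAX;
}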
self->aq_hw = aq_hw; - spin_lock_init(&self->header.lock); - for (i = 0U, ring = self->ring[0]; self->tx_rings > i; ++i, ring = self->ring[i]) { err = aq_ring_init(&ring[AQ_VEC_TX_ID]); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index faeb4935ef3e..c5a02df7a48b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1; else if (0x0U == (pkt_type & 0x1CU)) buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1; + + /* Checksum offload workaround for small packets */ + if (rxd_wb->pkt_len <= 60) { + buff->is_ip_cso = 0U; + buff->is_cso_err = 0U; + } } is_err &= ~0x18U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 1bceb7358e5c..21784cc39dab 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; else if (0x0U == (pkt_type & 0x1CU)) buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; + + /* Checksum offload workaround for small packets */ + if (rxd_wb->pkt_len <= 60) { + buff->is_ip_cso = 0U; + buff->is_cso_err = 0U; + } } is_err &= ~0x18U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 8d6d8f5804da..4f5ec9a0fbfb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self, err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, aq_hw_read_reg(self, 0x18U)); + + if (err < 0) + pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n", + AQ_CFG_DRV_NAME, + aq_hw_caps->fw_ver_expected, + aq_hw_read_reg(self, 0x18U)); return err; } @@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, err_exit:; } -int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, - struct aq_hw_link_status_s *link_status) +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) { u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; + struct aq_hw_link_status_s *link_status = &self->aq_link_status; if (!link_speed_mask) { link_status->mbps = 0U; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index a66aee51ab5b..e0360a6b2202 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, enum hal_atl_utils_fw_state_e state); -int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, - struct aq_hw_link_status_s *link_status); +int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self); int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, struct aq_hw_caps_s *aq_hw_caps, diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 931751e4f369..eec77fae12a1 100644 --- 
a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -610,7 +610,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev, static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) { - dev_kfree_skb_any(cb->skb); + dev_consume_skb_any(cb->skb); cb->skb = NULL; dma_unmap_addr_set(cb, dma_addr, 0); } @@ -1367,6 +1367,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); if (!ring->cbs) { + dma_free_coherent(kdev, sizeof(struct dma_desc), + ring->desc_cpu, ring->desc_dma); netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); return -ENOMEM; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d6367c10afb5..aacec8bc19d5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4736,7 +4736,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->port_id = le16_to_cpu(resp->port_id); bp->dev->dev_port = pf->port_id; memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); - memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); @@ -4776,16 +4775,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); - mutex_unlock(&bp->hwrm_cmd_lock); - - if (is_valid_ether_addr(vf->mac_addr)) { - /* overwrite netdev dev_adr with admin VF MAC */ - memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); - } else { - eth_hw_addr_random(bp->dev); - rc = bnxt_approve_mac(bp, bp->dev->dev_addr); - } - return rc; #endif } @@ -7297,6 +7286,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) bp->tx_nr_rings = bp->tx_nr_rings_per_tc; netdev_reset_tc(dev); } + bp->tx_nr_rings += bp->tx_nr_rings_xdp; bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; bp->num_stat_ctxs = bp->cp_nr_rings; @@ -7929,6 +7919,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp) bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP); } +static int bnxt_init_mac_addr(struct bnxt *bp) +{ + int rc = 0; + + if (BNXT_PF(bp)) { + memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); + } else { +#ifdef CONFIG_BNXT_SRIOV + struct bnxt_vf_info *vf = &bp->vf; + + if (is_valid_ether_addr(vf->mac_addr)) { + /* overwrite netdev dev_addr with admin VF MAC */ + memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); + } else { + eth_hw_addr_random(bp->dev); + rc = bnxt_approve_mac(bp, bp->dev->dev_addr); + } +#endif + } + return rc; +} + static void bnxt_parse_log_pcie_link(struct bnxt *bp) { enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; @@ -8059,7 +8071,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = -1; goto init_err_pci_clean; } - + rc = bnxt_init_mac_addr(bp); + if (rc) { + dev_err(&pdev->dev, "Unable to initialize mac address.\n"); + rc = -EADDRNOTAVAIL; + goto init_err_pci_clean; + } rc = bnxt_hwrm_queue_qportcfg(bp); if (rc) { netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 77da75a55c02..997e10e8b863 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id) max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1); + if (ulp->msix_requested) + edev->en_ops->bnxt_free_msix(edev, ulp_id); } if (ulp->max_async_event_id) bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 612d1ef3b5f5..9cebca896913 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1379,7 +1379,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, if (skb) { pkts_compl++; bytes_compl += GENET_CB(skb)->bytes_sent; - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } txbds_processed++; @@ -1894,7 +1894,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, cb = ring->cbs + i; skb = bcmgenet_rx_refill(priv, cb); if (skb) - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); if (!cb->skb) return -ENOMEM; } @@ -1913,7 +1913,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); if (skb) - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a4a33ebd0b98..08624db8a6e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, list_del(&entry.list); spin_unlock(&adap->mbox_lock); ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; - t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); + t4_record_mbox(adap, cmd, size, access, ret); return ret; } /* Copy in the new mailbox command and send it on its way ...
*/ - t4_record_mbox(adap, cmd, MBOX_LEN, access, 0); + t4_record_mbox(adap, cmd, size, access, 0); for (i = 0; i < size; i += 8) t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); @@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, } ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT; - t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); + t4_record_mbox(adap, cmd, size, access, ret); dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); t4_report_fw_error(adap); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 05fe7123d5ae..9ed8e4b81530 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1868,7 +1868,6 @@ err_setup_mdio: err_ioremap: release_resource(priv->res); err_req_mem: - netif_napi_del(&priv->napi); free_netdev(netdev); err_alloc_etherdev: return err; diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 14cd2c8b0024..387eb4a88b72 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id, goto no_mem; } + pdev->dev.of_node = node; + pdev->dev.parent = priv->dev; set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); ret = platform_device_add_data(pdev, &data, sizeof(data)); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index f37c05fed5bc..d5624894152e 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -7478,7 +7478,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, struct resource *res; const char *dt_mac_addr; const char *mac_from; - char hw_mac_addr[ETH_ALEN]; + char hw_mac_addr[ETH_ALEN] = {0}; unsigned int ntxqs, nrxqs; bool has_tx_irqs; u32 id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a31912415264..6c2abeccfa5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -263,6 +263,7 @@ struct mlx5e_dcbx { /* The only setting that cannot be read from FW */ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; + u8 cap; }; #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 2eb54d36e16e..c1d384fca4dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_dcbx *dcbx = &priv->dcbx; - u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE; - - if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) - mode |= DCB_CAP_DCBX_HOST; - return mode; + return priv->dcbx.cap; } static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) @@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) /* set dcbx to fw controlled */ if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) { dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO; + dcbx->cap &= ~DCB_CAP_DCBX_HOST; return 0; } @@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev))) return 1; + dcbx->cap = mode; + return 0; } @@ -628,9 +626,9 
@@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev, *cap = false; break; case DCB_CAP_ATTR_DCBX: - *cap = (DCB_CAP_DCBX_LLD_MANAGED | - DCB_CAP_DCBX_VER_CEE | - DCB_CAP_DCBX_STATIC); + *cap = priv->dcbx.cap | + DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_VER_IEEE; break; default: *cap = 0; @@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) { struct mlx5e_dcbx *dcbx = &priv->dcbx; + if (!MLX5_CAP_GEN(priv->mdev, qos)) + return; + if (MLX5_CAP_GEN(priv->mdev, dcbx)) mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode); + priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_VER_IEEE; + if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) + priv->dcbx.cap |= DCB_CAP_DCBX_HOST; + mlx5e_ets_init(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c6ec90e9c95b..6127e0d2f310 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -662,8 +662,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, new_channels.params = priv->channels.params; new_channels.params.num_channels = count; - mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, count); + if (!netif_is_rxfh_configured(priv->netdev)) + mlx5e_build_default_indir_rqt(priv->mdev, + new_channels.params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 111c7523d448..85841e24c65b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1975,6 +1975,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, } mlx5e_build_common_cq_param(priv, param); + param->cq_period_mode = params->rx_cq_period_mode; } static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 55a6786d3c4c..be8197a75a63 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -222,13 +222,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, if (unlikely(!page)) return -ENOMEM; - dma_info->page = page; dma_info->addr = dma_map_page(rq->pdev, page, 0, RQ_PAGE_SIZE(rq), rq->buff.map_dir); if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { put_page(page); return -ENOMEM; } + dma_info->page = page; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3b10d3df7627..da503e6411da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1443,12 +1443,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int ret; - dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6); - ret = dst->error; - if (ret) { - dst_release(dst); + ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, + fl6); + if (ret < 0) return ret; - } *out_ttl = ip6_dst_hoplimit(dst); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index aaa0f4ebba9a..31353e5c3c78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -128,10 +128,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb) return mlx5e_skb_l2_header_offset(skb); } -static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, - struct sk_buff *skb) +static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, + struct sk_buff *skb) { - int hlen; + u16 hlen; switch (mode) { case MLX5_INLINE_MODE_NONE: @@ -140,19 +140,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, hlen = eth_get_headlen(skb->data, skb_headlen(skb)); if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) hlen += VLAN_HLEN; - return hlen; + break; case MLX5_INLINE_MODE_IP: /* When transport header is set to zero, it means no transport * header. When transport header is set to 0xff's, it means * transport header wasn't set. */ - if (skb_transport_offset(skb)) - return mlx5e_skb_l3_header_offset(skb); + if (skb_transport_offset(skb)) { + hlen = mlx5e_skb_l3_header_offset(skb); + break; + } /* fall through */ case MLX5_INLINE_MODE_L2: default: - return mlx5e_skb_l2_header_offset(skb); + hlen = mlx5e_skb_l2_header_offset(skb); } + return min_t(u16, hlen, skb->len); } static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index e7c186b58579..d9fd8570b07c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -817,7 +817,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) struct mlx5_eswitch_rep *rep; int vport; - for (vport = 0; vport < nvports; vport++) { + for (vport = nvports - 1; vport >= 0; vport--) { rep = &esw->offloads.vport_reps[vport]; if (!rep->valid) continue; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 514c22d21729..bd84bdf56a83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1181,7 +1181,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, } } - clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); out: mutex_unlock(&dev->intf_state_mutex); @@ -1253,7 +1252,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_drain_health_recovery(dev); mutex_lock(&dev->intf_state_mutex); - if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { + if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", __func__); if (cleanup) @@ -1262,7 +1261,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, } clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); - set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); if (mlx5_device_registered(dev)) mlx5_detach_device(dev); @@ -1555,8 +1553,6 @@ static void shutdown(struct pci_dev *pdev) int err; dev_info(&pdev->dev, "Shutdown was called\n"); - /* Notify mlx5 clients that the kernel is being shut down */ - set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state); err = mlx5_try_fast_unload(dev); if (err) mlx5_unload_one(dev, priv, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index f774de6f5fcb..520f6382dfde 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev, static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq) { - /* arm_srq structs missing using identical xrc ones */ - u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; - u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; + u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0}; + u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0}; - MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); - MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn); - MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm); + MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ); + MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ); + MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn); + MLX5_SET(arm_rq_in, srq_in, lwm, lwm); return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out)); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 17fc98881642..992cbfa1f2bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4201,6 +4201,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, return -EINVAL; if (!info->linking) break; + if (netdev_has_any_upper_dev(upper_dev)) + return -EINVAL; if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, info->upper_info)) @@ -4320,6 +4322,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, upper_dev = info->upper_dev; if (!netif_is_bridge_master(upper_dev)) return -EINVAL; + if (!info->linking) + break; + if (netdev_has_any_upper_dev(upper_dev)) + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 5eb1606765c5..d39ffbfcc436 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -705,6 +705,7 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, bool is_port_mc_router) { struct mlxsw_sp_bridge_port *bridge_port; + int err; if (switchdev_trans_ph_prepare(trans)) return 0; @@ -715,11 +716,17 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; if (!bridge_port->bridge_device->multicast_enabled) - return 0; + goto out; - return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, - MLXSW_SP_FLOOD_TYPE_MC, - is_port_mc_router); + err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, + MLXSW_SP_FLOOD_TYPE_MC, + is_port_mc_router); + if (err) + return err; + +out: + bridge_port->mrouter = is_port_mc_router; + return 0; } static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 0e08404480ef..d25b5038c3a2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -42,33 +42,29 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame, struct tc_cls_flower_offload *flow, u8 key_type, bool mask_version) { + struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; struct flow_dissector_key_vlan *flow_vlan; u16 tmp_tci; + memset(frame, 0, sizeof(struct nfp_flower_meta_two)); /* Populate the metadata frame. */ frame->nfp_flow_key_layer = key_type; frame->mask_id = ~0; - if (mask_version) { - frame->tci = cpu_to_be16(~0); - return; - } - - flow_vlan = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_VLAN, - flow->key); - - /* Populate the tci field. */ - if (!flow_vlan->vlan_id) { - tmp_tci = 0; - } else { - tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - flow_vlan->vlan_priority) | - FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - flow_vlan->vlan_id) | - NFP_FLOWER_MASK_VLAN_CFI; + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) { + flow_vlan = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_VLAN, + target); + /* Populate the tci field. */ + if (flow_vlan->vlan_id) { + tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + flow_vlan->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + flow_vlan->vlan_id) | + NFP_FLOWER_MASK_VLAN_CFI; + frame->tci = cpu_to_be16(tmp_tci); + } } - frame->tci = cpu_to_be16(tmp_tci); } static void @@ -99,17 +95,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame, bool mask_version) { struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_eth_addrs *flow_mac; - - flow_mac = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - target); + struct flow_dissector_key_eth_addrs *addr; memset(frame, 0, sizeof(struct nfp_flower_mac_mpls)); - /* Populate mac frame. */ - ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]); - ether_addr_copy(frame->mac_src, &flow_mac->src[0]); + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + addr = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + target); + /* Populate mac frame. */ + ether_addr_copy(frame->mac_dst, &addr->dst[0]); + ether_addr_copy(frame->mac_src, &addr->src[0]); + } if (mask_version) frame->mpls_lse = cpu_to_be32(~0); @@ -121,14 +118,17 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame, bool mask_version) { struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_ports *flow_tp; + struct flow_dissector_key_ports *tp; - flow_tp = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_PORTS, - target); + memset(frame, 0, sizeof(struct nfp_flower_tp_ports)); - frame->port_src = flow_tp->src; - frame->port_dst = flow_tp->dst; + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + tp = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_PORTS, + target); + frame->port_src = tp->src; + frame->port_dst = tp->dst; + } } static void @@ -137,25 +137,27 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame, bool mask_version) { struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_ipv4_addrs *flow_ipv4; - struct flow_dissector_key_basic *flow_basic; - - flow_ipv4 = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - target); - - flow_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - target); + struct flow_dissector_key_ipv4_addrs *addr; + struct flow_dissector_key_basic *basic; - /* Populate IPv4 frame. */ - frame->reserved = 0; - frame->ipv4_src = flow_ipv4->src; - frame->ipv4_dst = flow_ipv4->dst; - frame->proto = flow_basic->ip_proto; /* Wildcard TOS/TTL for now. 
*/ - frame->tos = 0; - frame->ttl = 0; + memset(frame, 0, sizeof(struct nfp_flower_ipv4)); + + if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + addr = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + target); + frame->ipv4_src = addr->src; + frame->ipv4_dst = addr->dst; + } + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + target); + frame->proto = basic->ip_proto; + } } static void @@ -164,26 +166,27 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, bool mask_version) { struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_ipv6_addrs *flow_ipv6; - struct flow_dissector_key_basic *flow_basic; - - flow_ipv6 = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - target); + struct flow_dissector_key_ipv6_addrs *addr; + struct flow_dissector_key_basic *basic; - flow_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - target); - - /* Populate IPv6 frame. */ - frame->reserved = 0; - frame->ipv6_src = flow_ipv6->src; - frame->ipv6_dst = flow_ipv6->dst; - frame->proto = flow_basic->ip_proto; /* Wildcard LABEL/TOS/TTL for now. */ - frame->ipv6_flow_label_exthdr = 0; - frame->tos = 0; - frame->ttl = 0; + memset(frame, 0, sizeof(struct nfp_flower_ipv6)); + + if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + addr = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + target); + frame->ipv6_src = addr->src; + frame->ipv6_dst = addr->dst; + } + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + target); + frame->proto = basic->ip_proto; + } } int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index d868a5700e01..d396183108f7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -105,43 +105,62 @@ static int nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, struct tc_cls_flower_offload *flow) { - struct flow_dissector_key_control *mask_enc_ctl; - struct flow_dissector_key_basic *mask_basic; - struct flow_dissector_key_basic *key_basic; + struct flow_dissector_key_basic *mask_basic = NULL; + struct flow_dissector_key_basic *key_basic = NULL; + struct flow_dissector_key_ip *mask_ip = NULL; u32 key_layer_two; u8 key_layer; int key_size; - mask_enc_ctl = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL, - flow->mask); + if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_dissector_key_control *mask_enc_ctl = + skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_CONTROL, + flow->mask); + /* We are expecting a tunnel. For now we ignore offloading. 
*/ + if (mask_enc_ctl->addr_type) + return -EOPNOTSUPP; + } - mask_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->mask); + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + mask_basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + flow->mask); + + key_basic = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_BASIC, + flow->key); + } + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) + mask_ip = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_IP, + flow->mask); - key_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->key); key_layer_two = 0; key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC; key_size = sizeof(struct nfp_flower_meta_one) + sizeof(struct nfp_flower_in_port) + sizeof(struct nfp_flower_mac_mpls); - /* We are expecting a tunnel. For now we ignore offloading. */ - if (mask_enc_ctl->addr_type) - return -EOPNOTSUPP; - - if (mask_basic->n_proto) { + if (mask_basic && mask_basic->n_proto) { /* Ethernet type is present in the key. */ switch (key_basic->n_proto) { case cpu_to_be16(ETH_P_IP): + if (mask_ip && mask_ip->tos) + return -EOPNOTSUPP; + if (mask_ip && mask_ip->ttl) + return -EOPNOTSUPP; key_layer |= NFP_FLOWER_LAYER_IPV4; key_size += sizeof(struct nfp_flower_ipv4); break; case cpu_to_be16(ETH_P_IPV6): + if (mask_ip && mask_ip->tos) + return -EOPNOTSUPP; + if (mask_ip && mask_ip->ttl) + return -EOPNOTSUPP; key_layer |= NFP_FLOWER_LAYER_IPV6; key_size += sizeof(struct nfp_flower_ipv6); break; @@ -152,6 +171,11 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, case cpu_to_be16(ETH_P_ARP): return -EOPNOTSUPP; + /* Currently we do not offload MPLS. */ + case cpu_to_be16(ETH_P_MPLS_UC): + case cpu_to_be16(ETH_P_MPLS_MC): + return -EOPNOTSUPP; + /* Will be included in layer 2. */ case cpu_to_be16(ETH_P_8021Q): break; @@ -166,7 +190,7 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, } } - if (mask_basic->ip_proto) { + if (mask_basic && mask_basic->ip_proto) { /* IP protocol is present in the key. */
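/*
 * The nfp match.c/offload.c hunks above all apply one idiom, sketched here:
 * never call skb_flow_dissector_target() for a key the classifier did not
 * populate. dissector_uses_key() guards each lookup and the output frame is
 * zeroed first, so an absent key simply stays wildcarded. compile_ports() is
 * an illustrative reduction of nfp_flower_compile_tport() above, not new API;
 * struct nfp_flower_tp_ports comes from the driver's own headers.
 */
#include <linux/string.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>

static void compile_ports(struct nfp_flower_tp_ports *frame,
			  struct tc_cls_flower_offload *flow, void *target)
{
	struct flow_dissector_key_ports *tp;

	memset(frame, 0, sizeof(*frame));	/* wildcard by default */

	if (!dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return;

	tp = skb_flow_dissector_target(flow->dissector,
				       FLOW_DISSECTOR_KEY_PORTS, target);
	frame->port_src = tp->src;
	frame->port_dst = tp->dst;
}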
*/ switch (key_basic->ip_proto) { case IPPROTO_TCP: diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index dd769eceb33d..f055b1774d65 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -98,21 +98,20 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) struct nfp_pf *pf = pci_get_drvdata(pdev); int err; - mutex_lock(&pf->lock); - if (num_vfs > pf->limit_vfs) { nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n", pf->limit_vfs); - err = -EINVAL; - goto err_unlock; + return -EINVAL; } err = pci_enable_sriov(pdev, num_vfs); if (err) { dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err); - goto err_unlock; + return err; } + mutex_lock(&pf->lock); + err = nfp_app_sriov_enable(pf->app, num_vfs); if (err) { dev_warn(&pdev->dev, @@ -129,9 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) return num_vfs; err_sriov_disable: - pci_disable_sriov(pdev); -err_unlock: mutex_unlock(&pf->lock); + pci_disable_sriov(pdev); return err; #endif return 0; @@ -158,10 +156,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev) pf->num_vfs = 0; + mutex_unlock(&pf->lock); + pci_disable_sriov(pdev); dev_dbg(&pdev->dev, "Removed VFs.\n"); - - mutex_unlock(&pf->lock); #endif return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index ecbec28cfa76..2920889fa6d6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -896,6 +896,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) netdev_tx_sent_queue(nd_q, txbuf->real_len); + skb_tx_timestamp(skb); + tx_ring->wr_p += nr_frags + 1; if (nfp_net_tx_ring_should_stop(tx_ring)) nfp_net_tx_ring_stop(nd_q, tx_ring); @@ -904,8 +906,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) if (!skb->xmit_more || netif_xmit_stopped(nd_q)) nfp_net_tx_xmit_more_flush(tx_ring); - skb_tx_timestamp(skb); - return NETDEV_TX_OK; err_unmap: @@ -1752,6 +1752,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) continue; } + nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); + + nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); + if (likely(!meta.portid)) { netdev = dp->netdev; } else { @@ -1760,16 +1764,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) nn = netdev_priv(dp->netdev); netdev = nfp_app_repr_get(nn->app, meta.portid); if (unlikely(!netdev)) { - nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); + nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb); continue; } nfp_repr_inc_rx_stats(netdev, pkt_len); } - nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); - - nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); - skb_reserve(skb, pkt_off); skb_put(skb, pkt_len); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 2da083fd5e13..7c22cc4654b7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -457,13 +457,9 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf) { int err; - err = nfp_net_pf_app_start_ctrl(pf); - if (err) - return err; - err = nfp_app_start(pf->app, pf->ctrl_vnic); if (err) - goto err_ctrl_stop; + return err; if (pf->num_vfs) { err = nfp_app_sriov_enable(pf->app, pf->num_vfs); @@ -475,8 +471,6 @@ static int 
nfp_net_pf_app_start(struct nfp_pf *pf) err_app_stop: nfp_app_stop(pf->app); -err_ctrl_stop: - nfp_net_pf_app_stop_ctrl(pf); return err; } @@ -485,7 +479,6 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf) if (pf->num_vfs) nfp_app_sriov_disable(pf->app); nfp_app_stop(pf->app); - nfp_net_pf_app_stop_ctrl(pf); } static void nfp_net_pci_unmap_mem(struct nfp_pf *pf) @@ -577,7 +570,7 @@ err_unmap_ctrl: static void nfp_net_pci_remove_finish(struct nfp_pf *pf) { - nfp_net_pf_app_stop(pf); + nfp_net_pf_app_stop_ctrl(pf); /* stop app first, to avoid double free of ctrl vNIC's ddir */ nfp_net_debugfs_dir_clean(&pf->ddir); @@ -708,6 +701,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) { struct nfp_net_fw_version fw_ver; u8 __iomem *ctrl_bar, *qc_bar; + struct nfp_net *nn; int stride; int err; @@ -784,7 +778,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_free_vnics; - err = nfp_net_pf_app_start(pf); + err = nfp_net_pf_app_start_ctrl(pf); if (err) goto err_free_irqs; @@ -792,12 +786,20 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_stop_app; + err = nfp_net_pf_app_start(pf); + if (err) + goto err_clean_vnics; + mutex_unlock(&pf->lock); return 0; +err_clean_vnics: + list_for_each_entry(nn, &pf->vnics, vnic_list) + if (nfp_net_is_data_vnic(nn)) + nfp_net_pf_clean_vnic(pf, nn); err_stop_app: - nfp_net_pf_app_stop(pf); + nfp_net_pf_app_stop_ctrl(pf); err_free_irqs: nfp_net_pf_free_irqs(pf); err_free_vnics: @@ -821,6 +823,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf) if (list_empty(&pf->vnics)) goto out; + nfp_net_pf_app_stop(pf); + list_for_each_entry(nn, &pf->vnics, vnic_list) if (nfp_net_is_data_vnic(nn)) nfp_net_pf_clean_vnic(pf, nn); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c index 458d55ba423f..fe2599b83d09 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c @@ -705,7 +705,7 @@ static void ql_build_coredump_seg_header( seg_hdr->cookie = MPI_COREDUMP_COOKIE; seg_hdr->segNum = seg_number; seg_hdr->segSize = seg_size; - memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); + strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); } /* diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index bd07a15d3b7c..e03fcf914690 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6863,8 +6863,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, tp->TxDescArray + entry); if (skb) { - tp->dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); tx_skb->skb = NULL; } } @@ -7319,7 +7318,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) tp->tx_stats.packets++; tp->tx_stats.bytes += tx_skb->skb->len; u64_stats_update_end(&tp->tx_stats.syncp); - dev_kfree_skb_any(tx_skb->skb); + dev_consume_skb_any(tx_skb->skb); tx_skb->skb = NULL; } dirty_tx++; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 73427e29df2a..fbd00cb0cb7d 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -47,6 +47,8 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev, plat->mdio_bus_data = devm_kzalloc(&pdev->dev, sizeof(*plat->mdio_bus_data), GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; dma_cfg = 
devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); if (!dma_cfg) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 17d4bbaeb65c..6e359572b9f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); ctrl |= val << reg_shift; - if (dwmac->f2h_ptp_ref_clk) { + if (dwmac->f2h_ptp_ref_clk || + phymode == PHY_INTERFACE_MODE_MII || + phymode == PHY_INTERFACE_MODE_GMII || + phymode == PHY_INTERFACE_MODE_SGMII) { ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, &module); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index fffd6d5fc907..39c2122a4f26 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -979,14 +979,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) } static const struct of_device_id sun8i_dwmac_match[] = { - { .compatible = "allwinner,sun8i-h3-emac", - .data = &emac_variant_h3 }, - { .compatible = "allwinner,sun8i-v3s-emac", - .data = &emac_variant_v3s }, - { .compatible = "allwinner,sun8i-a83t-emac", - .data = &emac_variant_a83t }, - { .compatible = "allwinner,sun50i-a64-emac", - .data = &emac_variant_a64 }, { } }; MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index 56ba411421f0..38d1cc557c11 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c @@ -96,7 +96,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) if (of_machine_is_compatible("ti,dra7")) return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr); - dev_err(dev, "incompatible machine/device type for reading mac address\n"); + dev_info(dev, "incompatible machine/device type for reading mac address\n"); return -ENOENT; } EXPORT_SYMBOL_GPL(ti_cm_get_macid); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index fac44c5c8d0d..05ee870c3636 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1578,7 +1578,12 @@ static void netvsc_link_change(struct work_struct *w) bool notify = false, reschedule = false; unsigned long flags, next_reconfig, delay; - rtnl_lock(); + /* if changes are happening, come back later */ + if (!rtnl_trylock()) { + schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); + return; + } + net_device = rtnl_dereference(ndev_ctx->nvdev); if (!net_device) goto out_unlock; diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 5e1ab1160856..98e4deaa3a6a 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3521,6 +3521,7 @@ module_init(macsec_init); module_exit(macsec_exit); MODULE_ALIAS_RTNL_LINK("macsec"); +MODULE_ALIAS_GENL_FAMILY("macsec"); MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index dae13f028c84..e842d2cd1ee7 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -593,9 +593,6 @@ void phy_stop_machine(struct phy_device *phydev) if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); - - /* Now we can run the state
machine synchronously */ - phy_state_machine(&phydev->state_queue.work); } /** diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 9493fb369682..810f6fd2f639 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -877,15 +877,17 @@ EXPORT_SYMBOL(phy_attached_info); #define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)" void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) { + const char *drv_name = phydev->drv ? phydev->drv->name : "unbound"; + if (!fmt) { dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n", - phydev->drv->name, phydev_name(phydev), + drv_name, phydev_name(phydev), phydev->irq); } else { va_list ap; dev_info(&phydev->mdio.dev, ATTACHED_FMT, - phydev->drv->name, phydev_name(phydev), + drv_name, phydev_name(phydev), phydev->irq); va_start(ap, fmt); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 811b18215cae..47cab1bde065 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1758,6 +1758,13 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long)&wwan_noarp_info, }, + /* u-blox TOBY-L4 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_info, + }, + /* Generic CDC-NCM devices */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 52ae78ca3d38..511f8339fa96 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1123,7 +1123,7 @@ static void free_old_xmit_skbs(struct send_queue *sq) bytes += skb->len; packets++; - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } /* Avoid overhead when no packets have been processed diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 79020cf8c79c..4fb7647995c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -788,6 +788,8 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); +void iwl_pcie_rx_allocator_work(struct work_struct *data); + /* common functions that are used by gen2 transport */ void iwl_pcie_apm_config(struct iwl_trans *trans); int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index e5d2bf0bde37..a06b6612b658 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -597,7 +597,7 @@ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, rxq->free_count += RX_CLAIM_REQ_ALLOC; } -static void iwl_pcie_rx_allocator_work(struct work_struct *data) +void iwl_pcie_rx_allocator_work(struct work_struct *data) { struct iwl_rb_allocator *rba_p = container_of(data, struct iwl_rb_allocator, rx_alloc); @@ -900,10 +900,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) return err; } def_rxq = trans_pcie->rxq; - if (!rba->alloc_wq) - rba->alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); - INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); spin_lock(&rba->lock); atomic_set(&rba->req_pending, 0); @@ -1017,10 +1013,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) } cancel_work_sync(&rba->rx_alloc); - if (rba->alloc_wq) { - destroy_workqueue(rba->alloc_wq); 
- rba->alloc_wq = NULL; - } iwl_pcie_free_rbs_pool(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 58873cc27396..2e3e013ec95a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1786,6 +1786,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); + if (trans_pcie->rba.alloc_wq) { + destroy_workqueue(trans_pcie->rba.alloc_wq); + trans_pcie->rba.alloc_wq = NULL; + } + if (trans_pcie->msix_enabled) { for (i = 0; i < trans_pcie->alloc_vecs; i++) { irq_set_affinity_hint( @@ -3181,6 +3186,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->inta_mask = CSR_INI_SET_MASK; } + trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); + #ifdef CONFIG_IWLWIFI_PCIE_RTPM trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; #else diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 9a03c5871efe..f58d8e305323 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -924,10 +924,8 @@ out1: ntb_free_mw(nt, i); /* if there's an actual failure, we should just bail */ - if (rc < 0) { - ntb_link_disable(ndev); + if (rc < 0) return; - } out: if (ntb_link_is_up(ndev, NULL, NULL) == 1) @@ -1059,7 +1057,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) int node; int rc, i; - mw_count = ntb_mw_count(ndev, PIDX); + mw_count = ntb_peer_mw_count(ndev); if (!ndev->ops->mw_set_trans) { dev_err(&ndev->dev, "Inbound MW based NTB API is required\n"); diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c index f002bf48a08d..a69815c45ce6 100644 --- a/drivers/ntb/test/ntb_tool.c +++ b/drivers/ntb/test/ntb_tool.c @@ -959,7 +959,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb) tc->ntb = ntb; init_waitqueue_head(&tc->link_wq); - tc->mw_count = min(ntb_mw_count(tc->ntb, PIDX), MAX_MWS); + tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS); for (i = 0; i < tc->mw_count; i++) { rc = tool_init_mw(tc, i); if (rc) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 925467b31a33..ea892e732268 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -109,6 +109,7 @@ struct nvme_dev { /* host memory buffer support: */ u64 host_mem_size; u32 nr_host_mem_descs; + dma_addr_t host_mem_descs_dma; struct nvme_host_mem_buf_desc *host_mem_descs; void **host_mem_desc_bufs; }; @@ -1565,16 +1566,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev) static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) { - size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs); + u64 dma_addr = dev->host_mem_descs_dma; struct nvme_command c; - u64 dma_addr; int ret; - dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev->dev, dma_addr)) - return -ENOMEM; - memset(&c, 0, sizeof(c)); c.features.opcode = nvme_admin_set_features; c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); @@ -1591,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) "failed to set host mem (err %d, flags %#x).\n", ret, bits); } - dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE); return ret; } @@ -1609,7 +1603,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev) kfree(dev->host_mem_desc_bufs); dev->host_mem_desc_bufs 
= NULL; - kfree(dev->host_mem_descs); + dma_free_coherent(dev->dev, + dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), + dev->host_mem_descs, dev->host_mem_descs_dma); dev->host_mem_descs = NULL; } @@ -1617,6 +1613,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) { struct nvme_host_mem_buf_desc *descs; u32 chunk_size, max_entries, len; + dma_addr_t descs_dma; int i = 0; void **bufs; u64 size = 0, tmp; @@ -1627,7 +1624,8 @@ retry: tmp = (preferred + chunk_size - 1); do_div(tmp, chunk_size); max_entries = tmp; - descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL); + descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), + &descs_dma, GFP_KERNEL); if (!descs) goto out; @@ -1661,6 +1659,7 @@ retry: dev->nr_host_mem_descs = i; dev->host_mem_size = size; dev->host_mem_descs = descs; + dev->host_mem_descs_dma = descs_dma; dev->host_mem_desc_bufs = bufs; return 0; @@ -1674,7 +1673,8 @@ out_free_bufs: kfree(bufs); out_free_descs: - kfree(descs); + dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, + descs_dma); out: /* try a smaller chunk size if we failed early */ if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) { diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index da04df1af231..a03299d77922 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -920,7 +920,11 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; int nr; - nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE); + /* + * Align the MR to a 4K page size to match the ctrl page size and + * the block virtual boundary. + */ + nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K); if (nr < count) { if (nr < 0) return nr; @@ -1583,7 +1587,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) goto out_cleanup_queue; ctrl->ctrl.max_hw_sectors = - (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9); + (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); error = nvme_init_identify(&ctrl->ctrl); if (error) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 253d92409bb3..2225afc1cbbb 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -538,12 +538,9 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) struct msi_desc *entry; u16 control; - if (affd) { + if (affd) masks = irq_create_affinity_masks(nvec, affd); - if (!masks) - dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n", - nvec); - } + /* MSI Entry Initialization */ entry = alloc_msi_entry(&dev->dev, nvec, masks); @@ -679,12 +676,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, struct msi_desc *entry; int ret, i; - if (affd) { + if (affd) masks = irq_create_affinity_masks(nvec, affd); - if (!masks) - dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n", - nvec); - } for (i = 0, curmsk = masks; i < nvec; i++) { entry = alloc_msi_entry(&dev->dev, 1, curmsk); diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index ba6ac83a6c25..5ccfdc80d0ec 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -481,7 +481,7 @@ static int ccwchain_fetch_tic(struct ccwchain *chain, ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1); if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) { - ccw->cda = (__u32) (addr_t) (iter->ch_ccw + + ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) + (ccw->cda - ccw_head)); return 
0; } diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index f4538d7a3016..d145e0d90227 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -47,6 +47,17 @@ config SCSI_NETLINK default n depends on NET +config SCSI_MQ_DEFAULT + bool "SCSI: use blk-mq I/O path by default" + depends on SCSI + ---help--- + This option enables the new blk-mq based I/O path for SCSI + devices by default. With the option the scsi_mod.use_blk_mq + module/boot option defaults to Y, without it to N, but it can + still be overridden either way. + + If unsure say N. + config SCSI_PROC_FS bool "legacy /proc/scsi/ support" depends on SCSI && PROC_FS diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 4591113c49de..a1a2c71e1626 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -549,7 +549,9 @@ static void get_container_name_callback(void *context, struct fib * fibptr) if ((le32_to_cpu(get_name_reply->status) == CT_OK) && (get_name_reply->data[0] != '\0')) { char *sp = get_name_reply->data; - sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0'; + int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); + + sp[data_size - 1] = '\0'; while (*sp == ' ') ++sp; if (*sp) { @@ -579,12 +581,15 @@ static void get_container_name_callback(void *context, struct fib * fibptr) static int aac_get_container_name(struct scsi_cmnd * scsicmd) { int status; + int data_size; struct aac_get_name *dinfo; struct fib * cmd_fibcontext; struct aac_dev * dev; dev = (struct aac_dev *)scsicmd->device->host->hostdata; + data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); + cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); aac_fib_init(cmd_fibcontext); @@ -593,7 +598,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) dinfo->command = cpu_to_le32(VM_ContainerConfig); dinfo->type = cpu_to_le32(CT_READ_NAME); dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); - dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); + dinfo->count = cpu_to_le32(data_size - 1); status = aac_fib_send(ContainerCommand, cmd_fibcontext, diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index d31a9bc2ba69..ee2667e20e42 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2274,7 +2274,7 @@ struct aac_get_name_resp { __le32 parm3; __le32 parm4; __le32 parm5; - u8 data[16]; + u8 data[17]; }; #define CT_CID_TO_32BITS_UID 165 diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 2029ad225121..5be0086142ca 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -3845,8 +3845,10 @@ csio_hw_start(struct csio_hw *hw) if (csio_is_hw_ready(hw)) return 0; - else + else if (csio_match_state(hw, csio_hws_uninit)) return -EINVAL; + else + return -ENODEV; } int diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index ea0c31086cc6..dcd074169aa9 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -969,10 +969,14 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, hw); - if (csio_hw_start(hw) != 0) { - dev_err(&pdev->dev, - "Failed to start FW, continuing in debug mode.\n"); - return 0; + rv = csio_hw_start(hw); + if (rv) { + if (rv == -EINVAL) { + dev_err(&pdev->dev, + "Failed to start FW, continuing in debug mode.\n"); + return 0; + } + goto err_lnode_exit; } sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", diff --git 
a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index a69a9ac836f5..1d02cf9fe06c 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1635,6 +1635,9 @@ static int init_act_open(struct cxgbi_sock *csk) goto rel_resource; } + if (!(n->nud_state & NUD_VALID)) + neigh_event_send(n, NULL); + csk->atid = cxgb4_alloc_atid(lldi->tids, csk); if (csk->atid < 0) { pr_err("%s, NO atid available.\n", ndev->name); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index da5bdbdcce52..f838bd73befa 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -4945,6 +4945,7 @@ static int ipr_slave_configure(struct scsi_device *sdev) } if (ipr_is_vset_device(res)) { sdev->scsi_level = SCSI_SPC_3; + sdev->no_report_opcodes = 1; blk_queue_rq_timeout(sdev->request_queue, IPR_VSET_RW_TIMEOUT); blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 316c3df0c3fd..71c4746341ea 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -6228,8 +6228,8 @@ static int megasas_probe_one(struct pci_dev *pdev, fail_start_aen: fail_io_attach: megasas_mgmt_info.count--; - megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; megasas_mgmt_info.max_index--; + megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; instance->instancet->disable_intr(instance); megasas_destroy_irqs(instance); diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index eb07f1de8afa..59c18ca4cda9 100644 --- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c @@ -489,7 +489,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) /* If a SRR times out, simply free resources */ if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) - goto out_free; + goto out_put; /* Normalize response data into struct fc_frame */ mp_req = &(srr_req->mp_req); @@ -501,7 +501,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) if (!fp) { QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failure.\n"); - goto out_free; + goto out_put; } /* Copy frame header from firmware into fp */ @@ -526,9 +526,10 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) } fc_frame_free(fp); -out_free: +out_put: /* Put reference for original command since SRR completed */ kref_put(&orig_io_req->refcount, qedf_release_cmd); +out_free: kfree(cb_arg); } @@ -780,7 +781,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) /* If a REC times out, free resources */ if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) - goto out_free; + goto out_put; /* Normalize response data into struct fc_frame */ mp_req = &(rec_req->mp_req); @@ -792,7 +793,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) if (!fp) { QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failure.\n"); - goto out_free; + goto out_put; } /* Copy frame header from firmware into fp */ @@ -884,9 +885,10 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) out_free_frame: fc_frame_free(fp); -out_free: +out_put: /* Put reference for original command since REC completed */ kref_put(&orig_io_req->refcount, qedf_release_cmd); +out_free: kfree(cb_arg); } diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3d38c6d463b8..1bf274e3b2b6 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -800,7 +800,11 @@ MODULE_LICENSE("GPL"); module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(scsi_logging_level, 
"a bit mask of logging levels"); +#ifdef CONFIG_SCSI_MQ_DEFAULT bool scsi_use_blk_mq = true; +#else +bool scsi_use_blk_mq = false; +#endif module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); static int __init init_scsi(void) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bea36adeee17..e2647f2d4430 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1277,6 +1277,9 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) { struct request *rq = SCpnt->request; + if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) + sd_zbc_write_unlock_zone(SCpnt); + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) __free_page(rq->special_vec.bv_page); diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 96855df9f49d..8aa54779aac1 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -294,6 +294,9 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd) test_and_set_bit(zno, sdkp->zones_wlock)) return BLKPREP_DEFER; + WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK); + cmd->flags |= SCMD_ZONE_WRITE_LOCK; + return BLKPREP_OK; } @@ -302,9 +305,10 @@ void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd) struct request *rq = cmd->request; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); - if (sdkp->zones_wlock) { + if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) { unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq)); WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock)); + cmd->flags &= ~SCMD_ZONE_WRITE_LOCK; clear_bit_unlock(zno, sdkp->zones_wlock); smp_mb__after_atomic(); } @@ -335,9 +339,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE_SAME: - /* Unlock the zone */ - sd_zbc_write_unlock_zone(cmd); - if (result && sshdr->sense_key == ILLEGAL_REQUEST && sshdr->asc == 0x21) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index d7ff71e0c85c..84e782d8e7c3 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1021,7 +1021,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) read_lock_irqsave(&sfp->rq_list_lock, iflags); val = 0; list_for_each_entry(srp, &sfp->rq_list, entry) { - if (val > SG_MAX_QUEUE) + if (val >= SG_MAX_QUEUE) break; memset(&rinfo[val], 0, SZ_SG_REQ_INFO); rinfo[val].req_state = srp->done + 1; diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 279e7c5551dd..39225de9d7f1 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -745,6 +745,9 @@ void *knav_pool_create(const char *name, bool slot_found; int ret; + if (!kdev) + return ERR_PTR(-EPROBE_DEFER); + if (!kdev->dev) return ERR_PTR(-ENODEV); diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c index b37a6f48225f..8ea3920400a0 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c +++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c @@ -16,9 +16,9 @@ static bool __must_check fsl_mc_is_allocatable(const char *obj_type) { - return strcmp(obj_type, "dpbp") || - strcmp(obj_type, "dpmcp") || - strcmp(obj_type, "dpcon"); + return strcmp(obj_type, "dpbp") == 0 || + strcmp(obj_type, "dpmcp") == 0 || + strcmp(obj_type, "dpcon") == 0; } /** diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index d283341cfe43..56cd4e5e51b2 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ 
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ }; diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 1fc80ea87c13..a6d5164c33a9 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -69,13 +69,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp) #ifdef CONFIG_UNIX98_PTYS if (tty->driver == ptm_driver) { mutex_lock(&devpts_mutex); - if (tty->link->driver_data) { - struct path *path = tty->link->driver_data; - - devpts_pty_kill(path->dentry); - path_put(path); - kfree(path); - } + if (tty->link->driver_data) + devpts_pty_kill(tty->link->driver_data); mutex_unlock(&devpts_mutex); } #endif @@ -607,25 +602,24 @@ static inline void legacy_pty_init(void) { } static struct cdev ptmx_cdev; /** - * pty_open_peer - open the peer of a pty - * @tty: the peer of the pty being opened + * ptm_open_peer - open the peer of a pty + * @master: the open struct file of the ptmx device node + * @tty: the master of the pty being opened + * @flags: the flags for open * - * Open the cached dentry in tty->link, providing a safe way for userspace - * to get the slave end of a pty (where they have the master fd and cannot - * access or trust the mount namespace /dev/pts was mounted inside). + * Provide a race free way for userspace to open the slave end of a pty + * (where they have the master fd and cannot access or trust the mount + * namespace /dev/pts was mounted inside). */ -static struct file *pty_open_peer(struct tty_struct *tty, int flags) -{ - if (tty->driver->subtype != PTY_TYPE_MASTER) - return ERR_PTR(-EIO); - return dentry_open(tty->link->driver_data, flags, current_cred()); -} - -static int pty_get_peer(struct tty_struct *tty, int flags) +int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) { int fd = -1; - struct file *filp = NULL; + struct file *filp; int retval = -EINVAL; + struct path path; + + if (tty->driver != ptm_driver) + return -EIO; fd = get_unused_fd_flags(0); if (fd < 0) { @@ -633,7 +627,16 @@ static int pty_get_peer(struct tty_struct *tty, int flags) goto err; } - filp = pty_open_peer(tty, flags); + /* Compute the slave's path */ + path.mnt = devpts_mntget(master, tty->driver_data); + if (IS_ERR(path.mnt)) { + retval = PTR_ERR(path.mnt); + goto err_put; + } + path.dentry = tty->link->driver_data; + + filp = dentry_open(&path, flags, current_cred()); + mntput(path.mnt); if (IS_ERR(filp)) { retval = PTR_ERR(filp); goto err_put; @@ -662,8 +665,6 @@ static int pty_unix98_ioctl(struct tty_struct *tty, return pty_get_pktmode(tty, (int __user *)arg); case TIOCGPTN: /* Get PT Number */ return put_user(tty->index, (unsigned int __user *)arg); - case TIOCGPTPEER: /* Open the other end */ - return pty_get_peer(tty, (int) arg); case TIOCSIG: /* Send signal to other side of pty */ return pty_signal(tty, (int) arg); } @@ -791,9 +792,7 @@ static int ptmx_open(struct inode *inode, struct file *filp) { struct pts_fs_info *fsi; struct tty_struct *tty; - struct path *pts_path; struct dentry *dentry; - struct vfsmount *mnt; int retval; int index; @@ -806,7 +805,7 @@ static int ptmx_open(struct inode *inode, struct file *filp) if (retval) return retval; - fsi = devpts_acquire(filp, &mnt); + fsi = devpts_acquire(filp); if (IS_ERR(fsi)) { retval = PTR_ERR(fsi); goto out_free_file; @@ -846,28 +845,17 @@ static int ptmx_open(struct inode *inode, struct file *filp) retval = PTR_ERR(dentry); goto err_release; 
} - /* We need to cache a fake path for TIOCGPTPEER. */ - pts_path = kmalloc(sizeof(struct path), GFP_KERNEL); - if (!pts_path) - goto err_release; - pts_path->mnt = mnt; - pts_path->dentry = dentry; - path_get(pts_path); - tty->link->driver_data = pts_path; + tty->link->driver_data = dentry; retval = ptm_driver->ops->open(tty, filp); if (retval) - goto err_path_put; + goto err_release; tty_debug_hangup(tty, "opening (count=%d)\n", tty->count); tty_unlock(tty); return 0; -err_path_put: - path_put(pts_path); - kfree(pts_path); err_release: - mntput(mnt); tty_unlock(tty); // This will also put-ref the fsi tty_release(inode, filp); @@ -876,7 +864,6 @@ out: devpts_kill_index(fsi, index); out_put_fsi: devpts_release(fsi); - mntput(mnt); out_free_file: tty_free_file(filp); return retval; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 974b13d24401..10c4038c0e8d 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2518,6 +2518,9 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case TIOCSSERIAL: tty_warn_deprecated_flags(p); break; + case TIOCGPTPEER: + /* Special because the struct file is needed */ + return ptm_open_peer(file, tty, (int)arg); default: retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg); if (retval != -ENOIOCTLCMD) diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 007a4f366086..1c4797e53f68 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); + unsigned flags = PCI_IRQ_MSIX; unsigned i, v; int err = -ENOMEM; @@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, GFP_KERNEL)) goto error; + if (desc) { + flags |= PCI_IRQ_AFFINITY; + desc->pre_vectors++; /* virtio config vector */ + } + err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, - nvectors, PCI_IRQ_MSIX | - (desc ? 
PCI_IRQ_AFFINITY : 0), - desc); + nvectors, flags, desc); if (err < 0) goto error; vp_dev->msix_enabled = 1; diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 8feab810aed9..7f188b8d0c67 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -7,9 +7,6 @@ obj-y += xenbus/ nostackp := $(call cc-option, -fno-stack-protector) CFLAGS_features.o := $(nostackp) -CFLAGS_efi.o += -fshort-wchar -LDFLAGS += $(call ld-option, --no-wchar-size-warning) - dom0-$(CONFIG_ARM64) += arm-device.o dom0-$(CONFIG_PCI) += pci.o dom0-$(CONFIG_USB_SUPPORT) += dbgp.o diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index f3bf8f4e2d6c..82360594fa8e 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -484,13 +484,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn, mutex_unlock(&priv->lock); } -static void mn_invl_page(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address) -{ - mn_invl_range_start(mn, mm, address, address + PAGE_SIZE); -} - static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) { @@ -522,7 +515,6 @@ static void mn_release(struct mmu_notifier *mn, static const struct mmu_notifier_ops gntdev_mmu_ops = { .release = mn_release, - .invalidate_page = mn_invl_page, .invalidate_range_start = mn_invl_range_start, }; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 080e2ebb8aa0..f45b61fe9a9a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3516,7 +3516,7 @@ static blk_status_t wait_dev_flush(struct btrfs_device *device) struct bio *bio = device->flush_bio; if (!device->flush_bio_sent) - return 0; + return BLK_STS_OK; device->flush_bio_sent = 0; wait_for_completion_io(&device->flush_wait); @@ -3563,7 +3563,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info) continue; write_dev_flush(dev); - dev->last_flush_error = 0; + dev->last_flush_error = BLK_STS_OK; } /* wait for all the barriers */ diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 95c212037095..24bcd5cd9cf2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7924,11 +7924,12 @@ err: return ret; } -static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio, - int mirror_num) +static inline blk_status_t submit_dio_repair_bio(struct inode *inode, + struct bio *bio, + int mirror_num) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - int ret; + blk_status_t ret; BUG_ON(bio_op(bio) == REQ_OP_WRITE); @@ -7980,10 +7981,10 @@ static int btrfs_check_dio_repairable(struct inode *inode, return 1; } -static int dio_read_error(struct inode *inode, struct bio *failed_bio, - struct page *page, unsigned int pgoff, - u64 start, u64 end, int failed_mirror, - bio_end_io_t *repair_endio, void *repair_arg) +static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio, + struct page *page, unsigned int pgoff, + u64 start, u64 end, int failed_mirror, + bio_end_io_t *repair_endio, void *repair_arg) { struct io_failure_record *failrec; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; @@ -7993,18 +7994,19 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, int read_mode = 0; int segs; int ret; + blk_status_t status; BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); ret = btrfs_get_io_failure_record(inode, start, end, &failrec); if (ret) - return ret; + return errno_to_blk_status(ret); ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, failed_mirror); if (!ret) { free_io_failure(failure_tree, io_tree, failrec); - return -EIO; + return BLK_STS_IOERR; } segs = 
bio_segments(failed_bio); @@ -8022,13 +8024,13 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n", read_mode, failrec->this_mirror, failrec->in_validation); - ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror); - if (ret) { + status = submit_dio_repair_bio(inode, bio, failrec->this_mirror); + if (status) { free_io_failure(failure_tree, io_tree, failrec); bio_put(bio); } - return ret; + return status; } struct btrfs_retry_complete { @@ -8065,8 +8067,8 @@ end: bio_put(bio); } -static int __btrfs_correct_data_nocsum(struct inode *inode, - struct btrfs_io_bio *io_bio) +static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode, + struct btrfs_io_bio *io_bio) { struct btrfs_fs_info *fs_info; struct bio_vec bvec; @@ -8076,8 +8078,8 @@ static int __btrfs_correct_data_nocsum(struct inode *inode, unsigned int pgoff; u32 sectorsize; int nr_sectors; - int ret; - int err = 0; + blk_status_t ret; + blk_status_t err = BLK_STS_OK; fs_info = BTRFS_I(inode)->root->fs_info; sectorsize = fs_info->sectorsize; @@ -8183,11 +8185,12 @@ static blk_status_t __btrfs_subio_endio_read(struct inode *inode, int csum_pos; bool uptodate = (err == 0); int ret; + blk_status_t status; fs_info = BTRFS_I(inode)->root->fs_info; sectorsize = fs_info->sectorsize; - err = 0; + err = BLK_STS_OK; start = io_bio->logical; done.inode = inode; io_bio->bio.bi_iter = io_bio->iter; @@ -8209,12 +8212,12 @@ try_again: done.start = start; init_completion(&done.done); - ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page, - pgoff, start, start + sectorsize - 1, - io_bio->mirror_num, - btrfs_retry_endio, &done); - if (ret) { - err = errno_to_blk_status(ret); + status = dio_read_error(inode, &io_bio->bio, bvec.bv_page, + pgoff, start, start + sectorsize - 1, + io_bio->mirror_num, btrfs_retry_endio, + &done); + if (status) { + err = status; goto next; } @@ -8250,7 +8253,7 @@ static blk_status_t btrfs_subio_endio_read(struct inode *inode, if (unlikely(err)) return __btrfs_correct_data_nocsum(inode, io_bio); else - return 0; + return BLK_STS_OK; } else { return __btrfs_subio_endio_read(inode, io_bio, err); } @@ -8423,9 +8426,9 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode, return 0; } -static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, - u64 file_offset, int skip_sum, - int async_submit) +static inline blk_status_t +__btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset, + int skip_sum, int async_submit) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_dio_private *dip = bio->bi_private; @@ -8488,6 +8491,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, int clone_offset = 0; int clone_len; int ret; + blk_status_t status; map_length = orig_bio->bi_iter.bi_size; submit_len = map_length; @@ -8537,9 +8541,9 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, */ atomic_inc(&dip->pending_bios); - ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum, - async_submit); - if (ret) { + status = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum, + async_submit); + if (status) { bio_put(bio); atomic_dec(&dip->pending_bios); goto out_err; @@ -8557,9 +8561,9 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip, } while (submit_len > 0); submit: - ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum, - async_submit); - if (!ret) + status = 
__btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum, + async_submit); + if (!status) return 0; bio_put(bio); diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 208638384cd2..2cf6ba40f7c4 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -905,7 +905,7 @@ static void raid_write_end_io(struct bio *bio) if (!atomic_dec_and_test(&rbio->stripes_pending)) return; - err = 0; + err = BLK_STS_OK; /* OK, we have read all the stripes we need to. */ max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? @@ -1324,7 +1324,7 @@ write_data: return; cleanup: - rbio_orig_end_io(rbio, -EIO); + rbio_orig_end_io(rbio, BLK_STS_IOERR); } /* @@ -1475,7 +1475,7 @@ static void raid_rmw_end_io(struct bio *bio) cleanup: - rbio_orig_end_io(rbio, -EIO); + rbio_orig_end_io(rbio, BLK_STS_IOERR); } static void async_rmw_stripe(struct btrfs_raid_bio *rbio) @@ -1579,7 +1579,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) return 0; cleanup: - rbio_orig_end_io(rbio, -EIO); + rbio_orig_end_io(rbio, BLK_STS_IOERR); return -EIO; finish: @@ -1795,12 +1795,12 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) void **pointers; int faila = -1, failb = -1; struct page *page; - int err; + blk_status_t err; int i; pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); if (!pointers) { - err = -ENOMEM; + err = BLK_STS_RESOURCE; goto cleanup_io; } @@ -1856,7 +1856,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) * a bad data or Q stripe. * TODO, we should redo the xor here. */ - err = -EIO; + err = BLK_STS_IOERR; goto cleanup; } /* @@ -1882,7 +1882,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { if (rbio->bbio->raid_map[faila] == RAID5_P_STRIPE) { - err = -EIO; + err = BLK_STS_IOERR; goto cleanup; } /* @@ -1954,13 +1954,13 @@ pstripe: } } - err = 0; + err = BLK_STS_OK; cleanup: kfree(pointers); cleanup_io: if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { - if (err == 0) + if (err == BLK_STS_OK) cache_rbio_pages(rbio); else clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); @@ -1968,7 +1968,7 @@ cleanup_io: rbio_orig_end_io(rbio, err); } else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { rbio_orig_end_io(rbio, err); - } else if (err == 0) { + } else if (err == BLK_STS_OK) { rbio->faila = -1; rbio->failb = -1; @@ -2005,7 +2005,7 @@ static void raid_recover_end_io(struct bio *bio) return; if (atomic_read(&rbio->error) > rbio->bbio->max_errors) - rbio_orig_end_io(rbio, -EIO); + rbio_orig_end_io(rbio, BLK_STS_IOERR); else __raid_recover_end_io(rbio); } @@ -2104,7 +2104,7 @@ out: cleanup: if (rbio->operation == BTRFS_RBIO_READ_REBUILD || rbio->operation == BTRFS_RBIO_REBUILD_MISSING) - rbio_orig_end_io(rbio, -EIO); + rbio_orig_end_io(rbio, BLK_STS_IOERR); return -EIO; } @@ -2431,7 +2431,7 @@ submit_write: nr_data = bio_list_size(&bio_list); if (!nr_data) { /* Every parity is right */ - rbio_orig_end_io(rbio, 0); + rbio_orig_end_io(rbio, BLK_STS_OK); return; } @@ -2451,7 +2451,7 @@ submit_write: return; cleanup: - rbio_orig_end_io(rbio, -EIO); + rbio_orig_end_io(rbio, BLK_STS_IOERR); } static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) @@ -2519,7 +2519,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) return; cleanup: - rbio_orig_end_io(rbio, -EIO); + rbio_orig_end_io(rbio, BLK_STS_IOERR); } /* @@ -2633,7 +2633,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) return; cleanup: - rbio_orig_end_io(rbio, -EIO); + 
rbio_orig_end_io(rbio, BLK_STS_IOERR); return; finish: diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e8b9a269fdde..bd679bc7a1a9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6212,8 +6212,8 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) } } -int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, - int mirror_num, int async_submit) +blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, + int mirror_num, int async_submit) { struct btrfs_device *dev; struct bio *first_bio = bio; @@ -6233,7 +6233,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, &map_length, &bbio, mirror_num, 1); if (ret) { btrfs_bio_counter_dec(fs_info); - return ret; + return errno_to_blk_status(ret); } total_devs = bbio->num_stripes; @@ -6256,7 +6256,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, } btrfs_bio_counter_dec(fs_info); - return ret; + return errno_to_blk_status(ret); } if (map_length < length) { @@ -6283,7 +6283,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, dev_nr, async_submit); } btrfs_bio_counter_dec(fs_info); - return 0; + return BLK_STS_OK; } struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid, diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 6f45fd60d15a..93277fc60930 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -74,7 +74,7 @@ struct btrfs_device { int missing; int can_discard; int is_tgtdev_for_dev_replace; - int last_flush_error; + blk_status_t last_flush_error; int flush_bio_sent; #ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED @@ -416,8 +416,8 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 type); void btrfs_mapping_init(struct btrfs_mapping_tree *tree); void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); -int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, - int mirror_num, int async_submit); +blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, + int mirror_num, int async_submit); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder); int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 50836280a6f8..1bc709fe330a 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g) /* * read a single page, without unlocking it. */ -static int readpage_nounlock(struct file *filp, struct page *page) +static int ceph_do_readpage(struct file *filp, struct page *page) { struct inode *inode = file_inode(filp); struct ceph_inode_info *ci = ceph_inode(inode); @@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page) err = ceph_readpage_from_fscache(inode, page); if (err == 0) - goto out; + return -EINPROGRESS; dout("readpage inode %p file %p page %p index %lu\n", inode, filp, page, page->index); @@ -249,8 +249,11 @@ out: static int ceph_readpage(struct file *filp, struct page *page) { - int r = readpage_nounlock(filp, page); - unlock_page(page); + int r = ceph_do_readpage(filp, page); + if (r != -EINPROGRESS) + unlock_page(page); + else + r = 0; return r; } @@ -1237,7 +1240,7 @@ retry_locked: goto retry_locked; r = writepage_nounlock(page, NULL); if (r < 0) - goto fail_nosnap; + goto fail_unlock; goto retry_locked; } @@ -1265,11 +1268,14 @@ retry_locked: } /* we need to read it. 
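	 * ceph_do_readpage() may return -EINPROGRESS when the page is being
	 * filled asynchronously from fscache; in that case the page must
	 * stay locked, so the caller returns -EAGAIN instead of unlocking.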
*/ - r = readpage_nounlock(file, page); - if (r < 0) - goto fail_nosnap; + r = ceph_do_readpage(file, page); + if (r < 0) { + if (r == -EINPROGRESS) + return -EAGAIN; + goto fail_unlock; + } goto retry_locked; -fail_nosnap: +fail_unlock: unlock_page(page); return r; } diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c index fd1172823f86..337f88673ed9 100644 --- a/fs/ceph/cache.c +++ b/fs/ceph/cache.c @@ -297,13 +297,7 @@ void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp) } } -static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) -{ - if (!error) - SetPageUptodate(page); -} - -static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error) +static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error) { if (!error) SetPageUptodate(page); @@ -331,7 +325,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page) return -ENOBUFS; ret = fscache_read_or_alloc_page(ci->fscache, page, - ceph_vfs_readpage_complete, NULL, + ceph_readpage_from_fscache_complete, NULL, GFP_KERNEL); switch (ret) { @@ -360,7 +354,7 @@ int ceph_readpages_from_fscache(struct inode *inode, return -ENOBUFS; ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, - ceph_vfs_readpage_complete_unlock, + ceph_readpage_from_fscache_complete, NULL, mapping_gfp_mask(mapping)); switch (ret) { diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 56366e984076..e702d48bd023 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -194,15 +194,20 @@ cifs_bp_rename_retry: } /* + * Don't allow path components longer than the server max. * Don't allow the separator character in a path component. * The VFS will not allow "/", but "\" is allowed by posix. */ static int -check_name(struct dentry *direntry) +check_name(struct dentry *direntry, struct cifs_tcon *tcon) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); int i; + if (unlikely(direntry->d_name.len > + le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) + return -ENAMETOOLONG; + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { for (i = 0; i < direntry->d_name.len; i++) { if (direntry->d_name.name[i] == '\\') { @@ -500,10 +505,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, return finish_no_open(file, res); } - rc = check_name(direntry); - if (rc) - return rc; - xid = get_xid(); cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", @@ -516,6 +517,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, } tcon = tlink_tcon(tlink); + + rc = check_name(direntry, tcon); + if (rc) + goto out_free_xid; + server = tcon->ses->server; if (server->ops->new_lease_key) @@ -776,7 +782,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } pTcon = tlink_tcon(tlink); - rc = check_name(direntry); + rc = check_name(direntry, pTcon); if (rc) goto lookup_out; diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 5fb2fc2d0080..97edb4d376cd 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -3219,8 +3219,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); - kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); - kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); + kst->f_bfree = kst->f_bavail = + le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); return; } diff --git 
a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 18700fd25a0b..2826882c81d1 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -84,8 +84,8 @@ #define NUMBER_OF_SMB2_COMMANDS 0x0013 -/* BB FIXME - analyze following length BB */ -#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */ +/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ +#define MAX_SMB2_HDR_SIZE 0x00b0 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd) diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c @@ -646,11 +646,10 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, pte_t pte, *ptep = NULL; pmd_t *pmdp = NULL; spinlock_t *ptl; - bool changed; i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { - unsigned long address; + unsigned long address, start, end; cond_resched(); @@ -658,8 +657,13 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, continue; address = pgoff_address(index, vma); - changed = false; - if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl)) + + /* + * Note because we provide start/end to follow_pte_pmd it will + * call mmu_notifier_invalidate_range_start() on our behalf + * before taking any lock. + */ + if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl)) continue; if (pmdp) { @@ -676,7 +680,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, pmd = pmd_wrprotect(pmd); pmd = pmd_mkclean(pmd); set_pmd_at(vma->vm_mm, address, pmdp, pmd); - changed = true; + mmu_notifier_invalidate_range(vma->vm_mm, start, end); unlock_pmd: spin_unlock(ptl); #endif @@ -691,13 +695,12 @@ unlock_pmd: pte = pte_wrprotect(pte); pte = pte_mkclean(pte); set_pte_at(vma->vm_mm, address, ptep, pte); - changed = true; + mmu_notifier_invalidate_range(vma->vm_mm, start, end); unlock_pte: pte_unmap_unlock(ptep, ptl); } - if (changed) - mmu_notifier_invalidate_page(vma->vm_mm, address); + mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); } i_mmap_unlock_read(mapping); } @@ -1383,6 +1386,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); + /* + * Make sure that the faulting address's PMD offset (color) matches + * the PMD offset from the start of the file. This is necessary so + * that a PMD range in the page table overlaps exactly with a PMD + * range in the radix tree. + */ + if ((vmf->pgoff & PG_PMD_COLOUR) != + ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) + goto fallback; + /* Fall back to PTEs if we're going to COW */ if (write && !(vma->vm_flags & VM_SHARED)) goto fallback; diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 44dfbca9306f..7eae33ffa3fc 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -133,7 +133,51 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) return sb->s_fs_info; } -struct pts_fs_info *devpts_acquire(struct file *filp, struct vfsmount **ptsmnt) +static int devpts_ptmx_path(struct path *path) +{ + struct super_block *sb; + int err; + + /* Has the devpts filesystem already been found? */ + if (path->mnt->mnt_sb->s_magic == DEVPTS_SUPER_MAGIC) + return 0; + + /* Is a devpts filesystem at "pts" in the same directory? */ + err = path_pts(path); + if (err) + return err; + + /* Is the path the root of a devpts filesystem?
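+	 * (A devpts superblock reached through a bind mount of a
+	 * subdirectory is rejected: the mount's root dentry must be the
+	 * superblock's s_root.)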
*/ + sb = path->mnt->mnt_sb; + if ((sb->s_magic != DEVPTS_SUPER_MAGIC) || + (path->mnt->mnt_root != sb->s_root)) + return -ENODEV; + + return 0; +} + +struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi) +{ + struct path path; + int err; + + path = filp->f_path; + path_get(&path); + + err = devpts_ptmx_path(&path); + dput(path.dentry); + if (err) { + mntput(path.mnt); + path.mnt = ERR_PTR(err); + } + if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) { + mntput(path.mnt); + path.mnt = ERR_PTR(-ENODEV); + } + return path.mnt; +} + +struct pts_fs_info *devpts_acquire(struct file *filp) { struct pts_fs_info *result; struct path path; @@ -142,31 +186,18 @@ struct pts_fs_info *devpts_acquire(struct file *filp, struct vfsmount **ptsmnt) path = filp->f_path; path_get(&path); - *ptsmnt = NULL; - /* Has the devpts filesystem already been found? */ - sb = path.mnt->mnt_sb; - if (sb->s_magic != DEVPTS_SUPER_MAGIC) { - /* Is a devpts filesystem at "pts" in the same directory? */ - err = path_pts(&path); - if (err) { - result = ERR_PTR(err); - goto out; - } - - /* Is the path the root of a devpts filesystem? */ - result = ERR_PTR(-ENODEV); - sb = path.mnt->mnt_sb; - if ((sb->s_magic != DEVPTS_SUPER_MAGIC) || - (path.mnt->mnt_root != sb->s_root)) - goto out; + err = devpts_ptmx_path(&path); + if (err) { + result = ERR_PTR(err); + goto out; } /* * pty code needs to hold extra references in case of last /dev/tty close */ + sb = path.mnt->mnt_sb; atomic_inc(&sb->s_active); - *ptsmnt = mntget(path.mnt); result = DEVPTS_SB(sb); out: diff --git a/fs/eventpoll.c b/fs/eventpoll.c index e767e4389cb1..adbe328b957c 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -600,8 +600,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq) wait_queue_head_t *whead; rcu_read_lock(); - /* If it is cleared by POLLFREE, it should be rcu-safe */ - whead = rcu_dereference(pwq->whead); + /* + * If it is cleared by POLLFREE, it should be rcu-safe. + * If we read NULL we need a barrier paired with + * smp_store_release() in ep_poll_callback(), otherwise + * we rely on whead->lock. + */ + whead = smp_load_acquire(&pwq->whead); if (whead) remove_wait_queue(whead, &pwq->wait); rcu_read_unlock(); @@ -1134,17 +1139,6 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v struct eventpoll *ep = epi->ep; int ewake = 0; - if ((unsigned long)key & POLLFREE) { - ep_pwq_from_wait(wait)->whead = NULL; - /* - * whead = NULL above can race with ep_remove_wait_queue() - * which can do another remove_wait_queue() after us, so we - * can't use __remove_wait_queue(). whead->lock is held by - * the caller. - */ - list_del_init(&wait->entry); - } - spin_lock_irqsave(&ep->lock, flags); ep_set_busy_poll_napi_id(epi); @@ -1228,10 +1222,26 @@ out_unlock: if (pwake) ep_poll_safewake(&ep->poll_wait); - if (epi->event.events & EPOLLEXCLUSIVE) - return ewake; + if (!(epi->event.events & EPOLLEXCLUSIVE)) + ewake = 1; + + if ((unsigned long)key & POLLFREE) { + /* + * If we race with ep_remove_wait_queue() it can miss + * ->whead = NULL and do another remove_wait_queue() after + * us, so we can't use __remove_wait_queue(). + */ + list_del_init(&wait->entry); + /* + * ->whead != NULL protects us from the race with ep_free() + * or ep_remove(), ep_remove_wait_queue() takes whead->lock + * held by the caller. Once we nullify it, nothing protects + * ep/epi or even wait. 
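The eventpoll hunks here move the POLLFREE handling so that list_del_init() happens before ->whead is cleared, and pair the clearing smp_store_release() with an smp_load_acquire() in ep_remove_wait_queue(). A userspace analogue of that publish/consume ordering using C11 atomics -- an illustration of the barrier pairing, not the kernel code itself:

```c
#include <stdatomic.h>
#include <stddef.h>

struct waitq { int unused; };

struct poll_entry {
	_Atomic(struct waitq *) whead;	/* analogue of pwq->whead */
	int on_list;			/* analogue of wait->entry */
};

/*
 * Writer (analogue of ep_poll_callback() handling POLLFREE): detach
 * first, then publish NULL with release semantics, so everything done
 * before the store is visible to any reader that observes the NULL.
 */
static void poll_free(struct poll_entry *pwq)
{
	pwq->on_list = 0;				/* list_del_init() */
	atomic_store_explicit(&pwq->whead, NULL,
			      memory_order_release);	/* smp_store_release() */
}

/*
 * Reader (analogue of ep_remove_wait_queue()): the acquire load pairs
 * with the release store above; reading NULL therefore guarantees the
 * detach already happened, and a non-NULL head is still safe to use.
 */
static void remove_wait(struct poll_entry *pwq)
{
	struct waitq *whead =
		atomic_load_explicit(&pwq->whead, memory_order_acquire);
	if (whead) {
		/* remove_wait_queue(whead, &pwq->wait) would go here */
	}
}

int main(void)
{
	static struct waitq q;
	struct poll_entry pwq = { .whead = &q, .on_list = 1 };

	poll_free(&pwq);
	remove_wait(&pwq);	/* sees NULL: nothing left to unlink */
	return 0;
}
```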
+ */ + smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL); + } - return 1; + return ewake; } /* diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 5a1052627a81..701085620cd8 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2300,7 +2300,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) EXT4_MAX_BLOCK_LOG_SIZE); struct sg { struct ext4_group_info info; - ext4_grpblk_t counters[blocksize_bits + 2]; + ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; } sg; group--; @@ -2309,6 +2309,9 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); + i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + + sizeof(struct ext4_group_info); + grinfo = ext4_get_group_info(sb, group); /* Load the group info in memory only if not already loaded. */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { @@ -2320,7 +2323,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) buddy_loaded = 1; } - memcpy(&sg, ext4_get_group_info(sb, group), sizeof(sg)); + memcpy(&sg, ext4_get_group_info(sb, group), i); if (buddy_loaded) ext4_mb_unload_buddy(&e4b); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 82a5af9f6668..3dd970168448 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1543,7 +1543,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, /* Clear padding bytes. */ memset(val + i->value_len, 0, new_size - i->value_len); } - return 0; + goto update_hash; } /* Compute min_offs and last. */ @@ -1707,6 +1707,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, here->e_value_size = cpu_to_le32(i->value_len); } +update_hash: if (i->value) { __le32 hash = 0; @@ -1725,7 +1726,8 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i, here->e_name_len, &crc32c_hash, 1); } else if (is_block) { - __le32 *value = s->base + min_offs - new_size; + __le32 *value = s->base + le16_to_cpu( + here->e_value_offs); hash = ext4_xattr_hash_entry(here->e_name, here->e_name_len, value, diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 78b41e1d5c67..60726ae7cf26 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -619,16 +619,10 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) if (!sb->s_root) goto out_no_root; - /* logical blocks are represented by 40 bits in pxd_t, etc. */ - sb->s_maxbytes = ((u64) sb->s_blocksize) << 40; -#if BITS_PER_LONG == 32 - /* - * Page cache is indexed by long. - * I would use MAX_LFS_FILESIZE, but it's only half as big + /* logical blocks are represented by 40 bits in pxd_t, etc. 
+ * and page cache is indexed by long */ - sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1, - (u64)sb->s_maxbytes); -#endif + sb->s_maxbytes = min(((loff_t)sb->s_blocksize) << 40, MAX_LFS_FILESIZE); sb->s_time_gran = 1; return 0; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 20fbcab97753..5f940d2a136b 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -144,7 +144,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp) argp->p = page_address(argp->pagelist[0]); argp->pagelist++; if (argp->pagelen < PAGE_SIZE) { - argp->end = argp->p + (argp->pagelen>>2); + argp->end = argp->p + XDR_QUADLEN(argp->pagelen); argp->pagelen = 0; } else { argp->end = argp->p + (PAGE_SIZE>>2); @@ -1279,9 +1279,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) argp->pagelen -= pages * PAGE_SIZE; len -= pages * PAGE_SIZE; - argp->p = (__be32 *)page_address(argp->pagelist[0]); - argp->pagelist++; - argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE); + next_decode_page(argp); } argp->p += XDR_QUADLEN(len); diff --git a/fs/select.c b/fs/select.c index 9d5f15ed87fe..c6362e38ae92 100644 --- a/fs/select.c +++ b/fs/select.c @@ -1164,11 +1164,7 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset, if (ufdset) { return compat_get_bitmap(fdset, ufdset, nr); } else { - /* Tricky, must clear full unsigned long in the - * kernel fdset at the end, ALIGN makes sure that - * actually happens. - */ - memset(fdset, 0, ALIGN(nr, BITS_PER_LONG)); + zero_fd_set(nr, fdset); return 0; } } diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index fc824e2828f3..5d2add1a6c96 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -48,7 +48,11 @@ #define parent_node(node) ((void)(node),0) #endif #ifndef cpumask_of_node -#define cpumask_of_node(node) ((void)node, cpu_online_mask) + #ifdef CONFIG_NEED_MULTIPLE_NODES + #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) + #else + #define cpumask_of_node(node) ((void)node, cpu_online_mask) + #endif #endif #ifndef pcibus_to_node #define pcibus_to_node(bus) ((void)(bus), -1) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index da0be9a8d1de..9623d78f8494 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -60,6 +60,22 @@ #define ALIGN_FUNCTION() . = ALIGN(8) /* + * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which + * generates .data.identifier sections, which need to be pulled in with + * .data. We don't want to pull in .data..other sections, which Linux + * has defined. Same for text and bss. + */ +#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION +#define TEXT_MAIN .text .text.[0-9a-zA-Z_]* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* +#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* +#else +#define TEXT_MAIN .text +#define DATA_MAIN .data +#define BSS_MAIN .bss +#endif + +/* * Align to a 32 byte boundary equal to the * alignment gcc 4.5 uses for a struct */ @@ -198,12 +214,9 @@ /* * .data section - * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates - * .data.identifier which needs to be pulled in with .data, but don't want to - * pull in .data..stuff which has its own requirements. Same for bss. */ #define DATA_DATA \ - *(.data .data.[0-9a-zA-Z_]*) \ + *(DATA_MAIN) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data) \ @@ -434,16 +447,17 @@ VMLINUX_SYMBOL(__security_initcall_end) = .; \ } -/* .text section. 
Map to function alignment to avoid address changes +/* + * .text section. Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map - * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates - * .text.identifier which needs to be pulled in with .text , but some - * architectures define .text.foo which is not intended to be pulled in here. - * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have - * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */ + * + * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead + * code elimination is enabled, so these sections should be converted + * to use ".." first. + */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ - *(.text.hot .text .text.fixup .text.unlikely) \ + *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ *(.ref.text) \ MEM_KEEP(init.text) \ MEM_KEEP(exit.text) \ @@ -613,7 +627,7 @@ BSS_FIRST_SECTIONS \ *(.bss..page_aligned) \ *(.dynbss) \ - *(.bss .bss.[0-9a-zA-Z_]*) \ + *(BSS_MAIN) \ *(COMMON) \ } diff --git a/include/linux/ata.h b/include/linux/ata.h index e65ae4b2ed48..c7a353825450 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -60,7 +60,8 @@ enum { ATA_ID_FW_REV = 23, ATA_ID_PROD = 27, ATA_ID_MAX_MULTSECT = 47, - ATA_ID_DWORD_IO = 48, + ATA_ID_DWORD_IO = 48, /* before ATA-8 */ + ATA_ID_TRUSTED = 48, /* ATA-8 and later */ ATA_ID_CAPABILITY = 49, ATA_ID_OLD_PIO_MODES = 51, ATA_ID_OLD_DMA_MODES = 52, @@ -889,6 +890,13 @@ static inline bool ata_id_has_dword_io(const u16 *id) return id[ATA_ID_DWORD_IO] & (1 << 0); } +static inline bool ata_id_has_trusted(const u16 *id) +{ + if (ata_id_major_version(id) <= 7) + return false; + return id[ATA_ID_TRUSTED] & (1 << 0); +} + static inline bool ata_id_has_unload(const u16 *id) { if (ata_id_major_version(id) >= 7 && diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 25f6a0cb27d3..2a5d52fa90f5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -568,7 +568,6 @@ struct request_queue { #if defined(CONFIG_BLK_DEV_BSG) bsg_job_fn *bsg_job_fn; - int bsg_job_size; struct bsg_class_device bsg_dev; #endif diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index e34dde2da0ef..637a20cfb237 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -24,6 +24,7 @@ #define _BLK_BSG_ #include <linux/blkdev.h> +#include <scsi/scsi_request.h> struct request; struct device; @@ -37,6 +38,7 @@ struct bsg_buffer { }; struct bsg_job { + struct scsi_request sreq; struct device *dev; struct request *req; diff --git a/include/linux/compiler.h b/include/linux/compiler.h index eca8ad75e28b..043b60de041e 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -517,7 +517,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s # define __compiletime_error_fallback(condition) do { } while (0) #endif -#define __compiletime_assert(condition, msg, prefix, suffix) \ +#ifdef __OPTIMIZE__ +# define __compiletime_assert(condition, msg, prefix, suffix) \ do { \ bool __cond = !(condition); \ extern void prefix ## suffix(void) __compiletime_error(msg); \ @@ -525,6 +526,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s prefix ## suffix(); \ __compiletime_error_fallback(__cond); \ } while (0) +#else +# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) +#endif #define _compiletime_assert(condition, msg, prefix, suffix) \ 
__compiletime_assert(condition, msg, prefix, suffix) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 1473455d0341..4f2b3b2076c4 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -549,46 +549,29 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); *---------------------------------------------------------------*/ #define DM_NAME "device-mapper" -#ifdef CONFIG_PRINTK -extern struct ratelimit_state dm_ratelimit_state; - -#define dm_ratelimit() __ratelimit(&dm_ratelimit_state) -#else -#define dm_ratelimit() 0 -#endif +#define DM_RATELIMIT(pr_func, fmt, ...) \ +do { \ + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&rs)) \ + pr_func(DM_FMT(fmt), ##__VA_ARGS__); \ +} while (0) #define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n" #define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__) #define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__) -#define DMERR_LIMIT(fmt, ...) \ -do { \ - if (dm_ratelimit()) \ - DMERR(fmt, ##__VA_ARGS__); \ -} while (0) - +#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__) #define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__) -#define DMWARN_LIMIT(fmt, ...) \ -do { \ - if (dm_ratelimit()) \ - DMWARN(fmt, ##__VA_ARGS__); \ -} while (0) - +#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__) #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) -#define DMINFO_LIMIT(fmt, ...) \ -do { \ - if (dm_ratelimit()) \ - DMINFO(fmt, ##__VA_ARGS__); \ -} while (0) +#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__) #ifdef CONFIG_DM_DEBUG #define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__) -#define DMDEBUG_LIMIT(fmt, ...) \ -do { \ - if (dm_ratelimit()) \ - DMDEBUG(fmt, ##__VA_ARGS__); \ -} while (0) +#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__) #else #define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__) #define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__) diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 7883e901f65c..100cb4343763 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h @@ -19,7 +19,8 @@ struct pts_fs_info; -struct pts_fs_info *devpts_acquire(struct file *, struct vfsmount **ptsmnt); +struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *); +struct pts_fs_info *devpts_acquire(struct file *); void devpts_release(struct pts_fs_info *); int devpts_new_index(struct pts_fs_info *); @@ -32,6 +33,15 @@ void *devpts_get_priv(struct dentry *); /* unlink */ void devpts_pty_kill(struct dentry *); +/* in pty.c */ +int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags); + +#else +static inline int +ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) +{ + return -EIO; +} #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index 6e1fd5d21248..cbfe127bccf8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -907,9 +907,9 @@ static inline struct file *get_file(struct file *f) /* Page cache limit. The filesystems should put that into their s_maxbytes limits, otherwise bad things can happen in VM. 
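The device-mapper change above trades the single global dm_ratelimit_state for a DEFINE_RATELIMIT_STATE instance per macro expansion, so one noisy call site can no longer consume every other caller's budget. A rough userspace sketch of the per-callsite idea (the seconds-granularity clock and the burst of 10 are arbitrary choices for the example; the GNU ##__VA_ARGS__ extension is used, as the kernel itself does):

```c
#include <stdio.h>
#include <time.h>

/* Each expansion gets its own static state, one budget per call site. */
#define PR_RATELIMIT(fmt, ...)                                   \
do {                                                             \
	static time_t last;                                      \
	static unsigned burst;                                   \
	time_t now = time(NULL);                                 \
	if (now != last) { last = now; burst = 0; }              \
	if (burst++ < 10)	/* at most 10 messages/second */ \
		fprintf(stderr, fmt "\n", ##__VA_ARGS__);        \
} while (0)

int main(void)
{
	for (int i = 0; i < 100; i++)
		PR_RATELIMIT("event %d", i);	/* only ~10 lines printed */
	return 0;
}
```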
*/ #if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1) +#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT) #elif BITS_PER_LONG==64 -#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) +#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX) #endif #define FL_POSIX 1 diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index d68bec297a45..c380daa40c0e 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -535,7 +535,7 @@ struct iio_buffer_setup_ops { * @scan_timestamp: [INTERN] set if any buffers have requested timestamp * @scan_index_timestamp:[INTERN] cache of the index to the timestamp * @trig: [INTERN] current device trigger (buffer modes) - * @trig_readonly [INTERN] mark the current trigger immutable + * @trig_readonly: [INTERN] mark the current trigger immutable * @pollfunc: [DRIVER] function run on trigger being received * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index ea08302f2d7b..7142d8d6e470 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -144,8 +144,8 @@ void devm_iio_trigger_unregister(struct device *dev, /** * iio_trigger_set_immutable() - set an immutable trigger on destination * - * @indio_dev - IIO device structure containing the device - * @trig - trigger to assign to device + * @indio_dev: IIO device structure containing the device + * @trig: trigger to assign to device * **/ int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2cb54adc4a33..176f7569d874 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -240,7 +240,7 @@ struct iommu_device { struct list_head list; const struct iommu_ops *ops; struct fwnode_handle *fwnode; - struct device dev; + struct device *dev; }; int iommu_device_register(struct iommu_device *iommu); @@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu, iommu->fwnode = fwnode; } +static inline struct iommu_device *dev_to_iommu_device(struct device *dev) +{ + return (struct iommu_device *)dev_get_drvdata(dev); +} + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ @@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu, { } +static inline struct iommu_device *dev_to_iommu_device(struct device *dev) +{ + return NULL; +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index d5b6f6a9fcc5..023b29d973e6 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -678,9 +678,7 @@ enum mlx5_device_state { }; enum mlx5_interface_state { - MLX5_INTERFACE_STATE_DOWN = BIT(0), - MLX5_INTERFACE_STATE_UP = BIT(1), - MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2), + MLX5_INTERFACE_STATE_UP = BIT(0), }; enum mlx5_pci_status { diff --git a/include/linux/mm.h b/include/linux/mm.h index 46b9ac5e8569..c1f6c95f3496 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1260,6 +1260,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); int 
follow_pte_pmd(struct mm_struct *mm, unsigned long address, + unsigned long *start, unsigned long *end, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index c91b3bcd158f..7b2e31b1745a 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -95,17 +95,6 @@ struct mmu_notifier_ops { pte_t pte); /* - * Before this is invoked any secondary MMU is still ok to - * read/write to the page previously pointed to by the Linux - * pte because the page hasn't been freed yet and it won't be - * freed until this returns. If required set_page_dirty has to - * be called internally to this method. - */ - void (*invalidate_page)(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address); - - /* * invalidate_range_start() and invalidate_range_end() must be * paired and are called only when the mmap_sem and/or the * locks protecting the reverse maps are held. If the subsystem @@ -220,8 +209,6 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address); extern void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte); -extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, - unsigned long address); extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end); extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, @@ -268,13 +255,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, __mmu_notifier_change_pte(mm, address, pte); } -static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, - unsigned long address) -{ - if (mm_has_notifiers(mm)) - __mmu_notifier_invalidate_page(mm, address); -} - static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end) { @@ -442,11 +422,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, { } -static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, - unsigned long address) -{ -} - static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end) { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 35de8312e0b5..8aba119bb005 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3858,6 +3858,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev, bool netdev_has_upper_dev_all_rcu(struct net_device *dev, struct net_device *upper_dev); +bool netdev_has_any_upper_dev(struct net_device *dev); + void *netdev_lower_get_next_private(struct net_device *dev, struct list_head **iter); void *netdev_lower_get_next_private_rcu(struct net_device *dev, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 25d8225dbd04..8efff888bd9b 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -254,7 +254,7 @@ enum { NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, NVME_CTRL_OACS_DIRECTIVES = 1 << 5, - NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7, + NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8, }; struct nvme_lbaf { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7594e19bce62..f93cc01064cb 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1039,7 +1039,23 @@ int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int 
offset, int len); int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); -int skb_pad(struct sk_buff *skb, int pad); +int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error); + +/** + * skb_pad - zero pad the tail of an skb + * @skb: buffer to pad + * @pad: space to pad + * + * Ensure that a buffer is followed by a padding area that is zero + * filled. Used by network drivers which may DMA or transfer data + * beyond the buffer end onto the wire. + * + * May return error in out of memory cases. The skb is freed on error. + */ +static inline int skb_pad(struct sk_buff *skb, int pad) +{ + return __skb_pad(skb, pad, true); +} #define dev_kfree_skb(a) consume_skb(a) int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, @@ -2934,25 +2950,42 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) * skb_put_padto - increase size and pad an skbuff up to a minimal size * @skb: buffer to pad * @len: minimal length + * @free_on_error: free buffer on error * * Pads up a buffer to ensure the trailing bytes exist and are * blanked. If the buffer already contains sufficient data it * is untouched. Otherwise it is extended. Returns zero on - * success. The skb is freed on error. + * success. The skb is freed on error if @free_on_error is true. */ -static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, + bool free_on_error) { unsigned int size = skb->len; if (unlikely(size < len)) { len -= size; - if (skb_pad(skb, len)) + if (__skb_pad(skb, len, free_on_error)) return -ENOMEM; __skb_put(skb, len); } return 0; } +/** + * skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + return __skb_put_padto(skb, len, true); +} + static inline int skb_add_data(struct sk_buff *skb, struct iov_iter *from, int copy) { diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 71c1646298ae..d060d711a624 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -72,6 +72,7 @@ struct fib6_node { __u16 fn_flags; int fn_sernum; struct rt6_info *rr_ptr; + struct rcu_head rcu; }; #ifndef CONFIG_IPV6_SUBTREES @@ -106,7 +107,7 @@ struct rt6_info { * the same cache line. */ struct fib6_table *rt6i_table; - struct fib6_node *rt6i_node; + struct fib6_node __rcu *rt6i_node; struct in6_addr rt6i_gateway; @@ -171,13 +172,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) rt0->rt6i_flags |= RTF_EXPIRES; } +/* Function to safely get fn->sernum for passed in rt + * and store result in passed in cookie. 
+ * Return true if we can get cookie safely + * Return false if not + */ +static inline bool rt6_get_cookie_safe(const struct rt6_info *rt, + u32 *cookie) +{ + struct fib6_node *fn; + bool status = false; + + rcu_read_lock(); + fn = rcu_dereference(rt->rt6i_node); + + if (fn) { + *cookie = fn->fn_sernum; + status = true; + } + + rcu_read_unlock(); + return status; +} + static inline u32 rt6_get_cookie(const struct rt6_info *rt) { + u32 cookie = 0; + if (rt->rt6i_flags & RTF_PCPU || (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from)) rt = (struct rt6_info *)(rt->dst.from); - return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; + rt6_get_cookie_safe(rt, &cookie); + + return cookie; } static inline void ip6_rt_put(struct rt6_info *rt) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index d6247a3c40df..135f5a2dd931 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -100,6 +100,13 @@ struct Qdisc { spinlock_t busylock ____cacheline_aligned_in_smp; }; +static inline void qdisc_refcount_inc(struct Qdisc *qdisc) +{ + if (qdisc->flags & TCQ_F_BUILTIN) + return; + refcount_inc(&qdisc->refcnt); +} + static inline bool qdisc_is_running(const struct Qdisc *qdisc) { return (raw_read_seqcount(&qdisc->running) & 1) ? true : false; diff --git a/include/net/tcp.h b/include/net/tcp.h index 9c3db054e47f..b510f284427a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1032,9 +1032,7 @@ void tcp_get_default_congestion_control(char *name); void tcp_get_available_congestion_control(char *buf, size_t len); void tcp_get_allowed_congestion_control(char *buf, size_t len); int tcp_set_allowed_congestion_control(char *allowed); -int tcp_set_congestion_control(struct sock *sk, const char *name, bool load); -void tcp_reinit_congestion_control(struct sock *sk, - const struct tcp_congestion_ops *ca); +int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit); u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); diff --git a/include/net/udp.h b/include/net/udp.h index 4e5f23fec35e..12dfbfe2e2d7 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -260,7 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags, } void udp_v4_early_demux(struct sk_buff *skb); -void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); +bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); int udp_get_port(struct sock *sk, unsigned short snum, int (*saddr_cmp)(const struct sock *, const struct sock *)); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index b5732432bb29..88c32aba32f7 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1683,6 +1683,7 @@ struct ib_qp { enum ib_qp_type qp_type; struct ib_rwq_ind_table *rwq_ind_tbl; struct ib_qp_security *qp_sec; + u8 port; }; struct ib_mr { diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index a1266d318c85..6af198d8120b 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -57,6 +57,7 @@ struct scsi_pointer { /* for scmd->flags */ #define SCMD_TAGGED (1 << 0) #define SCMD_UNCHECKED_ISA_DMA (1 << 1) +#define SCMD_ZONE_WRITE_LOCK (1 << 2) struct scsi_cmnd { struct scsi_request req; diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index a3960f98679c..c8125ec1f4f2 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -22,7 +22,6 @@ enum { LO_FLAGS_AUTOCLEAR = 4, LO_FLAGS_PARTSCAN = 8, 
LO_FLAGS_DIRECT_IO = 16, - LO_FLAGS_BLOCKSIZE = 32, }; #include <asm/posix_types.h> /* for __kernel_old_dev_t */ @@ -60,8 +59,6 @@ struct loop_info64 { __u64 lo_init[2]; }; -#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0] - /* * Loop filter types */ diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 6d3c54264d8e..3f03567631cb 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -145,43 +145,6 @@ struct nd_cmd_clear_error { __u64 cleared; } __packed; -struct nd_cmd_trans_spa { - __u64 spa; - __u32 status; - __u8 flags; - __u8 _reserved[3]; - __u64 trans_length; - __u32 num_nvdimms; - struct nd_nvdimm_device { - __u32 nfit_device_handle; - __u32 _reserved; - __u64 dpa; - } __packed devices[0]; - -} __packed; - -struct nd_cmd_ars_err_inj { - __u64 err_inj_spa_range_base; - __u64 err_inj_spa_range_length; - __u8 err_inj_options; - __u32 status; -} __packed; - -struct nd_cmd_ars_err_inj_clr { - __u64 err_inj_clr_spa_range_base; - __u64 err_inj_clr_spa_range_length; - __u32 status; -} __packed; - -struct nd_cmd_ars_err_inj_stat { - __u32 status; - __u32 inj_err_rec_count; - struct nd_error_stat_query_record { - __u64 err_inj_stat_spa_range_base; - __u64 err_inj_stat_spa_range_length; - } __packed record[0]; -} __packed; - enum { ND_CMD_IMPLEMENTED = 0, diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 8d5151688504..87a1213dd326 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1892,6 +1892,7 @@ static struct cftype files[] = { { .name = "memory_pressure", .read_u64 = cpuset_read_u64, + .private = FILE_MEMORY_PRESSURE, }, { diff --git a/kernel/events/core.c b/kernel/events/core.c index 8c01572709ac..36f98198877c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10034,28 +10034,27 @@ SYSCALL_DEFINE5(perf_event_open, goto err_context; /* - * Do not allow to attach to a group in a different - * task or CPU context: + * Make sure we're both events for the same CPU; + * grouping events for different CPUs is broken; since + * you can never concurrently schedule them anyhow. */ - if (move_group) { - /* - * Make sure we're both on the same task, or both - * per-cpu events. - */ - if (group_leader->ctx->task != ctx->task) - goto err_context; + if (group_leader->cpu != event->cpu) + goto err_context; - /* - * Make sure we're both events for the same CPU; - * grouping events for different CPUs is broken; since - * you can never concurrently schedule them anyhow. - */ - if (group_leader->cpu != event->cpu) - goto err_context; - } else { - if (group_leader->ctx != ctx) - goto err_context; - } + /* + * Make sure we're both on the same task, or both + * per-CPU events. + */ + if (group_leader->ctx->task != ctx->task) + goto err_context; + + /* + * Do not allow to attach to a group in a different task + * or CPU context. If we're moving SW events, we'll fix + * this up later, so allow that. 
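The one-line cpuset fix above (".private = FILE_MEMORY_PRESSURE") belongs to a common bug class: designated initializers zero every omitted field, so the entry silently carried .private == 0, which aliases whichever FILE_* constant happens to be first. A compact illustration (the enum values and names here are invented for the example):

```c
#include <stdio.h>

enum file_type { FILE_CPULIST, FILE_MEMLIST, FILE_MEMORY_PRESSURE };

struct cftype {
	const char *name;
	int private;	/* selects behaviour in a shared read handler */
};

static struct cftype files[] = {
	{ .name = "cpulist", .private = FILE_CPULIST },
	/* .private omitted: silently 0 == FILE_CPULIST -- the bug class */
	{ .name = "memory_pressure_buggy" },
	/* the fix: say what you mean */
	{ .name = "memory_pressure", .private = FILE_MEMORY_PRESSURE },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(files) / sizeof(files[0]); i++)
		printf("%-22s private=%d\n", files[i].name, files[i].private);
	return 0;
}
```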
+ */ + if (!move_group && group_leader->ctx != ctx) + goto err_context; /* * Only a group leader can be exclusive or pinned diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 0e137f98a50c..267f6ef91d97 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1262,8 +1262,6 @@ void uprobe_end_dup_mmap(void) void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) { - newmm->uprobes_state.xol_area = NULL; - if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { set_bit(MMF_HAS_UPROBES, &newmm->flags); /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ diff --git a/kernel/fork.c b/kernel/fork.c index e075b7780421..b7e9e57b71ea 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -785,6 +785,13 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) #endif } +static void mm_init_uprobes_state(struct mm_struct *mm) +{ +#ifdef CONFIG_UPROBES + mm->uprobes_state.xol_area = NULL; +#endif +} + static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, struct user_namespace *user_ns) { @@ -806,11 +813,13 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm_init_cpumask(mm); mm_init_aio(mm); mm_init_owner(mm, p); + RCU_INIT_POINTER(mm->exe_file, NULL); mmu_notifier_mm_init(mm); init_tlb_flush_pending(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS mm->pmd_huge_pte = NULL; #endif + mm_init_uprobes_state(mm); if (current->mm) { mm->flags = current->mm->flags & MMF_INIT_MASK; diff --git a/kernel/kthread.c b/kernel/kthread.c index 26db528c1d88..1c19edf82427 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -637,6 +637,7 @@ repeat: schedule(); try_to_freeze(); + cond_resched(); goto repeat; } EXPORT_SYMBOL_GPL(kthread_worker_fn); diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 17f11c6b0a9f..d6afed6d0752 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -70,9 +70,10 @@ static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode, list_for_each_entry_safe(curr, next, &wq_head->head, entry) { unsigned flags = curr->flags; - - if (curr->func(curr, mode, wake_flags, key) && - (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) + int ret = curr->func(curr, mode, wake_flags, key); + if (ret < 0) + break; + if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) break; } } diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 8f5d1bf18854..f2674a056c26 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -203,6 +203,7 @@ struct timer_base { bool migration_enabled; bool nohz_active; bool is_idle; + bool must_forward_clk; DECLARE_BITMAP(pending_map, WHEEL_SIZE); struct hlist_head vectors[WHEEL_SIZE]; } ____cacheline_aligned; @@ -856,13 +857,19 @@ get_target_base(struct timer_base *base, unsigned tflags) static inline void forward_timer_base(struct timer_base *base) { - unsigned long jnow = READ_ONCE(jiffies); + unsigned long jnow; /* - * We only forward the base when it's idle and we have a delta between - * base clock and jiffies. + * We only forward the base when we are idle or have just come out of + * idle (must_forward_clk logic), and have a delta between base clock + * and jiffies. In the common case, run_timers will take care of it. 
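forward_timer_base() below keeps the signed-difference idiom (long)(jnow - base->clk) < 2 when testing whether the base clock is stale. Casting the unsigned subtraction to a signed type is what makes the test correct across jiffies wraparound; a small demonstration with 32-bit counters (chosen only to make the wrap easy to trigger):

```c
#include <stdint.h>
#include <stdio.h>

/*
 * time_after()-style comparison: correct even when the counter wraps,
 * as long as the two values are less than half the range apart.
 */
static int delta_at_least(uint32_t now, uint32_t clk, int32_t min)
{
	return (int32_t)(now - clk) >= min;
}

int main(void)
{
	/* no wrap: now is 5 ticks ahead of clk */
	printf("%d\n", delta_at_least(1005, 1000, 2));		/* 1 */
	/* wrapped: now crossed 0xFFFFFFFF and is 5 ticks ahead */
	printf("%d\n", delta_at_least(3, 0xFFFFFFFEu, 2));	/* 1 */
	/* a naive unsigned comparison draws the wrong conclusion */
	printf("%d\n", 3u >= 0xFFFFFFFEu);			/* 0 */
	return 0;
}
```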
*/ - if (!base->is_idle || (long) (jnow - base->clk) < 2) + if (likely(!base->must_forward_clk)) + return; + + jnow = READ_ONCE(jiffies); + base->must_forward_clk = base->is_idle; + if ((long)(jnow - base->clk) < 2) return; /* @@ -938,6 +945,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) * same array bucket then just return: */ if (timer_pending(timer)) { + /* + * The downside of this optimization is that it can result in + * larger granularity than you would get from adding a new + * timer with this expiry. + */ if (timer->expires == expires) return 1; @@ -948,6 +960,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) * dequeue/enqueue dance. */ base = lock_timer_base(timer, &flags); + forward_timer_base(base); clk = base->clk; idx = calc_wheel_index(expires, clk); @@ -964,6 +977,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) } } else { base = lock_timer_base(timer, &flags); + forward_timer_base(base); } ret = detach_if_pending(timer, base, false); @@ -991,12 +1005,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) raw_spin_lock(&base->lock); WRITE_ONCE(timer->flags, (timer->flags & ~TIMER_BASEMASK) | base->cpu); + forward_timer_base(base); } } - /* Try to forward a stale timer base clock */ - forward_timer_base(base); - timer->expires = expires; /* * If 'idx' was calculated above and the base time did not advance @@ -1112,6 +1124,7 @@ void add_timer_on(struct timer_list *timer, int cpu) WRITE_ONCE(timer->flags, (timer->flags & ~TIMER_BASEMASK) | cpu); } + forward_timer_base(base); debug_activate(timer, timer->expires); internal_add_timer(base, timer); @@ -1497,10 +1510,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) if (!is_max_delta) expires = basem + (u64)(nextevt - basej) * TICK_NSEC; /* - * If we expect to sleep more than a tick, mark the base idle: + * If we expect to sleep more than a tick, mark the base idle. + * Also the tick is stopped so any added timer must forward + * the base clk itself to keep granularity small. This idle + * logic is only maintained for the BASE_STD base, deferrable + * timers may still see large granularity skew (by design). */ - if ((expires - basem) > TICK_NSEC) + if ((expires - basem) > TICK_NSEC) { + base->must_forward_clk = true; base->is_idle = true; + } } raw_spin_unlock(&base->lock); @@ -1611,6 +1630,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + /* + * must_forward_clk must be cleared before running timers so that any + * timer functions that call mod_timer will not try to forward the + * base. idle tracking / clock forwarding logic is only used with + * BASE_STD timers. + * + * The deferrable base does not do idle tracking at all, so we do + * not forward it. This can result in very large variations in + * granularity for deferrable timers, but they can be deferred for + * long periods due to idle. 
+ */ + base->must_forward_clk = false; + __run_timers(base); if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 02004ae91860..96cea88fa00f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -889,6 +889,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace) function_profile_call(trace->func, 0, NULL, NULL); + /* If function graph is shutting down, ret_stack can be NULL */ + if (!current->ret_stack) + return 0; + if (index >= 0 && index < FTRACE_RETFUNC_DEPTH) current->ret_stack[index].subtime = 0; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 529cc50d7243..81279c6602ff 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); * the page that was allocated, with the read page of the buffer. * * Returns: - * The page allocated, or NULL on error. + * The page allocated, or ERR_PTR */ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) { - struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; + struct ring_buffer_per_cpu *cpu_buffer; struct buffer_data_page *bpage = NULL; unsigned long flags; struct page *page; + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return ERR_PTR(-ENODEV); + + cpu_buffer = buffer->buffers[cpu]; local_irq_save(flags); arch_spin_lock(&cpu_buffer->lock); @@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY, 0); if (!page) - return NULL; + return ERR_PTR(-ENOMEM); bpage = page_address(page); @@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); * * for example: * rpage = ring_buffer_alloc_read_page(buffer, cpu); - * if (!rpage) - * return error; + * if (IS_ERR(rpage)) + * return PTR_ERR(rpage); * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); * if (ret >= 0) * process_page(rpage, ret); diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 9fbcaf567886..68ee79afe31c 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c @@ -113,7 +113,7 @@ static enum event_status read_page(int cpu) int i; bpage = ring_buffer_alloc_read_page(buffer, cpu); - if (!bpage) + if (IS_ERR(bpage)) return EVENT_DROPPED; ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 42b9355033d4..44004d8aa3b3 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6598,7 +6598,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; - ssize_t ret; + ssize_t ret = 0; ssize_t size; if (!count) @@ -6612,10 +6612,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, if (!info->spare) { info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, iter->cpu_file); - info->spare_cpu = iter->cpu_file; + if (IS_ERR(info->spare)) { + ret = PTR_ERR(info->spare); + info->spare = NULL; + } else { + info->spare_cpu = iter->cpu_file; + } } if (!info->spare) - return -ENOMEM; + return ret; /* Do we have previous read data to read? 
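Several hunks above switch ring_buffer_alloc_read_page() from returning NULL to returning ERR_PTR(-ENODEV) or ERR_PTR(-ENOMEM), so callers updated to IS_ERR()/PTR_ERR() can tell the two failures apart. A minimal userspace re-implementation of the idiom for readers unfamiliar with it (the kernel's real helpers live in include/linux/err.h and reserve the top page of the address space for error codes; alloc_read_page() here is an invented stand-in):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno as an impossible pointer value. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_read_page(int cpu_ok)
{
	if (!cpu_ok)
		return ERR_PTR(-ENODEV);	/* bad CPU: not an allocation failure */
	void *page = malloc(4096);
	return page ? page : ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *page = alloc_read_page(0);

	if (IS_ERR(page))			/* the caller pattern from the diff */
		printf("error %ld\n", PTR_ERR(page));
	else
		free(page);
	return 0;
}
```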
*/ if (info->read < PAGE_SIZE) @@ -6790,8 +6795,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, ref->ref = 1; ref->buffer = iter->trace_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); - if (!ref->page) { - ret = -ENOMEM; + if (IS_ERR(ref->page)) { + ret = PTR_ERR(ref->page); + ref->page = NULL; kfree(ref); break; } @@ -8293,6 +8299,7 @@ __init static int tracer_alloc_buffers(void) if (ret < 0) goto out_free_cpumask; /* Used for event triggers */ + ret = -ENOMEM; temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); if (!temp_buffer) goto out_rm_hp_state; @@ -8407,4 +8414,4 @@ __init static int clear_boot_tracer(void) } fs_initcall(tracer_init_tracefs); -late_initcall(clear_boot_tracer); +late_initcall_sync(clear_boot_tracer); diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 59a411ff60c7..181e139a8057 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call, if (err && set_str) append_filter_err(ps, filter); } + if (err && !set_str) { + free_event_filter(filter); + filter = NULL; + } create_filter_finish(ps); *filterp = filter; diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index 0a689bbb78ef..305039b122fa 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a) if (!a) return; - if (!a->pages) { - kfree(a); - return; - } + if (!a->pages) + goto free; for (i = 0; i < a->n_pages; i++) { if (!a->pages[i]) break; free_page((unsigned long)a->pages[i]); } + + kfree(a->pages); + + free: + kfree(a); } struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index 5a0f75a3bf01..eead4b339466 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c @@ -364,11 +364,11 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes) } miter.consumed = lzeros; - sg_miter_stop(&miter); nbytes -= lzeros; nbits = nbytes * 8; if (nbits > MAX_EXTERN_MPI_BITS) { + sg_miter_stop(&miter); pr_info("MPI: mpi too large (%u bits)\n", nbits); return NULL; } @@ -376,6 +376,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes) if (nbytes > 0) nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8); + sg_miter_stop(&miter); + nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); val = mpi_alloc(nlimbs); if (!val) diff --git a/mm/filemap.c b/mm/filemap.c index a49702445ce0..65b4b6e7f7bd 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -885,6 +885,7 @@ void __init pagecache_init(void) page_writeback_init(); } +/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */ struct wait_page_key { struct page *page; int bit_nr; @@ -909,8 +910,10 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, if (wait_page->bit_nr != key->bit_nr) return 0; + + /* Stop walking if it's locked */ if (test_bit(key->bit_nr, &key->page->flags)) - return 0; + return -1; return autoremove_wake_function(wait, mode, sync, key); } @@ -964,6 +967,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, int ret = 0; init_wait(wait); + wait->flags = lock ? 
WQ_FLAG_EXCLUSIVE : 0; wait->func = wake_page_function; wait_page.page = page; wait_page.bit_nr = bit_nr; @@ -972,10 +976,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, spin_lock_irq(&q->lock); if (likely(list_empty(&wait->entry))) { - if (lock) - __add_wait_queue_entry_tail_exclusive(q, wait); - else - __add_wait_queue(q, wait); + __add_wait_queue_entry_tail(q, wait); SetPageWaiters(page); } @@ -985,10 +986,6 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, if (likely(test_bit(bit_nr, &page->flags))) { io_schedule(); - if (unlikely(signal_pending_state(state, current))) { - ret = -EINTR; - break; - } } if (lock) { @@ -998,6 +995,11 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q, if (!test_bit(bit_nr, &page->flags)) break; } + + if (unlikely(signal_pending_state(state, current))) { + ret = -EINTR; + break; + } } finish_wait(q, wait); @@ -1039,7 +1041,7 @@ void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) unsigned long flags; spin_lock_irqsave(&q->lock, flags); - __add_wait_queue(q, waiter); + __add_wait_queue_entry_tail(q, waiter); SetPageWaiters(page); spin_unlock_irqrestore(&q->lock, flags); } diff --git a/mm/madvise.c b/mm/madvise.c index 47d8d8a25eae..4d7d1e5ddba9 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -368,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, pte_offset_map_lock(mm, pmd, addr, &ptl); goto out; } - put_page(page); unlock_page(page); + put_page(page); pte = pte_offset_map_lock(mm, pmd, addr, &ptl); pte--; addr -= PAGE_SIZE; @@ -613,6 +613,7 @@ static int madvise_inject_error(int behavior, unsigned long start, unsigned long end) { struct page *page; + struct zone *zone; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -646,6 +647,11 @@ static int madvise_inject_error(int behavior, if (ret) return ret; } + + /* Ensure that all poisoned pages are removed from per-cpu lists */ + for_each_populated_zone(zone) + drain_all_pages(zone); + return 0; } #endif diff --git a/mm/memblock.c b/mm/memblock.c index bf14aea6ab70..91205780e6b1 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -299,7 +299,7 @@ void __init memblock_discard(void) __memblock_free_late(addr, size); } - if (memblock.memory.regions == memblock_memory_init_regions) { + if (memblock.memory.regions != memblock_memory_init_regions) { addr = __pa(memblock.memory.regions); size = PAGE_ALIGN(sizeof(struct memblock_region) * memblock.memory.max); diff --git a/mm/memory.c b/mm/memory.c index fe2fba27ded2..56e48e4593cb 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4008,7 +4008,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) #endif /* __PAGETABLE_PMD_FOLDED */ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, - pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) + unsigned long *start, unsigned long *end, + pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) { pgd_t *pgd; p4d_t *p4d; @@ -4035,17 +4036,29 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, if (!pmdpp) goto out; + if (start && end) { + *start = address & PMD_MASK; + *end = *start + PMD_SIZE; + mmu_notifier_invalidate_range_start(mm, *start, *end); + } *ptlp = pmd_lock(mm, pmd); if (pmd_huge(*pmd)) { *pmdpp = pmd; return 0; } spin_unlock(*ptlp); + if (start && end) + mmu_notifier_invalidate_range_end(mm, *start, *end); } if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) goto out; + if (start && end) { + *start = address & PAGE_MASK; + *end = *start + PAGE_SIZE; + 
mmu_notifier_invalidate_range_start(mm, *start, *end); + } ptep = pte_offset_map_lock(mm, pmd, address, ptlp); if (!pte_present(*ptep)) goto unlock; @@ -4053,6 +4066,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address, return 0; unlock: pte_unmap_unlock(ptep, *ptlp); + if (start && end) + mmu_notifier_invalidate_range_end(mm, *start, *end); out: return -EINVAL; } @@ -4064,20 +4079,21 @@ static inline int follow_pte(struct mm_struct *mm, unsigned long address, /* (void) is needed to make gcc happy */ (void) __cond_lock(*ptlp, - !(res = __follow_pte_pmd(mm, address, ptepp, NULL, - ptlp))); + !(res = __follow_pte_pmd(mm, address, NULL, NULL, + ptepp, NULL, ptlp))); return res; } int follow_pte_pmd(struct mm_struct *mm, unsigned long address, + unsigned long *start, unsigned long *end, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp) { int res; /* (void) is needed to make gcc happy */ (void) __cond_lock(*ptlp, - !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp, - ptlp))); + !(res = __follow_pte_pmd(mm, address, start, end, + ptepp, pmdpp, ptlp))); return res; } EXPORT_SYMBOL(follow_pte_pmd); diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 54ca54562928..314285284e6e 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -174,20 +174,6 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, srcu_read_unlock(&srcu, id); } -void __mmu_notifier_invalidate_page(struct mm_struct *mm, - unsigned long address) -{ - struct mmu_notifier *mn; - int id; - - id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { - if (mn->ops->invalidate_page) - mn->ops->invalidate_page(mn, mm, address); - } - srcu_read_unlock(&srcu, id); -} - void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1bad301820c7..1423da8dd16f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -66,6 +66,7 @@ #include <linux/kthread.h> #include <linux/memcontrol.h> #include <linux/ftrace.h> +#include <linux/nmi.h> #include <asm/sections.h> #include <asm/tlbflush.h> @@ -2535,9 +2536,14 @@ void drain_all_pages(struct zone *zone) #ifdef CONFIG_HIBERNATION +/* + * Touch the watchdog for every WD_PAGE_COUNT pages. + */ +#define WD_PAGE_COUNT (128*1024) + void mark_free_pages(struct zone *zone) { - unsigned long pfn, max_zone_pfn; + unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; unsigned long flags; unsigned int order, t; struct page *page; @@ -2552,6 +2558,11 @@ void mark_free_pages(struct zone *zone) if (pfn_valid(pfn)) { page = pfn_to_page(pfn); + if (!--page_count) { + touch_nmi_watchdog(); + page_count = WD_PAGE_COUNT; + } + if (page_zone(page) != zone) continue; @@ -2565,8 +2576,13 @@ void mark_free_pages(struct zone *zone) unsigned long i; pfn = page_to_pfn(page); - for (i = 0; i < (1UL << order); i++) + for (i = 0; i < (1UL << order); i++) { + if (!--page_count) { + touch_nmi_watchdog(); + page_count = WD_PAGE_COUNT; + } swsusp_set_page_free(pfn_to_page(pfn + i)); + } } } spin_unlock_irqrestore(&zone->lock, flags); @@ -3275,10 +3291,13 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, /* * Go through the zonelist yet one more time, keep very high watermark * here, this is only to catch a parallel oom killing, we must fail if - * we're still under heavy pressure. + * we're still under heavy pressure. 
But make sure that this reclaim + * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY + * allocation which will never fail due to oom_lock already held. */ - page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, - ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); + page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & + ~__GFP_DIRECT_RECLAIM, order, + ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); if (page) goto out; diff --git a/mm/rmap.c b/mm/rmap.c index c1286d47aa1f..c570f82e6827 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -887,11 +887,21 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, .address = address, .flags = PVMW_SYNC, }; + unsigned long start = address, end; int *cleaned = arg; - bool invalidation_needed = false; + + /* + * We have to assume the worst case, i.e. pmd, for invalidation. Note that + * the page cannot be freed from this function. + */ + end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page))); + mmu_notifier_invalidate_range_start(vma->vm_mm, start, end); while (page_vma_mapped_walk(&pvmw)) { + unsigned long cstart, cend; int ret = 0; + + cstart = address = pvmw.address; if (pvmw.pte) { pte_t entry; pte_t *pte = pvmw.pte; @@ -899,11 +909,12 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, if (!pte_dirty(*pte) && !pte_write(*pte)) continue; - flush_cache_page(vma, pvmw.address, pte_pfn(*pte)); - entry = ptep_clear_flush(vma, pvmw.address, pte); + flush_cache_page(vma, address, pte_pfn(*pte)); + entry = ptep_clear_flush(vma, address, pte); entry = pte_wrprotect(entry); entry = pte_mkclean(entry); - set_pte_at(vma->vm_mm, pvmw.address, pte, entry); + set_pte_at(vma->vm_mm, address, pte, entry); + cend = cstart + PAGE_SIZE; ret = 1; } else { #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE @@ -913,11 +924,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) continue; - flush_cache_page(vma, pvmw.address, page_to_pfn(page)); - entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd); + flush_cache_page(vma, address, page_to_pfn(page)); + entry = pmdp_huge_clear_flush(vma, address, pmd); entry = pmd_wrprotect(entry); entry = pmd_mkclean(entry); - set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry); + set_pmd_at(vma->vm_mm, address, pmd, entry); + cstart &= PMD_MASK; + cend = cstart + PMD_SIZE; ret = 1; #else /* unexpected pmd-mapped page? */ @@ -926,15 +939,12 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, } if (ret) { + mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend); (*cleaned)++; - invalidation_needed = true; } } - if (invalidation_needed) { - mmu_notifier_invalidate_range(vma->vm_mm, address, - address + (1UL << compound_order(page))); - } + mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); return true; } @@ -1328,7 +1338,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, }; pte_t pteval; struct page *subpage; - bool ret = true, invalidation_needed = false; + bool ret = true; + unsigned long start = address, end; enum ttu_flags flags = (enum ttu_flags)arg; /* munlock has nothing to gain from examining un-locked vmas */ @@ -1340,6 +1351,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, flags & TTU_MIGRATION, page); } + /* + * We have to assume the worst case, i.e. pmd, for invalidation. Note that + * the page cannot be freed in this function, as the caller of try_to_unmap() + * must hold a reference on the page. 
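The rmap conversion above follows the new mmu_notifier contract that replaces the removed per-page callback: announce the worst-case range with invalidate_range_start(), call invalidate_range() only for entries actually cleared during the walk, then close the bracket with invalidate_range_end(). A schematic of that calling shape with stub notifier hooks (the stubs merely print; this shows the bracketing pattern, not the kernel implementation):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stubs standing in for mmu_notifier_invalidate_range_start/range/end. */
static void inv_start(unsigned long s, unsigned long e) { printf("start %#lx-%#lx\n", s, e); }
static void inv_range(unsigned long s, unsigned long e) { printf("  inv %#lx-%#lx\n", s, e); }
static void inv_end(unsigned long s, unsigned long e)   { printf("end   %#lx-%#lx\n", s, e); }

/*
 * Walk a mapping: bracket the worst-case range once, notify per page
 * only for the entries that were actually modified.
 */
static void clean_range(unsigned long start, unsigned long npages)
{
	unsigned long end = start + npages * PAGE_SIZE;

	inv_start(start, end);
	for (unsigned long a = start; a < end; a += PAGE_SIZE) {
		int modified = (a / PAGE_SIZE) & 1;	/* pretend every other pte was dirty */
		if (modified)
			inv_range(a, a + PAGE_SIZE);	/* pte cleared: flush secondary MMUs */
	}
	inv_end(start, end);
}

int main(void) { clean_range(0x1000, 4); return 0; }
```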
+ */ + end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page))); + mmu_notifier_invalidate_range_start(vma->vm_mm, start, end); + while (page_vma_mapped_walk(&pvmw)) { /* * If the page is mlock()d, we cannot swap it out. @@ -1368,9 +1387,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, VM_BUG_ON_PAGE(!pvmw.pte, page); subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); + address = pvmw.address; + if (!(flags & TTU_IGNORE_ACCESS)) { - if (ptep_clear_flush_young_notify(vma, pvmw.address, + if (ptep_clear_flush_young_notify(vma, address, pvmw.pte)) { ret = false; page_vma_mapped_walk_done(&pvmw); @@ -1379,7 +1400,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, } /* Nuke the page table entry. */ - flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte)); + flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); if (should_defer_flush(mm, flags)) { /* * We clear the PTE but do not flush so potentially @@ -1389,12 +1410,11 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * transition on a cached TLB entry is written through * and traps if the PTE is unmapped. */ - pteval = ptep_get_and_clear(mm, pvmw.address, - pvmw.pte); + pteval = ptep_get_and_clear(mm, address, pvmw.pte); set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); } else { - pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte); + pteval = ptep_clear_flush(vma, address, pvmw.pte); } /* Move the dirty bit to the page. Now the pte is gone. */ @@ -1409,12 +1429,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, if (PageHuge(page)) { int nr = 1 << compound_order(page); hugetlb_count_sub(nr, mm); - set_huge_swap_pte_at(mm, pvmw.address, + set_huge_swap_pte_at(mm, address, pvmw.pte, pteval, vma_mmu_pagesize(vma)); } else { dec_mm_counter(mm, mm_counter(page)); - set_pte_at(mm, pvmw.address, pvmw.pte, pteval); + set_pte_at(mm, address, pvmw.pte, pteval); } } else if (pte_unused(pteval)) { @@ -1438,7 +1458,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); - set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); + set_pte_at(mm, address, pvmw.pte, swp_pte); } else if (PageAnon(page)) { swp_entry_t entry = { .val = page_private(subpage) }; pte_t swp_pte; @@ -1449,6 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { WARN_ON_ONCE(1); ret = false; + /* We have to invalidate as we cleared the pte */ page_vma_mapped_walk_done(&pvmw); break; } @@ -1464,7 +1485,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, * If the page was redirtied, it cannot be * discarded. Remap the page to page table. 
*/ - set_pte_at(mm, pvmw.address, pvmw.pte, pteval); + set_pte_at(mm, address, pvmw.pte, pteval); SetPageSwapBacked(page); ret = false; page_vma_mapped_walk_done(&pvmw); @@ -1472,7 +1493,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, } if (swap_duplicate(entry) < 0) { - set_pte_at(mm, pvmw.address, pvmw.pte, pteval); + set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; @@ -1488,18 +1509,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); - set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); + set_pte_at(mm, address, pvmw.pte, swp_pte); } else dec_mm_counter(mm, mm_counter_file(page)); discard: page_remove_rmap(subpage, PageHuge(page)); put_page(page); - invalidation_needed = true; + mmu_notifier_invalidate_range(mm, address, + address + PAGE_SIZE); } - if (invalidation_needed) - mmu_notifier_invalidate_range(mm, address, - address + (1UL << compound_order(page))); + mmu_notifier_invalidate_range_end(vma->vm_mm, start, end); + return ret; } diff --git a/mm/shmem.c b/mm/shmem.c index 6540e5982444..fbcb3c96a186 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3967,7 +3967,7 @@ int __init shmem_init(void) } #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE - if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) + if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; else shmem_huge = 0; /* just in case it was patched */ @@ -4028,7 +4028,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, return -EINVAL; shmem_huge = huge; - if (shmem_huge < SHMEM_HUGE_DENY) + if (shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; return count; } diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 861ae2a165f4..5a7be3bddfa9 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -53,6 +53,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) brstats->tx_bytes += skb->len; u64_stats_update_end(&brstats->syncp); +#ifdef CONFIG_NET_SWITCHDEV + skb->offload_fwd_mark = 0; +#endif BR_INPUT_SKB_CB(skb)->brdev = dev; skb_reset_mac_header(skb); diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c index 181a44d0f1da..f6b1c7de059d 100644 --- a/net/bridge/br_switchdev.c +++ b/net/bridge/br_switchdev.c @@ -115,7 +115,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac, void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type) { - if (!fdb->added_by_user) + if (!fdb->added_by_user || !fdb->dst) return; switch (type) { diff --git a/net/core/datagram.c b/net/core/datagram.c index a4d5f10d83a1..f7fb7e3f2acf 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -362,7 +362,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue, if (flags & MSG_PEEK) { err = -ENOENT; spin_lock_bh(&sk_queue->lock); - if (skb == skb_peek(sk_queue)) { + if (skb->next) { __skb_unlink(skb, sk_queue); refcount_dec(&skb->users); if (destructor) diff --git a/net/core/dev.c b/net/core/dev.c index 270b54754821..6f845e4fec17 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5319,6 +5319,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock) * Ideally, a new ndo_busy_poll_stop() could avoid another round. 
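The one-line net/core/dev.c change just below is easy to miss: busy_poll_stop() runs the NAPI poll callback one last time outside the normal softirq loop, and that work was previously invisible to the napi_poll tracepoint. The shape of the exit path after the hunk:

    rc = napi->poll(napi, BUSY_POLL_BUDGET);     /* flush remaining work */
    trace_napi_poll(napi, rc, BUSY_POLL_BUDGET); /* now visible to tracing */
    netpoll_poll_unlock(have_poll_lock);
    if (rc == BUSY_POLL_BUDGET)
            __napi_schedule(napi);               /* budget exhausted: back to softirq */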
*/ rc = napi->poll(napi, BUSY_POLL_BUDGET); + trace_napi_poll(napi, rc, BUSY_POLL_BUDGET); netpoll_poll_unlock(have_poll_lock); if (rc == BUSY_POLL_BUDGET) __napi_schedule(napi); @@ -5697,12 +5698,13 @@ EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu); * Find out if a device is linked to an upper device and return true in case * it is. The caller must hold the RTNL lock. */ -static bool netdev_has_any_upper_dev(struct net_device *dev) +bool netdev_has_any_upper_dev(struct net_device *dev) { ASSERT_RTNL(); return !list_empty(&dev->adj_list.upper); } +EXPORT_SYMBOL(netdev_has_any_upper_dev); /** * netdev_master_upper_dev_get - Get master upper device diff --git a/net/core/filter.c b/net/core/filter.c index f9add024d92f..5912c738a7b2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3066,15 +3066,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, sk->sk_prot->setsockopt == tcp_setsockopt) { if (optname == TCP_CONGESTION) { char name[TCP_CA_NAME_MAX]; + bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN; strncpy(name, optval, min_t(long, optlen, TCP_CA_NAME_MAX-1)); name[TCP_CA_NAME_MAX-1] = 0; - ret = tcp_set_congestion_control(sk, name, false); - if (!ret && bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN) - /* replacing an existing ca */ - tcp_reinit_congestion_control(sk, - inet_csk(sk)->icsk_ca_ops); + ret = tcp_set_congestion_control(sk, name, false, reinit); } else { struct tcp_sock *tp = tcp_sk(sk); @@ -3102,7 +3099,6 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, ret = -EINVAL; } } - ret = -EINVAL; #endif } else { ret = -EINVAL; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 917da73d3ab3..246ca1c81715 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1615,18 +1615,20 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, EXPORT_SYMBOL(skb_copy_expand); /** - * skb_pad - zero pad the tail of an skb + * __skb_pad - zero pad the tail of an skb * @skb: buffer to pad * @pad: space to pad + * @free_on_error: free buffer on error * * Ensure that a buffer is followed by a padding area that is zero * filled. Used by network drivers which may DMA or transfer data * beyond the buffer end onto the wire. * - * May return error in out of memory cases. The skb is freed on error. + * May return error in out of memory cases. The skb is freed on error + * if @free_on_error is true. */ -int skb_pad(struct sk_buff *skb, int pad) +int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) { int err; int ntail; @@ -1655,10 +1657,11 @@ int skb_pad(struct sk_buff *skb, int pad) return 0; free_skb: - kfree_skb(skb); + if (free_on_error) + kfree_skb(skb); return err; } -EXPORT_SYMBOL(skb_pad); +EXPORT_SYMBOL(__skb_pad); /** * pskb_put - add data to the tail of a potentially fragmented buffer diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index cceaa4dd9f53..873af0108e24 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -577,7 +577,7 @@ static int dsa_dst_parse(struct dsa_switch_tree *dst) return err; } - if (!dst->cpu_dp->netdev) { + if (!dst->cpu_dp) { pr_warn("Tree has no master device\n"); return -EINVAL; } diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index 17f30675c15c..010ca0a336c4 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -42,7 +42,8 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev) padlen = (skb->len >= ETH_ZLEN) ? 
0 : ETH_ZLEN - skb->len; if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) { - if (skb_put_padto(skb, skb->len + padlen)) + /* Let dsa_slave_xmit() free skb */ + if (__skb_put_padto(skb, skb->len + padlen, false)) return NULL; nskb = skb; @@ -60,12 +61,13 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev) skb_transport_header(skb) - skb->head); skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len)); - if (skb_put_padto(nskb, nskb->len + padlen)) { - kfree_skb(nskb); + /* Let skb_put_padto() free nskb, and let dsa_slave_xmit() free + * skb + */ + if (skb_put_padto(nskb, nskb->len + padlen)) return NULL; - } - kfree_skb(skb); + consume_skb(skb); } tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN); diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 8707157dea32..d2fd4923aa3e 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -40,7 +40,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev) skb_set_network_header(nskb, skb_network_header(skb) - skb->head); skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head); skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len)); - kfree_skb(skb); + consume_skb(skb); if (padlen) { skb_put_zero(nskb, padlen); diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 4e7bdb213cd0..172d8309f89e 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -314,7 +314,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master, hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr); - skb_put_padto(skb, ETH_ZLEN + HSR_HLEN); + if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) + return; hsr_forward_skb(skb, master); return; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 319000573bc7..b00e4a43b4dc 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -258,7 +258,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * esp_output_udp_encap(x, skb, esp); if (!skb_cloned(skb)) { - if (tailen <= skb_availroom(skb)) { + if (tailen <= skb_tailroom(skb)) { nfrags = 1; trailer = skb; tail = skb_tail_pointer(trailer); @@ -292,8 +292,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * kunmap_atomic(vaddr); - spin_unlock_bh(&x->lock); - nfrags = skb_shinfo(skb)->nr_frags; __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, @@ -301,6 +299,9 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * skb_shinfo(skb)->nr_frags = ++nfrags; pfrag->offset = pfrag->offset + allocsize; + + spin_unlock_bh(&x->lock); + nfrags++; skb->len += tailen; @@ -381,7 +382,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) - goto error; + goto error_free; if (!esp->inplace) { int allocsize; @@ -392,7 +393,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * spin_lock_bh(&x->lock); if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { spin_unlock_bh(&x->lock); - goto error; + goto error_free; } skb_shinfo(skb)->nr_frags = 1; @@ -409,7 +410,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) - goto error; + goto error_free; } if ((x->props.flags & XFRM_STATE_ESN)) @@ -442,8 +443,9 @@ int esp_output_tail(struct xfrm_state *x, struct 
sk_buff *skb, struct esp_info * if (sg != dsg) esp_ssg_unref(x, tmp); - kfree(tmp); +error_free: + kfree(tmp); error: return err; } @@ -727,8 +729,10 @@ skip_cow: sg_init_table(sg, nfrags); err = skb_to_sgvec(skb, sg, 0, skb->len); - if (unlikely(err < 0)) + if (unlikely(err < 0)) { + kfree(tmp); goto out; + } skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index aca1c85f0795..f8b918c766b0 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -259,7 +259,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32)); err = esp_output_tail(x, skb, &esp); - if (err < 0) + if (err) return err; secpath_reset(skb); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 0bc3c3d73e61..9e9d9afd18f7 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -268,14 +268,14 @@ unsigned int arpt_do_table(struct sk_buff *skb, acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); - /* Target might have changed stuff. */ - arp = arp_hdr(skb); - - if (verdict == XT_CONTINUE) + if (verdict == XT_CONTINUE) { + /* Target might have changed stuff. */ + arp = arp_hdr(skb); e = arpt_next_entry(e); - else + } else { /* Verdict */ break; + } } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 2a55a40211cb..622ed2887cd5 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -352,13 +352,14 @@ ipt_do_table(struct sk_buff *skb, acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); - /* Target might have changed stuff. */ - ip = ip_hdr(skb); - if (verdict == XT_CONTINUE) + if (verdict == XT_CONTINUE) { + /* Target might have changed stuff. */ + ip = ip_hdr(skb); e = ipt_next_entry(e); - else + } else { /* Verdict */ break; + } } while (!acpar.hotdrop); xt_write_recseq_end(addend); diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 7d72decb80f9..efaa04dcc80e 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -117,7 +117,8 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c) * functions are also incrementing the refcount on their own, * so it's safe to remove the entry even if it's in use. 
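The CLUSTERIP hunk that follows closes a netns-teardown race: clusterip_net_exit() removes the per-namespace proc directory, so an entry released afterwards must not call proc_remove() against a stale parent. The intended ordering, sketched with cn as the per-netns state:

    /* netns exit path: tear the directory down once, then mark it gone */
    proc_remove(cn->procdir);
    cn->procdir = NULL;

    /* later entry teardown: only touch procfs while the parent exists */
    if (cn->procdir)
            proc_remove(c->pde);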
*/ #ifdef CONFIG_PROC_FS - proc_remove(c->pde); + if (cn->procdir) + proc_remove(c->pde); #endif return; } @@ -815,6 +816,7 @@ static void clusterip_net_exit(struct net *net) #ifdef CONFIG_PROC_FS struct clusterip_net *cn = net_generic(net, clusterip_net_id); proc_remove(cn->procdir); + cn->procdir = NULL; #endif nf_unregister_net_hook(net, &cip_arp_ops); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 21ca2df274c5..7a3d84375836 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2498,7 +2498,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, name[val] = 0; lock_sock(sk); - err = tcp_set_congestion_control(sk, name, true); + err = tcp_set_congestion_control(sk, name, true, true); release_sock(sk); return err; } diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index c2b174469645..2f26124fd160 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -189,8 +189,8 @@ void tcp_init_congestion_control(struct sock *sk) INET_ECN_dontxmit(sk); } -void tcp_reinit_congestion_control(struct sock *sk, - const struct tcp_congestion_ops *ca) +static void tcp_reinit_congestion_control(struct sock *sk, + const struct tcp_congestion_ops *ca) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -338,7 +338,7 @@ out: * tcp_reinit_congestion_control (if the current congestion control was * already initialized). */ -int tcp_set_congestion_control(struct sock *sk, const char *name, bool load) +int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit) { struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_congestion_ops *ca; @@ -360,9 +360,18 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load) if (!ca) { err = -ENOENT; } else if (!load) { - icsk->icsk_ca_ops = ca; - if (!try_module_get(ca->owner)) + const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops; + + if (try_module_get(ca->owner)) { + if (reinit) { + tcp_reinit_congestion_control(sk, ca); + } else { + icsk->icsk_ca_ops = ca; + module_put(old_ca->owner); + } + } else { err = -EBUSY; + } } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) { err = -EPERM; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index bf6c406bf5e7..f900cdd0fbfb 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1933,14 +1933,16 @@ drop: /* For TCP sockets, sk_rx_dst is protected by socket lock. * For UDP, we use xchg() to guard against concurrent changes.
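With the signature change below, udp_sk_rx_dst_set() reports whether the cached dst was actually replaced. The IPv6 hunk later in this patch uses that to refresh its route cookie only on a real swap; condensed from that hunk:

    static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
    {
            if (udp_sk_rx_dst_set(sk, dst)) {
                    const struct rt6_info *rt = (const struct rt6_info *)dst;

                    /* cache was swapped: revalidate against the new route */
                    inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
            }
    }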
*/ -void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) +bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) { struct dst_entry *old; if (dst_hold_safe(dst)) { old = xchg(&sk->sk_rx_dst, dst); dst_release(old); + return old != dst; } + return false; } EXPORT_SYMBOL(udp_sk_rx_dst_set); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 45d0a24644de..c2e2a78787ec 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5514,7 +5514,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) * our DAD process, so we don't need * to do it again */ - if (!(ifp->rt->rt6i_node)) + if (!rcu_access_pointer(ifp->rt->rt6i_node)) ip6_ins_rt(ifp->rt); if (ifp->idev->cnf.forwarding) addrconf_join_anycast(ifp); diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 7fb41b0ad437..89910e2c10f4 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -226,7 +226,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info int tailen = esp->tailen; if (!skb_cloned(skb)) { - if (tailen <= skb_availroom(skb)) { + if (tailen <= skb_tailroom(skb)) { nfrags = 1; trailer = skb; tail = skb_tail_pointer(trailer); @@ -260,8 +260,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info kunmap_atomic(vaddr); - spin_unlock_bh(&x->lock); - nfrags = skb_shinfo(skb)->nr_frags; __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, @@ -269,6 +267,9 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info skb_shinfo(skb)->nr_frags = ++nfrags; pfrag->offset = pfrag->offset + allocsize; + + spin_unlock_bh(&x->lock); + nfrags++; skb->len += tailen; @@ -345,7 +346,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) - goto error; + goto error_free; if (!esp->inplace) { int allocsize; @@ -356,7 +357,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info spin_lock_bh(&x->lock); if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { spin_unlock_bh(&x->lock); - goto error; + goto error_free; } skb_shinfo(skb)->nr_frags = 1; @@ -373,7 +374,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) - goto error; + goto error_free; } if ((x->props.flags & XFRM_STATE_ESN)) @@ -406,8 +407,9 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info if (sg != dsg) esp_ssg_unref(x, tmp); - kfree(tmp); +error_free: + kfree(tmp); error: return err; } diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 8d4e2ba9163d..333a478aa161 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -288,7 +288,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32)); err = esp6_output_tail(x, skb, &esp); - if (err < 0) + if (err) return err; secpath_reset(skb); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 549aacc3cb2c..a3b5c163325f 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -149,11 +149,23 @@ static struct fib6_node *node_alloc(void) return fn; } -static void node_free(struct fib6_node *fn) +static void node_free_immediate(struct fib6_node *fn) +{ + kmem_cache_free(fib6_node_kmem, fn); +} + +static void node_free_rcu(struct rcu_head *head) { + struct fib6_node *fn = 
container_of(head, struct fib6_node, rcu); + kmem_cache_free(fib6_node_kmem, fn); } +static void node_free(struct fib6_node *fn) +{ + call_rcu(&fn->rcu, node_free_rcu); +} + void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) { int cpu; @@ -697,9 +709,9 @@ insert_above: if (!in || !ln) { if (in) - node_free(in); + node_free_immediate(in); if (ln) - node_free(ln); + node_free_immediate(ln); return ERR_PTR(-ENOMEM); } @@ -971,7 +983,7 @@ add: rt->dst.rt6_next = iter; *ins = rt; - rt->rt6i_node = fn; + rcu_assign_pointer(rt->rt6i_node, fn); atomic_inc(&rt->rt6i_ref); call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_ADD, rt); @@ -999,7 +1011,7 @@ add: return err; *ins = rt; - rt->rt6i_node = fn; + rcu_assign_pointer(rt->rt6i_node, fn); rt->dst.rt6_next = iter->dst.rt6_next; atomic_inc(&rt->rt6i_ref); call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_REPLACE, @@ -1138,7 +1150,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, root, and then (in failure) stale node in main tree. */ - node_free(sfn); + node_free_immediate(sfn); err = PTR_ERR(sn); goto failure; } @@ -1569,8 +1581,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, int fib6_del(struct rt6_info *rt, struct nl_info *info) { + struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node, + lockdep_is_held(&rt->rt6i_table->tb6_lock)); struct net *net = info->nl_net; - struct fib6_node *fn = rt->rt6i_node; struct rt6_info **rtp; #if RT6_DEBUG >= 2 @@ -1759,7 +1772,9 @@ static int fib6_clean_node(struct fib6_walker *w) if (res) { #if RT6_DEBUG >= 2 pr_debug("%s: del failed: rt=%p@%p err=%d\n", - __func__, rt, rt->rt6i_node, res); + __func__, rt, + rcu_access_pointer(rt->rt6i_node), + res); #endif continue; } @@ -1881,8 +1896,10 @@ static int fib6_age(struct rt6_info *rt, void *arg) } gc_args->more++; } else if (rt->rt6i_flags & RTF_CACHE) { + if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) + rt->dst.obsolete = DST_OBSOLETE_KILL; if (atomic_read(&rt->dst.__refcnt) == 1 && - time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) { + rt->dst.obsolete == DST_OBSOLETE_KILL) { RT6_TRACE("aging clone %p\n", rt); return -1; } else if (rt->rt6i_flags & RTF_GATEWAY) { diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 02d795fe3d7f..a5e466d4e093 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -242,7 +242,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, pktopt = xchg(&np->pktoptions, NULL); kfree_skb(pktopt); - sk->sk_destruct = inet_sock_destruct; /* * ... and add it to the refcnt debug socks count * in the new family. 
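The ip6_fib changes above split node freeing in two because lockless readers may still hold pointers into the tree: a node that was published must wait out an RCU grace period, while one that never became visible can be freed on the spot. Restated with the reasoning attached:

    static void node_free_rcu(struct rcu_head *head)
    {
            struct fib6_node *fn = container_of(head, struct fib6_node, rcu);

            kmem_cache_free(fib6_node_kmem, fn);
    }

    static void node_free(struct fib6_node *fn)
    {
            call_rcu(&fn->rcu, node_free_rcu);   /* was visible to readers */
    }

    static void node_free_immediate(struct fib6_node *fn)
    {
            kmem_cache_free(fib6_node_kmem, fn); /* never published */
    }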
-acme diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index abb2c307fbe8..a338bbc33cf3 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) while (offset <= packet_len) { struct ipv6_opt_hdr *exthdr; - unsigned int len; switch (**nexthdr) { @@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + offset); - len = ipv6_optlen(exthdr); - if (len + offset >= IPV6_MAXPLEN) + offset += ipv6_optlen(exthdr); + if (offset > IPV6_MAXPLEN) return -EINVAL; - offset += len; *nexthdr = &exthdr->nexthdr; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4d0273459d49..26cc9f483b6d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -440,7 +440,8 @@ static bool rt6_check_expired(const struct rt6_info *rt) if (time_after(jiffies, rt->dst.expires)) return true; } else if (rt->dst.from) { - return rt6_check_expired((struct rt6_info *) rt->dst.from); + return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK || + rt6_check_expired((struct rt6_info *)rt->dst.from); } return false; } @@ -1363,7 +1364,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt) static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie) { - if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie)) + u32 rt_cookie = 0; + + if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie) return NULL; if (rt6_check_expired(rt)) @@ -1431,8 +1434,14 @@ static void ip6_link_failure(struct sk_buff *skb) if (rt->rt6i_flags & RTF_CACHE) { if (dst_hold_safe(&rt->dst)) ip6_del_rt(rt); - } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { - rt->rt6i_node->fn_sernum = -1; + } else { + struct fib6_node *fn; + + rcu_read_lock(); + fn = rcu_dereference(rt->rt6i_node); + if (fn && (rt->rt6i_flags & RTF_DEFAULT)) + fn->fn_sernum = -1; + rcu_read_unlock(); } } } @@ -1449,7 +1458,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu) static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) { return !(rt->rt6i_flags & RTF_CACHE) && - (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node); + (rt->rt6i_flags & RTF_PCPU || + rcu_access_pointer(rt->rt6i_node)); } static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 976f30391356..42ebb9ad46cc 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -772,6 +772,15 @@ start_lookup: return 0; } +static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) +{ + if (udp_sk_rx_dst_set(sk, dst)) { + const struct rt6_info *rt = (const struct rt6_info *)dst; + + inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); + } +} + int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { @@ -821,7 +830,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int ret; if (unlikely(sk->sk_rx_dst != dst)) - udp_sk_rx_dst_set(sk, dst); + udp6_sk_rx_dst_set(sk, dst); ret = udpv6_queue_rcv_skb(sk, skb); sock_put(sk); diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 48e993b2dbcf..af4e76ac88ff 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1387,6 +1387,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock, if (!csk) return -EINVAL; + /* We must prevent loops or risk deadlock ! 
*/ + if (csk->sk_family == PF_KCM) + return -EOPNOTSUPP; + psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); if (!psock) return -ENOMEM; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index b0c2d4ae781d..90165a6874bc 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -113,7 +113,6 @@ struct l2tp_net { spinlock_t l2tp_session_hlist_lock; }; -static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) { @@ -127,39 +126,6 @@ static inline struct l2tp_net *l2tp_pernet(const struct net *net) return net_generic(net, l2tp_net_id); } -/* Tunnel reference counts. Incremented per session that is added to - * the tunnel. - */ -static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel) -{ - refcount_inc(&tunnel->ref_count); -} - -static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) -{ - if (refcount_dec_and_test(&tunnel->ref_count)) - l2tp_tunnel_free(tunnel); -} -#ifdef L2TP_REFCNT_DEBUG -#define l2tp_tunnel_inc_refcount(_t) \ -do { \ - pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \ - __func__, __LINE__, (_t)->name, \ - refcount_read(&_t->ref_count)); \ - l2tp_tunnel_inc_refcount_1(_t); \ -} while (0) -#define l2tp_tunnel_dec_refcount(_t) \ -do { \ - pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \ - __func__, __LINE__, (_t)->name, \ - refcount_read(&_t->ref_count)); \ - l2tp_tunnel_dec_refcount_1(_t); \ -} while (0) -#else -#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) -#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) -#endif - /* Session hash global list for L2TPv3. * The session_id SHOULD be random according to RFC3931, but several * L2TP implementations use incrementing session_ids. So we do a real @@ -229,6 +195,27 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; } +/* Lookup a tunnel. A new reference is held on the returned tunnel. */ +struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) +{ + const struct l2tp_net *pn = l2tp_pernet(net); + struct l2tp_tunnel *tunnel; + + rcu_read_lock_bh(); + list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { + if (tunnel->tunnel_id == tunnel_id) { + l2tp_tunnel_inc_refcount(tunnel); + rcu_read_unlock_bh(); + + return tunnel; + } + } + rcu_read_unlock_bh(); + + return NULL; +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_get); + /* Lookup a session. A new reference is held on the returned session. * Optionally calls session->ref() too if do_ref is true. */ @@ -1348,17 +1335,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk) } } -/* Really kill the tunnel. - * Come here only when all sessions have been cleared from the tunnel. 
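l2tp_tunnel_get(), added above, replaces bare l2tp_tunnel_find() lookups in the netlink handlers further down. The contract is that a successful lookup returns with a reference held, so every call site pairs it with a drop:

    tunnel = l2tp_tunnel_get(net, tunnel_id);
    if (!tunnel)
            return -ENODEV;

    /* ... use the tunnel; it cannot be freed underneath us ... */

    l2tp_tunnel_dec_refcount(tunnel);  /* may kfree_rcu() the tunnel */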
- */ -static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) -{ - BUG_ON(refcount_read(&tunnel->ref_count) != 0); - BUG_ON(tunnel->sock != NULL); - l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name); - kfree_rcu(tunnel, rcu); -} - /* Workqueue tunnel deletion function */ static void l2tp_tunnel_del_work(struct work_struct *work) { @@ -1844,6 +1820,8 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn l2tp_session_set_header_len(session, tunnel->version); + refcount_set(&session->ref_count, 1); + err = l2tp_session_add_to_tunnel(tunnel, session); if (err) { kfree(session); @@ -1851,10 +1829,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn return ERR_PTR(err); } - /* Bump the reference count. The session context is deleted - * only when this drops to zero. - */ - refcount_set(&session->ref_count, 1); l2tp_tunnel_inc_refcount(tunnel); /* Ensure tunnel socket isn't deleted */ diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index cdb6e3327f74..9101297f27ad 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -231,6 +231,8 @@ out: return tunnel; } +struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); + struct l2tp_session *l2tp_session_get(const struct net *net, struct l2tp_tunnel *tunnel, u32 session_id, bool do_ref); @@ -269,6 +271,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); +static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel) +{ + refcount_inc(&tunnel->ref_count); +} + +static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) +{ + if (refcount_dec_and_test(&tunnel->ref_count)) + kfree_rcu(tunnel, rcu); +} + /* Session reference counts. Incremented when code obtains a reference * to a session. 
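The l2tp_session_create() reorder above is a publish-safety fix: the refcount must be initialized before l2tp_session_add_to_tunnel() makes the session reachable through the hash lists, otherwise a concurrent lookup could take a reference on a session whose count is still zero. Condensed:

    refcount_set(&session->ref_count, 1);  /* init before publishing */

    err = l2tp_session_add_to_tunnel(tunnel, session);  /* now visible */
    if (err) {
            kfree(session);  /* never published, a plain free is safe */
            return ERR_PTR(err);
    }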
*/ diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 12cfcd0ca807..57427d430f10 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -65,10 +65,12 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info, (info->attrs[L2TP_ATTR_CONN_ID])) { tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel) + tunnel = l2tp_tunnel_get(net, tunnel_id); + if (tunnel) { session = l2tp_session_get(net, tunnel, session_id, do_ref); + l2tp_tunnel_dec_refcount(tunnel); + } } return session; @@ -271,8 +273,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel == NULL) { + tunnel = l2tp_tunnel_get(net, tunnel_id); + if (!tunnel) { ret = -ENODEV; goto out; } @@ -282,6 +284,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info (void) l2tp_tunnel_delete(tunnel); + l2tp_tunnel_dec_refcount(tunnel); + out: return ret; } @@ -299,8 +303,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel == NULL) { + tunnel = l2tp_tunnel_get(net, tunnel_id); + if (!tunnel) { ret = -ENODEV; goto out; } @@ -311,6 +315,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info ret = l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel, L2TP_CMD_TUNNEL_MODIFY); + l2tp_tunnel_dec_refcount(tunnel); + out: return ret; } @@ -438,34 +444,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info) if (!info->attrs[L2TP_ATTR_CONN_ID]) { ret = -EINVAL; - goto out; + goto err; } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel == NULL) { - ret = -ENODEV; - goto out; - } - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; - goto out; + goto err; + } + + tunnel = l2tp_tunnel_get(net, tunnel_id); + if (!tunnel) { + ret = -ENODEV; + goto err_nlmsg; } ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET); if (ret < 0) - goto err_out; + goto err_nlmsg_tunnel; + + l2tp_tunnel_dec_refcount(tunnel); return genlmsg_unicast(net, msg, info->snd_portid); -err_out: +err_nlmsg_tunnel: + l2tp_tunnel_dec_refcount(tunnel); +err_nlmsg: nlmsg_free(msg); - -out: +err: return ret; } @@ -509,8 +518,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf ret = -EINVAL; goto out; } + tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); - tunnel = l2tp_tunnel_find(net, tunnel_id); + tunnel = l2tp_tunnel_get(net, tunnel_id); if (!tunnel) { ret = -ENODEV; goto out; @@ -518,24 +528,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf if (!info->attrs[L2TP_ATTR_SESSION_ID]) { ret = -EINVAL; - goto out; + goto out_tunnel; } session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) { ret = -EINVAL; - goto out; + goto out_tunnel; } peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]); if (!info->attrs[L2TP_ATTR_PW_TYPE]) { ret = -EINVAL; - goto out; + goto out_tunnel; } cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]); if (cfg.pw_type >= __L2TP_PWTYPE_MAX) { 
ret = -EINVAL; - goto out; + goto out_tunnel; } if (tunnel->version > 2) { @@ -557,7 +567,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); if (len > 8) { ret = -EINVAL; - goto out; + goto out_tunnel; } cfg.cookie_len = len; memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); @@ -566,7 +576,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); if (len > 8) { ret = -EINVAL; - goto out; + goto out_tunnel; } cfg.peer_cookie_len = len; memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); @@ -609,7 +619,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { ret = -EPROTONOSUPPORT; - goto out; + goto out_tunnel; } /* Check that pseudowire-specific params are present */ @@ -619,7 +629,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf case L2TP_PWTYPE_ETH_VLAN: if (!info->attrs[L2TP_ATTR_VLAN_ID]) { ret = -EINVAL; - goto out; + goto out_tunnel; } break; case L2TP_PWTYPE_ETH: @@ -647,6 +657,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf } } +out_tunnel: + l2tp_tunnel_dec_refcount(tunnel); out: return ret; } diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index eb541786ccb7..b1d3740ae36a 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -441,7 +441,7 @@ nf_nat_setup_info(struct nf_conn *ct, else ct->status |= IPS_DST_NAT; - if (nfct_help(ct)) + if (nfct_help(ct) && !nfct_seqadj(ct)) if (!nfct_seqadj_ext_add(ct)) return NF_DROP; } diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index f5a7cb68694e..b89f4f65b2a0 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -305,7 +305,7 @@ static int nft_target_validate(const struct nft_ctx *ctx, const struct nf_hook_ops *ops = &basechain->ops[0]; hook_mask = 1 << ops->hooknum; - if (!(hook_mask & target->hooks)) + if (target->hooks && !(hook_mask & target->hooks)) return -EINVAL; ret = nft_compat_chain_validate_dependency(target->table, @@ -484,7 +484,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, const struct nf_hook_ops *ops = &basechain->ops[0]; hook_mask = 1 << ops->hooknum; - if (!(hook_mask & match->hooks)) + if (match->hooks && !(hook_mask & match->hooks)) return -EINVAL; ret = nft_compat_chain_validate_dependency(match->table, diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c index 18dd57a52651..14538b1d4d11 100644 --- a/net/netfilter/nft_limit.c +++ b/net/netfilter/nft_limit.c @@ -65,19 +65,23 @@ static int nft_limit_init(struct nft_limit *limit, limit->nsecs = unit * NSEC_PER_SEC; if (limit->rate == 0 || limit->nsecs < unit) return -EOVERFLOW; - limit->tokens = limit->tokens_max = limit->nsecs; - - if (tb[NFTA_LIMIT_BURST]) { - u64 rate; + if (tb[NFTA_LIMIT_BURST]) limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST])); + else + limit->burst = 0; + + if (limit->rate + limit->burst < limit->rate) + return -EOVERFLOW; - rate = limit->rate + limit->burst; - if (rate < limit->rate) - return -EOVERFLOW; + /* The token bucket size limits the number of tokens that can be + * accumulated. tokens_max specifies the bucket size. + * tokens_max = unit * (rate + burst) / rate.
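Plugging numbers into that formula may help: with rate = 10, unit = 1 s and burst = 5, the bucket holds rate + burst packets' worth of tokens, and the packet-mode limiter charges nsecs / rate per packet. Toy values only:

    u64 nsecs      = 1 * NSEC_PER_SEC;               /* unit = 1 s -> 1e9   */
    u64 tokens_max = div_u64(nsecs * (10 + 5), 10);  /* 1.5e9 = 15 packets  */
    u64 cost       = div_u64(nsecs, 10);             /* 1e8 charged per pkt */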
+ */ + limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), + limit->rate); + limit->tokens_max = limit->tokens; - limit->rate = rate; - } if (tb[NFTA_LIMIT_FLAGS]) { u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS])); @@ -95,9 +99,8 @@ static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit, { u32 flags = limit->invert ? NFT_LIMIT_F_INV : 0; u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC); - u64 rate = limit->rate - limit->burst; - if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate), + if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(limit->rate), NFTA_LIMIT_PAD) || nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs), NFTA_LIMIT_PAD) || diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f31cb71172e0..c26172995511 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2189,6 +2189,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct timespec ts; __u32 ts_status; bool is_drop_n_account = false; + bool do_vnet = false; /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT. * We may add members to them until current aligned size without forcing @@ -2239,8 +2240,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, netoff = TPACKET_ALIGN(po->tp_hdrlen + (maclen < 16 ? 16 : maclen)) + po->tp_reserve; - if (po->has_vnet_hdr) + if (po->has_vnet_hdr) { netoff += sizeof(struct virtio_net_hdr); + do_vnet = true; + } macoff = netoff - maclen; } if (po->tp_version <= TPACKET_V2) { @@ -2257,8 +2260,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, skb_set_owner_r(copy_skb, sk); } snaplen = po->rx_ring.frame_size - macoff; - if ((int)snaplen < 0) + if ((int)snaplen < 0) { snaplen = 0; + do_vnet = false; + } } } else if (unlikely(macoff + snaplen > GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) { @@ -2271,6 +2276,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (unlikely((int)snaplen < 0)) { snaplen = 0; macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len; + do_vnet = false; } } spin_lock(&sk->sk_receive_queue.lock); @@ -2296,7 +2302,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, } spin_unlock(&sk->sk_receive_queue.lock); - if (po->has_vnet_hdr) { + if (do_vnet) { if (virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), vio_le(), true)) { diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index d470a4e2de58..ea6c65fd5fc5 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -200,9 +200,15 @@ static void tcf_chain_flush(struct tcf_chain *chain) static void tcf_chain_destroy(struct tcf_chain *chain) { - list_del(&chain->list); - tcf_chain_flush(chain); - kfree(chain); + /* May be already removed from the list by the previous call. */ + if (!list_empty(&chain->list)) + list_del_init(&chain->list); + + /* There might still be a reference held when we got here from + * tcf_block_put. Wait for the user to drop reference before free. 
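The tcf_chain hunks just below read best as a two-phase teardown: tcf_block_put() flushes every chain first (which can drop the references held by actions and recurse into destroy) and only then destroys it, while tcf_chain_destroy() itself became safe to reach twice and refuses to free a chain that is still referenced. Roughly:

    list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
            tcf_chain_flush(chain);   /* may trigger a recursive destroy */
            tcf_chain_destroy(chain); /* unlink if needed; free only at refcnt 0 */
    }
    kfree(block);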
+ */ + if (!chain->refcnt) + kfree(chain); } struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, @@ -273,8 +279,10 @@ void tcf_block_put(struct tcf_block *block) if (!block) return; - list_for_each_entry_safe(chain, tmp, &block->chain_list, list) + list_for_each_entry_safe(chain, tmp, &block->chain_list, list) { + tcf_chain_flush(chain); tcf_chain_destroy(chain); + } kfree(block); } EXPORT_SYMBOL(tcf_block_put); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 929b024f41ba..c6deb74e3d2f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -927,7 +927,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, old = dev_graft_qdisc(dev_queue, new); if (new && i > 0) - refcount_inc(&new->refcnt); + qdisc_refcount_inc(new); if (!ingress) qdisc_destroy(old); @@ -938,7 +938,7 @@ skip: notify_and_destroy(net, skb, n, classid, dev->qdisc, new); if (new && !new->ops->attach) - refcount_inc(&new->refcnt); + qdisc_refcount_inc(new); dev->qdisc = new ? : &noop_qdisc; if (new && new->ops->attach) @@ -1347,7 +1347,7 @@ replay: if (q == p || (p && check_loop(q, p, 0))) return -ELOOP; - refcount_inc(&q->refcnt); + qdisc_refcount_inc(q); goto graft; } else { if (!q) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 3ec8bec109bb..dcef97fa8047 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1138,6 +1138,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) struct tc_ratespec *r; int err; + qdisc_watchdog_init(&q->watchdog, sch); + hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + q->delay_timer.function = cbq_undelay; + + if (!opt) + return -EINVAL; + err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL); if (err < 0) return err; @@ -1175,9 +1182,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) q->link.avpkt = q->link.allot/2; q->link.minidle = -0x7FFFFFFF; - qdisc_watchdog_init(&q->watchdog, sch); - hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); - q->delay_timer.function = cbq_undelay; q->toplevel = TC_CBQ_MAXLEVEL; q->now = psched_get_time(); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 7699b50688cd..de3b57ceca7b 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -491,10 +491,8 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) if (!q->flows) return -ENOMEM; q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL); - if (!q->backlogs) { - kvfree(q->flows); + if (!q->backlogs) return -ENOMEM; - } for (i = 0; i < q->flows_cnt; i++) { struct fq_codel_flow *flow = q->flows + i; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index c6b89a34e8d2..92237e75dbbc 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -789,7 +789,7 @@ static void attach_default_qdiscs(struct net_device *dev) dev->priv_flags & IFF_NO_QUEUE) { netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); dev->qdisc = txq->qdisc_sleeping; - refcount_inc(&dev->qdisc->refcnt); + qdisc_refcount_inc(dev->qdisc); } else { qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT); if (qdisc) { diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 7c7820d0fdc7..daaf214e5201 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1378,6 +1378,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) struct tc_hfsc_qopt *qopt; int err; + qdisc_watchdog_init(&q->watchdog, sch); + if (opt == NULL || nla_len(opt) < sizeof(*qopt)) return -EINVAL; qopt = 
nla_data(opt); @@ -1390,7 +1392,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) err = tcf_block_get(&q->root.block, &q->root.filter_list); if (err) - goto err_tcf; + return err; q->root.cl_common.classid = sch->handle; q->root.sched = q; @@ -1407,13 +1409,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt) qdisc_class_hash_insert(&q->clhash, &q->root.cl_common); qdisc_class_hash_grow(sch, &q->clhash); - qdisc_watchdog_init(&q->watchdog, sch); - return 0; - -err_tcf: - qdisc_class_hash_destroy(&q->clhash); - return err; } static int diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 51d3ba682af9..73a53c08091b 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -477,6 +477,9 @@ static void hhf_destroy(struct Qdisc *sch) kvfree(q->hhf_valid_bits[i]); } + if (!q->hh_flows) + return; + for (i = 0; i < HH_FLOWS_CNT; i++) { struct hh_flow_state *flow, *next; struct list_head *head = &q->hh_flows[i]; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index f955b59d3c7c..7e148376ba52 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1020,6 +1020,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) int err; int i; + qdisc_watchdog_init(&q->watchdog, sch); + INIT_WORK(&q->work, htb_work_func); + if (!opt) return -EINVAL; @@ -1044,8 +1047,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) for (i = 0; i < TC_HTB_NUMPRIO; i++) INIT_LIST_HEAD(q->drops + i); - qdisc_watchdog_init(&q->watchdog, sch); - INIT_WORK(&q->work, htb_work_func); qdisc_skb_head_init(&q->direct_queue); if (tb[TCA_HTB_DIRECT_QLEN]) diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index a5df979b6248..ff4fc3e0facd 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -257,12 +257,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt) for (i = 0; i < q->max_bands; i++) q->queues[i] = &noop_qdisc; - err = multiq_tune(sch, opt); - - if (err) - kfree(q->queues); - - return err; + return multiq_tune(sch, opt); } static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index cf5aad0aabfc..b1266e75ca43 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -933,11 +933,11 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt) struct netem_sched_data *q = qdisc_priv(sch); int ret; + qdisc_watchdog_init(&q->watchdog, sch); + if (!opt) return -EINVAL; - qdisc_watchdog_init(&q->watchdog, sch); - q->loss_model = CLG_RANDOM; ret = netem_change(sch, opt); if (ret) diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index e0f029a887ac..74ea863b8240 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -722,13 +722,13 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) int i; int err; + setup_deferrable_timer(&q->perturb_timer, sfq_perturbation, + (unsigned long)sch); + err = tcf_block_get(&q->block, &q->filter_list); if (err) return err; - setup_deferrable_timer(&q->perturb_timer, sfq_perturbation, - (unsigned long)sch); - for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) { q->dep[i].next = i + SFQ_MAX_FLOWS; q->dep[i].prev = i + SFQ_MAX_FLOWS; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index d5dba972ab06..120f4f365967 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -425,12 +425,13 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt) { struct tbf_sched_data *q = qdisc_priv(sch); + qdisc_watchdog_init(&q->watchdog, sch); + q->qdisc = &noop_qdisc; + if (opt == NULL) return -EINVAL; q->t_c = ktime_get_ns(); 
- qdisc_watchdog_init(&q->watchdog, sch); - q->qdisc = &noop_qdisc; return tbf_change(sch, opt); } diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 9a647214a91e..e99518e79b52 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c @@ -70,7 +70,8 @@ static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, info = nla_data(attr); list_for_each_entry_rcu(laddr, address_list, list) { - memcpy(info, &laddr->a, addrlen); + memcpy(info, &laddr->a, sizeof(laddr->a)); + memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a)); info += addrlen; } @@ -93,7 +94,9 @@ static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb, info = nla_data(attr); list_for_each_entry(from, &asoc->peer.transport_addr_list, transports) { - memcpy(info, &from->ipaddr, addrlen); + memcpy(info, &from->ipaddr, sizeof(from->ipaddr)); + memset(info + sizeof(from->ipaddr), 0, + addrlen - sizeof(from->ipaddr)); info += addrlen; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c01af72cc603..1b00a1e09b93 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4540,8 +4540,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, info->sctpi_ictrlchunks = asoc->stats.ictrlchunks; prim = asoc->peer.primary_path; - memcpy(&info->sctpi_p_address, &prim->ipaddr, - sizeof(struct sockaddr_storage)); + memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr)); info->sctpi_p_state = prim->state; info->sctpi_p_cwnd = prim->cwnd; info->sctpi_p_srtt = prim->srtt; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 2b720fa35c4f..e18500151236 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -421,6 +421,9 @@ static void svc_data_ready(struct sock *sk) dprintk("svc: socket %p(inet %p), busy=%d\n", svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); + + /* Refer to svc_setup_socket() for details. */ + rmb(); svsk->sk_odata(sk); if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags)) svc_xprt_enqueue(&svsk->sk_xprt); @@ -437,6 +440,9 @@ static void svc_write_space(struct sock *sk) if (svsk) { dprintk("svc: socket %p(inet %p), write_space busy=%d\n", svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); + + /* Refer to svc_setup_socket() for details. */ + rmb(); svsk->sk_owspace(sk); svc_xprt_enqueue(&svsk->sk_xprt); } @@ -760,8 +766,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk) dprintk("svc: socket %p TCP (listen) state change %d\n", sk, sk->sk_state); - if (svsk) + if (svsk) { + /* Refer to svc_setup_socket() for details. */ + rmb(); svsk->sk_odata(sk); + } + /* * This callback may be called twice when a new connection * is established as a child socket inherits everything @@ -794,6 +804,8 @@ static void svc_tcp_state_change(struct sock *sk) if (!svsk) printk("svc: socket %p: no user data\n", sk); else { + /* Refer to svc_setup_socket() for details. */ + rmb(); svsk->sk_ostate(sk); if (sk->sk_state != TCP_ESTABLISHED) { set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); @@ -1381,12 +1393,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, return ERR_PTR(err); } - inet->sk_user_data = svsk; svsk->sk_sock = sock; svsk->sk_sk = inet; svsk->sk_ostate = inet->sk_state_change; svsk->sk_odata = inet->sk_data_ready; svsk->sk_owspace = inet->sk_write_space; + /* + * This barrier is necessary in order to prevent a race condition + * with svc_data_ready(), svc_listen_data_ready() and others + * when calling callbacks above.
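The svcsock hunks above and below are a textbook publish/consume barrier pairing: svc_setup_socket() must store the saved callbacks before publishing svsk through sk_user_data, and every callback that finds sk_user_data non-NULL must order its subsequent reads after that check before chaining to the saved pointers. Side by side:

    /* publisher: svc_setup_socket() */
    svsk->sk_odata = inet->sk_data_ready;  /* save originals first  */
    wmb();                                 /* order the saves ...   */
    inet->sk_user_data = svsk;             /* ... before the publish */

    /* consumer: svc_data_ready() and friends */
    svsk = (struct svc_sock *)sk->sk_user_data;
    if (svsk) {
            rmb();                         /* pairs with the wmb() above */
            svsk->sk_odata(sk);            /* saved pointer is now valid */
    }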
+ */ + wmb(); + inet->sk_user_data = svsk; /* Initialize the socket */ if (sock->type == SOCK_DGRAM) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index d49598f6002b..ac1d66d7e1fd 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -65,6 +65,8 @@ static struct tipc_bearer *bearer_get(struct net *net, int bearer_id) } static void bearer_disable(struct net *net, struct tipc_bearer *b); +static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev); /** * tipc_media_find - locates specified media object by name @@ -404,6 +406,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, /* Associate TIPC bearer with L2 bearer */ rcu_assign_pointer(b->media_ptr, dev); + b->pt.dev = dev; + b->pt.type = htons(ETH_P_TIPC); + b->pt.func = tipc_l2_rcv_msg; + dev_add_pack(&b->pt); memset(&b->bcast_addr, 0, sizeof(b->bcast_addr)); memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len); b->bcast_addr.media_id = b->media->type_id; @@ -423,6 +429,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b) struct net_device *dev; dev = (struct net_device *)rtnl_dereference(b->media_ptr); + dev_remove_pack(&b->pt); RCU_INIT_POINTER(dev->tipc_ptr, NULL); synchronize_net(); dev_put(dev); @@ -570,11 +577,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev, struct tipc_bearer *b; rcu_read_lock(); - b = rcu_dereference_rtnl(dev->tipc_ptr); + b = rcu_dereference_rtnl(dev->tipc_ptr) ?: + rcu_dereference_rtnl(orig_dev->tipc_ptr); if (likely(b && test_bit(0, &b->up) && (skb->pkt_type <= PACKET_MULTICAST))) { skb->next = NULL; - tipc_rcv(dev_net(dev), skb, b); + tipc_rcv(dev_net(b->pt.dev), skb, b); rcu_read_unlock(); return NET_RX_SUCCESS; } @@ -635,11 +643,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, return NOTIFY_OK; } -static struct packet_type tipc_packet_type __read_mostly = { - .type = htons(ETH_P_TIPC), - .func = tipc_l2_rcv_msg, -}; - static struct notifier_block notifier = { .notifier_call = tipc_l2_device_event, .priority = 0, @@ -647,19 +650,12 @@ static struct notifier_block notifier = { int tipc_bearer_setup(void) { - int err; - - err = register_netdevice_notifier(¬ifier); - if (err) - return err; - dev_add_pack(&tipc_packet_type); - return 0; + return register_netdevice_notifier(¬ifier); } void tipc_bearer_cleanup(void) { unregister_netdevice_notifier(¬ifier); - dev_remove_pack(&tipc_packet_type); } void tipc_bearer_stop(struct net *net) diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 865cb0901a20..42d6eeeb646d 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -131,6 +131,7 @@ struct tipc_media { * @name: bearer name (format = media:interface) * @media: ptr to media structure associated with bearer * @bcast_addr: media address used in broadcasting + * @pt: packet type for bearer * @rcu: rcu struct for tipc_bearer * @priority: default link priority for bearer * @window: default window size for bearer @@ -151,6 +152,7 @@ struct tipc_bearer { char name[TIPC_MAX_BEARER_NAME]; struct tipc_media *media; struct tipc_media_addr bcast_addr; + struct packet_type pt; struct rcu_head rcu; u32 priority; u32 window; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index dcd90e6fa7c3..6ef379f004ac 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -479,13 +479,14 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg, bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) { struct sk_buff *_skb = *skb; - struct tipc_msg 
*hdr = buf_msg(_skb); + struct tipc_msg *hdr; struct tipc_msg ohdr; - int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE); + int dlen; if (skb_linearize(_skb)) goto exit; hdr = buf_msg(_skb); + dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE); if (msg_dest_droppable(hdr)) goto exit; if (msg_errcode(hdr)) @@ -511,6 +512,8 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) goto exit; + /* reassign after skb header modifications */ + hdr = buf_msg(_skb); /* Now reverse the concerned fields */ msg_set_errcode(hdr, err); msg_set_non_seq(hdr, 0); diff --git a/net/tipc/node.c b/net/tipc/node.c index eb728397c810..198dbc7adbe1 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1126,8 +1126,8 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr, strncpy(linkname, tipc_link_name(link), len); err = 0; } -exit: tipc_node_read_unlock(node); +exit: tipc_node_put(node); return err; } @@ -1551,6 +1551,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) /* Check/update node state before receiving */ if (unlikely(skb)) { + if (unlikely(skb_linearize(skb))) + goto discard; tipc_node_write_lock(n); if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) { if (le->link) { diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 101e3597338f..d50edd6e0019 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2255,8 +2255,8 @@ void tipc_sk_reinit(struct net *net) do { tsk = ERR_PTR(rhashtable_walk_start(&iter)); - if (tsk) - continue; + if (IS_ERR(tsk)) + goto walk_stop; while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { spin_lock_bh(&tsk->sk.sk_lock.slock); @@ -2265,7 +2265,7 @@ void tipc_sk_reinit(struct net *net) msg_set_orignode(msg, tn->own_addr); spin_unlock_bh(&tsk->sk.sk_lock.slock); } - +walk_stop: rhashtable_walk_stop(&iter); } while (tsk == ERR_PTR(-EAGAIN)); } diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 0bf91cd3733c..be3d9e3183dc 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -52,7 +52,6 @@ struct tipc_subscriber { struct list_head subscrp_list; }; -static void tipc_subscrp_delete(struct tipc_subscription *sub); static void tipc_subscrb_put(struct tipc_subscriber *subscriber); /** @@ -197,15 +196,19 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber, { struct list_head *subscription_list = &subscriber->subscrp_list; struct tipc_subscription *sub, *temp; + u32 timeout; spin_lock_bh(&subscriber->lock); list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) { if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) continue; - tipc_nametbl_unsubscribe(sub); - list_del(&sub->subscrp_list); - tipc_subscrp_delete(sub); + timeout = htohl(sub->evt.s.timeout, sub->swap); + if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) { + tipc_nametbl_unsubscribe(sub); + list_del(&sub->subscrp_list); + tipc_subscrp_put(sub); + } if (s) break; @@ -236,18 +239,12 @@ static void tipc_subscrb_delete(struct tipc_subscriber *subscriber) tipc_subscrb_put(subscriber); } -static void tipc_subscrp_delete(struct tipc_subscription *sub) -{ - u32 timeout = htohl(sub->evt.s.timeout, sub->swap); - - if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) - tipc_subscrp_put(sub); -} - static void tipc_subscrp_cancel(struct tipc_subscr *s, struct tipc_subscriber *subscriber) { + tipc_subscrb_get(subscriber); tipc_subscrb_subscrp_delete(subscriber, s); + tipc_subscrb_put(subscriber); } static struct 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cc0d783ccbad..f06253969972 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2151,7 +2151,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
 				goto no_transform;
 			}
 
-			dst_hold(&xdst->u.dst);
 			route = xdst->route;
 		}
 	}
@@ -3209,9 +3208,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
 	struct xfrm_migrate *mp;
 
+	/* Stage 0 - sanity checks */
 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
 		goto out;
 
+	if (dir >= XFRM_POLICY_MAX) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	/* Stage 1 - find policy */
 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
 		err = -ENOENT;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a41e2ef789c0..0dab1cd79ce4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1629,6 +1629,7 @@ int
 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
 	       unsigned short family, struct net *net)
 {
+	int i;
 	int err = 0;
 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
 	if (!afinfo)
@@ -1637,6 +1638,9 @@ xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
 	spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
 	if (afinfo->tmpl_sort)
 		err = afinfo->tmpl_sort(dst, src, n);
+	else
+		for (i = 0; i < n; i++)
+			dst[i] = src[i];
 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 	rcu_read_unlock();
 	return err;
@@ -1647,6 +1651,7 @@ int
 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
 		unsigned short family)
 {
+	int i;
 	int err = 0;
 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
 	struct net *net = xs_net(*src);
@@ -1657,6 +1662,9 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
 	if (afinfo->state_sort)
 		err = afinfo->state_sort(dst, src, n);
+	else
+		for (i = 0; i < n; i++)
+			dst[i] = src[i];
 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 	rcu_read_unlock();
 	return err;
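The xfrm_migrate() hunk above rejects a user-supplied direction index before it is ever used to select a per-direction policy table (XFRM_POLICY_MAX bounds the valid range). A standalone sketch of that validate-before-index rule, with hypothetical names:

#include <errno.h>
#include <stdio.h>

#define POLICY_MAX 3	/* hypothetical stand-in for XFRM_POLICY_MAX */

static int table[POLICY_MAX];

static int lookup(unsigned int dir)
{
	if (dir >= POLICY_MAX)	/* reject out-of-range input first */
		return -EINVAL;
	return table[dir];
}

int main(void)
{
	printf("%d\n", lookup(2));	/* in range: table entry */
	printf("%d\n", lookup(7));	/* -EINVAL instead of an OOB read */
	return 0;
}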
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 490132d6dc36..2bfbd9121e3b 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -799,7 +799,7 @@ static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb
 		return -EMSGSIZE;
 
 	xuo = nla_data(attr);
-
+	memset(xuo, 0, sizeof(*xuo));
 	xuo->ifindex = xso->dev->ifindex;
 	xuo->flags = xso->flags;
 
@@ -1875,6 +1875,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
 		return -EMSGSIZE;
 
 	id = nlmsg_data(nlh);
+	memset(&id->sa_id, 0, sizeof(id->sa_id));
 	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
 	id->sa_id.spi = x->id.spi;
 	id->sa_id.family = x->props.family;
@@ -2584,6 +2585,8 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
 	ue = nlmsg_data(nlh);
 	copy_to_user_state(x, &ue->state);
 	ue->hard = (c->data.hard != 0) ? 1 : 0;
+	/* clear the padding bytes */
+	memset(&ue->hard + 1, 0, sizeof(*ue) - offsetofend(typeof(*ue), hard));
 
 	err = xfrm_mark_put(skb, &x->mark);
 	if (err)
@@ -2723,6 +2726,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
 		struct nlattr *attr;
 
 		id = nlmsg_data(nlh);
+		memset(id, 0, sizeof(*id));
 		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
 		id->spi = x->id.spi;
 		id->family = x->props.family;
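The xfrm_user.c hunks above all fix the same class of information leak: a netlink payload struct has padding bytes (and sometimes unwritten members), and whatever the kernel allocator left there would be copied to userspace unless the buffer is zeroed first. A userspace sketch of why memset() is needed even when every named member is assigned (struct wire_msg is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wire_msg {
	uint8_t  hard;	/* 1 byte; 3 padding bytes typically follow */
	uint32_t seq;
};

int main(void)
{
	struct wire_msg m;

	memset(&m, 0, sizeof(m));	/* clears the padding too */
	m.hard = 1;
	m.seq = 42;

	/* sizeof(m) usually exceeds the sum of member sizes: the gap is
	 * padding, which assignments alone never initialize. */
	printf("members=%zu sizeof=%zu padding=%zu\n",
	       sizeof(m.hard) + sizeof(m.seq), sizeof(m),
	       sizeof(m) - (sizeof(m.hard) + sizeof(m.seq)));
	return 0;
}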
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index dd8e2dde0b34..9ffd3dda3889 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -85,8 +85,8 @@ TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
 # try-run
 # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
-# Exit code chooses option. "$$TMP" is can be used as temporary file and
-# is automatically cleaned up.
+# Exit code chooses option. "$$TMP" serves as a temporary file and is
+# automatically cleaned up.
 try-run = $(shell set -e;		\
 	TMP="$(TMPOUT).$$$$.tmp";	\
 	TMPO="$(TMPOUT).$$$$.o";	\
@@ -261,7 +261,6 @@ make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
 any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
 
 # Execute command if command has changed or prerequisite(s) are updated.
-#
 if_changed = $(if $(strip $(any-prereq) $(arg-check)),                      \
 	@set -e;                                                             \
 	$(echo-cmd) $(cmd_$(1));                                             \
@@ -315,7 +314,7 @@ if_changed_rule = $(if $(strip $(any-prereq) $(arg-check) ),                 \
 	$(rule_$(1)), @:)
 
 ###
-# why - tell why a a target got build
+# why - tell why a target got built
 # enabled by make V=2
 # Output (listed in the order they are checked):
 # (1) - due to target is PHONY
diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic
index 95f7d8090152..a6c8c1780855 100644
--- a/scripts/Makefile.asm-generic
+++ b/scripts/Makefile.asm-generic
@@ -1,9 +1,9 @@
 # include/asm-generic contains a lot of files that are used
 # verbatim by several architectures.
 #
-# This Makefile reads the file arch/$(SRCARCH)/include/asm/Kbuild
+# This Makefile reads the file arch/$(SRCARCH)/include/$(src)/Kbuild
 # and for each file listed in this file with generic-y creates
-# a small wrapper file in $(obj) (arch/$(SRCARCH)/include/generated/asm)
+# a small wrapper file in $(obj) (arch/$(SRCARCH)/include/generated/$(src))
 
 kbuild-file := $(srctree)/arch/$(SRCARCH)/include/$(src)/Kbuild
 -include $(kbuild-file)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 4a9a2cec0a1b..f6152c70f7f4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -229,8 +229,8 @@ ifeq ("$(origin RECORDMCOUNT_WARN)", "command line")
 endif
 # Due to recursion, we must skip empty.o.
 # The empty.o file is created in the make process in order to determine
-# the target endianness and word size. It is made before all other C 
-# files, including recordmcount. 
+# the target endianness and word size. It is made before all other C
+# files, including recordmcount.
 sub_cmd_record_mcount =					\
 	if [ $(@) != "scripts/mod/empty.o" ]; then	\
 		$(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)";	\
@@ -245,13 +245,13 @@ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH
 	"$(LD)" "$(NM)" "$(RM)" "$(MV)" \
 	"$(if $(part-of-module),1,0)" "$(@)";
 recordmcount_source := $(srctree)/scripts/recordmcount.pl
-endif
+endif # BUILD_C_RECORDMCOUNT
 cmd_record_mcount =						\
 	if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" =	\
 	     "$(CC_FLAGS_FTRACE)" ]; then			\
 		$(sub_cmd_record_mcount)			\
 	fi;
-endif
+endif # CONFIG_FTRACE_MCOUNT_RECORD
 
 ifdef CONFIG_STACK_VALIDATION
 ifneq ($(SKIP_STACK_VALIDATION),1)
diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst
index 34614a48b717..993fb85982df 100644
--- a/scripts/Makefile.dtbinst
+++ b/scripts/Makefile.dtbinst
@@ -14,7 +14,7 @@ src := $(obj)
 PHONY := __dtbs_install
 __dtbs_install:
 
-export dtbinst-root ?= $(obj)
+export dtbinst_root ?= $(obj)
 
 include include/config/auto.conf
 include scripts/Kbuild.include
@@ -27,7 +27,7 @@ dtbinst-dirs := $(dts-dirs)
 quiet_cmd_dtb_install =	INSTALL $<
       cmd_dtb_install =	mkdir -p $(2); cp $< $(2)
 
-install-dir = $(patsubst $(dtbinst-root)%,$(INSTALL_DTBS_PATH)%,$(obj))
+install-dir = $(patsubst $(dtbinst_root)%,$(INSTALL_DTBS_PATH)%,$(obj))
 
 $(dtbinst-files): %.dtb: $(obj)/%.dtb
 	$(call cmd,dtb_install,$(install-dir))
diff --git a/scripts/basic/Makefile b/scripts/basic/Makefile
index ec10d9345bc2..0372b33febe5 100644
--- a/scripts/basic/Makefile
+++ b/scripts/basic/Makefile
@@ -1,5 +1,5 @@
 ###
-# Makefile.basic lists the most basic programs used during the build process.
+# This Makefile lists the most basic programs used during the build process.
 # The programs listed herein are what are needed to do the basic stuff,
 # such as fix file dependencies.
 # This initial step is needed to avoid files to be recompiled
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index fff818b92acb..bbf62cb1f819 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -25,7 +25,7 @@
  *
  * So we play the same trick that "mkdep" played before. We replace
  * the dependency on autoconf.h by a dependency on every config
- * option which is mentioned in any of the listed prequisites.
+ * option which is mentioned in any of the listed prerequisites.
  *
  * kconfig populates a tree in include/config/ with an empty file
  * for each config symbol and when the configuration is updated
@@ -34,7 +34,7 @@
  * the config symbols are rebuilt.
  *
 * So if the user changes his CONFIG_HIS_DRIVER option, only the objects
- * which depend on "include/linux/config/his/driver.h" will be rebuilt,
+ * which depend on "include/config/his/driver.h" will be rebuilt,
 * so most likely only his driver ;-)
 *
 * The idea above dates, by the way, back to Michael E Chastain, AFAIK.
@@ -75,7 +75,7 @@
 * and then basically copies the .<target>.d file to stdout, in the
 * process filtering out the dependency on autoconf.h and adding
 * dependencies on include/config/my/option.h for every
- * CONFIG_MY_OPTION encountered in any of the prequisites.
+ * CONFIG_MY_OPTION encountered in any of the prerequisites.
 *
 * It will also filter out all the dependencies on *.ver. We need
 * to make sure that the generated version checksum are globally up
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 4b72b530c84f..62ea8f83d4a0 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -873,7 +873,7 @@ static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct no
 	while (size--)
 		reg = (reg << 32) | fdt32_to_cpu(*(cells++));
 
-	snprintf(unit_addr, sizeof(unit_addr), "%zx", reg);
+	snprintf(unit_addr, sizeof(unit_addr), "%llx", (unsigned long long)reg);
 	if (!streq(unitname, unit_addr))
 		FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"",
 		     node->fullpath, unit_addr);
diff --git a/sound/core/control.c b/sound/core/control.c
index 3c6be1452e35..4525e127afd9 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1137,7 +1137,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
 		mutex_lock(&ue->card->user_ctl_lock);
 		change = ue->tlv_data_size != size;
 		if (!change)
-			change = memcmp(ue->tlv_data, new_data, size);
+			change = memcmp(ue->tlv_data, new_data, size) != 0;
 		kfree(ue->tlv_data);
 		ue->tlv_data = new_data;
 		ue->tlv_data_size = size;
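The control.c one-liner above matters because memcmp() returns an arbitrary negative, zero, or positive int, while this callback's contract uses negative returns for errors and positive for "value changed"; the "!= 0" folds the comparison down to 0 or 1. A hypothetical userspace demo of the hazard:

#include <stdio.h>
#include <string.h>

/* buggy: a raw memcmp() result can be negative */
static int update_buggy(const void *old, const void *cur, size_t n)
{
	int change = memcmp(old, cur, n);
	return change;		/* negative here reads as an error code */
}

/* fixed: normalize to 0 ("no change") or 1 ("changed") */
static int update_fixed(const void *old, const void *cur, size_t n)
{
	int change = memcmp(old, cur, n) != 0;
	return change;
}

int main(void)
{
	printf("buggy: %d\n", update_buggy("abc", "abd", 3)); /* negative */
	printf("fixed: %d\n", update_fixed("abc", "abd", 3)); /* 1 */
	return 0;
}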
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 22995cb3bd44..cf0433f80067 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3064,6 +3064,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
 {
 	snd_pcm_uframes_t *frames = arg;
 	snd_pcm_sframes_t result;
+	int err;
 
 	switch (cmd) {
 	case SNDRV_PCM_IOCTL_FORWARD:
@@ -3083,7 +3084,10 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
 	case SNDRV_PCM_IOCTL_START:
 		return snd_pcm_start_lock_irq(substream);
 	case SNDRV_PCM_IOCTL_DRAIN:
-		return snd_pcm_drain(substream, NULL);
+		snd_power_lock(substream->pcm->card);
+		err = snd_pcm_drain(substream, NULL);
+		snd_power_unlock(substream->pcm->card);
+		return err;
 	case SNDRV_PCM_IOCTL_DROP:
 		return snd_pcm_drop(substream);
 	case SNDRV_PCM_IOCTL_DELAY:
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
index f0e4d502d604..066b5df666f4 100644
--- a/sound/firewire/iso-resources.c
+++ b/sound/firewire/iso-resources.c
@@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update);
  */
 void fw_iso_resources_free(struct fw_iso_resources *r)
 {
-	struct fw_card *card = fw_parent_device(r->unit)->card;
+	struct fw_card *card;
 	int bandwidth, channel;
 
+	/* Not initialized. */
+	if (r->unit == NULL)
+		return;
+	card = fw_parent_device(r->unit)->card;
+
 	mutex_lock(&r->mutex);
 
 	if (r->allocated) {
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index bf779cfeef0d..59a270406353 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -128,6 +128,7 @@ static void do_registration(struct work_struct *work)
 	return;
 error:
 	snd_motu_transaction_unregister(motu);
+	snd_motu_stream_destroy_duplex(motu);
 	snd_card_free(motu->card);
 	dev_info(&motu->unit->device,
 		 "Sound card registration failed: %d\n", err);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 8c1289963c80..a81aacf684b2 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -947,6 +947,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
 	SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
+	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
 	SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 0ec7985ed306..054b613cb0d0 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -567,7 +567,7 @@ int rt5670_set_jack_detect(struct snd_soc_codec *codec,
 
 	rt5670->jack = jack;
 	rt5670->hp_gpio.gpiod_dev = codec->dev;
-	rt5670->hp_gpio.name = "headphone detect";
+	rt5670->hp_gpio.name = "headset";
 	rt5670->hp_gpio.report = SND_JACK_HEADSET |
 		SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2;
 	rt5670->hp_gpio.debounce_time = 150;
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 36e530a36c82..6f629278d982 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -5021,6 +5021,7 @@ static const struct regmap_config rt5677_regmap = {
 static const struct i2c_device_id rt5677_i2c_id[] = {
 	{ "rt5677", RT5677 },
 	{ "rt5676", RT5676 },
+	{ "RT5677CE:00", RT5677 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 7d7ab4aee42e..d72f7d58102f 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -132,7 +132,7 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
 
 	/* Parse the card name from DT */
 	ret = snd_soc_of_parse_card_name(card, "label");
-	if (ret < 0) {
+	if (ret < 0 || !card->name) {
 		char prop[128];
 
 		snprintf(prop, sizeof(prop), "%sname", prefix);
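The iso-resources.c and motu.c hunks above harden teardown paths: fw_iso_resources_free() may now run on an object whose init never completed, and the motu error path unwinds the stream before freeing the card. A generic userspace sketch of a destructor that is safe (and idempotent) on partially constructed state, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct res {
	void *unit;	/* NULL until init succeeds */
};

static void res_free(struct res *r)
{
	if (r->unit == NULL)	/* never initialized: nothing to undo */
		return;
	free(r->unit);
	r->unit = NULL;		/* makes a second call a no-op */
}

int main(void)
{
	struct res r = { .unit = NULL };

	res_free(&r);		/* safe even before init */
	r.unit = malloc(16);
	res_free(&r);		/* normal teardown */
	res_free(&r);		/* and idempotent */
	puts("done");
	return 0;
}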
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index bc2a52de06a3..f597d5582223 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -184,6 +184,13 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
+static const struct acpi_gpio_params headset_gpios = { 0, 0, false };
+
+static const struct acpi_gpio_mapping cht_rt5672_gpios[] = {
+	{ "headset-gpios", &headset_gpios, 1 },
+	{},
+};
+
 static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
 {
 	int ret;
@@ -191,6 +198,9 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
 	struct snd_soc_codec *codec = codec_dai->codec;
 	struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
 
+	if (devm_acpi_dev_add_driver_gpios(codec->dev, cht_rt5672_gpios))
+		dev_warn(runtime->dev, "Unable to add GPIO mapping table\n");
+
 	/* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
 	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24);
 	if (ret < 0) {
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6a03f9697039..5d2a63248b1d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(20);
 
-	/* Zoom R16/24 needs a tiny delay here, otherwise requests like
-	 * get/set frequency return as failed despite actually succeeding.
+	/* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
+	 * otherwise requests like get/set frequency return as failed despite
+	 * actually succeeding.
 	 */
-	if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
+	if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
+	     chip->usb_id == USB_ID(0x046d, 0x0a46) ||
+	     chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(1);
 }
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index a36c2eba64e7..4559a21a8de2 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -271,7 +271,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 	case 0x8d:
 		if (rex == 0x48 && modrm == 0x65) {
 
-			/* lea -disp(%rbp), %rsp */
+			/* lea disp(%rbp), %rsp */
 			*type = INSN_STACK;
 			op->src.type = OP_SRC_ADD;
 			op->src.reg = CFI_BP;
@@ -281,6 +281,30 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 			break;
 		}
 
+		if (rex == 0x48 && (modrm == 0xa4 || modrm == 0x64) &&
+		    sib == 0x24) {
+
+			/* lea disp(%rsp), %rsp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_ADD;
+			op->src.reg = CFI_SP;
+			op->src.offset = insn.displacement.value;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_SP;
+			break;
+		}
+
+		if (rex == 0x48 && modrm == 0x2c && sib == 0x24) {
+
+			/* lea (%rsp), %rbp */
+			*type = INSN_STACK;
+			op->src.type = OP_SRC_REG;
+			op->src.reg = CFI_SP;
+			op->dest.type = OP_DEST_REG;
+			op->dest.reg = CFI_BP;
+			break;
+		}
+
 		if (rex == 0x4c && modrm == 0x54 && sib == 0x24 &&
 		    insn.displacement.value == 8) {
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index 1c12b5855e4f..5fc7ad359e21 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -333,6 +333,10 @@ function ntb_tool_tests()
 	link_test $LOCAL_TOOL $REMOTE_TOOL
 	link_test $REMOTE_TOOL $LOCAL_TOOL
 
+	#Ensure the link is up on both sides before continuing
+	write_file Y $LOCAL_TOOL/link_event
+	write_file Y $REMOTE_TOOL/link_event
+
 	for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
 		PT=$(basename $PEER_TRANS)
 		write_file $MW_SIZE $LOCAL_TOOL/$PT
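The objtool hunk above teaches the decoder two more stack-pointer-touching LEA encodings; the (rex, modrm, sib) byte triples in its conditions pick out the exact forms. A toy lookup using the same byte values, to make that mapping explicit (the table-driven shape is illustrative, not objtool's code):

#include <stdio.h>

struct lea_form {
	unsigned char rex, modrm, sib;
	const char *meaning;
};

static const struct lea_form forms[] = {
	{ 0x48, 0x65, 0x00, "lea disp8(%rbp), %rsp" },
	{ 0x48, 0xa4, 0x24, "lea disp32(%rsp), %rsp" },	/* new in hunk */
	{ 0x48, 0x64, 0x24, "lea disp8(%rsp), %rsp" },	/* new in hunk */
	{ 0x48, 0x2c, 0x24, "lea (%rsp), %rbp" },	/* new in hunk */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(forms) / sizeof(forms[0]); i++)
		printf("rex=%#x modrm=%#x sib=%#x -> %s\n",
		       forms[i].rex, forms[i].modrm, forms[i].sib,
		       forms[i].meaning);
	return 0;
}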
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 15252d723b54..4d81f6ded88e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -322,47 +322,6 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 	return container_of(mn, struct kvm, mmu_notifier);
 }
 
-static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
-					     struct mm_struct *mm,
-					     unsigned long address)
-{
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int need_tlb_flush, idx;
-
-	/*
-	 * When ->invalidate_page runs, the linux pte has been zapped
-	 * already but the page is still allocated until
-	 * ->invalidate_page returns. So if we increase the sequence
-	 * here the kvm page fault will notice if the spte can't be
-	 * established because the page is going to be freed. If
-	 * instead the kvm page fault establishes the spte before
-	 * ->invalidate_page runs, kvm_unmap_hva will release it
-	 * before returning.
-	 *
-	 * The sequence increase only need to be seen at spin_unlock
-	 * time, and not at spin_lock time.
-	 *
-	 * Increasing the sequence after the spin_unlock would be
-	 * unsafe because the kvm page fault could then establish the
-	 * pte after kvm_unmap_hva returned, without noticing the page
-	 * is going to be freed.
-	 */
-	idx = srcu_read_lock(&kvm->srcu);
-	spin_lock(&kvm->mmu_lock);
-
-	kvm->mmu_notifier_seq++;
-	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-	/* we've to flush the tlb before the pages can be freed */
-	if (need_tlb_flush)
-		kvm_flush_remote_tlbs(kvm);
-
-	spin_unlock(&kvm->mmu_lock);
-
-	kvm_arch_mmu_notifier_invalidate_page(kvm, address);
-
-	srcu_read_unlock(&kvm->srcu, idx);
-}
-
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long address,
@@ -510,7 +469,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
-	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
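The comment deleted above is the clearest description of KVM's mmu_notifier_seq protocol: the invalidation side bumps a sequence counter under mmu_lock before the page can be freed, and the page-fault side samples the counter, does its slow work, then re-checks under the lock and retries if an invalidation ran in between. A single-threaded skeleton of that check-and-retry shape (locks and SRCU elided, all names hypothetical):

#include <stdio.h>

static unsigned long notifier_seq;

/* invalidation side: bump the sequence while the lock would be held */
static void invalidate(void)
{
	/* lock(); */
	notifier_seq++;	/* must be visible before the page is freed */
	/* flush_tlbs(); unlock(); */
}

/* fault side: retry if an invalidation ran while we were working */
static int try_establish_mapping(void)
{
	unsigned long seq = notifier_seq;	/* sample first */

	/* ... slow work: look up the page, prepare the mapping ... */
	invalidate();	/* simulate a racing invalidation */

	/* lock(); */
	if (notifier_seq != seq)
		return 0;	/* raced: caller must retry */
	/* install the mapping; unlock(); */
	return 1;
}

int main(void)
{
	printf("%s\n", try_establish_mapping() ? "installed" : "retry");
	return 0;
}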