author    David S. Miller <davem@davemloft.net>  2019-08-27 14:23:31 -0700
committer David S. Miller <davem@davemloft.net>  2019-08-27 14:23:31 -0700
commit    68aaf4459556b1f9370c259fd486aecad2257552
tree      99d92536a3263634969be6b70a96facea85a0df1
parent    d00ee466a07eb9182ad3caf6140c7ebb527b4c64
parent    9e8312f5e160ade069e131d54ab8652cf0e86e1a
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor conflict in r8169: the bug fix had two versions in net
and net-next; take the net-next hunks.
Signed-off-by: David S. Miller <davem@davemloft.net>
279 files changed, 2599 insertions, 1287 deletions
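
The conflict note above is terse, so here is a minimal sketch of how such a resolution is typically performed when pulling net into net-next. The remote URL is the one from the merge subject, but the branch state and the r8169 file name are illustrative assumptions, not details recorded in this commit:

    # On a net-next checkout, pull the net tree (URL from the merge subject).
    git pull git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

    # If r8169 conflicts, keep the net-next side of the conflicting hunks.
    # During a merge, "--ours" is the branch being merged into (net-next here);
    # the file path below is assumed, check `git status` for the real one.
    git checkout --ours -- drivers/net/ethernet/realtek/r8169_main.c
    git add drivers/net/ethernet/realtek/r8169_main.c

    # Conclude the merge, keeping the default merge commit message.
    git commit --no-edit
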
diff --git a/Documentation/PCI/index.rst b/Documentation/PCI/index.rst index f4c6121868c3..6768305e4c26 100644 --- a/Documentation/PCI/index.rst +++ b/Documentation/PCI/index.rst @@ -9,7 +9,7 @@ Linux PCI Bus Subsystem :numbered: pci - picebus-howto + pciebus-howto pci-iov-howto msi-howto acpi-info diff --git a/Documentation/PCI/picebus-howto.rst b/Documentation/PCI/pciebus-howto.rst index f882ff62c51f..f882ff62c51f 100644 --- a/Documentation/PCI/picebus-howto.rst +++ b/Documentation/PCI/pciebus-howto.rst diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 47d981a86e2f..4c1971960afa 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4090,6 +4090,13 @@ Run specified binary instead of /init from the ramdisk, used for early userspace startup. See initrd. + rdrand= [X86] + force - Override the decision by the kernel to hide the + advertisement of RDRAND support (this affects + certain AMD processors because of buggy BIOS + support, specifically around the suspend/resume + path). + rdt= [HW,X86,RDT] Turn on/off individual RDT features. List is: cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp, diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt index 09fc02b99845..a5c1db95b3ec 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt @@ -1,20 +1,30 @@ * ARC-HS Interrupt Distribution Unit - This optional 2nd level interrupt controller can be used in SMP configurations for - dynamic IRQ routing, load balancing of common/external IRQs towards core intc. + This optional 2nd level interrupt controller can be used in SMP configurations + for dynamic IRQ routing, load balancing of common/external IRQs towards core + intc. Properties: - compatible: "snps,archs-idu-intc" - interrupt-controller: This is an interrupt controller. -- #interrupt-cells: Must be <1>. - - Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N - of the particular interrupt line of IDU corresponds to the line N+24 of the - core interrupt controller. - - intc accessed via the special ARC AUX register interface, hence "reg" property - is not specified. +- #interrupt-cells: Must be <1> or <2>. + + Value of the first cell specifies the "common" IRQ from peripheral to IDU. + Number N of the particular interrupt line of IDU corresponds to the line N+24 + of the core interrupt controller. + + The (optional) second cell specifies any of the following flags: + - bits[3:0] trigger type and level flags + 1 = low-to-high edge triggered + 2 = NOT SUPPORTED (high-to-low edge triggered) + 4 = active high level-sensitive <<< DEFAULT + 8 = NOT SUPPORTED (active low level-sensitive) + When no second cell is specified, the interrupt is assumed to be level + sensitive. + + The interrupt controller is accessed via the special ARC AUX register + interface, hence "reg" property is not specified. 
Example: core_intc: core-interrupt-controller { diff --git a/MAINTAINERS b/MAINTAINERS index 986085351d79..818f2a17699a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8468,11 +8468,6 @@ S: Maintained F: fs/io_uring.c F: include/uapi/linux/io_uring.h -IP MASQUERADING -M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar> -S: Maintained -F: net/ipv4/netfilter/ipt_MASQUERADE.c - IPMI SUBSYSTEM M: Corey Minyard <minyard@acm.org> L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers) @@ -8846,14 +8841,6 @@ F: virt/kvm/* F: tools/kvm/ F: tools/testing/selftests/kvm/ -KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd) -M: Joerg Roedel <joro@8bytes.org> -L: kvm@vger.kernel.org -W: http://www.linux-kvm.org/ -S: Maintained -F: arch/x86/include/asm/svm.h -F: arch/x86/kvm/svm.c - KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64) M: Marc Zyngier <maz@kernel.org> R: James Morse <james.morse@arm.com> @@ -8896,7 +8883,7 @@ M: Christian Borntraeger <borntraeger@de.ibm.com> M: Janosch Frank <frankja@linux.ibm.com> R: David Hildenbrand <david@redhat.com> R: Cornelia Huck <cohuck@redhat.com> -L: linux-s390@vger.kernel.org +L: kvm@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git S: Supported @@ -8911,6 +8898,11 @@ F: tools/testing/selftests/kvm/*/s390x/ KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86) M: Paolo Bonzini <pbonzini@redhat.com> M: Radim Krčmář <rkrcmar@redhat.com> +R: Sean Christopherson <sean.j.christopherson@intel.com> +R: Vitaly Kuznetsov <vkuznets@redhat.com> +R: Wanpeng Li <wanpengli@tencent.com> +R: Jim Mattson <jmattson@google.com> +R: Joerg Roedel <joro@8bytes.org> L: kvm@vger.kernel.org W: http://www.linux-kvm.org T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git @@ -8918,8 +8910,12 @@ S: Supported F: arch/x86/kvm/ F: arch/x86/kvm/*/ F: arch/x86/include/uapi/asm/kvm* +F: arch/x86/include/uapi/asm/vmx.h +F: arch/x86/include/uapi/asm/svm.h F: arch/x86/include/asm/kvm* F: arch/x86/include/asm/pvclock-abi.h +F: arch/x86/include/asm/svm.h +F: arch/x86/include/asm/vmx.h F: arch/x86/kernel/kvm.c F: arch/x86/kernel/kvmclock.c @@ -11099,7 +11095,7 @@ NET_FAILOVER MODULE M: Sridhar Samudrala <sridhar.samudrala@intel.com> L: netdev@vger.kernel.org S: Supported -F: driver/net/net_failover.c +F: drivers/net/net_failover.c F: include/net/net_failover.h F: Documentation/networking/net_failover.rst @@ -14491,6 +14487,7 @@ F: drivers/net/phy/phylink.c F: drivers/net/phy/sfp* F: include/linux/phylink.h F: include/linux/sfp.h +K: phylink SGI GRU DRIVER M: Dimitri Sivanich <sivanich@sgi.com> @@ -14896,9 +14893,9 @@ F: include/linux/arm_sdei.h F: include/uapi/linux/arm_sdei.h SOFTWARE RAID (Multiple Disks) SUPPORT -M: Shaohua Li <shli@kernel.org> +M: Song Liu <song@kernel.org> L: linux-raid@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git S: Supported F: drivers/md/Makefile F: drivers/md/Kconfig @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 3 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = Bobtail Squid # *DOCUMENTATION* diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile index a83c4f5e928b..8483a86c743d 100644 --- a/arch/arc/boot/dts/Makefile +++ b/arch/arc/boot/dts/Makefile @@ -12,3 +12,6 @@ dtb-y := $(builtindtb-y).dtb # for CONFIG_OF_ALL_DTBS test dtstree := $(srctree)/$(src) dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) + +# board-specific dtc flags 
+DTC_FLAGS_hsdk += --pad 20 diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index f5ae394ebe06..41b16f21beec 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -256,7 +256,7 @@ .macro FAKE_RET_FROM_EXCPN lr r9, [status32] - bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK) + bic r9, r9, STATUS_AE_MASK or r9, r9, STATUS_IE_MASK kflag r9 .endm diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h index a0eeb9f8f0a9..d9ee43c6b7db 100644 --- a/arch/arc/include/asm/linkage.h +++ b/arch/arc/include/asm/linkage.h @@ -62,15 +62,15 @@ #else /* !__ASSEMBLY__ */ #ifdef CONFIG_ARC_HAS_ICCM -#define __arcfp_code __attribute__((__section__(".text.arcfp"))) +#define __arcfp_code __section(.text.arcfp) #else -#define __arcfp_code __attribute__((__section__(".text"))) +#define __arcfp_code __section(.text) #endif #ifdef CONFIG_ARC_HAS_DCCM -#define __arcfp_data __attribute__((__section__(".data.arcfp"))) +#define __arcfp_data __section(.data.arcfp) #else -#define __arcfp_data __attribute__((__section__(".data"))) +#define __arcfp_data __section(.data) #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h index 8ac0e2ac3e70..73746ed5b834 100644 --- a/arch/arc/include/asm/mach_desc.h +++ b/arch/arc/include/asm/mach_desc.h @@ -53,8 +53,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[]; */ #define MACHINE_START(_type, _name) \ static const struct machine_desc __mach_desc_##_type \ -__used \ -__attribute__((__section__(".arch.info.init"))) = { \ +__used __section(.arch.info.init) = { \ .name = _name, #define MACHINE_END \ diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index 18b493dfb3a8..abf9398cc333 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -202,8 +202,8 @@ static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask) __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask); } -static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl, - unsigned int distr) +static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl, + bool set_distr, unsigned int distr) { union { unsigned int word; @@ -212,8 +212,11 @@ static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl, }; } data; - data.distr = distr; - data.lvl = lvl; + data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq); + if (set_distr) + data.distr = distr; + if (set_lvl) + data.lvl = lvl; __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word); } @@ -240,6 +243,25 @@ static void idu_irq_unmask(struct irq_data *data) raw_spin_unlock_irqrestore(&mcip_lock, flags); } +static void idu_irq_ack(struct irq_data *data) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&mcip_lock, flags); + __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq); + raw_spin_unlock_irqrestore(&mcip_lock, flags); +} + +static void idu_irq_mask_ack(struct irq_data *data) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&mcip_lock, flags); + __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1); + __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq); + raw_spin_unlock_irqrestore(&mcip_lock, flags); +} + static int idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, bool force) @@ -263,13 +285,36 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, else distribution_mode = IDU_M_DISTRI_RR; - idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode); + idu_set_mode(data->hwirq, false, 0, true, 
distribution_mode); raw_spin_unlock_irqrestore(&mcip_lock, flags); return IRQ_SET_MASK_OK; } +static int idu_irq_set_type(struct irq_data *data, u32 type) +{ + unsigned long flags; + + /* + * ARCv2 IDU HW does not support inverse polarity, so these are the + * only interrupt types supported. + */ + if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH)) + return -EINVAL; + + raw_spin_lock_irqsave(&mcip_lock, flags); + + idu_set_mode(data->hwirq, true, + type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE : + IDU_M_TRIG_LEVEL, + false, 0); + + raw_spin_unlock_irqrestore(&mcip_lock, flags); + + return 0; +} + static void idu_irq_enable(struct irq_data *data) { /* @@ -289,7 +334,10 @@ static struct irq_chip idu_irq_chip = { .name = "MCIP IDU Intc", .irq_mask = idu_irq_mask, .irq_unmask = idu_irq_unmask, + .irq_ack = idu_irq_ack, + .irq_mask_ack = idu_irq_mask_ack, .irq_enable = idu_irq_enable, + .irq_set_type = idu_irq_set_type, #ifdef CONFIG_SMP .irq_set_affinity = idu_irq_set_affinity, #endif @@ -317,7 +365,7 @@ static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t } static const struct irq_domain_ops idu_irq_ops = { - .xlate = irq_domain_xlate_onecell, + .xlate = irq_domain_xlate_onetwocell, .map = idu_irq_map, }; diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index c2663fce7f6c..445e4d702f43 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -826,7 +826,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, case DW_CFA_def_cfa: state->cfa.reg = get_uleb128(&ptr.p8, end); unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg); - /*nobreak*/ + /* fall through */ case DW_CFA_def_cfa_offset: state->cfa.offs = get_uleb128(&ptr.p8, end); unw_debug("cfa_def_cfa_offset: 0x%lx ", @@ -834,7 +834,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, break; case DW_CFA_def_cfa_sf: state->cfa.reg = get_uleb128(&ptr.p8, end); - /*nobreak */ + /* fall through */ case DW_CFA_def_cfa_offset_sf: state->cfa.offs = get_sleb128(&ptr.p8, end) * state->dataAlign; diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 62c210e7ee4c..70a3fbe79fba 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -101,7 +101,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, if (is_isa_arcv2() && ioc_enable && coherent) dev->dma_coherent = true; - dev_info(dev, "use %sncoherent DMA ops\n", + dev_info(dev, "use %scoherent DMA ops\n", dev->dma_coherent ? 
"" : "non"); } diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c index 7dd2dd335cf6..0b961a2a10b8 100644 --- a/arch/arc/plat-hsdk/platform.c +++ b/arch/arc/plat-hsdk/platform.c @@ -6,11 +6,15 @@ */ #include <linux/init.h> +#include <linux/of_fdt.h> +#include <linux/libfdt.h> #include <linux/smp.h> #include <asm/arcregs.h> #include <asm/io.h> #include <asm/mach_desc.h> +int arc_hsdk_axi_dmac_coherent __section(.data) = 0; + #define ARC_CCM_UNUSED_ADDR 0x60000000 static void __init hsdk_init_per_cpu(unsigned int cpu) @@ -97,6 +101,42 @@ static void __init hsdk_enable_gpio_intc_wire(void) iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN); } +static int __init hsdk_tweak_node_coherency(const char *path, bool coherent) +{ + void *fdt = initial_boot_params; + const void *prop; + int node, ret; + bool dt_coh_set; + + node = fdt_path_offset(fdt, path); + if (node < 0) + goto tweak_fail; + + prop = fdt_getprop(fdt, node, "dma-coherent", &ret); + if (!prop && ret != -FDT_ERR_NOTFOUND) + goto tweak_fail; + + dt_coh_set = ret != -FDT_ERR_NOTFOUND; + ret = 0; + + /* need to remove "dma-coherent" property */ + if (dt_coh_set && !coherent) + ret = fdt_delprop(fdt, node, "dma-coherent"); + + /* need to set "dma-coherent" property */ + if (!dt_coh_set && coherent) + ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0); + + if (ret < 0) + goto tweak_fail; + + return 0; + +tweak_fail: + pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non"); + return -EFAULT; +} + enum hsdk_axi_masters { M_HS_CORE = 0, M_HS_RTT, @@ -162,6 +202,39 @@ enum hsdk_axi_masters { #define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180)) #define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194)) +static void __init hsdk_init_memory_bridge_axi_dmac(void) +{ + bool coherent = !!arc_hsdk_axi_dmac_coherent; + u32 axi_m_slv1, axi_m_oft1; + + /* + * Don't tweak memory bridge configuration if we failed to tweak DTB + * as we will end up in a inconsistent state. 
+ */ + if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent)) + return; + + if (coherent) { + axi_m_slv1 = 0x77999999; + axi_m_oft1 = 0x76DCBA98; + } else { + axi_m_slv1 = 0x77777777; + axi_m_oft1 = 0x76543210; + } + + writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0)); + writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0)); + writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0)); + + writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1)); + writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1)); + writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1)); + writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1)); + writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1)); +} + static void __init hsdk_init_memory_bridge(void) { u32 reg; @@ -227,24 +300,14 @@ static void __init hsdk_init_memory_bridge(void) writel(0x76543210, CREG_AXI_M_OFT1(M_GPU)); writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU)); - writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0)); - writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0)); - writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0)); - writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0)); - writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0)); - - writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1)); - writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1)); - writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1)); - writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1)); - writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1)); - writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS)); writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS)); writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS)); writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS)); writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS)); + hsdk_init_memory_bridge_axi_dmac(); + /* * PAE remapping for DMA clients does not work due to an RTL bug, so * CREG_PAE register must be programmed to all zeroes, otherwise it diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 33b00579beff..24360211534a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -7,6 +7,8 @@ config ARM select ARCH_HAS_BINFMT_FLAT select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB + select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_KEEPINITRD @@ -18,6 +20,8 @@ config ARM select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL select ARCH_HAS_STRICT_MODULE_RWX if MMU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB + select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB select ARCH_HAS_TEARDOWN_DMA_OPS if MMU select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c index 0ce56ad754ce..ea2c84214bac 100644 --- a/arch/arm/mach-rpc/riscpc.c +++ b/arch/arm/mach-rpc/riscpc.c @@ -46,6 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag) switch (tag->u.acorn.vram_pages) { case 512: vram_size += PAGE_SIZE * 256; + /* Fall through - ??? 
*/ case 256: vram_size += PAGE_SIZE * 256; default: diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index c54cd7ed90ba..c1222c0e9fd3 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -664,10 +664,6 @@ config ARM_LPAE !CPU_32v4 && !CPU_32v3 select PHYS_ADDR_T_64BIT select SWIOTLB - select ARCH_HAS_DMA_COHERENT_TO_PFN - select ARCH_HAS_DMA_MMAP_PGPROT - select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_SYNC_DMA_FOR_CPU help Say Y if you have an ARMv7 processor supporting the LPAE page table format and you would like to access memory beyond the diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h index 52cf96ea43e5..cbc7cdae1c6a 100644 --- a/arch/mips/include/asm/octeon/cvmx-sli-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h @@ -46,6 +46,7 @@ static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void) case OCTEON_CN78XX & OCTEON_FAMILY_MASK: if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) return 0x0000000000003CB0ull; + /* Else, fall through */ default: return 0x0000000000023CB0ull; } diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index a39b079e73f2..6d58c1739b42 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -2,6 +2,7 @@ #ifndef _PARISC_PGTABLE_H #define _PARISC_PGTABLE_H +#include <asm/page.h> #include <asm-generic/4level-fixup.h> #include <asm/fixmap.h> @@ -98,8 +99,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) #endif /* !__ASSEMBLY__ */ -#include <asm/page.h> - #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index e636728ab452..955eb355c2fd 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -863,7 +863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i break; case BPF_ALU64 | BPF_NEG: /* dst = -dst */ /* lcgr %dst,%dst */ - EMIT4(0xb9130000, dst_reg, dst_reg); + EMIT4(0xb9030000, dst_reg, dst_reg); break; /* * BPF_FROM_BE/LE @@ -1049,8 +1049,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i /* llgf %w1,map.max_entries(%b2) */ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, offsetof(struct bpf_array, map.max_entries)); - /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */ - EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3, + /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */ + EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0, 0xa); /* @@ -1076,8 +1076,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i * goto out; */ - /* sllg %r1,%b3,3: %r1 = index * 8 */ - EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3); + /* llgfr %r1,%b3: %r1 = (u32) index */ + EMIT4(0xb9160000, REG_1, BPF_REG_3); + /* sllg %r1,%r1,3: %r1 *= 8 */ + EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3); /* lg %r1,prog(%b2,%r1) */ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, REG_1, offsetof(struct bpf_array, ptrs)); diff --git a/arch/um/include/shared/timer-internal.h b/arch/um/include/shared/timer-internal.h index 8574338bf23b..9991ec2371e4 100644 --- a/arch/um/include/shared/timer-internal.h +++ b/arch/um/include/shared/timer-internal.h @@ -34,10 +34,13 @@ static inline void time_travel_set_time(unsigned long long ns) time_travel_time = ns; } -static inline void time_travel_set_timer(enum 
time_travel_timer_mode mode, - unsigned long long expiry) +static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode) { time_travel_timer_mode = mode; +} + +static inline void time_travel_set_timer_expiry(unsigned long long expiry) +{ time_travel_timer_expiry = expiry; } #else @@ -50,8 +53,11 @@ static inline void time_travel_set_time(unsigned long long ns) { } -static inline void time_travel_set_timer(enum time_travel_timer_mode mode, - unsigned long long expiry) +static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode) +{ +} + +static inline void time_travel_set_timer_expiry(unsigned long long expiry) { } diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 67c0d1a860e9..6bede7888fc2 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -213,7 +213,7 @@ static void time_travel_sleep(unsigned long long duration) if (time_travel_timer_mode != TT_TMR_DISABLED || time_travel_timer_expiry < next) { if (time_travel_timer_mode == TT_TMR_ONESHOT) - time_travel_set_timer(TT_TMR_DISABLED, 0); + time_travel_set_timer_mode(TT_TMR_DISABLED); /* * time_travel_time will be adjusted in the timer * IRQ handler so it works even when the signal diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 6a051b078359..234757233355 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c @@ -50,7 +50,7 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs) static int itimer_shutdown(struct clock_event_device *evt) { if (time_travel_mode != TT_MODE_OFF) - time_travel_set_timer(TT_TMR_DISABLED, 0); + time_travel_set_timer_mode(TT_TMR_DISABLED); if (time_travel_mode != TT_MODE_INFCPU) os_timer_disable(); @@ -62,9 +62,10 @@ static int itimer_set_periodic(struct clock_event_device *evt) { unsigned long long interval = NSEC_PER_SEC / HZ; - if (time_travel_mode != TT_MODE_OFF) - time_travel_set_timer(TT_TMR_PERIODIC, - time_travel_time + interval); + if (time_travel_mode != TT_MODE_OFF) { + time_travel_set_timer_mode(TT_TMR_PERIODIC); + time_travel_set_timer_expiry(time_travel_time + interval); + } if (time_travel_mode != TT_MODE_INFCPU) os_timer_set_interval(interval); @@ -77,9 +78,10 @@ static int itimer_next_event(unsigned long delta, { delta += 1; - if (time_travel_mode != TT_MODE_OFF) - time_travel_set_timer(TT_TMR_ONESHOT, - time_travel_time + delta); + if (time_travel_mode != TT_MODE_OFF) { + time_travel_set_timer_mode(TT_TMR_ONESHOT); + time_travel_set_timer_expiry(time_travel_time + delta); + } if (time_travel_mode != TT_MODE_INFCPU) return os_timer_one_shot(delta); diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 5f2d03067ae5..2faddeb0398a 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -72,6 +72,8 @@ static unsigned long find_trampoline_placement(void) /* Find the first usable memory region under bios_start. */ for (i = boot_params->e820_entries - 1; i >= 0; i--) { + unsigned long new; + entry = &boot_params->e820_table[i]; /* Skip all entries above bios_start. */ @@ -84,15 +86,20 @@ static unsigned long find_trampoline_placement(void) /* Adjust bios_start to the end of the entry if needed. */ if (bios_start > entry->addr + entry->size) - bios_start = entry->addr + entry->size; + new = entry->addr + entry->size; /* Keep bios_start page-aligned. */ - bios_start = round_down(bios_start, PAGE_SIZE); + new = round_down(new, PAGE_SIZE); /* Skip the entry if it's too small. 
*/ - if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr) + if (new - TRAMPOLINE_32BIT_SIZE < entry->addr) continue; + /* Protect against underflow. */ + if (new - TRAMPOLINE_32BIT_SIZE > bios_start) + break; + + bios_start = new; break; } diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 81b005e4c7d9..325959d19d9a 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1236,7 +1236,7 @@ void x86_pmu_enable_event(struct perf_event *event) * Add a single event to the PMU. * * The event is added to the group of enabled events - * but only if it can be scehduled with existing events. + * but only if it can be scheduled with existing events. */ static int x86_pmu_add(struct perf_event *event, int flags) { diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index f5e90a849bca..9e5f3c722c33 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -59,7 +59,6 @@ static void sanitize_boot_params(struct boot_params *boot_params) BOOT_PARAM_PRESERVE(apm_bios_info), BOOT_PARAM_PRESERVE(tboot_addr), BOOT_PARAM_PRESERVE(ist_info), - BOOT_PARAM_PRESERVE(acpi_rsdp_addr), BOOT_PARAM_PRESERVE(hd0_info), BOOT_PARAM_PRESERVE(hd1_info), BOOT_PARAM_PRESERVE(sys_desc_table), @@ -71,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params) BOOT_PARAM_PRESERVE(eddbuf_entries), BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries), BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer), + BOOT_PARAM_PRESERVE(hdr), BOOT_PARAM_PRESERVE(e820_table), BOOT_PARAM_PRESERVE(eddbuf), }; diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 0278aa66ef62..fe7c205233f1 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -11,6 +11,21 @@ * While adding a new CPUID for a new microarchitecture, add a new * group to keep logically sorted out in chronological order. Within * that group keep the CPUID for the variants sorted by model number. + * + * The defined symbol names have the following form: + * INTEL_FAM6{OPTFAMILY}_{MICROARCH}{OPTDIFF} + * where: + * OPTFAMILY Describes the family of CPUs that this belongs to. Default + * is assumed to be "_CORE" (and should be omitted). Other values + * currently in use are _ATOM and _XEON_PHI + * MICROARCH Is the code name for the micro-architecture for this core. + * N.B. Not the platform name. + * OPTDIFF If needed, a short string to differentiate by market segment. + * Exact strings here will vary over time. _DESKTOP, _MOBILE, and + * _X (short for Xeon server) should be used when they are + * appropriate. + * + * The #define line may optionally include a comment including platform names. 
*/ #define INTEL_FAM6_CORE_YONAH 0x0E diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 6b4fc2788078..271d837d69a8 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -381,6 +381,7 @@ #define MSR_AMD64_PATCH_LEVEL 0x0000008b #define MSR_AMD64_TSC_RATIO 0xc0000104 #define MSR_AMD64_NB_CFG 0xc001001f +#define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_PATCH_LOADER 0xc0010020 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 #define MSR_AMD64_OSVW_STATUS 0xc0010141 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 109f974f9835..80bc209c0708 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -192,7 +192,7 @@ " lfence;\n" \ " jmp 902b;\n" \ " .align 16\n" \ - "903: addl $4, %%esp;\n" \ + "903: lea 4(%%esp), %%esp;\n" \ " pushl %[thunk_target];\n" \ " ret;\n" \ " .align 16\n" \ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index f5291362da1a..aa5495d0f478 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -722,7 +722,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2; static __initdata unsigned long lapic_cal_j1, lapic_cal_j2; /* - * Temporary interrupt handler. + * Temporary interrupt handler and polled calibration function. */ static void __init lapic_cal_handler(struct clock_event_device *dev) { @@ -851,7 +851,8 @@ bool __init apic_needs_pit(void) static int __init calibrate_APIC_clock(void) { struct clock_event_device *levt = this_cpu_ptr(&lapic_events); - void (*real_handler)(struct clock_event_device *dev); + u64 tsc_perj = 0, tsc_start = 0; + unsigned long jif_start; unsigned long deltaj; long delta, deltatsc; int pm_referenced = 0; @@ -878,28 +879,64 @@ static int __init calibrate_APIC_clock(void) apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" "calibrating APIC timer ...\n"); + /* + * There are platforms w/o global clockevent devices. Instead of + * making the calibration conditional on that, use a polling based + * approach everywhere. + */ local_irq_disable(); - /* Replace the global interrupt handler */ - real_handler = global_clock_event->event_handler; - global_clock_event->event_handler = lapic_cal_handler; - /* * Setup the APIC counter to maximum. 
There is no way the lapic * can underflow in the 100ms detection time frame */ __setup_APIC_LVTT(0xffffffff, 0, 0); - /* Let the interrupts run */ + /* + * Methods to terminate the calibration loop: + * 1) Global clockevent if available (jiffies) + * 2) TSC if available and frequency is known + */ + jif_start = READ_ONCE(jiffies); + + if (tsc_khz) { + tsc_start = rdtsc(); + tsc_perj = div_u64((u64)tsc_khz * 1000, HZ); + } + + /* + * Enable interrupts so the tick can fire, if a global + * clockevent device is available + */ local_irq_enable(); - while (lapic_cal_loops <= LAPIC_CAL_LOOPS) - cpu_relax(); + while (lapic_cal_loops <= LAPIC_CAL_LOOPS) { + /* Wait for a tick to elapse */ + while (1) { + if (tsc_khz) { + u64 tsc_now = rdtsc(); + if ((tsc_now - tsc_start) >= tsc_perj) { + tsc_start += tsc_perj; + break; + } + } else { + unsigned long jif_now = READ_ONCE(jiffies); - local_irq_disable(); + if (time_after(jif_now, jif_start)) { + jif_start = jif_now; + break; + } + } + cpu_relax(); + } - /* Restore the real event handler */ - global_clock_event->event_handler = real_handler; + /* Invoke the calibration routine */ + local_irq_disable(); + lapic_cal_handler(NULL); + local_irq_enable(); + } + + local_irq_disable(); /* Build delta t1-t2 as apic timer counts down */ delta = lapic_cal_t1 - lapic_cal_t2; @@ -943,10 +980,11 @@ static int __init calibrate_APIC_clock(void) levt->features &= ~CLOCK_EVT_FEAT_DUMMY; /* - * PM timer calibration failed or not turned on - * so lets try APIC timer based calibration + * PM timer calibration failed or not turned on so lets try APIC + * timer based calibration, if a global clockevent device is + * available. */ - if (!pm_referenced) { + if (!pm_referenced && global_clock_event) { apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); /* diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 8d4e50428b68..68c363c341bf 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -804,6 +804,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c) msr_set_bit(MSR_AMD64_DE_CFG, 31); } +static bool rdrand_force; + +static int __init rdrand_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "force")) + rdrand_force = true; + else + return -EINVAL; + + return 0; +} +early_param("rdrand", rdrand_cmdline); + +static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c) +{ + /* + * Saving of the MSR used to hide the RDRAND support during + * suspend/resume is done by arch/x86/power/cpu.c, which is + * dependent on CONFIG_PM_SLEEP. + */ + if (!IS_ENABLED(CONFIG_PM_SLEEP)) + return; + + /* + * The nordrand option can clear X86_FEATURE_RDRAND, so check for + * RDRAND support using the CPUID function directly. + */ + if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force) + return; + + msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62); + + /* + * Verify that the CPUID change has occurred in case the kernel is + * running virtualized and the hypervisor doesn't support the MSR. + */ + if (cpuid_ecx(1) & BIT(30)) { + pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n"); + return; + } + + clear_cpu_cap(c, X86_FEATURE_RDRAND); + pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n"); +} + +static void init_amd_jg(struct cpuinfo_x86 *c) +{ + /* + * Some BIOS implementations do not restore proper RDRAND support + * across suspend and resume. 
Check on whether to hide the RDRAND + * instruction support via CPUID. + */ + clear_rdrand_cpuid_bit(c); +} + static void init_amd_bd(struct cpuinfo_x86 *c) { u64 value; @@ -818,6 +876,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c) wrmsrl_safe(MSR_F15H_IC_CFG, value); } } + + /* + * Some BIOS implementations do not restore proper RDRAND support + * across suspend and resume. Check on whether to hide the RDRAND + * instruction support via CPUID. + */ + clear_rdrand_cpuid_bit(c); } static void init_amd_zn(struct cpuinfo_x86 *c) @@ -860,6 +925,7 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x10: init_amd_gh(c); break; case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; + case 0x16: init_amd_jg(c); break; case 0x17: init_amd_zn(c); break; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 685d17c11461..e904ff06a83d 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -216,6 +216,9 @@ static void recalculate_apic_map(struct kvm *kvm) if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id]) new->phys_map[xapic_id] = apic; + if (!kvm_apic_sw_enabled(apic)) + continue; + ldr = kvm_lapic_get_reg(apic, APIC_LDR); if (apic_x2apic_mode(apic)) { @@ -258,6 +261,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) static_key_slow_dec_deferred(&apic_sw_disabled); else static_key_slow_inc(&apic_sw_disabled.key); + + recalculate_apic_map(apic->vcpu->kvm); } } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 24843cf49579..218b277bfda3 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, struct kvm_page_track_notifier_node *node) { - struct kvm_mmu_page *sp; - LIST_HEAD(invalid_list); - unsigned long i; - bool flush; - gfn_t gfn; - - spin_lock(&kvm->mmu_lock); - - if (list_empty(&kvm->arch.active_mmu_pages)) - goto out_unlock; - - flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false); - - for (i = 0; i < slot->npages; i++) { - gfn = slot->base_gfn + i; - - for_each_valid_sp(kvm, sp, gfn) { - if (sp->gfn != gfn) - continue; - - kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); - } - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { - kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); - flush = false; - cond_resched_lock(&kvm->mmu_lock); - } - } - kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); - -out_unlock: - spin_unlock(&kvm->mmu_lock); + kvm_mmu_zap_all(kvm); } void kvm_mmu_init_vm(struct kvm *kvm) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d685491fce4d..e3d3b2128f2b 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1714,7 +1714,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) if (!entry) return -EINVAL; - new_entry = READ_ONCE(*entry); new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) | AVIC_PHYSICAL_ID_ENTRY_VALID_MASK); diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 24b079e94bc2..c9ef6a7a4a1a 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -12,6 +12,7 @@ #include <linux/smp.h> #include <linux/perf_event.h> #include <linux/tboot.h> +#include <linux/dmi.h> #include <asm/pgtable.h> #include <asm/proto.h> @@ -23,7 +24,7 @@ #include <asm/debugreg.h> #include <asm/cpu.h> #include <asm/mmu_context.h> -#include <linux/dmi.h> +#include <asm/cpu_device_id.h> #ifdef CONFIG_X86_32 __visible unsigned long saved_context_ebx; @@ -397,15 +398,14 @@ static int 
__init bsp_pm_check_init(void) core_initcall(bsp_pm_check_init); -static int msr_init_context(const u32 *msr_id, const int total_num) +static int msr_build_context(const u32 *msr_id, const int num) { - int i = 0; + struct saved_msrs *saved_msrs = &saved_context.saved_msrs; struct saved_msr *msr_array; + int total_num; + int i, j; - if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) { - pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n"); - return -EINVAL; - } + total_num = saved_msrs->num + num; msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL); if (!msr_array) { @@ -413,19 +413,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num) return -ENOMEM; } - for (i = 0; i < total_num; i++) { - msr_array[i].info.msr_no = msr_id[i]; + if (saved_msrs->array) { + /* + * Multiple callbacks can invoke this function, so copy any + * MSR save requests from previous invocations. + */ + memcpy(msr_array, saved_msrs->array, + sizeof(struct saved_msr) * saved_msrs->num); + + kfree(saved_msrs->array); + } + + for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) { + msr_array[i].info.msr_no = msr_id[j]; msr_array[i].valid = false; msr_array[i].info.reg.q = 0; } - saved_context.saved_msrs.num = total_num; - saved_context.saved_msrs.array = msr_array; + saved_msrs->num = total_num; + saved_msrs->array = msr_array; return 0; } /* - * The following section is a quirk framework for problematic BIOSen: + * The following sections are a quirk framework for problematic BIOSen: * Sometimes MSRs are modified by the BIOSen after suspended to * RAM, this might cause unexpected behavior after wakeup. * Thus we save/restore these specified MSRs across suspend/resume @@ -440,7 +451,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d) u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL }; pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident); - return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); + return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); } static const struct dmi_system_id msr_save_dmi_table[] = { @@ -455,9 +466,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = { {} }; +static int msr_save_cpuid_features(const struct x86_cpu_id *c) +{ + u32 cpuid_msr_id[] = { + MSR_AMD64_CPUID_FN_1, + }; + + pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n", + c->family); + + return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id)); +} + +static const struct x86_cpu_id msr_save_cpu_table[] = { + { + .vendor = X86_VENDOR_AMD, + .family = 0x15, + .model = X86_MODEL_ANY, + .feature = X86_FEATURE_ANY, + .driver_data = (kernel_ulong_t)msr_save_cpuid_features, + }, + { + .vendor = X86_VENDOR_AMD, + .family = 0x16, + .model = X86_MODEL_ANY, + .feature = X86_FEATURE_ANY, + .driver_data = (kernel_ulong_t)msr_save_cpuid_features, + }, + {} +}; + +typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *); +static int pm_cpu_check(const struct x86_cpu_id *c) +{ + const struct x86_cpu_id *m; + int ret = 0; + + m = x86_match_cpu(msr_save_cpu_table); + if (m) { + pm_cpu_match_t fn; + + fn = (pm_cpu_match_t)m->driver_data; + ret = fn(m); + } + + return ret; +} + static int pm_check_save_msr(void) { dmi_check_system(msr_save_dmi_table); + pm_cpu_check(msr_save_cpu_table); + return 0; } diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 2e2efa577437..8c37294f1d1e 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -200,7 
+200,7 @@ config ATM_NICSTAR_USE_SUNI make the card work). config ATM_NICSTAR_USE_IDT77105 - bool "Use IDT77015 PHY driver (25Mbps)" + bool "Use IDT77105 PHY driver (25Mbps)" depends on ATM_NICSTAR help Support for the PHYsical layer chip in ForeRunner LE25 cards. In diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index 9c0bb771751d..a2fcde582e2a 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -74,7 +74,7 @@ struct ht16k33_priv { struct ht16k33_fbdev fbdev; }; -static struct fb_fix_screeninfo ht16k33_fb_fix = { +static const struct fb_fix_screeninfo ht16k33_fb_fix = { .id = DRIVER_NAME, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO10, @@ -85,7 +85,7 @@ static struct fb_fix_screeninfo ht16k33_fb_fix = { .accel = FB_ACCEL_NONE, }; -static struct fb_var_screeninfo ht16k33_fb_var = { +static const struct fb_var_screeninfo ht16k33_fb_var = { .xres = HT16K33_MATRIX_LED_MAX_ROWS, .yres = HT16K33_MATRIX_LED_MAX_COLS, .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS, diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 9bd4ddd12b25..5b248763a672 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -322,6 +322,8 @@ static int drbd_thread_setup(void *arg) thi->name[0], resource->name); + allow_kernel_signal(DRBD_SIGKILL); + allow_kernel_signal(SIGXCPU); restart: retval = thi->function(thi); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index c0990703ce54..1c46babeb093 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name) return NULL; } +#ifdef CONFIG_OF +static int of_parse_clkspec(const struct device_node *np, int index, + const char *name, struct of_phandle_args *out_args); +static struct clk_hw * +of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec); +#else +static inline int of_parse_clkspec(const struct device_node *np, int index, + const char *name, + struct of_phandle_args *out_args) +{ + return -ENOENT; +} +static inline struct clk_hw * +of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) +{ + return ERR_PTR(-ENOENT); +} +#endif + /** * clk_core_get - Find the clk_core parent of a clk * @core: clk to find parent of @@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name) * }; * * Returns: -ENOENT when the provider can't be found or the clk doesn't - * exist in the provider. -EINVAL when the name can't be found. NULL when the - * provider knows about the clk but it isn't provided on this system. + * exist in the provider or the name can't be found in the DT node or + * in a clkdev lookup. NULL when the provider knows about the clk but it + * isn't provided on this system. * A valid clk_core pointer when the clk can be found in the provider. */ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) @@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) struct device *dev = core->dev; const char *dev_id = dev ? 
dev_name(dev) : NULL; struct device_node *np = core->of_node; + struct of_phandle_args clkspec; - if (np && (name || index >= 0)) - hw = of_clk_get_hw(np, index, name); - - /* - * If the DT search above couldn't find the provider or the provider - * didn't know about this clk, fallback to looking up via clkdev based - * clk_lookups - */ - if (PTR_ERR(hw) == -ENOENT && name) + if (np && (name || index >= 0) && + !of_parse_clkspec(np, index, name, &clkspec)) { + hw = of_clk_get_hw_from_clkspec(&clkspec); + of_node_put(clkspec.np); + } else if (name) { + /* + * If the DT search above couldn't find the provider fallback to + * looking up via clkdev based clk_lookups. + */ hw = clk_find_hw(dev_id, name); + } if (IS_ERR(hw)) return ERR_CAST(hw); @@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index) parent = ERR_PTR(-EPROBE_DEFER); } else { parent = clk_core_get(core, index); - if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT) + if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name) parent = clk_core_lookup(entry->name); } @@ -1632,7 +1654,8 @@ static int clk_fetch_parent_index(struct clk_core *core, break; /* Fallback to comparing globally unique names */ - if (!strcmp(parent->name, core->parents[i].name)) + if (core->parents[i].name && + !strcmp(parent->name, core->parents[i].name)) break; } diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c index 91db7894125d..65c82d922b05 100644 --- a/drivers/clk/samsung/clk-exynos5-subcmu.c +++ b/drivers/clk/samsung/clk-exynos5-subcmu.c @@ -14,7 +14,7 @@ #include "clk-exynos5-subcmu.h" static struct samsung_clk_provider *ctx; -static const struct exynos5_subcmu_info *cmu; +static const struct exynos5_subcmu_info **cmu; static int nr_cmus; static void exynos5_subcmu_clk_save(void __iomem *base, @@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx, * when OF-core populates all device-tree nodes. 
*/ void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus, - const struct exynos5_subcmu_info *_cmu) + const struct exynos5_subcmu_info **_cmu) { ctx = _ctx; cmu = _cmu; nr_cmus = _nr_cmus; for (; _nr_cmus--; _cmu++) { - exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks, - _cmu->nr_gate_clks); - exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs, - _cmu->nr_suspend_regs); + exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks, + (*_cmu)->nr_gate_clks); + exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs, + (*_cmu)->nr_suspend_regs); } } @@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev) if (of_property_read_string(np, "label", &name) < 0) continue; for (i = 0; i < nr_cmus; i++) - if (strcmp(cmu[i].pd_name, name) == 0) + if (strcmp(cmu[i]->pd_name, name) == 0) exynos5_clk_register_subcmu(&pdev->dev, - &cmu[i], np); + cmu[i], np); } return 0; } diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h index 755ee8aaa3de..9ae5356f25aa 100644 --- a/drivers/clk/samsung/clk-exynos5-subcmu.h +++ b/drivers/clk/samsung/clk-exynos5-subcmu.h @@ -21,6 +21,6 @@ struct exynos5_subcmu_info { }; void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus, - const struct exynos5_subcmu_info *cmu); + const struct exynos5_subcmu_info **cmu); #endif diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index f2b896881768..931c70a4da19 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = { .pd_name = "DISP1", }; +static const struct exynos5_subcmu_info *exynos5250_subcmus[] = { + &exynos5250_disp_subcmu, +}; + static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = { /* sorted in descending order */ /* PLL_36XX_RATE(rate, m, p, s, k) */ @@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np) samsung_clk_sleep_init(reg_base, exynos5250_clk_regs, ARRAY_SIZE(exynos5250_clk_regs)); - exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu); + exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus), + exynos5250_subcmus); samsung_clk_of_add_provider(np, ctx); diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 01bca5a498b2..7670cc596c74 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -534,8 +534,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = { GATE_BUS_TOP, 24, 0, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), - GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll", - SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), }; static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { @@ -577,8 +575,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = { static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = { GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), + /* Maudio Block */ GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0", + GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0", + GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0), }; static const struct samsung_mux_clock exynos5x_mux_clks[] 
__initconst = { @@ -890,9 +893,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = { /* GSCL Block */ DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2), - /* MSCL Block */ - DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2), - /* PSGEN */ DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1), DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1), @@ -1017,12 +1017,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1", GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0), - /* Maudio Block */ - GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0", - GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0", - GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0), - /* FSYS Block */ GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0), GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0), @@ -1162,17 +1156,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", GATE_IP_GSCL1, 17, 0, 0), - /* MSCL Block */ - GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0), - GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0), - GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0), - GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk", - GATE_IP_MSCL, 8, 0, 0), - GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk", - GATE_IP_MSCL, 9, 0, 0), - GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk", - GATE_IP_MSCL, 10, 0, 0), - /* ISP */ GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp", GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0), @@ -1281,32 +1264,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = { { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */ }; -static const struct exynos5_subcmu_info exynos5x_subcmus[] = { - { - .div_clks = exynos5x_disp_div_clks, - .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks), - .gate_clks = exynos5x_disp_gate_clks, - .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks), - .suspend_regs = exynos5x_disp_suspend_regs, - .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs), - .pd_name = "DISP", - }, { - .div_clks = exynos5x_gsc_div_clks, - .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks), - .gate_clks = exynos5x_gsc_gate_clks, - .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks), - .suspend_regs = exynos5x_gsc_suspend_regs, - .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs), - .pd_name = "GSC", - }, { - .div_clks = exynos5x_mfc_div_clks, - .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks), - .gate_clks = exynos5x_mfc_gate_clks, - .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks), - .suspend_regs = exynos5x_mfc_suspend_regs, - .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs), - .pd_name = "MFC", - }, +static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = { + /* MSCL Block */ + GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0), + GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0), + GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0), + GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk", + GATE_IP_MSCL, 8, 0, 0), + GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk", + GATE_IP_MSCL, 9, 0, 0), + GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk", + GATE_IP_MSCL, 10, 0, 0), +}; + +static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = { + DIV(0, 
"dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2), +}; + +static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = { + { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */ + { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */ + { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */ +}; + +static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = { + GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll", + SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0", + GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0", + GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0), +}; + +static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = { + { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */ +}; + +static const struct exynos5_subcmu_info exynos5x_disp_subcmu = { + .div_clks = exynos5x_disp_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks), + .gate_clks = exynos5x_disp_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks), + .suspend_regs = exynos5x_disp_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs), + .pd_name = "DISP", +}; + +static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = { + .div_clks = exynos5x_gsc_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks), + .gate_clks = exynos5x_gsc_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks), + .suspend_regs = exynos5x_gsc_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs), + .pd_name = "GSC", +}; + +static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = { + .div_clks = exynos5x_mfc_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks), + .gate_clks = exynos5x_mfc_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks), + .suspend_regs = exynos5x_mfc_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs), + .pd_name = "MFC", +}; + +static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = { + .div_clks = exynos5x_mscl_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks), + .gate_clks = exynos5x_mscl_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks), + .suspend_regs = exynos5x_mscl_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs), + .pd_name = "MSC", +}; + +static const struct exynos5_subcmu_info exynos5800_mau_subcmu = { + .gate_clks = exynos5800_mau_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks), + .suspend_regs = exynos5800_mau_suspend_regs, + .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs), + .pd_name = "MAU", +}; + +static const struct exynos5_subcmu_info *exynos5x_subcmus[] = { + &exynos5x_disp_subcmu, + &exynos5x_gsc_subcmu, + &exynos5x_mfc_subcmu, + &exynos5x_mscl_subcmu, +}; + +static const struct exynos5_subcmu_info *exynos5800_subcmus[] = { + &exynos5x_disp_subcmu, + &exynos5x_gsc_subcmu, + &exynos5x_mfc_subcmu, + &exynos5x_mscl_subcmu, + &exynos5800_mau_subcmu, }; static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = { @@ -1539,11 +1593,17 @@ static void __init exynos5x_clk_init(struct device_node *np, samsung_clk_extended_sleep_init(reg_base, exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs), exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc)); - if (soc == EXYNOS5800) + + if (soc == EXYNOS5800) { samsung_clk_sleep_init(reg_base, exynos5800_clk_regs, ARRAY_SIZE(exynos5800_clk_regs)); - exynos5_subcmus_init(ctx, 
ARRAY_SIZE(exynos5x_subcmus), - exynos5x_subcmus); + + exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus), + exynos5800_subcmus); + } else { + exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus), + exynos5x_subcmus); + } samsung_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c index 5c50e723ecae..1a191eeeebba 100644 --- a/drivers/clk/socfpga/clk-periph-s10.c +++ b/drivers/clk/socfpga/clk-periph-s10.c @@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk, if (socfpgaclk->fixed_div) { div = socfpgaclk->fixed_div; } else { - if (!socfpgaclk->bypass_reg) + if (socfpgaclk->hw.reg) div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); } diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 23e0a356f167..ad72b3f42ffa 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; + /* Fall through */ case FSL_DMA_IP_83XX: chan->toggle_ext_start = fsl_chan_toggle_ext_start; chan->set_src_loop_size = fsl_chan_set_src_loop_size; diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 567fb98c0892..9762dd6d99fa 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -363,7 +363,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, /* Special handling for SPI GPIOs if used */ if (IS_ERR(desc)) desc = of_find_spi_gpio(dev, con_id, &of_flags); - if (IS_ERR(desc)) { + if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) { /* This quirk looks up flags and all */ desc = of_find_spi_cs_gpio(dev, con_id, idx, flags); if (!IS_ERR(desc)) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index f497003f119c..cca749010cd0 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1091,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN; + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN | + GPIOLINE_FLAG_IS_OUT); if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE; + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE | + GPIOLINE_FLAG_IS_OUT); if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) return -EFAULT; @@ -1371,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (status) goto err_remove_from_list; - status = gpiochip_irqchip_init_valid_mask(chip); - if (status) - goto err_remove_from_list; - status = gpiochip_alloc_valid_mask(chip); if (status) - goto err_remove_irqchip_mask; - - status = gpiochip_add_irqchip(chip, lock_key, request_key); - if (status) - goto err_free_gpiochip_mask; + goto err_remove_from_list; status = of_gpiochip_add(chip); if (status) - goto err_remove_chip; + goto err_free_gpiochip_mask; status = gpiochip_init_valid_mask(chip); if (status) @@ -1411,6 +1405,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, machine_gpiochip_add(chip); + status = gpiochip_irqchip_init_valid_mask(chip); + if (status) + goto err_remove_acpi_chip; + + status = gpiochip_add_irqchip(chip, lock_key, request_key); + if (status) + goto err_remove_irqchip_mask; + /* * By first adding the chardev, and then adding the device, * we 
get a device node entry in sysfs under @@ -1422,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (gpiolib_initialized) { status = gpiochip_setup_dev(gdev); if (status) - goto err_remove_acpi_chip; + goto err_remove_irqchip; } return 0; +err_remove_irqchip: + gpiochip_irqchip_remove(chip); +err_remove_irqchip_mask: + gpiochip_irqchip_free_valid_mask(chip); err_remove_acpi_chip: acpi_gpiochip_remove(chip); err_remove_of_chip: gpiochip_free_hogs(chip); of_gpiochip_remove(chip); -err_remove_chip: - gpiochip_irqchip_remove(chip); err_free_gpiochip_mask: gpiochip_free_valid_mask(chip); -err_remove_irqchip_mask: - gpiochip_irqchip_free_valid_mask(chip); err_remove_from_list: spin_lock_irqsave(&gpio_lock, flags); list_del(&gdev->list); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4e4094f842e7..8b26c970a3cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1143,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_sem); + if (p->post_deps) + return -EINVAL; + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), GFP_KERNEL); p->num_post_deps = 0; @@ -1166,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, - struct amdgpu_cs_chunk - *chunk) + struct amdgpu_cs_chunk *chunk) { struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; unsigned num_deps; @@ -1177,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p num_deps = chunk->length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_syncobj); + if (p->post_deps) + return -EINVAL; + p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), GFP_KERNEL); p->num_post_deps = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 04b8ac4432c7..4ea67f94cae2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -604,6 +604,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) (adev->gfx.rlc_feature_version < 1) || !adev->gfx.rlc.is_rlc_v2_1) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + if (adev->pm.pp_feature & PP_GFXOFF_MASK) + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_RLC_SMU_HS; break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 662612f89c70..9922bce3fd89 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -552,7 +552,6 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | - AMD_PG_SUPPORT_MMHUB | AMD_PG_SUPPORT_ATHUB; adev->external_rev_id = adev->rev_id + 0x1; break; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 23265414d448..04fbf05d7176 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -992,11 +992,6 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; } - - if (adev->pm.pp_feature & PP_GFXOFF_MASK) - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_RLC_SMU_HS; break; default: /* FIXME: not supported yet */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4a29f72334d0..45be7a2132bb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3131,13 +3131,25 @@ static enum dc_color_depth convert_color_depth_from_display_info(const struct drm_connector *connector, const struct drm_connector_state *state) { - uint32_t bpc = connector->display_info.bpc; + uint8_t bpc = (uint8_t)connector->display_info.bpc; + + /* Assume 8 bpc by default if no bpc is specified. */ + bpc = bpc ? bpc : 8; if (!state) state = connector->state; if (state) { - bpc = state->max_bpc; + /* + * Cap display bpc based on the user requested value. + * + * The value for state->max_bpc may not correctly updated + * depending on when the connector gets added to the state + * or if this was called outside of atomic check, so it + * can't be used directly. + */ + bpc = min(bpc, state->max_requested_bpc); + /* Round down to the nearest even number. */ bpc = bpc - (bpc & 1); } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index a0f52c86d8c7..a78b2e295895 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -907,8 +907,6 @@ struct smu_funcs ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) #define smu_set_azalia_d3_pme(smu) \ ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) -#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ - ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0) #define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ ((smu)->funcs->get_max_sustainable_clocks_by_dc ? 
(smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) #define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 5fde5cf65b42..53097961bf2b 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -326,7 +326,8 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; const struct smc_firmware_header_v1_0 *hdr; int ret, index; - uint32_t size; + uint32_t size = 0; + uint16_t atom_table_size; uint8_t frev, crev; void *table; uint16_t version_major, version_minor; @@ -354,10 +355,11 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu) index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, powerplayinfo); - ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev, + ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev, (uint8_t **)&table); if (ret) return ret; + size = atom_table_size; } if (!smu->smu_table.power_play_table) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index 5a118984de33..a0eabc134dd6 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -8,6 +8,7 @@ #include <linux/iommu.h> #include <linux/of_device.h> #include <linux/of_graph.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #ifdef CONFIG_DEBUG_FS @@ -143,6 +144,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev) return mdev->irq; } + /* Get the optional framebuffer memory resource */ + ret = of_reserved_mem_device_init(dev); + if (ret && ret != -ENODEV) + return ret; + ret = 0; + for_each_available_child_of_node(np, child) { if (of_node_cmp(child->name, "pipeline") == 0) { ret = komeda_parse_pipe_dt(mdev, child); @@ -289,6 +296,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev) mdev->n_pipelines = 0; + of_reserved_mem_device_release(dev); + if (funcs && funcs->cleanup) funcs->cleanup(mdev); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c index cd4d9f53ddef..c9a1edb9a000 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c @@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table, return NULL; } +u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier) +{ + u32 bpp; + + switch (info->format) { + case DRM_FORMAT_YUV420_8BIT: + bpp = 12; + break; + case DRM_FORMAT_YUV420_10BIT: + bpp = 15; + break; + default: + bpp = info->cpp[0] * 8; + break; + } + + return bpp; +} + /* Two assumptions * 1. RGB always has YTR * 2. 
Tiled RGB always has SC diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h index 3631910d33b5..32273cf18f7c 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h @@ -97,6 +97,9 @@ const struct komeda_format_caps * komeda_get_format_caps(struct komeda_format_caps_table *table, u32 fourcc, u64 modifier); +u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, + u64 modifier); + u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, u32 layer_type, u32 *n_fmts); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c index 3b0a70ed6aa0..1b01a625f40e 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file, struct drm_framebuffer *fb = &kfb->base; const struct drm_format_info *info = fb->format; struct drm_gem_object *obj; - u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks; + u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp; u64 min_size; obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); @@ -88,8 +88,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file, kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE, alignment_header); + bpp = komeda_get_afbc_format_bpp(info, fb->modifier); kfb->afbc_size = kfb->offset_payload + n_blocks * - ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS, + ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8, AFBC_SUPERBLK_ALIGNMENT); min_size = kfb->afbc_size + fb->offsets[0]; if (min_size > obj->size) { diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 419a8b0e5de8..d50e75f0b2bd 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -15,6 +15,7 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_irq.h> #include <drm/drm_vblank.h> +#include <drm/drm_probe_helper.h> #include "komeda_dev.h" #include "komeda_framebuffer.h" @@ -315,6 +316,8 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) drm->irq_enabled = true; + drm_kms_helper_poll_init(drm); + err = drm_dev_register(drm, 0); if (err) goto cleanup_mode_config; @@ -322,6 +325,7 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) return kms; cleanup_mode_config: + drm_kms_helper_poll_fini(drm); drm->irq_enabled = false; drm_mode_config_cleanup(drm); komeda_kms_cleanup_private_objs(kms); @@ -338,6 +342,7 @@ void komeda_kms_detach(struct komeda_kms_dev *kms) drm->irq_enabled = false; mdev->funcs->disable_irq(mdev); drm_dev_unregister(drm); + drm_kms_helper_poll_fini(drm); component_unbind_all(mdev->dev, drm); komeda_kms_cleanup_private_objs(kms); drm_mode_config_cleanup(drm); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 7925a176f900..1cb1fa74cfbc 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1465,8 +1465,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) else if (intel_crtc_has_dp_encoder(pipe_config)) dotclock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); - else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) 
- dotclock = pipe_config->port_clock * 2 / 3; + else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) + dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp; else dotclock = pipe_config->port_clock; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1d58f7ec5d84..f11979879e7b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -829,7 +829,7 @@ struct intel_crtc_state { /* * Frequence the dpll for the port should run at. Differs from the - * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also + * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also * already multiplied by pixel_multiplier. */ int port_clock; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 95fdbd0fbcac..945bc20f1d33 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -17,6 +17,7 @@ #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/pm_runtime.h> +#include <linux/dma-mapping.h> #include "mtk_drm_crtc.h" #include "mtk_drm_ddp.h" @@ -213,6 +214,7 @@ static int mtk_drm_kms_init(struct drm_device *drm) struct mtk_drm_private *private = drm->dev_private; struct platform_device *pdev; struct device_node *np; + struct device *dma_dev; int ret; if (!iommu_present(&platform_bus_type)) @@ -275,7 +277,29 @@ static int mtk_drm_kms_init(struct drm_device *drm) goto err_component_unbind; } - private->dma_dev = &pdev->dev; + dma_dev = &pdev->dev; + private->dma_dev = dma_dev; + + /* + * Configure the DMA segment size to make sure we get contiguous IOVA + * when importing PRIME buffers. + */ + if (!dma_dev->dma_parms) { + private->dma_parms_allocated = true; + dma_dev->dma_parms = + devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms), + GFP_KERNEL); + } + if (!dma_dev->dma_parms) { + ret = -ENOMEM; + goto err_component_unbind; + } + + ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32)); + if (ret) { + dev_err(dma_dev, "Failed to set DMA segment size\n"); + goto err_unset_dma_parms; + } /* * We don't use the drm_irq_install() helpers provided by the DRM @@ -285,13 +309,16 @@ static int mtk_drm_kms_init(struct drm_device *drm) drm->irq_enabled = true; ret = drm_vblank_init(drm, MAX_CRTC); if (ret < 0) - goto err_component_unbind; + goto err_unset_dma_parms; drm_kms_helper_poll_init(drm); drm_mode_config_reset(drm); return 0; +err_unset_dma_parms: + if (private->dma_parms_allocated) + dma_dev->dma_parms = NULL; err_component_unbind: component_unbind_all(drm->dev, drm); err_config_cleanup: @@ -302,9 +329,14 @@ err_config_cleanup: static void mtk_drm_kms_deinit(struct drm_device *drm) { + struct mtk_drm_private *private = drm->dev_private; + drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); + if (private->dma_parms_allocated) + private->dma_dev->dma_parms = NULL; + component_unbind_all(drm->dev, drm); drm_mode_config_cleanup(drm); } @@ -320,6 +352,18 @@ static const struct file_operations mtk_drm_fops = { .compat_ioctl = drm_compat_ioctl, }; +/* + * We need to override this because the device used to import the memory is + * not dev->dev, as drm_gem_prime_import() expects. 
+ */ +struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct mtk_drm_private *private = dev->dev_private; + + return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev); +} + static struct drm_driver mtk_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, @@ -331,7 +375,7 @@ static struct drm_driver mtk_drm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = drm_gem_prime_import, + .gem_prime_import = mtk_drm_gem_prime_import, .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, .gem_prime_mmap = mtk_drm_gem_mmap_buf, @@ -524,12 +568,15 @@ static int mtk_drm_probe(struct platform_device *pdev) comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); if (!comp) { ret = -ENOMEM; + of_node_put(node); goto err_node; } ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); - if (ret) + if (ret) { + of_node_put(node); goto err_node; + } private->ddp_comp[comp_id] = comp; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index 598ff3e70446..e03fea12ff59 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -51,6 +51,8 @@ struct mtk_drm_private { } commit; struct drm_atomic_state *suspend_state; + + bool dma_parms_allocated; }; extern struct platform_driver mtk_ddp_driver; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index b4e7404fe660..a11637b0f6cc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c @@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) u8 *ptr = msg->buf; while (remaining) { - u8 cnt = (remaining > 16) ? 
16 : remaining; - u8 cmd; + u8 cnt, retries, cmd; if (msg->flags & I2C_M_RD) cmd = 1; @@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) if (mcnt || remaining > 16) cmd |= 4; /* MOT */ - ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt); - if (ret < 0) { - nvkm_i2c_aux_release(aux); - return ret; + for (retries = 0, cnt = 0; + retries < 32 && !cnt; + retries++) { + cnt = min_t(u8, remaining, 16); + ret = aux->func->xfer(aux, true, cmd, + msg->addr, ptr, &cnt); + if (ret < 0) + goto out; + } + if (!cnt) { + AUX_TRACE(aux, "no data after 32 retries"); + ret = -EIO; + goto out; } ptr += cnt; @@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) msg++; } + ret = num; +out: nvkm_i2c_aux_release(aux); - return num; + return ret; } static u32 diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 288c59dae56a..1bad0a2cc5c6 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -669,7 +669,7 @@ static int pdev_probe(struct platform_device *pdev) if (omapdss_is_initialized() == false) return -EPROBE_DEFER; - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "Failed to set the DMA mask\n"); return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 1c62578590f4..082d02c84024 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -673,10 +673,8 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds) /* Locate the companion LVDS encoder for dual-link operation, if any. */ companion = of_parse_phandle(dev->of_node, "renesas,companion", 0); - if (!companion) { - dev_err(dev, "Companion LVDS encoder not found\n"); - return -ENXIO; - } + if (!companion) + return 0; /* * Sanity check: the companion encoder must have the same compatible diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 64c43ee6bd92..df0cc8f46d7b 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -314,6 +314,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon, /* R and B components are only 5 bits deep */ val |= SUN4I_TCON0_FRM_CTL_MODE_R; val |= SUN4I_TCON0_FRM_CTL_MODE_B; + /* Fall through */ case MEDIA_BUS_FMT_RGB666_1X18: case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: /* Fall through: enable dithering */ diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index a1fc8b520985..b889ad3e86e1 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -993,6 +993,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host, ret = sun6i_dsi_dcs_read(dsi, msg); break; } + /* Else, fall through */ default: ret = -EINVAL; diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 2310c96ccf4a..db1b55df0d13 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -1153,8 +1153,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d) INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); - cp2112_gpio_direction_input(gc, d->hwirq); - if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); @@ -1204,6 +1202,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev, return 
PTR_ERR(dev->desc[pin]); } + ret = cp2112_gpio_direction_input(&dev->gc, pin); + if (ret < 0) { + dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n"); + goto err_desc; + } + ret = gpiochip_lock_as_irq(&dev->gc, pin); if (ret) { dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n"); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 21268c9fa71a..0179f7ed77e5 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -3749,30 +3749,8 @@ static const struct hid_device_id hidpp_devices[] = { { L27MHZ_DEVICE(HID_ANY_ID) }, - { /* Logitech G203/Prodigy Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) }, - { /* Logitech G302 Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) }, - { /* Logitech G303 Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) }, - { /* Logitech G400 Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) }, { /* Logitech G403 Wireless Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) }, - { /* Logitech G403 Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) }, - { /* Logitech G403 Hero Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) }, - { /* Logitech G502 Proteus Core Gaming Mouse */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) }, - { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) }, - { /* Logitech G502 Hero Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) }, - { /* Logitech G700 Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) }, - { /* Logitech G700s Gaming Mouse over USB */ - HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) }, { /* Logitech G703 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) }, { /* Logitech G703 Hero Gaming Mouse over USB */ diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 1065692f90e2..5792a104000a 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -24,6 +24,7 @@ #define ICL_MOBILE_DEVICE_ID 0x34FC #define SPT_H_DEVICE_ID 0xA135 #define CML_LP_DEVICE_ID 0x02FC +#define EHL_Ax_DEVICE_ID 0x4BB3 #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index aa80b4d3b740..279567baca3d 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 7a8ddc999a8e..1713235d28cb 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom) y >>= 1; distance >>= 1; } + if (features->type == INTUOSHT2) + distance = features->distance_max - distance; input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_abs(input, ABS_DISTANCE, distance); @@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len) input_report_key(input, BTN_BASE2, (data[11] & 0x02)); if (data[12] 
& 0x80) - input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f)); + input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1); else input_report_abs(input, ABS_WHEEL, 0); @@ -1290,7 +1292,8 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) } if (wacom->tool[0]) { input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); - if (wacom->features.type == INTUOSP2_BT) { + if (wacom->features.type == INTUOSP2_BT || + wacom->features.type == INTUOSP2S_BT) { input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max); } else { diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 5f9505a087f6..23f358cb7f49 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -26,7 +26,7 @@ static unsigned long virt_to_hvpfn(void *addr) { - unsigned long paddr; + phys_addr_t paddr; if (is_vmalloc_addr(addr)) paddr = page_to_phys(vmalloc_to_page(addr)) + diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 362e70e9d145..fb16a622e8ab 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -146,8 +146,6 @@ struct hv_context { */ u64 guestid; - void *tsc_page; - struct hv_per_cpu_context __percpu *cpu_context; /* diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 19f1730a4f24..a68d0ccf67a4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -4724,10 +4724,14 @@ static int __init cma_init(void) if (ret) goto err; - cma_configfs_init(); + ret = cma_configfs_init(); + if (ret) + goto err_ib; return 0; +err_ib: + ib_unregister_client(&cma_client); err: unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client); diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index b79890739a2c..af8c85d18e62 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -149,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter, struct auto_mode_param *param = &counter->mode.param; bool match = true; - if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) + if (!rdma_is_visible_in_pid_ns(&qp->res)) return false; - /* Ensure that counter belong to right PID */ - if (!rdma_is_kernel_res(&counter->res) && - !rdma_is_kernel_res(&qp->res) && - (task_pid_vnr(counter->res.task) != current->pid)) + /* Ensure that counter belongs to the right PID */ + if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task)) return false; if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE) @@ -424,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num) return qp; err: - rdma_restrack_put(&qp->res); + rdma_restrack_put(res); return NULL; } diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 87d40d1ecdde..020c26976558 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device) for (i = 0; i < RDMA_RESTRACK_MAX; i++) { if (!names[i]) continue; - curr = rdma_restrack_count(device, i, - task_active_pid_ns(current)); + curr = rdma_restrack_count(device, i); ret = fill_res_info_entry(msg, names[i], curr); if (ret) goto err; diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index bddff426ee0f..a07665f7ef8c 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device 
*dev) * rdma_restrack_count() - the current usage of specific object * @dev: IB device * @type: actual type of object to operate - * @ns: PID namespace */ -int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type, - struct pid_namespace *ns) +int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type) { struct rdma_restrack_root *rt = &dev->res[type]; struct rdma_restrack_entry *e; @@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type, xa_lock(&rt->xa); xas_for_each(&xas, e, U32_MAX) { - if (ns == &init_pid_ns || - (!rdma_is_kernel_res(e) && - ns == task_active_pid_ns(e->task))) - cnt++; + if (!rdma_is_visible_in_pid_ns(e)) + continue; + cnt++; } xa_unlock(&rt->xa); return cnt; @@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res) */ if (rdma_is_kernel_res(res)) return task_active_pid_ns(current) == &init_pid_ns; - return task_active_pid_ns(current) == task_active_pid_ns(res->task); + + /* PID 0 means that resource is not found in current namespace */ + return task_pid_vnr(res->task); } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 08da840ed7ee..56553668256f 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release); int ib_umem_page_count(struct ib_umem *umem) { - int i; - int n; + int i, n = 0; struct scatterlist *sg; - if (umem->is_odp) - return ib_umem_num_pages(umem); - - n = 0; for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) n += sg_dma_len(sg) >> PAGE_SHIFT; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 48b04d2f175f..60c8f76aab33 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, spin_unlock_irqrestore(&cmdq->lock, flags); return -EBUSY; } + + size = req->cmd_size; + /* change the cmd_size to the number of 16byte cmdq unit. 
+ * req->cmd_size is modified here + */ + bnxt_qplib_set_cmd_slots(req); + memset(resp, 0, sizeof(*resp)); crsqe->resp = (struct creq_qp_event *)resp; crsqe->resp->cookie = req->cookie; @@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; preq = (u8 *)req; - size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; do { /* Locate the next cmdq slot */ sw_prod = HWQ_CMP(cmdq->prod, cmdq); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 2138533bb642..dfeadc192e17 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -55,9 +55,7 @@ do { \ memset(&(req), 0, sizeof((req))); \ (req).opcode = CMDQ_BASE_OPCODE_##CMD; \ - (req).cmd_size = (sizeof((req)) + \ - BNXT_QPLIB_CMDQE_UNITS - 1) / \ - BNXT_QPLIB_CMDQE_UNITS; \ + (req).cmd_size = sizeof((req)); \ (req).flags = cpu_to_le16(cmd_flags); \ } while (0) @@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth) BNXT_QPLIB_CMDQE_UNITS); } +/* Set the cmd_size to a factor of CMDQE unit */ +static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req) +{ + req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) / + BNXT_QPLIB_CMDQE_UNITS; +} + #define MAX_CMDQ_IDX(depth) ((depth) - 1) static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth) diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index 93613e5def9b..986c12153e62 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, if (!data) return -ENOMEM; copy = min(len, datalen - 1); - if (copy_from_user(data, buf, copy)) - return -EFAULT; + if (copy_from_user(data, buf, copy)) { + ret = -EFAULT; + goto free_data; + } ret = debugfs_file_get(file->f_path.dentry); if (unlikely(ret)) - return ret; + goto free_data; ptr = data; token = ptr; for (ptr = data; *ptr; ptr = end + 1, token = ptr) { @@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf, ret = len; debugfs_file_put(file->f_path.dentry); +free_data: kfree(data); return ret; } @@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf, return -ENOMEM; ret = debugfs_file_get(file->f_path.dentry); if (unlikely(ret)) - return ret; + goto free_data; bit = find_first_bit(fault->opcodes, bitsize); while (bit < bitsize) { zero = find_next_zero_bit(fault->opcodes, bitsize, bit); @@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf, data[size - 1] = '\n'; data[size] = '\0'; ret = simple_read_from_buffer(buf, len, pos, data, size); +free_data: kfree(data); return ret; } diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 996fc298207e..6141f4edc6bf 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp) hfi1_kern_clear_hw_flow(priv->rcd, qp); } -static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, - struct hfi1_packet *packet, u8 rcv_type, - u8 opcode) +static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type) { struct rvt_qp *qp = packet->qp; - struct hfi1_qp_priv *qpriv = qp->priv; - u32 ipsn; - struct ib_other_headers *ohdr = packet->ohdr; - struct rvt_ack_entry *e; - struct 
tid_rdma_request *req; - struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); - u32 i; if (rcv_type >= RHF_RCV_TYPE_IB) goto done; @@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, if (rcv_type == RHF_RCV_TYPE_EAGER) { hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); hfi1_schedule_send(qp); - goto done_unlock; - } - - /* - * For TID READ response, error out QP after freeing the tid - * resources. - */ - if (opcode == TID_OP(READ_RESP)) { - ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn)); - if (cmp_psn(ipsn, qp->s_last_psn) > 0 && - cmp_psn(ipsn, qp->s_psn) < 0) { - hfi1_kern_read_tid_flow_free(qp); - spin_unlock(&qp->s_lock); - rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); - goto done; - } - goto done_unlock; - } - - /* - * Error out the qp for TID RDMA WRITE - */ - hfi1_kern_clear_hw_flow(qpriv->rcd, qp); - for (i = 0; i < rvt_max_atomic(rdi); i++) { - e = &qp->s_ack_queue[i]; - if (e->opcode == TID_OP(WRITE_REQ)) { - req = ack_to_tid_req(e); - hfi1_kern_exp_rcv_clear_all(req); - } } - spin_unlock(&qp->s_lock); - rvt_rc_error(qp, IB_WC_LOC_LEN_ERR); - goto done; -done_unlock: + /* Since no payload is delivered, just drop the packet */ spin_unlock(&qp->s_lock); done: return true; @@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, u32 fpsn; lockdep_assert_held(&qp->r_lock); + spin_lock(&qp->s_lock); /* If the psn is out of valid range, drop the packet */ if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || cmp_psn(ibpsn, qp->s_psn) > 0) - return ret; + goto s_unlock; - spin_lock(&qp->s_lock); /* * Note that NAKs implicitly ACK outstanding SEND and RDMA write * requests and implicitly NAK RDMA read and atomic requests issued @@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, wqe = do_rc_completion(qp, wqe, ibp); if (qp->s_acked == qp->s_tail) - break; + goto s_unlock; } + if (qp->s_acked == qp->s_tail) + goto s_unlock; + /* Handle the eflags for the request */ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) goto s_unlock; @@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, if (lnh == HFI1_LRH_GRH) goto r_unlock; - if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode)) + if (tid_rdma_tid_err(packet, rcv_type)) goto r_unlock; } @@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd, */ spin_lock(&qp->s_lock); qpriv = qp->priv; + if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID || + qpriv->r_tid_tail == qpriv->r_tid_head) + goto unlock; e = &qp->s_ack_queue[qpriv->r_tid_tail]; + if (e->opcode != TID_OP(WRITE_REQ)) + goto unlock; req = ack_to_tid_req(e); + if (req->comp_seg == req->cur_seg) + goto unlock; flow = &req->flows[req->clear_tail]; trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn); trace_hfi1_rsp_handle_kdeth_eflags(qp, psn); @@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) struct rvt_swqe *wqe; struct tid_rdma_request *req; struct tid_rdma_flow *flow; - u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn; + u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn; unsigned long flags; u16 fidx; @@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) ack_kpsn--; } + if (unlikely(qp->s_acked == qp->s_tail)) + goto ack_op_err; + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) @@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet) trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); /* Drop stale ACK/NAK */ - if (cmp_psn(psn, 
full_flow_psn(flow, flow->flow_state.spsn)) < 0
+ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
+ cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
goto ack_op_err;
while (cmp_psn(ack_kpsn,
@@ -4712,7 +4685,12 @@ done:
switch ((aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
+ if (!req->flows)
+ break;
flow = &req->flows[req->acked_tail];
+ flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+ if (cmp_psn(psn, flpsn) > 0)
+ break;
trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 68c951491a08..57079110af9b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1677,8 +1677,6 @@ tx_err:
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
}
- kfree(tun_qp->tx_ring);
- tun_qp->tx_ring = NULL;
i = MLX4_NUM_TUNNEL_BUFS;
err:
while (i > 0) {
@@ -1687,6 +1685,8 @@ err:
rx_buf_size, DMA_FROM_DEVICE);
kfree(tun_qp->ring[i].addr);
}
+ kfree(tun_qp->tx_ring);
+ tun_qp->tx_ring = NULL;
kfree(tun_qp->ring);
tun_qp->ring = NULL;
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e12a4404096b..0569bcab02d4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- if (MLX5_CAP_GEN(mdev, pg))
+ if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
props->odp_caps = dev->odp_caps;
}
@@ -6139,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
+ mlx5_ib_internal_fill_odp_caps(dev);
+
err = mlx5_ib_init_multiport_master(dev);
if (err)
return err;
@@ -6563,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
- mlx5_ib_internal_fill_odp_caps(dev);
-
return mlx5_ib_odp_init_one(dev);
}
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index fe1a76d8531c..a40e0abf2338 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
int entry;
if (umem->is_odp) {
- unsigned int page_shift = to_ib_umem_odp(umem)->page_shift;
+ struct ib_umem_odp *odp = to_ib_umem_odp(umem);
+ unsigned int page_shift = odp->page_shift;
- *ncont = ib_umem_page_count(umem);
+ *ncont = ib_umem_odp_num_pages(odp);
*count = *ncont << (page_shift - PAGE_SHIFT);
*shift = page_shift;
if (order)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f6a53455bf8b..9ae587b74b12 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1475,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
bool dyn_bfreg);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+
+static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
+ bool do_modify_atomic)
+{
+ if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ return false;
+
+ if (do_modify_atomic &&
+ MLX5_CAP_GEN(dev->mdev, atomic) &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ return false;
+
+ return true;
+}
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b74fad08412f..3401f5f6792e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1293,9 +1293,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (err < 0)
return ERR_PTR(err);
- use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
- (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
- !MLX5_CAP_GEN(dev->mdev, atomic));
+ use_umr = mlx5_ib_can_use_umr(dev, true);
if (order <= mr_cache_max_order(dev) && use_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1448,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
goto err;
}
- if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
+ if (!mlx5_ib_can_use_umr(dev, true) ||
+ (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
/*
 * UMR can't be used - MKey needs to be replaced.
 */
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 1d257d1b3b0d..0a59912a4cef 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
memset(caps, 0, sizeof(*caps));
- if (!MLX5_CAP_GEN(dev->mdev, pg))
+ if (!MLX5_CAP_GEN(dev->mdev, pg) ||
+ !mlx5_ib_can_use_umr(dev, true))
return;
caps->general_caps = IB_ODP_SUPPORT;
@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
MLX5_CAP_GEN(dev->mdev, null_mkey) &&
- MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+ !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
return;
@@ -1622,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret = 0;
- if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
- ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+ return ret;
+
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1633,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
}
}
- if (!MLX5_CAP_GEN(dev->mdev, pg))
- return ret;
-
ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
return ret;
@@ -1643,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
{
- if (!MLX5_CAP_GEN(dev->mdev, pg))
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
return;
mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 379328b2598f..72869ff4a334 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4162,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
MLX5_IB_UMR_OCTOWORD;
}
-static __be64 frwr_mkey_mask(void)
+static __be64 frwr_mkey_mask(bool atomic)
{
u64 result;
@@ -4175,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
MLX5_MKEY_MASK_LW |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
MLX5_MKEY_MASK_SMALL_FENCE |
MLX5_MKEY_MASK_FREE;
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
return cpu_to_be64(result);
}
@@ -4204,7 +4206,7 @@ static __be64 sig_mkey_mask(void)
}
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct 
mlx5_ib_mr *mr, u8 flags) + struct mlx5_ib_mr *mr, u8 flags, bool atomic) { int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; @@ -4212,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, umr->flags = flags; umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); - umr->mkey_mask = frwr_mkey_mask(); + umr->mkey_mask = frwr_mkey_mask(atomic); } static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) @@ -4811,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp, { struct mlx5_ib_mr *mr = to_mmr(wr->mr); struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); + struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device); int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; + bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC; u8 flags = 0; + if (!mlx5_ib_can_use_umr(dev, atomic)) { + mlx5_ib_warn(to_mdev(qp->ibqp.device), + "Fast update of %s for MR is disabled\n", + (MLX5_CAP_GEN(dev->mdev, + umr_modify_entity_size_disabled)) ? + "entity size" : + "atomic access"); + return -EINVAL; + } + if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { mlx5_ib_warn(to_mdev(qp->ibqp.device), "Invalid IB_SEND_INLINE send flag\n"); @@ -4826,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp, if (umr_inline) flags |= MLX5_UMR_INLINE; - set_reg_umr_seg(*seg, mr, flags); + set_reg_umr_seg(*seg, mr, flags, atomic); *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; handle_post_send_edge(&qp->sq, seg, *size, cur_edge); diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 77b1aabf6ff3..dba4535494ab 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -138,9 +138,9 @@ struct siw_umem { }; struct siw_pble { - u64 addr; /* Address of assigned user buffer */ - u64 size; /* Size of this entry */ - u64 pbl_off; /* Total offset from start of PBL */ + dma_addr_t addr; /* Address of assigned buffer */ + unsigned int size; /* Size of this entry */ + unsigned long pbl_off; /* Total offset from start of PBL */ }; struct siw_pbl { @@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len) "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__) #define siw_dbg_cep(cep, fmt, ...) \ - ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \ + ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \ cep, __func__, ##__VA_ARGS__) void siw_cq_flush(struct siw_cq *cq); diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 9ce8a1b925d2..1db5ad3d9580 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -355,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason, getname_local(cep->sock, &event.local_addr); getname_peer(cep->sock, &event.remote_addr); } - siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n", - cep->qp ? qp_id(cep->qp) : -1, id, reason, status); + siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n", + cep->qp ? 
qp_id(cep->qp) : UINT_MAX, reason, status); return id->event_handler(id, &event); } @@ -947,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep) siw_cep_get(new_cep); new_s->sk->sk_user_data = new_cep; - siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s); - if (siw_tcp_nagle == false) { int val = 1; @@ -1011,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w) cep = work->cep; siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n", - cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state); + cep->qp ? qp_id(cep->qp) : UINT_MAX, + work->type, cep->state); siw_cep_set_inuse(cep); @@ -1145,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w) } if (release_cep) { siw_dbg_cep(cep, - "release: timer=%s, QP[%u], id 0x%p\n", + "release: timer=%s, QP[%u]\n", cep->mpa_timer ? "y" : "n", - cep->qp ? qp_id(cep->qp) : -1, cep->cm_id); + cep->qp ? qp_id(cep->qp) : UINT_MAX); siw_cancel_mpatimer(cep); @@ -1211,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type) else delay = MPAREP_TIMEOUT; } - siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n", - cep->qp ? qp_id(cep->qp) : -1, type, work, delay); + siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n", + cep->qp ? qp_id(cep->qp) : -1, type, delay); queue_delayed_work(siw_cm_wq, &work->work, delay); @@ -1376,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params) } if (v4) siw_dbg_qp(qp, - "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n", - id, pd_len, + "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n", + pd_len, &((struct sockaddr_in *)(laddr))->sin_addr, ntohs(((struct sockaddr_in *)(laddr))->sin_port), &((struct sockaddr_in *)(raddr))->sin_addr, ntohs(((struct sockaddr_in *)(raddr))->sin_port)); else siw_dbg_qp(qp, - "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n", - id, pd_len, + "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n", + pd_len, &((struct sockaddr_in6 *)(laddr))->sin6_addr, ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port), &((struct sockaddr_in6 *)(raddr))->sin6_addr, @@ -1508,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params) if (rv >= 0) { rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT); if (!rv) { - siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id, - qp_id(qp)); + siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp)); siw_cep_set_free(cep); return 0; } } error: - siw_dbg_qp(qp, "failed: %d\n", rv); + siw_dbg(id->device, "failed: %d\n", rv); if (cep) { siw_socket_disassoc(s); @@ -1540,7 +1538,8 @@ error: } else if (s) { sock_release(s); } - siw_qp_put(qp); + if (qp) + siw_qp_put(qp); return rv; } @@ -1580,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) siw_cancel_mpatimer(cep); if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { - siw_dbg_cep(cep, "id 0x%p: out of state\n", id); + siw_dbg_cep(cep, "out of state\n"); siw_cep_set_free(cep); siw_cep_put(cep); @@ -1601,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) up_write(&qp->state_lock); goto error; } - siw_dbg_cep(cep, "id 0x%p\n", id); + siw_dbg_cep(cep, "[QP %d]\n", params->qpn); if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) { siw_dbg_cep(cep, "peer allows GSO on TX\n"); @@ -1611,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) params->ird > sdev->attrs.max_ird) { siw_dbg_cep( cep, - "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n", - id, qp_id(qp), params->ord, sdev->attrs.max_ord, + "[QP %u]: ord %d (max %d), ird 
%d (max %d)\n", + qp_id(qp), params->ord, sdev->attrs.max_ord, params->ird, sdev->attrs.max_ird); rv = -EINVAL; up_write(&qp->state_lock); @@ -1624,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) if (params->private_data_len > max_priv_data) { siw_dbg_cep( cep, - "id 0x%p, [QP %u]: private data length: %d (max %d)\n", - id, qp_id(qp), params->private_data_len, max_priv_data); + "[QP %u]: private data length: %d (max %d)\n", + qp_id(qp), params->private_data_len, max_priv_data); rv = -EINVAL; up_write(&qp->state_lock); goto error; @@ -1679,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) qp_attrs.flags = SIW_MPA_CRC; qp_attrs.state = SIW_QP_STATE_RTS; - siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp)); + siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp)); /* Associate QP with CEP */ siw_cep_get(cep); @@ -1700,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params) if (rv) goto error; - siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n", - id, qp_id(qp), params->private_data_len); + siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n", + qp_id(qp), params->private_data_len); rv = siw_send_mpareqrep(cep, params->private_data, params->private_data_len); @@ -1759,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len) siw_cancel_mpatimer(cep); if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { - siw_dbg_cep(cep, "id 0x%p: out of state\n", id); + siw_dbg_cep(cep, "out of state\n"); siw_cep_set_free(cep); siw_cep_put(cep); /* put last reference */ return -ECONNRESET; } - siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state, + siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state, pd_len); if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) { @@ -1804,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog, rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val, sizeof(s_val)); if (rv) { - siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv); + siw_dbg(id->device, "setsockopt error: %d\n", rv); goto error; } rv = s->ops->bind(s, laddr, addr_family == AF_INET ? 
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); if (rv) { - siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv); + siw_dbg(id->device, "socket bind error: %d\n", rv); goto error; } cep = siw_cep_alloc(sdev); @@ -1824,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog, rv = siw_cm_alloc_work(cep, backlog); if (rv) { siw_dbg(id->device, - "id 0x%p: alloc_work error %d, backlog %d\n", id, + "alloc_work error %d, backlog %d\n", rv, backlog); goto error; } rv = s->ops->listen(s, backlog); if (rv) { - siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv); + siw_dbg(id->device, "listen error %d\n", rv); goto error; } cep->cm_id = id; @@ -1914,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id) list_del(p); - siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id, - cep->state); + siw_dbg_cep(cep, "drop cep, state %d\n", cep->state); siw_cep_set_inuse(cep); @@ -1952,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) struct net_device *dev = to_siw_dev(id->device)->netdev; int rv = 0, listeners = 0; - siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog); + siw_dbg(id->device, "backlog %d\n", backlog); /* * For each attached address of the interface, create a @@ -1968,8 +1966,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) s_raddr = (struct sockaddr_in *)&id->remote_addr; siw_dbg(id->device, - "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n", - id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port), + "laddr %pI4:%d, raddr %pI4:%d\n", + &s_laddr.sin_addr, ntohs(s_laddr.sin_port), &s_raddr->sin_addr, ntohs(s_raddr->sin_port)); rtnl_lock(); @@ -1994,8 +1992,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) *s_raddr = &to_sockaddr_in6(id->remote_addr); siw_dbg(id->device, - "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n", - id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), + "laddr %pI6:%d, raddr %pI6:%d\n", + &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port)); read_lock_bh(&in6_dev->lock); @@ -2028,17 +2026,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog) else if (!rv) rv = -EINVAL; - siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK"); + siw_dbg(id->device, "%s\n", rv ? 
"FAIL" : "OK"); return rv; } int siw_destroy_listen(struct iw_cm_id *id) { - siw_dbg(id->device, "id 0x%p\n", id); - if (!id->provider_data) { - siw_dbg(id->device, "id 0x%p: no cep(s)\n", id); + siw_dbg(id->device, "no cep(s)\n"); return 0; } siw_drop_listeners(id); diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c index e381ae9b7d62..d8db3bee9da7 100644 --- a/drivers/infiniband/sw/siw/siw_cq.c +++ b/drivers/infiniband/sw/siw/siw_cq.c @@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) wc->wc_flags = IB_WC_WITH_INVALIDATE; } wc->qp = cqe->base_qp; - siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n", + siw_dbg_cq(cq, + "idx %u, type %d, flags %2x, id 0x%pK\n", cq->cq_get % cq->num_cqe, cqe->opcode, - cqe->flags, (void *)cqe->id); + cqe->flags, (void *)(uintptr_t)cqe->id); } WRITE_ONCE(cqe->flags, 0); cq->cq_get++; diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c index 67171c82b0c4..87a56039f0ef 100644 --- a/drivers/infiniband/sw/siw/siw_mem.c +++ b/drivers/infiniband/sw/siw/siw_mem.c @@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr, */ if (addr < mem->va || addr + len > mem->va + mem->len) { siw_dbg_pd(pd, "MEM interval len %d\n", len); - siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n", - (unsigned long long)addr, - (unsigned long long)(addr + len)); - siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n", - (unsigned long long)mem->va, - (unsigned long long)(mem->va + mem->len), + siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n", + (void *)(uintptr_t)addr, + (void *)(uintptr_t)(addr + len)); + siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n", + (void *)(uintptr_t)mem->va, + (void *)(uintptr_t)(mem->va + mem->len), mem->stag); return -E_BASE_BOUNDS; @@ -330,7 +330,7 @@ out: * Optionally, provides remaining len within current element, and * current PBL index for later resume at same element. */ -u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) +dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) { int i = idx ? 
*idx : 0; diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h index f43daf280891..db138c8423da 100644 --- a/drivers/infiniband/sw/siw/siw_mem.h +++ b/drivers/infiniband/sw/siw/siw_mem.h @@ -9,7 +9,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable); void siw_umem_release(struct siw_umem *umem, bool dirty); struct siw_pbl *siw_pbl_alloc(u32 num_buf); -u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); +dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index); int siw_mem_add(struct siw_device *sdev, struct siw_mem *m); int siw_invalidate_stag(struct ib_pd *pd, u32 stag); diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index 0990307c5d2c..430314c8abd9 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c @@ -949,7 +949,7 @@ skip_irq: rv = -EINVAL; goto out; } - wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; + wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1]; wqe->sqe.sge[0].lkey = 0; wqe->sqe.num_sge = 1; } diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index f87657a11657..c0a887240325 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem, p = siw_get_upage(umem, dest_addr); if (unlikely(!p)) { - pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n", + pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n", __func__, qp_id(rx_qp(srx)), - (void *)dest_addr, (void *)umem->fp_addr); + (void *)(uintptr_t)dest_addr, + (void *)(uintptr_t)umem->fp_addr); /* siw internal error */ srx->skb_copied += copied; srx->skb_new -= copied; @@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem, pg_off = dest_addr & ~PAGE_MASK; bytes = min(len, (int)PAGE_SIZE - pg_off); - siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes); + siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes); dest = kmap_atomic(p); rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off, @@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len) { int rv; - siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len); + siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len); rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len); if (unlikely(rv)) { - pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n", + pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n", qp_id(rx_qp(srx)), __func__, len, kva, rv); return rv; @@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx, while (len) { int bytes; - u64 buf_addr = + dma_addr_t buf_addr = siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx); if (!buf_addr) break; @@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp) mem_p = *mem; if (mem_p->mem_obj == NULL) rv = siw_rx_kva(srx, - (void *)(sge->laddr + frx->sge_off), - sge_bytes); + (void *)(uintptr_t)(sge->laddr + frx->sge_off), + sge_bytes); else if (!mem_p->is_pbl) rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + frx->sge_off, sge_bytes); @@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp) if (mem->mem_obj == NULL) rv = siw_rx_kva(srx, - (void *)(srx->ddp_to + srx->fpdu_part_rcvd), - bytes); + (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd), + bytes); else if (!mem->is_pbl) rv = siw_rx_umem(srx, mem->umem, srx->ddp_to + 
srx->fpdu_part_rcvd, bytes); @@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp) bytes = min(srx->fpdu_part_rem, srx->skb_new); if (mem_p->mem_obj == NULL) - rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed), - bytes); + rv = siw_rx_kva(srx, + (void *)(uintptr_t)(sge->laddr + wqe->processed), + bytes); else if (!mem_p->is_pbl) rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed, bytes); diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index 43020d2040fc..438a2917a47c 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) { struct siw_pbl *pbl = mem->pbl; u64 offset = addr - mem->va; - u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); + dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); if (paddr) return virt_to_page(paddr); @@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx) /* * Copy short payload at provided destination payload address */ -static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) +static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr) { struct siw_wqe *wqe = &c_tx->wqe_active; struct siw_sge *sge = &wqe->sqe.sge[0]; @@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) return 0; if (tx_flags(wqe) & SIW_WQE_INLINE) { - memcpy((void *)paddr, &wqe->sqe.sge[1], bytes); + memcpy(paddr, &wqe->sqe.sge[1], bytes); } else { struct siw_mem *mem = wqe->mem[0]; if (!mem->mem_obj) { /* Kernel client using kva */ - memcpy((void *)paddr, (void *)sge->laddr, bytes); + memcpy(paddr, + (const void *)(uintptr_t)sge->laddr, bytes); } else if (c_tx->in_syscall) { - if (copy_from_user((void *)paddr, - (const void __user *)sge->laddr, + if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr), bytes)) return -EFAULT; } else { @@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) buffer = kmap_atomic(p); if (likely(PAGE_SIZE - off >= bytes)) { - memcpy((void *)paddr, buffer + off, bytes); + memcpy(paddr, buffer + off, bytes); kunmap_atomic(buffer); } else { unsigned long part = bytes - (PAGE_SIZE - off); - memcpy((void *)paddr, buffer + off, part); + memcpy(paddr, buffer + off, part); kunmap_atomic(buffer); if (!mem->is_pbl) @@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) return -EFAULT; buffer = kmap_atomic(p); - memcpy((void *)(paddr + part), buffer, + memcpy(paddr + part, buffer, bytes - part); kunmap_atomic(buffer); } @@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx) c_tx->ctrl_len = sizeof(struct iwarp_send); crc = (char *)&c_tx->pkt.send_pkt.crc; - data = siw_try_1seg(c_tx, (u64)crc); + data = siw_try_1seg(c_tx, crc); break; case SIW_OP_SEND_REMOTE_INV: @@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx) c_tx->ctrl_len = sizeof(struct iwarp_send_inv); crc = (char *)&c_tx->pkt.send_pkt.crc; - data = siw_try_1seg(c_tx, (u64)crc); + data = siw_try_1seg(c_tx, crc); break; case SIW_OP_WRITE: @@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx) c_tx->ctrl_len = sizeof(struct iwarp_rdma_write); crc = (char *)&c_tx->pkt.write_pkt.crc; - data = siw_try_1seg(c_tx, (u64)crc); + data = siw_try_1seg(c_tx, crc); break; case SIW_OP_READ_RESPONSE: @@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx) c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp); crc = 
(char *)&c_tx->pkt.write_pkt.crc; - data = siw_try_1seg(c_tx, (u64)crc); + data = siw_try_1seg(c_tx, crc); break; default: @@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page, #define MAX_TRAILER (MPA_CRC_SIZE + 4) -static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps) +static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask) { - if (hdr_len) { - ++pages; - --num_maps; - } - while (num_maps-- > 0) { - kunmap(*pages); - pages++; + while (kmap_mask) { + if (kmap_mask & BIT(0)) + kunmap(*pp); + pp++; + kmap_mask >>= 1; } } @@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0, sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx, pbl_idx = c_tx->pbl_idx; + unsigned long kmap_mask = 0L; if (c_tx->state == SIW_SEND_HDR) { if (c_tx->use_sendpage) { @@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) if (!(tx_flags(wqe) & SIW_WQE_INLINE)) { mem = wqe->mem[sge_idx]; - if (!mem->mem_obj) - is_kva = 1; + is_kva = mem->mem_obj == NULL ? 1 : 0; } else { is_kva = 1; } @@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) * tx from kernel virtual address: either inline data * or memory region with assigned kernel buffer */ - iov[seg].iov_base = (void *)(sge->laddr + sge_off); + iov[seg].iov_base = + (void *)(uintptr_t)(sge->laddr + sge_off); iov[seg].iov_len = sge_len; if (do_crc) @@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) p = siw_get_upage(mem->umem, sge->laddr + sge_off); if (unlikely(!p)) { - if (hdr_len) - seg--; - if (!c_tx->use_sendpage && seg) { - siw_unmap_pages(page_array, - hdr_len, seg); - } + siw_unmap_pages(page_array, kmap_mask); wqe->processed -= c_tx->bytes_unsent; rv = -EFAULT; goto done_crc; @@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) if (!c_tx->use_sendpage) { iov[seg].iov_base = kmap(p) + fp_off; iov[seg].iov_len = plen; + + /* Remember for later kunmap() */ + kmap_mask |= BIT(seg); + if (do_crc) crypto_shash_update( c_tx->mpa_crc_hd, @@ -526,13 +524,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) page_address(p) + fp_off, plen); } else { - u64 pa = ((sge->laddr + sge_off) & PAGE_MASK); + u64 va = sge->laddr + sge_off; - page_array[seg] = virt_to_page(pa); + page_array[seg] = virt_to_page(va & PAGE_MASK); if (do_crc) crypto_shash_update( c_tx->mpa_crc_hd, - (void *)(sge->laddr + sge_off), + (void *)(uintptr_t)va, plen); } @@ -543,10 +541,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s) if (++seg > (int)MAX_ARRAY) { siw_dbg_qp(tx_qp(c_tx), "to many fragments\n"); - if (!is_kva && !c_tx->use_sendpage) { - siw_unmap_pages(page_array, hdr_len, - seg - 1); - } + siw_unmap_pages(page_array, kmap_mask); wqe->processed -= c_tx->bytes_unsent; rv = -EMSGSIZE; goto done_crc; @@ -597,8 +592,7 @@ sge_done: } else { rv = kernel_sendmsg(s, &msg, iov, seg + 1, hdr_len + data_len + trl_len); - if (!is_kva) - siw_unmap_pages(page_array, hdr_len, seg); + siw_unmap_pages(page_array, kmap_mask); } if (rv < (int)hdr_len) { /* Not even complete hdr pushed or negative rv */ @@ -829,7 +823,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe) rv = -EINVAL; goto tx_error; } - wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; + wqe->sqe.sge[0].laddr = + (u64)(uintptr_t)&wqe->sqe.sge[1]; } } wqe->wr_status = SIW_WR_INPROGRESS; @@ 
-924,7 +919,7 @@ tx_error: static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) { - struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr; + struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr; struct siw_device *sdev = to_siw_dev(pd->device); struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); int rv = 0; @@ -954,8 +949,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) mem->stag = sqe->rkey; mem->perms = sqe->access; - siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n", - mem->va, base_mr->iova); + siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey); mem->va = base_mr->iova; mem->stag_valid = 1; out: diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index e7f3a2379d9d..da52c90e06d4 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd, */ qp->srq = to_siw_srq(attrs->srq); qp->attrs.rq_size = 0; - siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n", - qp->qp_num, qp->srq); + siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num); } else if (num_rqe) { if (qp->kernel_verbs) qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe)); @@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata) base_ucontext); struct siw_qp_attrs qp_attrs; - siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep); + siw_dbg_qp(qp, "state %d\n", qp->attrs.state); /* * Mark QP as in process of destruction to prevent from @@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, void *kbuf = &sqe->sge[1]; int num_sge = core_wr->num_sge, bytes = 0; - sqe->sge[0].laddr = (u64)kbuf; + sqe->sge[0].laddr = (uintptr_t)kbuf; sqe->sge[0].lkey = 0; while (num_sge--) { @@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, break; case IB_WR_REG_MR: - sqe->base_mr = (uint64_t)reg_wr(wr)->mr; + sqe->base_mr = (uintptr_t)reg_wr(wr)->mr; sqe->rkey = reg_wr(wr)->key; sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; sqe->opcode = SIW_OP_REG_MR; @@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, rv = -EINVAL; break; } - siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n", - sqe->opcode, sqe->flags, (void *)sqe->id); + siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n", + sqe->opcode, sqe->flags, + (void *)(uintptr_t)sqe->id); if (unlikely(rv < 0)) break; @@ -1205,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK); int rv; - siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n", - (unsigned long long)start, (unsigned long long)rnic_va, + siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n", + (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va, (unsigned long long)len); if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { @@ -1363,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle, struct siw_mem *mem = mr->mem; struct siw_pbl *pbl = mem->pbl; struct siw_pble *pble; - u64 pbl_size; + unsigned long pbl_size; int i, rv; if (!pbl) { @@ -1402,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle, pbl_size += sg_dma_len(slp); } siw_dbg_mem(mem, - "sge[%d], size %llu, addr 0x%016llx, total %llu\n", - i, pble->size, pble->addr, pbl_size); + "sge[%d], size %u, addr 0x%p, total %lu\n", + i, pble->size, (void 
*)(uintptr_t)pble->addr, + pbl_size); } rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page); if (rv > 0) { mem->len = base_mr->length; mem->va = base_mr->iova; siw_dbg_mem(mem, - "%llu bytes, start 0x%016llx, %u SLE to %u entries\n", - mem->len, mem->va, num_sle, pbl->num_buf); + "%llu bytes, start 0x%pK, %u SLE to %u entries\n", + mem->len, (void *)(uintptr_t)mem->va, num_sle, + pbl->num_buf); } return rv; } @@ -1529,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq, } spin_lock_init(&srq->lock); - siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq); + siw_dbg_pd(base_srq->pd, "[SRQ]: success\n"); return 0; @@ -1650,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, if (unlikely(!srq->kernel_verbs)) { siw_dbg_pd(base_srq->pd, - "[SRQ 0x%p]: no kernel post_recv for mapped srq\n", - srq); + "[SRQ]: no kernel post_recv for mapped srq\n"); rv = -EINVAL; goto out; } @@ -1673,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, } if (unlikely(wr->num_sge > srq->max_sge)) { siw_dbg_pd(base_srq->pd, - "[SRQ 0x%p]: too many sge's: %d\n", srq, - wr->num_sge); + "[SRQ]: too many sge's: %d\n", wr->num_sge); rv = -EINVAL; break; } @@ -1693,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr, spin_unlock_irqrestore(&srq->lock, flags); out: if (unlikely(rv < 0)) { - siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv); + siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv); *bad_wr = wr; } return rv; diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index 88ae7c2ac3c8..e486a8a74c40 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c @@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev, static void hv_kbd_on_channel_callback(void *context) { + struct vmpacket_descriptor *desc; struct hv_device *hv_dev = context; - void *buffer; - int bufferlen = 0x100; /* Start with sensible size */ u32 bytes_recvd; u64 req_id; - int error; - buffer = kmalloc(bufferlen, GFP_ATOMIC); - if (!buffer) - return; - - while (1) { - error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen, - &bytes_recvd, &req_id); - switch (error) { - case 0: - if (bytes_recvd == 0) { - kfree(buffer); - return; - } - - hv_kbd_handle_received_packet(hv_dev, buffer, - bytes_recvd, req_id); - break; + foreach_vmbus_pkt(desc, hv_dev->channel) { + bytes_recvd = desc->len8 * 8; + req_id = desc->trans_id; - case -ENOBUFS: - kfree(buffer); - /* Handle large packet */ - bufferlen = bytes_recvd; - buffer = kmalloc(bytes_recvd, GFP_ATOMIC); - if (!buffer) - return; - break; - } + hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd, + req_id); } } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index d991d40f797f..f68a62c3c32b 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -965,11 +965,14 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size, { bool coherent = dev_is_dma_coherent(dev); size_t alloc_size = PAGE_ALIGN(size); + int node = dev_to_node(dev); struct page *page = NULL; void *cpu_addr; page = dma_alloc_contiguous(dev, alloc_size, gfp); if (!page) + page = alloc_pages_node(node, gfp, get_order(alloc_size)); + if (!page) return NULL; if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index b6b5acc92ca2..2a48ea3f1b30 100644 --- 
a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) unsigned long freed; c = container_of(shrink, struct dm_bufio_client, shrinker); - if (!dm_bufio_trylock(c)) + if (sc->gfp_mask & __GFP_FS) + dm_bufio_lock(c); + else if (!dm_bufio_trylock(c)) return SHRINK_STOP; freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c index 845f376a72d9..8288887b7f94 100644 --- a/drivers/md/dm-dust.c +++ b/drivers/md/dm-dust.c @@ -25,6 +25,7 @@ struct dust_device { unsigned long long badblock_count; spinlock_t dust_lock; unsigned int blksz; + int sect_per_block_shift; unsigned int sect_per_block; sector_t start; bool fail_read_on_bb:1; @@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block) unsigned long flags; spin_lock_irqsave(&dd->dust_lock, flags); - bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); + bblock = dust_rb_search(&dd->badblocklist, block); if (bblock == NULL) { if (!dd->quiet_mode) { @@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block) } spin_lock_irqsave(&dd->dust_lock, flags); - bblock->bb = block * dd->sect_per_block; + bblock->bb = block; if (!dust_rb_insert(&dd->badblocklist, bblock)) { if (!dd->quiet_mode) { DMERR("%s: block %llu already in badblocklist", @@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block) unsigned long flags; spin_lock_irqsave(&dd->dust_lock, flags); - bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); + bblock = dust_rb_search(&dd->badblocklist, block); if (bblock != NULL) DMINFO("%s: block %llu found in badblocklist", __func__, block); else @@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock, int ret = DM_MAPIO_REMAPPED; if (fail_read_on_bb) { + thisblock >>= dd->sect_per_block_shift; spin_lock_irqsave(&dd->dust_lock, flags); ret = __dust_map_read(dd, thisblock); spin_unlock_irqrestore(&dd->dust_lock, flags); @@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock, unsigned long flags; if (fail_read_on_bb) { + thisblock >>= dd->sect_per_block_shift; spin_lock_irqsave(&dd->dust_lock, flags); __dust_map_write(dd, thisblock); spin_unlock_irqrestore(&dd->dust_lock, flags); @@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv) dd->blksz = blksz; dd->start = tmp; + dd->sect_per_block_shift = __ffs(sect_per_block); + /* * Whether to fail a read on a "bad" block. * Defaults to false; enabled later by message. diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index b1b0de402dfc..9118ab85cb3a 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1943,7 +1943,22 @@ offload_to_thread: queue_work(ic->wait_wq, &dio->work); return; } + if (journal_read_pos != NOT_FOUND) + dio->range.n_sectors = ic->sectors_per_block; wait_and_add_new_range(ic, &dio->range); + /* + * wait_and_add_new_range drops the spinlock, so the journal + * may have been changed arbitrarily. We need to recheck. + * To simplify the code, we restrict I/O size to just one block. 
+ */ + if (journal_read_pos != NOT_FOUND) { + sector_t next_sector; + unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector); + if (unlikely(new_pos != journal_read_pos)) { + remove_range_unlocked(ic, &dio->range); + goto retry; + } + } } spin_unlock_irq(&ic->endio_wait.lock); diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index df2011de7be2..1bbe4a34ef4c 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -566,8 +566,10 @@ static int run_io_job(struct kcopyd_job *job) * no point in continuing. */ if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) && - job->master_job->write_err) + job->master_job->write_err) { + job->write_err = job->master_job->write_err; return -EIO; + } io_job_start(job->kc->throttle); @@ -619,6 +621,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, else job->read_err = 1; push(&kc->complete_jobs, job); + wake(kc); break; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 8a60a4a070ac..1f933dd197cd 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) */ r = rs_prepare_reshape(rs); if (r) - return r; + goto bad; /* Reshaping ain't recovery, so disable recovery */ rs_setup_recovery(rs, MaxSector); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 7b6c3ee9e755..8820931ec7d2 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1342,7 +1342,7 @@ void dm_table_event(struct dm_table *t) } EXPORT_SYMBOL(dm_table_event); -sector_t dm_table_get_size(struct dm_table *t) +inline sector_t dm_table_get_size(struct dm_table *t) { return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; } @@ -1367,6 +1367,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) unsigned int l, n = 0, k = 0; sector_t *node; + if (unlikely(sector >= dm_table_get_size(t))) + return &t->targets[t->num_targets]; + for (l = 0; l < t->depth; l++) { n = get_child(n, k); node = get_node(t, l, n); diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 8545dcee9fd0..595a73110e17 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. * @@ -34,7 +35,7 @@ * (1) Super block (1 block) * (2) Chunk mapping table (nr_map_blocks) * (3) Bitmap blocks (nr_bitmap_blocks) - * All metadata blocks are stored in conventional zones, starting from the + * All metadata blocks are stored in conventional zones, starting from * the first conventional zone found on disk. */ struct dmz_super { @@ -233,7 +234,7 @@ void dmz_unlock_map(struct dmz_metadata *zmd) * Lock/unlock metadata access. This is a "read" lock on a semaphore * that prevents metadata flush from running while metadata are being * modified. The actual metadata write mutual exclusion is achieved with - * the map lock and zone styate management (active and reclaim state are + * the map lock and zone state management (active and reclaim state are * mutually exclusive). 
*/ void dmz_lock_metadata(struct dmz_metadata *zmd) @@ -402,15 +403,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; struct bio *bio; + if (dmz_bdev_is_dying(zmd->dev)) + return ERR_PTR(-EIO); + /* Get a new block and a BIO to read it */ mblk = dmz_alloc_mblock(zmd, mblk_no); if (!mblk) - return NULL; + return ERR_PTR(-ENOMEM); bio = bio_alloc(GFP_NOIO, 1); if (!bio) { dmz_free_mblock(zmd, mblk); - return NULL; + return ERR_PTR(-ENOMEM); } spin_lock(&zmd->mblk_lock); @@ -541,8 +545,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, if (!mblk) { /* Cache miss: read the block from disk */ mblk = dmz_get_mblock_slow(zmd, mblk_no); - if (!mblk) - return ERR_PTR(-ENOMEM); + if (IS_ERR(mblk)) + return mblk; } /* Wait for on-going read I/O and check for error */ @@ -570,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) /* * Issue a metadata block write BIO. */ -static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, - unsigned int set) +static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, + unsigned int set) { sector_t block = zmd->sb[set].block + mblk->no; struct bio *bio; + if (dmz_bdev_is_dying(zmd->dev)) + return -EIO; + bio = bio_alloc(GFP_NOIO, 1); if (!bio) { set_bit(DMZ_META_ERROR, &mblk->state); - return; + return -ENOMEM; } set_bit(DMZ_META_WRITING, &mblk->state); @@ -591,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); submit_bio(bio); + + return 0; } /* @@ -602,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, struct bio *bio; int ret; + if (dmz_bdev_is_dying(zmd->dev)) + return -EIO; + bio = bio_alloc(GFP_NOIO, 1); if (!bio) return -ENOMEM; @@ -659,22 +671,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, { struct dmz_mblock *mblk; struct blk_plug plug; - int ret = 0; + int ret = 0, nr_mblks_submitted = 0; /* Issue writes */ blk_start_plug(&plug); - list_for_each_entry(mblk, write_list, link) - dmz_write_mblock(zmd, mblk, set); + list_for_each_entry(mblk, write_list, link) { + ret = dmz_write_mblock(zmd, mblk, set); + if (ret) + break; + nr_mblks_submitted++; + } blk_finish_plug(&plug); /* Wait for completion */ list_for_each_entry(mblk, write_list, link) { + if (!nr_mblks_submitted) + break; wait_on_bit_io(&mblk->state, DMZ_META_WRITING, TASK_UNINTERRUPTIBLE); if (test_bit(DMZ_META_ERROR, &mblk->state)) { clear_bit(DMZ_META_ERROR, &mblk->state); ret = -EIO; } + nr_mblks_submitted--; } /* Flush drive cache (this will also sync data) */ @@ -736,6 +755,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) */ dmz_lock_flush(zmd); + if (dmz_bdev_is_dying(zmd->dev)) { + ret = -EIO; + goto out; + } + /* Get dirty blocks */ spin_lock(&zmd->mblk_lock); list_splice_init(&zmd->mblk_dirty_list, &write_list); @@ -1542,7 +1566,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) struct dm_zone *zone; if (list_empty(&zmd->map_rnd_list)) - return NULL; + return ERR_PTR(-EBUSY); list_for_each_entry(zone, &zmd->map_rnd_list, link) { if (dmz_is_buf(zone)) @@ -1553,7 +1577,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) return dzone; } - return NULL; + return ERR_PTR(-EBUSY); } /* @@ -1564,7 +1588,7 @@ static struct dm_zone 
*dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) struct dm_zone *zone; if (list_empty(&zmd->map_seq_list)) - return NULL; + return ERR_PTR(-EBUSY); list_for_each_entry(zone, &zmd->map_seq_list, link) { if (!zone->bzone) @@ -1573,7 +1597,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) return zone; } - return NULL; + return ERR_PTR(-EBUSY); } /* @@ -1628,9 +1652,13 @@ again: if (op != REQ_OP_WRITE) goto out; - /* Alloate a random zone */ + /* Allocate a random zone */ dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); if (!dzone) { + if (dmz_bdev_is_dying(zmd->dev)) { + dzone = ERR_PTR(-EIO); + goto out; + } dmz_wait_for_free_zones(zmd); goto again; } @@ -1725,9 +1753,13 @@ again: if (bzone) goto out; - /* Alloate a random zone */ + /* Allocate a random zone */ bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); if (!bzone) { + if (dmz_bdev_is_dying(zmd->dev)) { + bzone = ERR_PTR(-EIO); + goto out; + } dmz_wait_for_free_zones(zmd); goto again; } diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index edf4b95eb075..d240d7ca8a8a 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. * @@ -37,7 +38,7 @@ enum { /* * Number of seconds of target BIO inactivity to consider the target idle. */ -#define DMZ_IDLE_PERIOD (10UL * HZ) +#define DMZ_IDLE_PERIOD (10UL * HZ) /* * Percentage of unmapped (free) random zones below which reclaim starts @@ -134,6 +135,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc, set_bit(DM_KCOPYD_WRITE_SEQ, &flags); while (block < end_block) { + if (dev->flags & DMZ_BDEV_DYING) + return -EIO; + /* Get a valid region from the source zone */ ret = dmz_first_valid_block(zmd, src_zone, &block); if (ret <= 0) @@ -215,7 +219,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone) dmz_unlock_flush(zmd); - return 0; + return ret; } /* @@ -259,7 +263,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone) dmz_unlock_flush(zmd); - return 0; + return ret; } /* @@ -312,7 +316,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone) dmz_unlock_flush(zmd); - return 0; + return ret; } /* @@ -334,7 +338,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone) /* * Find a candidate zone for reclaim and process it. 
*/ -static void dmz_reclaim(struct dmz_reclaim *zrc) +static int dmz_do_reclaim(struct dmz_reclaim *zrc) { struct dmz_metadata *zmd = zrc->metadata; struct dm_zone *dzone; @@ -344,8 +348,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc) /* Get a data zone */ dzone = dmz_get_zone_for_reclaim(zmd); - if (!dzone) - return; + if (IS_ERR(dzone)) + return PTR_ERR(dzone); start = jiffies; @@ -391,13 +395,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc) out: if (ret) { dmz_unlock_zone_reclaim(dzone); - return; + return ret; } - (void) dmz_flush_metadata(zrc->metadata); + ret = dmz_flush_metadata(zrc->metadata); + if (ret) { + dmz_dev_debug(zrc->dev, + "Metadata flush for zone %u failed, err %d\n", + dmz_id(zmd, rzone), ret); + return ret; + } dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms", dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); + return 0; } /* @@ -427,7 +438,7 @@ static bool dmz_should_reclaim(struct dmz_reclaim *zrc) return false; /* - * If the percentage of unmappped random zones is low, + * If the percentage of unmapped random zones is low, * reclaim even if the target is busy. */ return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND; @@ -442,6 +453,10 @@ static void dmz_reclaim_work(struct work_struct *work) struct dmz_metadata *zmd = zrc->metadata; unsigned int nr_rnd, nr_unmap_rnd; unsigned int p_unmap_rnd; + int ret; + + if (dmz_bdev_is_dying(zrc->dev)) + return; if (!dmz_should_reclaim(zrc)) { mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); @@ -471,7 +486,17 @@ static void dmz_reclaim_work(struct work_struct *work) (dmz_target_idle(zrc) ? "Idle" : "Busy"), p_unmap_rnd, nr_unmap_rnd, nr_rnd); - dmz_reclaim(zrc); + ret = dmz_do_reclaim(zrc); + if (ret) { + dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret); + if (ret == -EIO) + /* + * LLD might be performing some error handling sequence + * at the underlying device. To not interfere, do not + * attempt to schedule the next reclaim run immediately. + */ + return; + } dmz_schedule_reclaim(zrc); } diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 51d029bbb740..31478fef6032 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. * @@ -133,6 +134,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, refcount_inc(&bioctx->ref); generic_make_request(clone); + if (clone->bi_status == BLK_STS_IOERR) + return -EIO; if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) zone->wp_block += nr_blocks; @@ -277,8 +280,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz, /* Get the buffer zone. One will be allocated if needed */ bzone = dmz_get_chunk_buffer(zmd, zone); - if (!bzone) - return -ENOSPC; + if (IS_ERR(bzone)) + return PTR_ERR(bzone); if (dmz_is_readonly(bzone)) return -EROFS; @@ -389,6 +392,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw, dmz_lock_metadata(zmd); + if (dmz->dev->flags & DMZ_BDEV_DYING) { + ret = -EIO; + goto out; + } + /* * Get the data zone mapping the chunk. There may be no * mapping for read and discard. 
If a mapping is obtained, @@ -493,6 +501,8 @@ static void dmz_flush_work(struct work_struct *work) /* Flush dirty metadata blocks */ ret = dmz_flush_metadata(dmz->metadata); + if (ret) + dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret); /* Process queued flush requests */ while (1) { @@ -513,22 +523,24 @@ static void dmz_flush_work(struct work_struct *work) * Get a chunk work and start it to process a new BIO. * If the BIO chunk has no work yet, create one. */ -static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) +static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) { unsigned int chunk = dmz_bio_chunk(dmz->dev, bio); struct dm_chunk_work *cw; + int ret = 0; mutex_lock(&dmz->chunk_lock); /* Get the BIO chunk work. If one is not active yet, create one */ cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); if (!cw) { - int ret; /* Create a new chunk work */ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); - if (!cw) + if (unlikely(!cw)) { + ret = -ENOMEM; goto out; + } INIT_WORK(&cw->work, dmz_chunk_work); refcount_set(&cw->refcount, 0); @@ -539,7 +551,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); if (unlikely(ret)) { kfree(cw); - cw = NULL; goto out; } } @@ -547,10 +558,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) bio_list_add(&cw->bio_list, bio); dmz_get_chunk_work(cw); + dmz_reclaim_bio_acc(dmz->reclaim); if (queue_work(dmz->chunk_wq, &cw->work)) dmz_get_chunk_work(cw); out: mutex_unlock(&dmz->chunk_lock); + return ret; +} + +/* + * Check the backing device availability. If it's on the way out, + * start failing I/O. Reclaim and metadata components also call this + * function to cleanly abort operation in the event of such failure. 
+ */ +bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev) +{ + struct gendisk *disk; + + if (!(dmz_dev->flags & DMZ_BDEV_DYING)) { + disk = dmz_dev->bdev->bd_disk; + if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) { + dmz_dev_warn(dmz_dev, "Backing device queue dying"); + dmz_dev->flags |= DMZ_BDEV_DYING; + } else if (disk->fops->check_events) { + if (disk->fops->check_events(disk, 0) & + DISK_EVENT_MEDIA_CHANGE) { + dmz_dev_warn(dmz_dev, "Backing device offline"); + dmz_dev->flags |= DMZ_BDEV_DYING; + } + } + } + + return dmz_dev->flags & DMZ_BDEV_DYING; } /* @@ -564,6 +603,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) sector_t sector = bio->bi_iter.bi_sector; unsigned int nr_sectors = bio_sectors(bio); sector_t chunk_sector; + int ret; + + if (dmz_bdev_is_dying(dmz->dev)) + return DM_MAPIO_KILL; dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks", bio_op(bio), (unsigned long long)sector, nr_sectors, @@ -601,8 +644,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector); /* Now ready to handle this BIO */ - dmz_reclaim_bio_acc(dmz->reclaim); - dmz_queue_chunk_work(dmz, bio); + ret = dmz_queue_chunk_work(dmz, bio); + if (ret) { + dmz_dev_debug(dmz->dev, + "BIO op %d, can't process chunk %llu, err %i\n", + bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio), + ret); + return DM_MAPIO_REQUEUE; + } return DM_MAPIO_SUBMITTED; } @@ -855,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) { struct dmz_target *dmz = ti->private; + if (dmz_bdev_is_dying(dmz->dev)) + return -ENODEV; + *bdev = dmz->dev->bdev; return 0; diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h index ed8de49c9a08..d8e70b0ade35 100644 --- a/drivers/md/dm-zoned.h +++ b/drivers/md/dm-zoned.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2017 Western Digital Corporation or its affiliates. * @@ -56,6 +57,8 @@ struct dmz_dev { unsigned int nr_zones; + unsigned int flags; + sector_t zone_nr_sectors; unsigned int zone_nr_sectors_shift; @@ -67,6 +70,9 @@ struct dmz_dev { (dev)->zone_nr_sectors_shift) #define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1)) +/* Device flags. */ +#define DMZ_BDEV_DYING (1 << 0) + /* * Zone descriptor. */ @@ -245,4 +251,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc); void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc); void dmz_schedule_reclaim(struct dmz_reclaim *zrc); +/* + * Functions defined in dm-zoned-target.c + */ +bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev); + #endif /* DM_ZONED_H */ diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 58b319757b1e..8aae0624a297 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key) new_parent = shadow_current(s); + pn = dm_block_data(new_parent); + size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? 
+ sizeof(__le64) : s->info->value_type.size; + + /* create & init the left block */ r = new_block(s->info, &left); if (r < 0) return r; + ln = dm_block_data(left); + nr_left = le32_to_cpu(pn->header.nr_entries) / 2; + + ln->header.flags = pn->header.flags; + ln->header.nr_entries = cpu_to_le32(nr_left); + ln->header.max_entries = pn->header.max_entries; + ln->header.value_size = pn->header.value_size; + memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); + memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size); + + /* create & init the right block */ r = new_block(s->info, &right); if (r < 0) { unlock_block(s->info, left); return r; } - pn = dm_block_data(new_parent); - ln = dm_block_data(left); rn = dm_block_data(right); - - nr_left = le32_to_cpu(pn->header.nr_entries) / 2; nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; - ln->header.flags = pn->header.flags; - ln->header.nr_entries = cpu_to_le32(nr_left); - ln->header.max_entries = pn->header.max_entries; - ln->header.value_size = pn->header.value_size; - rn->header.flags = pn->header.flags; rn->header.nr_entries = cpu_to_le32(nr_right); rn->header.max_entries = pn->header.max_entries; rn->header.value_size = pn->header.value_size; - - memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); - - size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? - sizeof(__le64) : s->info->value_type.size; - memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size); memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), nr_right * size); diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index aec449243966..25328582cc48 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm) } if (smm->recursion_count == 1) - apply_bops(smm); + r = apply_bops(smm); smm->recursion_count--; diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c index 601cefb5c9d8..050478cabc95 100644 --- a/drivers/mfd/rk808.c +++ b/drivers/mfd/rk808.c @@ -729,7 +729,7 @@ static int rk808_remove(struct i2c_client *client) return 0; } -static int rk8xx_suspend(struct device *dev) +static int __maybe_unused rk8xx_suspend(struct device *dev) { struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); int ret = 0; @@ -749,7 +749,7 @@ static int rk8xx_suspend(struct device *dev) return ret; } -static int rk8xx_resume(struct device *dev) +static int __maybe_unused rk8xx_resume(struct device *dev) { struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); int ret = 0; @@ -768,7 +768,7 @@ static int rk8xx_resume(struct device *dev) return ret; } -SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume); +static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume); static struct i2c_driver rk808_i2c_driver = { .driver = { diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index 895510d40ce4..47602af4ee34 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c @@ -81,6 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r default: printk(KERN_WARNING "SA1100 flash: unknown base address " "0x%08lx, assuming CS0\n", phys); + /* Fall through */ case SA1100_CS0_PHYS: subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 
2 : 4; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 4f839348011d..26509fa37a50 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -481,6 +481,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state) { + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; if (!phy_interface_mode_is_rgmii(state->interface) && @@ -490,8 +491,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, state->interface != PHY_INTERFACE_MODE_INTERNAL && state->interface != PHY_INTERFACE_MODE_MOCA) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); - dev_err(ds->dev, - "Unsupported interface: %d\n", state->interface); + if (port != core_readl(priv, CORE_IMP0_PRT_ID)) + dev_err(ds->dev, + "Unsupported interface: %d for port %d\n", + state->interface, port); return; } @@ -529,6 +532,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, u32 id_mode_dis = 0, port_mode; u32 reg, offset; + if (port == core_readl(priv, CORE_IMP0_PRT_ID)) + return; + if (priv->type == BCM7445_DEVICE_ID) offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); else diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 497298752381..aca95f64bde8 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) u64_stats_fetch_begin(&priv->tx[ring].statss); s->tx_packets += priv->tx[ring].pkt_done; s->tx_bytes += priv->tx[ring].bytes_done; - } while (u64_stats_fetch_retry(&priv->rx[ring].statss, + } while (u64_stats_fetch_retry(&priv->tx[ring].statss, start)); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 8b93101e1a09..7833ddef0427 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -109,13 +109,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn, static void tx_fill_wi(struct mlx5e_txqsq *sq, u16 pi, u8 num_wqebbs, - skb_frag_t *resync_dump_frag) + skb_frag_t *resync_dump_frag, + u32 num_bytes) { struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; wi->skb = NULL; wi->num_wqebbs = num_wqebbs; wi->resync_dump_frag = resync_dump_frag; + wi->num_bytes = num_bytes; } void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) @@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq, umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); - tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL); + tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0); sq->pc += MLX5E_KTLS_STATIC_WQEBBS; } @@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq, wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); - tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL); + tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0); sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; } @@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq, mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); } +struct mlx5e_dump_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_data_seg data; +}; + static int 
tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, skb_frag_t *frag, u32 tisn, bool first) { struct mlx5_wqe_ctrl_seg *cseg; - struct mlx5_wqe_eth_seg *eseg; struct mlx5_wqe_data_seg *dseg; - struct mlx5e_tx_wqe *wqe; + struct mlx5e_dump_wqe *wqe; dma_addr_t dma_addr = 0; - u16 ds_cnt, ds_cnt_inl; u8 num_wqebbs; - u16 pi, ihs; + u16 ds_cnt; int fsz; - - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; - ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb)); - ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS); - ds_cnt += ds_cnt_inl; - ds_cnt += 1; /* one frag */ + u16 pi; wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); cseg = &wqe->ctrl; - eseg = &wqe->eth; - dseg = wqe->data; + dseg = &wqe->data; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); cseg->tisn = cpu_to_be32(tisn << 8); cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - eseg->inline_hdr.sz = cpu_to_be16(ihs); - memcpy(eseg->inline_hdr.start, skb->data, ihs); - dseg += ds_cnt_inl; - fsz = skb_frag_size(frag); dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, DMA_TO_DEVICE); @@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, dseg->byte_count = cpu_to_be32(fsz); mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); - tx_fill_wi(sq, pi, num_wqebbs, frag); + tx_fill_wi(sq, pi, num_wqebbs, frag, fsz); sq->pc += num_wqebbs; WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, @@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq) struct mlx5_wq_cyc *wq = &sq->wq; u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); - tx_fill_wi(sq, pi, 1, NULL); + tx_fill_wi(sq, pi, 1, NULL, 0); mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9314777d99e3..d685122d9ff7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, data_size = crdump_size - offset; else data_size = MLX5_CR_DUMP_CHUNK_SIZE; - err = devlink_fmsg_binary_put(fmsg, cr_data, data_size); + err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset, + data_size); if (err) goto free_data; } @@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t) if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto out; + fatal_error = check_fatal_sensors(dev); + + if (fatal_error && !health->fatal_error) { + mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); + dev->priv.health.fatal_error = fatal_error; + print_health_info(dev); + mlx5_trigger_health_work(dev); + goto out; + } + count = ioread32be(health->health_counter); if (count == health->prev) ++health->miss_counter; @@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t) if (health->synd && health->synd != prev_synd) queue_work(health->wq, &health->report_work); - fatal_error = check_fatal_sensors(dev); - - if (fatal_error && !health->fatal_error) { - mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error); - dev->priv.health.fatal_error = fatal_error; - print_health_info(dev); - mlx5_trigger_health_work(dev); - } - out: mod_timer(&health->timer, get_next_poll_jiffies()); } diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c index 
39aca1ab4687..86fc6e6b46dd 100644 --- a/drivers/net/ethernet/mscc/ocelot_ace.c +++ b/drivers/net/ethernet/mscc/ocelot_ace.c @@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data, break; case OCELOT_ACL_ACTION_TRAP: VCAP_ACT_SET(PORT_MASK, 0x0); - VCAP_ACT_SET(MASK_MODE, 0x0); + VCAP_ACT_SET(MASK_MODE, 0x1); VCAP_ACT_SET(POLICE_ENA, 0x0); VCAP_ACT_SET(POLICE_IDX, 0x0); VCAP_ACT_SET(CPU_QU_NUM, 0x0); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 3a4f4f042ae7..b0708460e342 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1586,6 +1586,13 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, switch (f->command) { case FLOW_BLOCK_BIND: + cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); + if (cb_priv && + flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb, + cb_priv, + &nfp_block_cb_list)) + return -EBUSY; + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); if (!cb_priv) return -ENOMEM; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 0a7645937412..7891f8c5a1bc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1327,7 +1327,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, &drv_version); if (rc) { DP_NOTICE(cdev, "Failed sending drv version command\n"); - return rc; + goto err4; } } @@ -1335,6 +1335,8 @@ static int qed_slowpath_start(struct qed_dev *cdev, return 0; +err4: + qed_ll2_dealloc_if(cdev); err3: qed_hw_stop(cdev); err2: diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 54010957da5c..f298d714efd6 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev) if (!cpsw) return -ENOMEM; + platform_set_drvdata(pdev, cpsw); cpsw->dev = dev; mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); @@ -2878,7 +2879,6 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_cpts; } - platform_set_drvdata(pdev, cpsw); priv = netdev_priv(ndev); priv->cpsw = cpsw; priv->ndev = ndev; diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index b41696e16bdc..c20e7ef18bc9 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, err = hwsim_subscribe_all_others(phy); if (err < 0) { mutex_unlock(&hwsim_phys_lock); - goto err_reg; + goto err_subscribe; } } list_add_tail(&phy->list, &hwsim_phys); @@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, return idx; +err_subscribe: + ieee802154_unregister_hw(phy->hw); err_reg: kfree(pib); err_pib: @@ -901,9 +903,9 @@ static __init int hwsim_init_module(void) return 0; platform_drv: - genl_unregister_family(&hwsim_genl_family); -platform_dev: platform_device_unregister(mac802154hwsim_dev); +platform_dev: + genl_unregister_family(&hwsim_genl_family); return rc; } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 17f0e9e98697..c6fa0c17c13d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -812,8 +812,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), 
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, value, index, tmp, size, 500); + if (ret < 0) + memset(data, 0xff, size); + else + memcpy(data, tmp, size); - memcpy(data, tmp, size); kfree(tmp); return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index cb22d447fcb8..fe776e35b9d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cpu_to_le32(vif->bss_conf.use_short_slot ? MAC_FLG_SHORT_SLOT : 0); - cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); + cmd->filter_flags = 0; for (i = 0; i < IEEE80211_NUM_ACS; i++) { u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); @@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, /* We need the dtim_period to set the MAC as associated */ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && !force_assoc_off) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u8 ap_sta_id = mvmvif->ap_sta_id; u32 dtim_offs; /* @@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, dtim_offs); ctxt_sta->is_assoc = cpu_to_le32(1); + + /* + * allow multicast data frames only as long as the station is + * authorized, i.e., GTK keys are already installed (if needed) + */ + if (ap_sta_id < IWL_MVM_STATION_COUNT) { + struct ieee80211_sta *sta; + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); + if (!IS_ERR_OR_NULL(sta)) { + struct iwl_mvm_sta *mvmsta = + iwl_mvm_sta_from_mac80211(sta); + + if (mvmsta->sta_state == + IEEE80211_STA_AUTHORIZED) + cmd.filter_flags |= + cpu_to_le32(MAC_FILTER_ACCEPT_GRP); + } + + rcu_read_unlock(); + } } else { ctxt_sta->is_assoc = cpu_to_le32(0); @@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST | - MAC_FILTER_IN_CRC32); + MAC_FILTER_IN_CRC32 | + MAC_FILTER_ACCEPT_GRP); ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); /* Allocate sniffer station */ @@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | - MAC_FILTER_IN_PROBE_REQUEST); + MAC_FILTER_IN_PROBE_REQUEST | + MAC_FILTER_ACCEPT_GRP); /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index b74bd58f3f45..d6499763f0dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -3327,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + /* + * Now that the station is authorized, i.e., keys were already + * installed, need to indicate to the FW that + * multicast data frames can be forwarded to the driver + */ + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { + /* Multicast data frames are no longer allowed */ + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + /* disable beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 
WARN_ON(ret && diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index de711c1160d3..7c5aaeaf7fe5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1063,6 +1063,23 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; } + + /* same thing for QuZ... */ + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { + if (cfg == &iwl_ax101_cfg_qu_hr) + cfg = &iwl_ax101_cfg_quz_hr; + else if (cfg == &iwl_ax201_cfg_qu_hr) + cfg = &iwl_ax201_cfg_quz_hr; + else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; + else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) + cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; + } + #endif pci_set_drvdata(pdev, iwl_trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index f5df5b370d78..935e35dafce5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3603,6 +3603,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && ((trans->cfg != &iwl_ax200_cfg_cc && + trans->cfg != &iwl_ax201_cfg_qu_hr && trans->cfg != &killer1650x_2ax_cfg && trans->cfg != &killer1650w_2ax_cfg && trans->cfg != &iwl_ax201_cfg_quz_hr) || diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 38d110338987..9ef6b8fe03c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, u16 len = byte_cnt; __le16 bc_ent; - if (trans_pcie->bc_table_dword) - len = DIV_ROUND_UP(len, 4); - - if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) + if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) return; filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + @@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, */ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + /* Starting from 22560, the HW expects bytes */ + WARN_ON(trans_pcie->bc_table_dword); + WARN_ON(len > 0x3FFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; - else + } else { + /* Until 22560, the HW expects DW */ + WARN_ON(!trans_pcie->bc_table_dword); + len = DIV_ROUND_UP(len, 4); + WARN_ON(len > 0xFFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); scd_bc_tbl->tfd_offset[idx] = bc_ent; + } } /* diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 627ed1fc7b15..645f4d15fb61 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = { 
.release_buffered_frames = mt76_release_buffered_frames, }; -static int mt76x0u_init_hardware(struct mt76x02_dev *dev) +static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset) { int err; - mt76x0_chip_onoff(dev, true, true); + mt76x0_chip_onoff(dev, true, reset); if (!mt76x02_wait_for_mac(&dev->mt76)) return -ETIMEDOUT; @@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev) if (err < 0) goto out_err; - err = mt76x0u_init_hardware(dev); + err = mt76x0u_init_hardware(dev, true); if (err < 0) goto out_err; @@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) if (ret < 0) goto err; - ret = mt76x0u_init_hardware(dev); + ret = mt76x0u_init_hardware(dev, false); if (ret) goto err; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index c9b957ac5733..ecbe78b8027b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -6095,6 +6095,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) } /* + * Clear encryption initialization vectors on start, but keep them + * for watchdog reset. Otherwise we will have wrong IVs and not be + * able to keep connections after reset. + */ + if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags)) + for (i = 0; i < 256; i++) + rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0); + + /* * Clear all beacons */ for (i = 0; i < 8; i++) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 7e43690a861c..2b216edd0c7d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -658,6 +658,7 @@ enum rt2x00_state_flags { DEVICE_STATE_ENABLED_RADIO, DEVICE_STATE_SCANNING, DEVICE_STATE_FLUSHING, + DEVICE_STATE_RESET, /* * Driver configuration diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 35414f97a978..9d158237ac67 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) { - int retval; + int retval = 0; if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) { /* * This is special case for ieee80211_restart_hw(), otherwise * mac80211 never call start() two times in row without stop(); */ + set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev); rt2x00lib_stop(rt2x00dev); } @@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) */ retval = rt2x00lib_load_firmware(rt2x00dev); if (retval) - return retval; + goto out; /* * Initialize the device. 
*/ retval = rt2x00lib_initialize(rt2x00dev); if (retval) - return retval; + goto out; rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; @@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) /* Enable the radio */ retval = rt2x00lib_enable_radio(rt2x00dev); if (retval) - return retval; + goto out; set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); - return 0; +out: + clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); + return retval; } void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c258a1ce4b28..d3d6b7bd6903 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2257,6 +2257,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = { .vid = 0x1179, .mn = "THNSF5256GPUK TOSHIBA", .quirks = NVME_QUIRK_NO_APST, + }, + { + /* + * This LiteON CL1-3D*-Q11 firmware version has a race + * condition associated with actions related to suspend to idle + * LiteON has resolved the problem in future firmware + */ + .vid = 0x14a4, + .fr = "22301111", + .quirks = NVME_QUIRK_SIMPLE_SUSPEND, } }; @@ -2597,6 +2607,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) goto out_free; } + if (!(ctrl->ops->flags & NVME_F_FABRICS)) + ctrl->cntlid = le16_to_cpu(id->cntlid); + if (!ctrl->identified) { int i; @@ -2697,7 +2710,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) goto out_free; } } else { - ctrl->cntlid = le16_to_cpu(id->cntlid); ctrl->hmpre = le32_to_cpu(id->hmpre); ctrl->hmmin = le32_to_cpu(id->hmmin); ctrl->hmminds = le32_to_cpu(id->hmminds); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 888d4543894e..af831d3d15d0 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -428,6 +428,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) srcu_read_unlock(&head->srcu, srcu_idx); } + synchronize_srcu(&ns->head->srcu); kblockd_schedule_work(&ns->head->requeue_work); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 778b3a0b6adb..2d678fb968c7 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -92,6 +92,11 @@ enum nvme_quirks { * Broken Write Zeroes. */ NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9), + + /* + * Force simple suspend/resume path. + */ + NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10), }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 6bd9b1033965..732d5b63ec05 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2876,7 +2876,8 @@ static int nvme_suspend(struct device *dev) * state (which may not be possible if the link is up). 
*/ if (pm_suspend_via_firmware() || !ctrl->npss || - !pcie_aspm_enabled(pdev)) { + !pcie_aspm_enabled(pdev) || + (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) { nvme_dev_disable(ndev, true); return 0; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 208aacf39329..44c4ae1abd00 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5256,7 +5256,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev) */ if (ioread32(map + 0x2240c) & 0x2) { pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n"); - ret = pci_reset_function(pdev); + ret = pci_reset_bus(pdev); if (ret < 0) pci_err(pdev, "Failed to reset GPU: %d\n", ret); } diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c index e504d255d5ce..430731cdf827 100644 --- a/drivers/platform/chrome/cros_ec_ishtp.c +++ b/drivers/platform/chrome/cros_ec_ishtp.c @@ -707,7 +707,7 @@ static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device) */ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device) { - struct ishtp_cl_device *cl_device = dev_get_drvdata(device); + struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device); struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); @@ -722,7 +722,7 @@ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device) */ static int __maybe_unused cros_ec_ishtp_resume(struct device *device) { - struct ishtp_cl_device *cl_device = dev_get_drvdata(device); + struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device); struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index 30de448de802..86d88aec94a1 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -742,6 +742,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di, USB_CH_IP_CUR_LVL_1P5; break; } + /* Else, fall through */ case USB_STAT_HM_IDGND: dev_err(di->dev, "USB Type - Charging not allowed\n"); di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 8b4ea5f2832b..a7868c8133ee 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4410,6 +4410,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) get_user(req_len, &ureq->hdr.req_len)) return -EFAULT; + /* Sanitize user input, to avoid overflows in iob size calculation: */ + if (req_len > QETH_BUFSIZE) + return -EINVAL; + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); if (!iob) return -ENOMEM; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index abcad097ff2f..f47b4b281b14 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -459,6 +459,7 @@ static void sas_discover_domain(struct work_struct *work) pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); /* Fall through */ #endif + /* Fall through - only for the #else condition above. 
*/ default: error = -ENXIO; pr_err("unhandled device %d\n", dev->dev_type); diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 2c3bb8a966e5..bade2e025ecf 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -824,6 +824,7 @@ struct lpfc_hba { uint32_t cfg_cq_poll_threshold; uint32_t cfg_cq_max_proc_limit; uint32_t cfg_fcp_cpu_map; + uint32_t cfg_fcp_mq_threshold; uint32_t cfg_hdw_queue; uint32_t cfg_irq_chann; uint32_t cfg_suppress_rsp; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index ea62322ffe2b..8d8c495b5b60 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5709,6 +5709,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, "Embed NVME Command in WQE"); /* + * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues + * the driver will advertise it supports to the SCSI layer. + * + * 0 = Set nr_hw_queues by the number of CPUs or HW queues. + * 1,128 = Manually specify the maximum nr_hw_queue value to be set, + * + * Value range is [0,128]. Default value is 8. + */ +LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, + LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, + "Set the number of SCSI Queues advertised"); + +/* * lpfc_hdw_queue: Set the number of Hardware Queues the driver * will advertise it supports to the NVME and SCSI layers. This also * will map to the number of CQ/WQ pairs the driver will create. @@ -6030,6 +6043,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_cq_poll_threshold, &dev_attr_lpfc_cq_max_proc_limit, &dev_attr_lpfc_fcp_cpu_map, + &dev_attr_lpfc_fcp_mq_threshold, &dev_attr_lpfc_hdw_queue, &dev_attr_lpfc_irq_chann, &dev_attr_lpfc_suppress_rsp, @@ -7112,6 +7126,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) /* Initialize first burst. Target vs Initiator are different. 
*/ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); + lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold); lpfc_hdw_queue_init(phba, lpfc_hdw_queue); lpfc_irq_chann_init(phba, lpfc_irq_chann); lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a7549ae32542..1ac98becb5ba 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4309,10 +4309,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_cmd_len = 16; if (phba->sli_rev == LPFC_SLI_REV4) { - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) - shost->nr_hw_queues = phba->cfg_hdw_queue; - else - shost->nr_hw_queues = phba->sli4_hba.num_present_cpu; + if (!phba->cfg_fcp_mq_threshold || + phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) + phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; + + shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), + phba->cfg_fcp_mq_threshold); shost->dma_boundary = phba->sli4_hba.pc_sli4_params.sge_supp_len-1; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 3aeca387b22a..329f7aa7e169 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -44,6 +44,11 @@ #define LPFC_HBA_HDWQ_MAX 128 #define LPFC_HBA_HDWQ_DEF 0 +/* FCP MQ queue count limiting */ +#define LPFC_FCP_MQ_THRESHOLD_MIN 0 +#define LPFC_FCP_MQ_THRESHOLD_MAX 128 +#define LPFC_FCP_MQ_THRESHOLD_DEF 8 + /* Common buffer size to accomidate SCSI and NVME IO buffers */ #define LPFC_COMMON_IO_BUF_SZ 768 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 8d560c562e9c..6b7b390b2e52 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); + vha->gnl.l = NULL; + vfree(vha->scan.l); if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2e58cff9d200..98e60a34afd9 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3440,6 +3440,12 @@ skip_dpc: return 0; probe_failed: + if (base_vha->gnl.l) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + } + if (base_vha->timer_active) qla2x00_stop_timer(base_vha); base_vha->flags.online = 0; @@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev) if (!atomic_read(&pdev->enable_cnt)) { dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); - + base_vha->gnl.l = NULL; scsi_host_put(base_vha->host); kfree(ha); pci_set_drvdata(pdev, NULL); @@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev) dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + vfree(base_vha->scan.l); if (IS_QLAFX00(ha)) @@ -4816,6 +4824,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, "Alloc failed for scan database.\n"); dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); + vha->gnl.l = NULL; scsi_remove_host(vha->host); return NULL; } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index e274053109d0..029da74bb2f5 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -7062,6 +7062,9 @@ static inline 
int ufshcd_config_vreg_lpm(struct ufs_hba *hba, static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { + if (!vreg) + return 0; + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); } diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 04eda111920e..661bb9358364 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * struct se_cmd *se_cmd = cmd->se_cmd; struct tcmu_dev *udev = cmd->tcmu_dev; bool read_len_valid = false; - uint32_t read_len = se_cmd->data_length; + uint32_t read_len; /* * cmd has been completed already from timeout, just reclaim * data area space and free cmd */ - if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) + if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + WARN_ON_ONCE(se_cmd); goto out; + } list_del_init(&cmd->queue_entry); @@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * goto done; } + read_len = se_cmd->data_length; if (se_cmd->data_direction == DMA_FROM_DEVICE && (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { read_len_valid = true; @@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) */ scsi_status = SAM_STAT_CHECK_CONDITION; list_del_init(&cmd->queue_entry); + cmd->se_cmd = NULL; } else { list_del_init(&cmd->queue_entry); idr_remove(&udev->commands, id); @@ -2022,6 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) idr_remove(&udev->commands, i); if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + WARN_ON(!cmd->se_cmd); list_del_init(&cmd->queue_entry); if (err_level == 1) { /* diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c index 92f23e3bc27a..7cacae5a8797 100644 --- a/drivers/video/fbdev/acornfb.c +++ b/drivers/video/fbdev/acornfb.c @@ -858,6 +858,7 @@ static void acornfb_parse_dram(char *opt) case 'M': case 'm': size *= 1024; + /* Fall through */ case 'K': case 'k': size *= 1024; diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c index 4eacfb1ce1ac..eb729d704836 100644 --- a/drivers/watchdog/wdt285.c +++ b/drivers/watchdog/wdt285.c @@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, soft_margin = new_margin; reload = soft_margin * (mem_fclk_21285 / 256); watchdog_ping(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: ret = put_user(soft_margin, int_arg); break; diff --git a/fs/afs/cell.c b/fs/afs/cell.c index a2a87117d262..fd5133e26a38 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net, cell = rcu_dereference_raw(net->ws_cell); if (cell) { afs_get_cell(cell); + ret = 0; break; } ret = -EDESTADDRREQ; @@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net, done_seqretry(&net->cells_lock, seq); + if (ret != 0 && cell) + afs_put_cell(net, cell); + return ret == 0 ? cell : ERR_PTR(ret); } diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 81207dc3c997..139b4e3cc946 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -959,7 +959,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, inode ? AFS_FS_I(inode) : NULL); } else { trace_afs_lookup(dvnode, &dentry->d_name, - inode ? AFS_FS_I(inode) : NULL); + IS_ERR_OR_NULL(inode) ? 
NULL + : AFS_FS_I(inode)); } return d; } diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 2575503170fc..ca2452806ebf 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -2171,7 +2171,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); size = round_up(acl->size, 4); - call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus, + call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2, sizeof(__be32) * 2 + sizeof(struct yfs_xdr_YFSFid) + sizeof(__be32) + size, diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index e078cc55b989..b3c8b886bf64 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -913,8 +913,9 @@ get_more_pages: if (page_offset(page) >= ceph_wbc.i_size) { dout("%p page eof %llu\n", page, ceph_wbc.i_size); - if (ceph_wbc.size_stable || - page_offset(page) >= i_size_read(inode)) + if ((ceph_wbc.size_stable || + page_offset(page) >= i_size_read(inode)) && + clear_page_dirty_for_io(page)) mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index d98dcd976c80..ce0f5658720a 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1301,6 +1301,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, { struct ceph_inode_info *ci = cap->ci; struct inode *inode = &ci->vfs_inode; + struct ceph_buffer *old_blob = NULL; struct cap_msg_args arg; int held, revoking; int wake = 0; @@ -1365,7 +1366,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, ci->i_requested_max_size = arg.max_size; if (flushing & CEPH_CAP_XATTR_EXCL) { - __ceph_build_xattrs_blob(ci); + old_blob = __ceph_build_xattrs_blob(ci); arg.xattr_version = ci->i_xattrs.version; arg.xattr_buf = ci->i_xattrs.blob; } else { @@ -1409,6 +1410,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, spin_unlock(&ci->i_ceph_lock); + ceph_buffer_put(old_blob); + ret = send_cap_msg(&arg); if (ret < 0) { dout("error sending cap msg, must requeue %p\n", inode); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 791f84a13bb8..18500edefc56 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -736,6 +736,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page, int issued, new_issued, info_caps; struct timespec64 mtime, atime, ctime; struct ceph_buffer *xattr_blob = NULL; + struct ceph_buffer *old_blob = NULL; struct ceph_string *pool_ns = NULL; struct ceph_cap *new_cap = NULL; int err = 0; @@ -881,7 +882,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page, if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) && le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { if (ci->i_xattrs.blob) - ceph_buffer_put(ci->i_xattrs.blob); + old_blob = ci->i_xattrs.blob; ci->i_xattrs.blob = xattr_blob; if (xattr_blob) memcpy(ci->i_xattrs.blob->vec.iov_base, @@ -1022,8 +1023,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page, out: if (new_cap) ceph_put_cap(mdsc, new_cap); - if (xattr_blob) - ceph_buffer_put(xattr_blob); + ceph_buffer_put(old_blob); + ceph_buffer_put(xattr_blob); ceph_put_string(pool_ns); return err; } diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index ac9b53b89365..5083e238ad15 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode, req->r_wait_for_completion = ceph_lock_wait_for_completion; err = ceph_mdsc_do_request(mdsc, inode, req); - - if 
(operation == CEPH_MDS_OP_GETFILELOCK) { + if (!err && operation == CEPH_MDS_OP_GETFILELOCK) { fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid); if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type) fl->fl_type = F_RDLCK; diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 4c6494eb02b5..ccfcc66aaf44 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -465,6 +465,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci) struct inode *inode = &ci->vfs_inode; struct ceph_cap_snap *capsnap; struct ceph_snap_context *old_snapc, *new_snapc; + struct ceph_buffer *old_blob = NULL; int used, dirty; capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS); @@ -541,7 +542,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci) capsnap->gid = inode->i_gid; if (dirty & CEPH_CAP_XATTR_EXCL) { - __ceph_build_xattrs_blob(ci); + old_blob = __ceph_build_xattrs_blob(ci); capsnap->xattr_blob = ceph_buffer_get(ci->i_xattrs.blob); capsnap->xattr_version = ci->i_xattrs.version; @@ -584,6 +585,7 @@ update_snapc: } spin_unlock(&ci->i_ceph_lock); + ceph_buffer_put(old_blob); kfree(capsnap); ceph_put_snap_context(old_snapc); } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index d2352fd95dbc..6b9f1ee7de85 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -926,7 +926,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat, int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int); ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t); extern ssize_t ceph_listxattr(struct dentry *, char *, size_t); -extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci); +extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci); extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci); extern const struct xattr_handler *ceph_xattr_handlers[]; diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 37b458a9af3a..939eab7aa219 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -754,12 +754,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size, /* * If there are dirty xattrs, reencode xattrs into the prealloc_blob - * and swap into place. + * and swap into place. It returns the old i_xattrs.blob (or NULL) so + * that it can be freed by the caller as the i_ceph_lock is likely to be + * held. 
*/ -void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) +struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci) { struct rb_node *p; struct ceph_inode_xattr *xattr = NULL; + struct ceph_buffer *old_blob = NULL; void *dest; dout("__build_xattrs_blob %p\n", &ci->vfs_inode); @@ -790,12 +793,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) dest - ci->i_xattrs.prealloc_blob->vec.iov_base; if (ci->i_xattrs.blob) - ceph_buffer_put(ci->i_xattrs.blob); + old_blob = ci->i_xattrs.blob; ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob; ci->i_xattrs.prealloc_blob = NULL; ci->i_xattrs.dirty = false; ci->i_xattrs.version++; } + + return old_blob; } static inline int __get_request_mask(struct inode *in) { @@ -1036,6 +1041,7 @@ int __ceph_setxattr(struct inode *inode, const char *name, struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_cap_flush *prealloc_cf = NULL; + struct ceph_buffer *old_blob = NULL; int issued; int err; int dirty = 0; @@ -1109,13 +1115,15 @@ retry: struct ceph_buffer *blob; spin_unlock(&ci->i_ceph_lock); - dout(" preaallocating new blob size=%d\n", required_blob_size); + ceph_buffer_put(old_blob); /* Shouldn't be required */ + dout(" pre-allocating new blob size=%d\n", required_blob_size); blob = ceph_buffer_new(required_blob_size, GFP_NOFS); if (!blob) goto do_sync_unlocked; spin_lock(&ci->i_ceph_lock); + /* prealloc_blob can't be released while holding i_ceph_lock */ if (ci->i_xattrs.prealloc_blob) - ceph_buffer_put(ci->i_xattrs.prealloc_blob); + old_blob = ci->i_xattrs.prealloc_blob; ci->i_xattrs.prealloc_blob = blob; goto retry; } @@ -1131,6 +1139,7 @@ retry: } spin_unlock(&ci->i_ceph_lock); + ceph_buffer_put(old_blob); if (lock_snap_rwsem) up_read(&mdsc->snap_rwsem); if (dirty) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a15a6e738eb5..1795e80cbdf7 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1113,7 +1113,7 @@ cifs_demultiplex_thread(void *p) mempool_resize(cifs_req_poolp, length + cifs_min_rcv); set_freezable(); - allow_signal(SIGKILL); + allow_kernel_signal(SIGKILL); while (server->tcpStatus != CifsExiting) { if (try_to_freeze()) continue; diff --git a/fs/io_uring.c b/fs/io_uring.c index 24bbe3cb7ad4..cfb48bd088e1 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -679,6 +679,13 @@ static void io_put_req(struct io_kiocb *req) io_free_req(req); } +static unsigned io_cqring_events(struct io_cq_ring *ring) +{ + /* See comment at the top of this file */ + smp_rmb(); + return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head); +} + /* * Find and free completed poll iocbs */ @@ -771,7 +778,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, long min) { - while (!list_empty(&ctx->poll_list)) { + while (!list_empty(&ctx->poll_list) && !need_resched()) { int ret; ret = io_do_iopoll(ctx, nr_events, min); @@ -798,6 +805,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx) unsigned int nr_events = 0; io_iopoll_getevents(ctx, &nr_events, 1); + + /* + * Ensure we allow local-to-the-cpu processing to take place, + * in this case we need to ensure that we reap all events. 
+ */ + cond_resched(); } mutex_unlock(&ctx->uring_lock); } @@ -805,11 +818,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx) static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, long min) { - int ret = 0; + int iters, ret = 0; + /* + * We disallow the app entering submit/complete with polling, but we + * still need to lock the ring to prevent racing with polled issue + * that got punted to a workqueue. + */ + mutex_lock(&ctx->uring_lock); + + iters = 0; do { int tmin = 0; + /* + * Don't enter poll loop if we already have events pending. + * If we do, we can potentially be spinning for commands that + * already triggered a CQE (eg in error). + */ + if (io_cqring_events(ctx->cq_ring)) + break; + + /* + * If a submit got punted to a workqueue, we can have the + * application entering polling for a command before it gets + * issued. That app will hold the uring_lock for the duration + * of the poll right here, so we need to take a breather every + * now and then to ensure that the issue has a chance to add + * the poll to the issued list. Otherwise we can spin here + * forever, while the workqueue is stuck trying to acquire the + * very same mutex. + */ + if (!(++iters & 7)) { + mutex_unlock(&ctx->uring_lock); + mutex_lock(&ctx->uring_lock); + } + if (*nr_events < min) tmin = min - *nr_events; @@ -819,6 +863,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, ret = 0; } while (min && !*nr_events && !need_resched()); + mutex_unlock(&ctx->uring_lock); return ret; } @@ -2280,15 +2325,7 @@ static int io_sq_thread(void *data) unsigned nr_events = 0; if (ctx->flags & IORING_SETUP_IOPOLL) { - /* - * We disallow the app entering submit/complete - * with polling, but we still need to lock the - * ring to prevent racing with polled issue - * that got punted to a workqueue. - */ - mutex_lock(&ctx->uring_lock); io_iopoll_check(ctx, &nr_events, 0); - mutex_unlock(&ctx->uring_lock); } else { /* * Normal IO, just pretend everything completed. @@ -2433,13 +2470,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit) return submit; } -static unsigned io_cqring_events(struct io_cq_ring *ring) -{ - /* See comment at the top of this file */ - smp_rmb(); - return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head); -} - /* * Wait until events become available, if we don't already have some. The * application must reap them itself, as they reside on the shared cq ring. 
@@ -3190,9 +3220,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, min_complete = min(min_complete, ctx->cq_entries); if (ctx->flags & IORING_SETUP_IOPOLL) { - mutex_lock(&ctx->uring_lock); ret = io_iopoll_check(ctx, &nr_events, min_complete); - mutex_unlock(&ctx->uring_lock); } else { ret = io_cqring_wait(ctx, min_complete, sig, sigsz); } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8d501093660f..0adfd8840110 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx, if (S_ISREG(file->f_path.dentry->d_inode->i_mode)) nfs_file_set_open_context(file, ctx); else - err = -ESTALE; + err = -EOPENSTALE; out: return err; } diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 0cb442406168..222d7115db71 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -401,15 +401,21 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) unsigned long bytes = 0; struct nfs_direct_req *dreq = hdr->dreq; - if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) - goto out_put; - spin_lock(&dreq->lock); - if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0)) + if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) dreq->error = hdr->error; - else + + if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) { + spin_unlock(&dreq->lock); + goto out_put; + } + + if (hdr->good_bytes != 0) nfs_direct_good_bytes(dreq, hdr); + if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) + dreq->error = 0; + spin_unlock(&dreq->lock); while (!list_empty(&hdr->pages)) { @@ -782,16 +788,19 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) bool request_commit = false; struct nfs_page *req = nfs_list_entry(hdr->pages.next); - if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) - goto out_put; - nfs_init_cinfo_from_dreq(&cinfo, dreq); spin_lock(&dreq->lock); if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) dreq->error = hdr->error; - if (dreq->error == 0) { + + if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) { + spin_unlock(&dreq->lock); + goto out_put; + } + + if (hdr->good_bytes != 0) { nfs_direct_good_bytes(dreq, hdr); if (nfs_write_need_commit(hdr)) { if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index b04e20d28162..5657b7f2611f 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -8,6 +8,7 @@ */ #include <linux/nfs_fs.h> +#include <linux/nfs_mount.h> #include <linux/nfs_page.h> #include <linux/module.h> #include <linux/sched/mm.h> @@ -928,7 +929,9 @@ retry: pgm = &pgio->pg_mirrors[0]; pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; - pgio->pg_maxretrans = io_maxretrans; + if (NFS_SERVER(pgio->pg_inode)->flags & + (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) + pgio->pg_maxretrans = io_maxretrans; return; out_nolseg: if (pgio->pg_error < 0) @@ -940,6 +943,7 @@ out_mds: pgio->pg_lseg); pnfs_put_lseg(pgio->pg_lseg); pgio->pg_lseg = NULL; + pgio->pg_maxretrans = 0; nfs_pageio_reset_read_mds(pgio); } @@ -1000,7 +1004,9 @@ retry: pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize; } - pgio->pg_maxretrans = io_maxretrans; + if (NFS_SERVER(pgio->pg_inode)->flags & + (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) + pgio->pg_maxretrans = io_maxretrans; return; out_mds: @@ -1010,6 +1016,7 @@ out_mds: pgio->pg_lseg); pnfs_put_lseg(pgio->pg_lseg); pgio->pg_lseg = NULL; + pgio->pg_maxretrans = 0; nfs_pageio_reset_write_mds(pgio); } @@ -1148,8 +1155,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, break; case 
-NFS4ERR_RETRY_UNCACHED_REP: break; - case -EAGAIN: - return -NFS4ERR_RESET_TO_PNFS; /* Invalidate Layout errors */ case -NFS4ERR_PNFS_NO_LAYOUT: case -ESTALE: /* mapped NFS4ERR_STALE */ @@ -1210,7 +1215,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task, case -EBADHANDLE: case -ELOOP: case -ENOSPC: - case -EAGAIN: break; case -EJUKEBOX: nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); @@ -1445,16 +1449,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data) ff_layout_read_prepare_common(task, hdr); } -static void -ff_layout_io_prepare_transmit(struct rpc_task *task, - void *data) -{ - struct nfs_pgio_header *hdr = data; - - if (!pnfs_is_valid_lseg(hdr->lseg)) - rpc_exit(task, -EAGAIN); -} - static void ff_layout_read_call_done(struct rpc_task *task, void *data) { struct nfs_pgio_header *hdr = data; @@ -1740,7 +1734,6 @@ static void ff_layout_commit_release(void *data) static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { .rpc_call_prepare = ff_layout_read_prepare_v3, - .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit, .rpc_call_done = ff_layout_read_call_done, .rpc_count_stats = ff_layout_read_count_stats, .rpc_release = ff_layout_read_release, @@ -1748,7 +1741,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { .rpc_call_prepare = ff_layout_read_prepare_v4, - .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit, .rpc_call_done = ff_layout_read_call_done, .rpc_count_stats = ff_layout_read_count_stats, .rpc_release = ff_layout_read_release, @@ -1756,7 +1748,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { .rpc_call_prepare = ff_layout_write_prepare_v3, - .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit, .rpc_call_done = ff_layout_write_call_done, .rpc_count_stats = ff_layout_write_count_stats, .rpc_release = ff_layout_write_release, @@ -1764,7 +1755,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { .rpc_call_prepare = ff_layout_write_prepare_v4, - .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit, .rpc_call_done = ff_layout_write_call_done, .rpc_count_stats = ff_layout_write_count_stats, .rpc_release = ff_layout_write_release, diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 8a1758200b57..c764cfe456e5 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1403,12 +1403,21 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) return 0; + /* No fileid? Just exit */ + if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) + return 0; /* Has the inode gone and changed behind our back? */ - if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) + if (nfsi->fileid != fattr->fileid) { + /* Is this perhaps the mounted-on fileid? 
*/ + if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) && + nfsi->fileid == fattr->mounted_on_fileid) + return 0; return -ESTALE; + } if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) return -ESTALE; + if (!nfs_file_has_buffered_writers(nfsi)) { /* Verify a few of the more important attributes */ if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr)) @@ -1768,18 +1777,6 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc); -static inline bool nfs_fileid_valid(struct nfs_inode *nfsi, - struct nfs_fattr *fattr) -{ - bool ret1 = true, ret2 = true; - - if (fattr->valid & NFS_ATTR_FATTR_FILEID) - ret1 = (nfsi->fileid == fattr->fileid); - if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) - ret2 = (nfsi->fileid == fattr->mounted_on_fileid); - return ret1 || ret2; -} - /* * Many nfs protocol calls return the new file attributes after * an operation. Here we update the inode to reflect the state @@ -1810,7 +1807,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfs_display_fhandle_hash(NFS_FH(inode)), atomic_read(&inode->i_count), fattr->valid); - if (!nfs_fileid_valid(nfsi, fattr)) { + /* No fileid? Just exit */ + if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) + return 0; + /* Has the inode gone and changed behind our back? */ + if (nfsi->fileid != fattr->fileid) { + /* Is this perhaps the mounted-on fileid? */ + if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) && + nfsi->fileid == fattr->mounted_on_fileid) + return 0; printk(KERN_ERR "NFS: server %s error: fileid changed\n" "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n", NFS_SERVER(inode)->nfs_client->cl_hostname, diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index a2346a2f8361..e64f810223be 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -775,3 +775,13 @@ static inline bool nfs_error_is_fatal(int err) } } +static inline bool nfs_error_is_fatal_on_server(int err) +{ + switch (err) { + case 0: + case -ERESTARTSYS: + case -EINTR: + return false; + } + return nfs_error_is_fatal(err); +} diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 96db471ca2e5..339663d04bf8 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp) if (IS_ERR(inode)) { err = PTR_ERR(inode); switch (err) { - case -EPERM: - case -EACCES: - case -EDQUOT: - case -ENOSPC: - case -EROFS: - goto out_put_ctx; default: + goto out_put_ctx; + case -ENOENT: + case -ESTALE: + case -EISDIR: + case -ENOTDIR: + case -ELOOP: goto out_drop; } } diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index ed4e1b07447b..20b3717cd7ca 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, } hdr->res.fattr = &hdr->fattr; - hdr->res.count = count; + hdr->res.count = 0; hdr->res.eof = 0; hdr->res.verf = &hdr->verf; nfs_fattr_init(&hdr->fattr); @@ -1251,20 +1251,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { - LIST_HEAD(failed); + LIST_HEAD(pages); desc->pg_io_completion = hdr->io_completion; desc->pg_dreq = hdr->dreq; - while (!list_empty(&hdr->pages)) { - struct nfs_page *req = nfs_list_entry(hdr->pages.next); + list_splice_init(&hdr->pages, &pages); + while (!list_empty(&pages)) { + struct nfs_page 
*req = nfs_list_entry(pages.next); if (!nfs_pageio_add_request(desc, req)) - nfs_list_move_request(req, &failed); + break; } nfs_pageio_complete(desc); - if (!list_empty(&failed)) { - list_move(&failed, &hdr->pages); - return desc->pg_error < 0 ? desc->pg_error : -EIO; + if (!list_empty(&pages)) { + int err = desc->pg_error < 0 ? desc->pg_error : -EIO; + hdr->completion_ops->error_cleanup(&pages, err); + nfs_set_pgio_error(hdr, err, hdr->io_start); + return err; } return 0; } diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index c0046c348910..82af4809b869 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -627,11 +627,16 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv, /* Add this address as an alias */ rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, rpc_clnt_test_and_add_xprt, NULL); - } else - clp = get_v3_ds_connect(mds_srv, - (struct sockaddr *)&da->da_addr, - da->da_addrlen, IPPROTO_TCP, - timeo, retrans); + continue; + } + clp = get_v3_ds_connect(mds_srv, + (struct sockaddr *)&da->da_addr, + da->da_addrlen, IPPROTO_TCP, + timeo, retrans); + if (IS_ERR(clp)) + continue; + clp->cl_rpcclient->cl_softerr = 0; + clp->cl_rpcclient->cl_softrtry = 0; } if (IS_ERR(clp)) { diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 5552fa8b6e12..0f7288b94633 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) /* Emulate the eof flag, which isn't normally needed in NFSv2 * as it is guaranteed to always return the file attributes */ - if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) + if ((hdr->res.count == 0 && hdr->args.count > 0) || + hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) hdr->res.eof = 1; } return 0; @@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task, static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) { - if (task->tk_status >= 0) + if (task->tk_status >= 0) { + hdr->res.count = hdr->args.count; nfs_writeback_update_inode(hdr); + } return 0; } diff --git a/fs/nfs/read.c b/fs/nfs/read.c index c19841c82b6a..cfe0b586eadd 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio) } EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); -static void nfs_readpage_release(struct nfs_page *req) +static void nfs_readpage_release(struct nfs_page *req, int error) { struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); + struct page *page = req->wb_page; dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id, (unsigned long long)NFS_FILEID(inode), req->wb_bytes, (long long)req_offset(req)); + if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT) + SetPageError(page); if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { - if (PageUptodate(req->wb_page)) - nfs_readpage_to_fscache(inode, req->wb_page, 0); + struct address_space *mapping = page_file_mapping(page); - unlock_page(req->wb_page); + if (PageUptodate(page)) + nfs_readpage_to_fscache(inode, page, 0); + else if (!PageError(page) && !PagePrivate(page)) + generic_error_remove_page(mapping, page); + unlock_page(page); } nfs_release_request(req); } @@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, &nfs_async_read_completion_ops); if (!nfs_pageio_add_request(&pgio, new)) { nfs_list_remove_request(new); - nfs_readpage_release(new); + nfs_readpage_release(new, pgio.pg_error); } nfs_pageio_complete(&pgio); @@ -153,6 +159,7 
@@ static void nfs_page_group_set_uptodate(struct nfs_page *req) static void nfs_read_completion(struct nfs_pgio_header *hdr) { unsigned long bytes = 0; + int error; if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) goto out; @@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr) zero_user_segment(page, start, end); } } + error = 0; bytes += req->wb_bytes; if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { if (bytes <= hdr->good_bytes) nfs_page_group_set_uptodate(req); + else { + error = hdr->error; + xchg(&nfs_req_openctx(req)->error, error); + } } else nfs_page_group_set_uptodate(req); nfs_list_remove_request(req); - nfs_readpage_release(req); + nfs_readpage_release(req, error); } out: hdr->release(hdr); @@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error) while (!list_empty(head)) { req = nfs_list_entry(head->next); nfs_list_remove_request(req); - nfs_readpage_release(req); + nfs_readpage_release(req, error); } } @@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page) goto out; } + xchg(&ctx->error, 0); error = nfs_readpage_async(ctx, inode, page); - + if (!error) { + error = wait_on_page_locked_killable(page); + if (!PageUptodate(page) && !error) + error = xchg(&ctx->error, 0); + } out: put_nfs_open_context(ctx); return error; @@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page) zero_user_segment(page, len, PAGE_SIZE); if (!nfs_pageio_add_request(desc->pgio, new)) { nfs_list_remove_request(new); - nfs_readpage_release(new); error = desc->pgio->pg_error; + nfs_readpage_release(new, error); goto out; } return 0; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 92d9cadc6102..85ca49549b39 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops; static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; static const struct nfs_commit_completion_ops nfs_commit_completion_ops; static const struct nfs_rw_ops nfs_rw_write_ops; +static void nfs_inode_remove_request(struct nfs_page *req); static void nfs_clear_request_commit(struct nfs_page *req); static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, struct inode *inode); @@ -591,23 +592,13 @@ release_request: static void nfs_write_error(struct nfs_page *req, int error) { + nfs_set_pageerror(page_file_mapping(req->wb_page)); nfs_mapping_set_error(req->wb_page, error); + nfs_inode_remove_request(req); nfs_end_page_writeback(req); nfs_release_request(req); } -static bool -nfs_error_is_fatal_on_server(int err) -{ - switch (err) { - case 0: - case -ERESTARTSYS: - case -EINTR: - return false; - } - return nfs_error_is_fatal(err); -} - /* * Find an associated nfs write request, and prepare to flush it out * May return an error if the user signalled nfs_wait_on_request(). 
@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err) static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, struct page *page) { - struct address_space *mapping; struct nfs_page *req; int ret = 0; @@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); /* If there is a fatal error that covers this write, just exit */ - ret = 0; - mapping = page_file_mapping(page); - if (test_bit(AS_ENOSPC, &mapping->flags) || - test_bit(AS_EIO, &mapping->flags)) + ret = pgio->pg_error; + if (nfs_error_is_fatal_on_server(ret)) goto out_launder; + ret = 0; if (!nfs_pageio_add_request(pgio, req)) { ret = pgio->pg_error; /* @@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, } else ret = -EAGAIN; nfs_redirty_request(req); + pgio->pg_error = 0; } else nfs_add_stats(page_file_mapping(page)->host, NFSIOS_WRITEPAGES, 1); @@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, ret = nfs_page_async_flush(pgio, page); if (ret == -EAGAIN) { redirty_page_for_writepage(wbc, page); - ret = 0; + ret = AOP_WRITEPAGE_ACTIVATE; } return ret; } @@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page, nfs_pageio_init_write(&pgio, inode, 0, false, &nfs_async_write_completion_ops); err = nfs_do_writepage(page, wbc, &pgio); + pgio.pg_error = 0; nfs_pageio_complete(&pgio); if (err < 0) return err; - if (pgio.pg_error < 0) + if (nfs_error_is_fatal(pgio.pg_error)) return pgio.pg_error; return 0; } @@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc) int ret; ret = nfs_writepage_locked(page, wbc); - unlock_page(page); + if (ret != AOP_WRITEPAGE_ACTIVATE) + unlock_page(page); return ret; } @@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control * int ret; ret = nfs_do_writepage(page, wbc, data); - unlock_page(page); + if (ret != AOP_WRITEPAGE_ACTIVATE) + unlock_page(page); return ret; } @@ -733,13 +726,14 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) &nfs_async_write_completion_ops); pgio.pg_io_completion = ioc; err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); + pgio.pg_error = 0; nfs_pageio_complete(&pgio); nfs_io_completion_put(ioc); if (err < 0) goto out_err; err = pgio.pg_error; - if (err < 0) + if (nfs_error_is_fatal(err)) goto out_err; return 0; out_err: diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 26ad75ae2be0..96352ab7bd81 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -571,7 +571,7 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data) */ static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v) { - struct nfsd_net *nn = v; + struct nfsd_net *nn = m->private; seq_printf(m, "max entries: %u\n", nn->max_drc_entries); seq_printf(m, "num entries: %u\n", diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 13c548733860..3cf4f6aa48d6 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1171,13 +1171,17 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode) return inode; } -static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl) { struct inode *inode; inode = nfsd_get_inode(dir->i_sb, mode); if (!inode) return -ENOMEM; + if (ncl) { + inode->i_private = ncl; + kref_get(&ncl->cl_ref); + } d_add(dentry, 
inode); inc_nlink(dir); fsnotify_mkdir(dir, dentry); @@ -1194,17 +1198,14 @@ static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *nc dentry = d_alloc_name(parent, name); if (!dentry) goto out_err; - ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600); + ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl); if (ret) goto out_err; - if (ncl) { - d_inode(dentry)->i_private = ncl; - kref_get(&ncl->cl_ref); - } out: inode_unlock(dir); return dentry; out_err: + dput(dentry); dentry = ERR_PTR(ret); goto out; } @@ -1214,11 +1215,9 @@ static void clear_ncl(struct inode *inode) struct nfsdfs_client *ncl = inode->i_private; inode->i_private = NULL; - synchronize_rcu(); kref_put(&ncl->cl_ref, ncl->cl_release); } - static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode) { struct nfsdfs_client *nc = inode->i_private; @@ -1232,9 +1231,9 @@ struct nfsdfs_client *get_nfsdfs_client(struct inode *inode) { struct nfsdfs_client *nc; - rcu_read_lock(); + inode_lock_shared(inode); nc = __get_nfsdfs_client(inode); - rcu_read_unlock(); + inode_unlock_shared(inode); return nc; } /* from __rpc_unlink */ diff --git a/fs/read_write.c b/fs/read_write.c index 1f5088dec566..5bbf587f5bc1 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1811,10 +1811,7 @@ static int generic_remap_check_len(struct inode *inode_in, return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL; } -/* - * Read a page's worth of file data into the page cache. Return the page - * locked. - */ +/* Read a page's worth of file data into the page cache. */ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) { struct page *page; @@ -1826,11 +1823,33 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) put_page(page); return ERR_PTR(-EIO); } - lock_page(page); return page; } /* + * Lock two pages, ensuring that we lock in offset order if the pages are from + * the same file. + */ +static void vfs_lock_two_pages(struct page *page1, struct page *page2) +{ + /* Always lock in order of increasing index. */ + if (page1->index > page2->index) + swap(page1, page2); + + lock_page(page1); + if (page1 != page2) + lock_page(page2); +} + +/* Unlock two pages, being careful not to unlock the same page twice. */ +static void vfs_unlock_two_pages(struct page *page1, struct page *page2) +{ + unlock_page(page1); + if (page1 != page2) + unlock_page(page2); +} + +/* * Compare extents of two files to see if they are the same. * Caller must have locked both inodes to prevent write races. */ @@ -1867,10 +1886,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, dest_page = vfs_dedupe_get_page(dest, destoff); if (IS_ERR(dest_page)) { error = PTR_ERR(dest_page); - unlock_page(src_page); put_page(src_page); goto out_error; } + + vfs_lock_two_pages(src_page, dest_page); + + /* + * Now that we've locked both pages, make sure they're still + * mapped to the file data we're interested in. If not, + * someone is invalidating pages on us and we lose. 
+ */ + if (!PageUptodate(src_page) || !PageUptodate(dest_page) || + src_page->mapping != src->i_mapping || + dest_page->mapping != dest->i_mapping) { + same = false; + goto unlock; + } + src_addr = kmap_atomic(src_page); dest_addr = kmap_atomic(dest_page); @@ -1882,8 +1915,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, kunmap_atomic(dest_addr); kunmap_atomic(src_addr); - unlock_page(dest_page); - unlock_page(src_page); +unlock: + vfs_unlock_two_pages(src_page, dest_page); put_page(dest_page); put_page(src_page); diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 80d7301ab76d..c0b84e960b20 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -51,7 +51,7 @@ static void shrink_liability(struct ubifs_info *c, int nr_to_write) { down_read(&c->vfs_sb->s_umount); - writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE); + writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE); up_read(&c->vfs_sb->s_umount); } diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index b52624e28fa1..3b4b4114f208 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -129,7 +129,6 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o) static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph) { if (orph->del) { - spin_unlock(&c->orphan_lock); dbg_gen("deleted twice ino %lu", orph->inum); return; } @@ -138,7 +137,6 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph) orph->del = 1; orph->dnext = c->orph_dnext; c->orph_dnext = orph; - spin_unlock(&c->orphan_lock); dbg_gen("delete later ino %lu", orph->inum); return; } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 2c0803b0ac3a..8c1d571334bc 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -609,6 +609,10 @@ static int init_constants_early(struct ubifs_info *c) c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; if (c->max_bu_buf_len > c->leb_size) c->max_bu_buf_len = c->leb_size; + + /* Log is ready, preserve one LEB for commits. */ + c->min_log_bytes = c->leb_size; + return 0; } diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index ccbdbd62f0d8..fe6d804a38dc 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) /* len == 0 means wake all */ struct userfaultfd_wake_range range = { .len = 0, }; unsigned long new_flags; + bool still_valid; WRITE_ONCE(ctx->released, true); @@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) * taking the mmap_sem for writing. 
*/ down_write(&mm->mmap_sem); - if (!mmget_still_valid(mm)) - goto skip_mm; + still_valid = mmget_still_valid(mm); prev = NULL; for (vma = mm->mmap; vma; vma = vma->vm_next) { cond_resched(); @@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file) continue; } new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); - prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, - new_flags, vma->anon_vma, - vma->vm_file, vma->vm_pgoff, - vma_policy(vma), - NULL_VM_UFFD_CTX); - if (prev) - vma = prev; - else - prev = vma; + if (still_valid) { + prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, + new_flags, vma->anon_vma, + vma->vm_file, vma->vm_pgoff, + vma_policy(vma), + NULL_VM_UFFD_CTX); + if (prev) + vma = prev; + else + prev = vma; + } vma->vm_flags = new_flags; vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; } -skip_mm: up_write(&mm->mmap_sem); mmput(mm); wakeup: diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c index 7fcf7569743f..7bd7534f5051 100644 --- a/fs/xfs/xfs_ioctl32.c +++ b/fs/xfs/xfs_ioctl32.c @@ -547,63 +547,12 @@ xfs_file_compat_ioctl( struct inode *inode = file_inode(filp); struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; - void __user *arg = (void __user *)p; + void __user *arg = compat_ptr(p); int error; trace_xfs_file_compat_ioctl(ip); switch (cmd) { - /* No size or alignment issues on any arch */ - case XFS_IOC_DIOINFO: - case XFS_IOC_FSGEOMETRY_V4: - case XFS_IOC_FSGEOMETRY: - case XFS_IOC_AG_GEOMETRY: - case XFS_IOC_FSGETXATTR: - case XFS_IOC_FSSETXATTR: - case XFS_IOC_FSGETXATTRA: - case XFS_IOC_FSSETDM: - case XFS_IOC_GETBMAP: - case XFS_IOC_GETBMAPA: - case XFS_IOC_GETBMAPX: - case XFS_IOC_FSCOUNTS: - case XFS_IOC_SET_RESBLKS: - case XFS_IOC_GET_RESBLKS: - case XFS_IOC_FSGROWFSLOG: - case XFS_IOC_GOINGDOWN: - case XFS_IOC_ERROR_INJECTION: - case XFS_IOC_ERROR_CLEARALL: - case FS_IOC_GETFSMAP: - case XFS_IOC_SCRUB_METADATA: - case XFS_IOC_BULKSTAT: - case XFS_IOC_INUMBERS: - return xfs_file_ioctl(filp, cmd, p); -#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32) - /* - * These are handled fine if no alignment issues. To support x32 - * which uses native 64-bit alignment we must emit these cases in - * addition to the ia-32 compat set below. - */ - case XFS_IOC_ALLOCSP: - case XFS_IOC_FREESP: - case XFS_IOC_RESVSP: - case XFS_IOC_UNRESVSP: - case XFS_IOC_ALLOCSP64: - case XFS_IOC_FREESP64: - case XFS_IOC_RESVSP64: - case XFS_IOC_UNRESVSP64: - case XFS_IOC_FSGEOMETRY_V1: - case XFS_IOC_FSGROWFSDATA: - case XFS_IOC_FSGROWFSRT: - case XFS_IOC_ZERO_RANGE: -#ifdef CONFIG_X86_X32 - /* - * x32 special: this gets a different cmd number from the ia-32 compat - * case below; the associated data will match native 64-bit alignment. 
- */ - case XFS_IOC_SWAPEXT: -#endif - return xfs_file_ioctl(filp, cmd, p); -#endif #if defined(BROKEN_X86_ALIGNMENT) case XFS_IOC_ALLOCSP_32: case XFS_IOC_FREESP_32: @@ -705,6 +654,7 @@ xfs_file_compat_ioctl( case XFS_IOC_FSSETDM_BY_HANDLE_32: return xfs_compat_fssetdm_by_handle(filp, arg); default: - return -ENOIOCTLCMD; + /* try the native version */ + return xfs_file_ioctl(filp, cmd, (unsigned long)arg); } } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index ff3c1fae5357..fe285d123d69 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -793,6 +793,7 @@ xfs_setattr_nonsize( out_cancel: xfs_trans_cancel(tp); + xfs_iunlock(ip, XFS_ILOCK_EXCL); out_dqrele: xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index 0c954cad7449..a339bd5fa260 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -32,7 +32,7 @@ xfs_break_leased_layouts( struct xfs_inode *ip = XFS_I(inode); int error; - while ((error = break_layout(inode, false) == -EWOULDBLOCK)) { + while ((error = break_layout(inode, false)) == -EWOULDBLOCK) { xfs_iunlock(ip, *iolock); *did_unlock = true; error = break_layout(inode, true); diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index c4ec7afd1170..edbe37b7f636 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1190,11 +1190,11 @@ xfs_reflink_remap_blocks( } /* - * Grab the exclusive iolock for a data copy from src to dest, making - * sure to abide vfs locking order (lowest pointer value goes first) and - * breaking the pnfs layout leases on dest before proceeding. The loop - * is needed because we cannot call the blocking break_layout() with the - * src iolock held, and therefore have to back out both locks. + * Grab the exclusive iolock for a data copy from src to dest, making sure to + * abide vfs locking order (lowest pointer value goes first) and breaking the + * layout leases before proceeding. The loop is needed because we cannot call + * the blocking break_layout() with the iolocks held, and therefore have to + * back out both locks. */ static int xfs_iolock_two_inodes_and_break_layout( @@ -1203,33 +1203,44 @@ xfs_iolock_two_inodes_and_break_layout( { int error; -retry: - if (src < dest) { - inode_lock_shared(src); - inode_lock_nested(dest, I_MUTEX_NONDIR2); - } else { - /* src >= dest */ - inode_lock(dest); - } + if (src > dest) + swap(src, dest); - error = break_layout(dest, false); - if (error == -EWOULDBLOCK) { - inode_unlock(dest); - if (src < dest) - inode_unlock_shared(src); +retry: + /* Wait to break both inodes' layouts before we start locking. */ + error = break_layout(src, true); + if (error) + return error; + if (src != dest) { error = break_layout(dest, true); if (error) return error; - goto retry; } + + /* Lock one inode and make sure nobody got in and leased it. */ + inode_lock(src); + error = break_layout(src, false); if (error) { + inode_unlock(src); + if (error == -EWOULDBLOCK) + goto retry; + return error; + } + + if (src == dest) + return 0; + + /* Lock the other inode and make sure nobody got in and leased it. 
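The one-character xfs_pnfs.c change above is an operator-precedence fix: in the old form the == binds inside the assignment, so error receives the 0/1 result of the comparison rather than the return value of break_layout(), and a real failure can be mistaken for success. A standalone illustration of the pitfall, with a stand-in function rather than the XFS code:

#include <errno.h>
#include <stdio.h>

/* Stand-in for a call that reports it would block. */
static int would_block(void) { return -EWOULDBLOCK; }

int main(void)
{
	int error;

	/* Buggy form: '==' evaluates first, so 'error' only ever holds 0 or 1. */
	error = (would_block() == -EWOULDBLOCK);
	printf("buggy: error = %d\n", error);

	/* Fixed form: assign first, then compare the stored value. */
	if ((error = would_block()) == -EWOULDBLOCK)
		printf("fixed: error = %d\n", error);

	return 0;
}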
*/ + inode_lock_nested(dest, I_MUTEX_NONDIR2); + error = break_layout(dest, false); + if (error) { + inode_unlock(src); inode_unlock(dest); - if (src < dest) - inode_unlock_shared(src); + if (error == -EWOULDBLOCK) + goto retry; return error; } - if (src > dest) - inode_lock_shared_nested(src, I_MUTEX_NONDIR2); + return 0; } @@ -1247,10 +1258,10 @@ xfs_reflink_remap_unlock( xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); if (!same_inode) - xfs_iunlock(src, XFS_MMAPLOCK_SHARED); + xfs_iunlock(src, XFS_MMAPLOCK_EXCL); inode_unlock(inode_out); if (!same_inode) - inode_unlock_shared(inode_in); + inode_unlock(inode_in); } /* @@ -1325,7 +1336,7 @@ xfs_reflink_remap_prep( if (same_inode) xfs_ilock(src, XFS_MMAPLOCK_EXCL); else - xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest, + xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest, XFS_MMAPLOCK_EXCL); /* Check file eligibility and prepare for block sharing. */ diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h index 5e58bb29b1a3..11cdc7c60480 100644 --- a/include/linux/ceph/buffer.h +++ b/include/linux/ceph/buffer.h @@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) static inline void ceph_buffer_put(struct ceph_buffer *b) { - kref_put(&b->kref, ceph_buffer_release); + if (b) + kref_put(&b->kref, ceph_buffer_release); } extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index c05d4e661489..03f8e98e3bcc 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages, static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) { - int node = dev ? 
dev_to_node(dev) : NUMA_NO_NODE; - size_t align = get_order(PAGE_ALIGN(size)); - - return alloc_pages_node(node, gfp, align); + return NULL; } static inline void dma_free_contiguous(struct device *dev, struct page *page, diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 40915b461f18..f757a58191a6 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -241,30 +241,6 @@ static inline int irq_to_gpio(unsigned irq) return -EINVAL; } -static inline int -gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, - unsigned int gpio_offset, unsigned int pin_offset, - unsigned int npins) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline int -gpiochip_add_pingroup_range(struct gpio_chip *chip, - struct pinctrl_dev *pctldev, - unsigned int gpio_offset, const char *pin_group) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline void -gpiochip_remove_pin_ranges(struct gpio_chip *chip) -{ - WARN_ON(1); -} - static inline int devm_gpio_request(struct device *dev, unsigned gpio, const char *label) { diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h index 7a6871ac8784..74c6f9241944 100644 --- a/include/linux/netfilter/nf_conntrack_h323_types.h +++ b/include/linux/netfilter/nf_conntrack_h323_types.h @@ -4,6 +4,9 @@ * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> */ +#ifndef _NF_CONNTRACK_H323_TYPES_H +#define _NF_CONNTRACK_H323_TYPES_H + typedef struct TransportAddress_ipAddress { /* SEQUENCE */ int options; /* No use */ unsigned int ip; @@ -931,3 +934,5 @@ typedef struct RasMessage { /* CHOICE */ InfoRequestResponse infoRequestResponse; }; } RasMessage; + +#endif /* _NF_CONNTRACK_H323_TYPES_H */ diff --git a/include/linux/signal.h b/include/linux/signal.h index b5d99482d3fe..1a5f88316b08 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -282,6 +282,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping); extern void exit_signals(struct task_struct *tsk); extern void kernel_sigaction(int, __sighandler_t); +#define SIG_KTHREAD ((__force __sighandler_t)2) +#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3) + static inline void allow_signal(int sig) { /* @@ -289,7 +292,17 @@ static inline void allow_signal(int sig) * know it'll be handled, so that they don't get converted to * SIGKILL or just silently dropped. */ - kernel_sigaction(sig, (__force __sighandler_t)2); + kernel_sigaction(sig, SIG_KTHREAD); +} + +static inline void allow_kernel_signal(int sig) +{ + /* + * Kernel threads handle their own signals. Let the signal code + * know signals sent by the kernel will be handled, so that they + * don't get silently dropped. 
+ */ + kernel_sigaction(sig, SIG_KTHREAD_KERNEL); } static inline void disallow_signal(int sig) diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index baa3ecdb882f..27536b961552 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -98,7 +98,6 @@ typedef void (*rpc_action)(struct rpc_task *); struct rpc_call_ops { void (*rpc_call_prepare)(struct rpc_task *, void *); - void (*rpc_call_prepare_transmit)(struct rpc_task *, void *); void (*rpc_call_done)(struct rpc_task *, void *); void (*rpc_count_stats)(struct rpc_task *, void *); void (*rpc_release)(void *); diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 7acb953298a7..84ff2844df2a 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -57,6 +57,7 @@ struct tk_read_base { * @cs_was_changed_seq: The sequence number of clocksource change events * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds + * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset * @cycle_interval: Number of clock cycles in one NTP interval * @xtime_interval: Number of clock shifted nano seconds in one NTP * interval. @@ -84,6 +85,9 @@ struct tk_read_base { * * wall_to_monotonic is no longer the boot time, getboottime must be * used instead. + * + * @monotonic_to_boottime is a timespec64 representation of @offs_boot to + * accelerate the VDSO update for CLOCK_BOOTTIME. */ struct timekeeper { struct tk_read_base tkr_mono; @@ -99,6 +103,7 @@ struct timekeeper { u8 cs_was_changed_seq; ktime_t next_leap_ktime; u64 raw_sec; + struct timespec64 monotonic_to_boot; /* The following members are for timekeeping internal use */ u64 cycle_interval; diff --git a/include/net/addrconf.h b/include/net/addrconf.h index becdad576859..3f62b347b04a 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb, unsigned int len) { if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len) - return -EINVAL; + return 0; return pskb_may_pull(skb, len); } diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index cb668bc2692d..ab40d7afdc54 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -52,7 +52,7 @@ struct bpf_prog; #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) struct net { - refcount_t passive; /* To decided when the network + refcount_t passive; /* To decide when the network * namespace should be freed. 
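The timekeeper_internal.h hunk above adds monotonic_to_boot, a cached timespec64 form of offs_boot that is refreshed only when the sleep offset changes (see the timekeeping.c and vsyscall.c hunks further down), so the frequently executed VDSO data update can build CLOCK_BOOTTIME by addition instead of a 64-bit division. A rough userspace sketch of the same split-once, add-often idea, with invented names:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

struct ts64 { int64_t tv_sec; long tv_nsec; };

static struct ts64 mono_to_boot;    /* cached split representation */

/* Slow path: runs only when the boot offset actually changes. */
static void update_boot_offset(int64_t offs_boot_ns)
{
	mono_to_boot.tv_sec  = offs_boot_ns / NSEC_PER_SEC;
	mono_to_boot.tv_nsec = offs_boot_ns % NSEC_PER_SEC;
}

/* Hot path: derive BOOTTIME from MONOTONIC by addition only. */
static void mono_to_boottime(int64_t mono_sec, long mono_nsec,
			     int64_t *boot_sec, long *boot_nsec)
{
	*boot_sec  = mono_sec + mono_to_boot.tv_sec;
	*boot_nsec = mono_nsec + mono_to_boot.tv_nsec;
	while (*boot_nsec >= NSEC_PER_SEC) {    /* normalize without dividing */
		*boot_nsec -= NSEC_PER_SEC;
		(*boot_sec)++;
	}
}

int main(void)
{
	int64_t sec; long nsec;

	update_boot_offset(3 * NSEC_PER_SEC + 900000000);
	mono_to_boottime(100, 200000000, &sec, &nsec);
	printf("boottime: %lld.%09ld\n", (long long)sec, nsec);
	return 0;
}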
*/ refcount_t count; /* To decided when the network diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 25f1f9a8419b..95f766c31c90 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -141,12 +141,6 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh) nh_grp = rcu_dereference_rtnl(nh->nh_grp); rc = nh_grp->num_nh; - } else { - const struct nh_info *nhi; - - nhi = rcu_dereference_rtnl(nh->nh_info); - if (nhi->reject_nh) - rc = 0; } return rc; diff --git a/include/net/route.h b/include/net/route.h index 630a0493f1f3..dfce19c9fa96 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -233,7 +233,7 @@ void rt_del_uncached_list(struct rtable *rt); int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, u32 table_id, struct fib_info *fi, - int *fa_index, int fa_start); + int *fa_index, int fa_start, unsigned int flags); static inline void ip_rt_put(struct rtable *rt) { diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index b0fc6b26bdf5..83df1ec6664e 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -105,8 +105,7 @@ struct rdma_restrack_entry { }; int rdma_restrack_count(struct ib_device *dev, - enum rdma_restrack_type type, - struct pid_namespace *ns); + enum rdma_restrack_type type); void rdma_restrack_kadd(struct rdma_restrack_entry *res); void rdma_restrack_uadd(struct rdma_restrack_entry *res); diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h index 50f49e043668..d1a93c73f006 100644 --- a/include/soc/arc/mcip.h +++ b/include/soc/arc/mcip.h @@ -46,7 +46,9 @@ struct mcip_cmd { #define CMD_IDU_ENABLE 0x71 #define CMD_IDU_DISABLE 0x72 #define CMD_IDU_SET_MODE 0x74 +#define CMD_IDU_READ_MODE 0x75 #define CMD_IDU_SET_DEST 0x76 +#define CMD_IDU_ACK_CIRQ 0x79 #define CMD_IDU_SET_MASK 0x7C #define IDU_M_TRIG_LEVEL 0x0 @@ -119,4 +121,13 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param, __mcip_cmd(cmd, param); } +/* + * Read MCIP register + */ +static inline unsigned int __mcip_cmd_read(unsigned int cmd, unsigned int param) +{ + __mcip_cmd(cmd, param); + return read_aux_reg(ARC_REG_MCIP_READBACK); +} + #endif diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h index a18b719f49d4..784ba0b9690a 100644 --- a/include/uapi/linux/jffs2.h +++ b/include/uapi/linux/jffs2.h @@ -77,11 +77,6 @@ #define JFFS2_ACL_VERSION 0x0001 -// Maybe later... 
-//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3) -//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4) - - #define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at mount time, don't wait for it to happen later */ diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h index 5c8a4d760ee3..b5123ab8d54a 100644 --- a/include/uapi/linux/netfilter/xt_nfacct.h +++ b/include/uapi/linux/netfilter/xt_nfacct.h @@ -11,4 +11,9 @@ struct xt_nfacct_match_info { struct nf_acct *nfacct; }; +struct xt_nfacct_match_info_v1 { + char name[NFACCT_NAME_MAX]; + struct nf_acct *nfacct __attribute__((aligned(8))); +}; + #endif /* _XT_NFACCT_MATCH_H */ diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index fd6b5f66e2c5..cba368e55863 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -250,6 +250,7 @@ struct rds_info_rdma_connection { __u32 rdma_mr_max; __u32 rdma_mr_size; __u8 tos; + __u8 sl; __u32 cache_allocs; }; @@ -265,6 +266,7 @@ struct rds6_info_rdma_connection { __u32 rdma_mr_max; __u32 rdma_mr_size; __u8 tos; + __u8 sl; __u32 cache_allocs; }; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 5d141f16f6fa..272071e9112f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1707,20 +1707,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) if (err) goto free_used_maps; - err = bpf_prog_new_fd(prog); - if (err < 0) { - /* failed to allocate fd. - * bpf_prog_put() is needed because the above - * bpf_prog_alloc_id() has published the prog - * to the userspace and the userspace may - * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID. - */ - bpf_prog_put(prog); - return err; - } - + /* Upon success of bpf_prog_alloc_id(), the BPF prog is + * effectively publicly exposed. However, retrieving via + * bpf_prog_get_fd_by_id() will take another reference, + * therefore it cannot be gone underneath us. + * + * Only for the time /after/ successful bpf_prog_new_fd() + * and before returning to userspace, we might just hold + * one reference and any parallel close on that fd could + * rip everything out. Hence, below notifications must + * happen before bpf_prog_new_fd(). + * + * Also, any failure handling from this point onwards must + * be using bpf_prog_put() given the program is exposed. + */ bpf_prog_kallsyms_add(prog); perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); + + err = bpf_prog_new_fd(prog); + if (err < 0) + bpf_prog_put(prog); return err; free_used_maps: diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 10c0ff93f52b..16d66bd7af09 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -985,9 +985,6 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg) reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; - - /* constant backtracking is enabled for root only for now */ - reg->precise = capable(CAP_SYS_ADMIN) ? false : true; } /* Mark a register as having a completely unknown (scalar) value. */ @@ -1014,7 +1011,11 @@ static void mark_reg_unknown(struct bpf_verifier_env *env, __mark_reg_not_init(regs + regno); return; } - __mark_reg_unknown(regs + regno); + regs += regno; + __mark_reg_unknown(regs); + /* constant backtracking is enabled for root without bpf2bpf calls */ + regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? 
+ true : false; } static void __mark_reg_not_init(struct bpf_reg_state *reg) diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index 2bd410f934b3..69cfb4345388 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages, */ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) { - int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; - size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT; - size_t align = get_order(PAGE_ALIGN(size)); + size_t count = size >> PAGE_SHIFT; struct page *page = NULL; struct cma *cma = NULL; @@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) /* CMA can be used only in the context which permits sleeping */ if (cma && gfpflags_allow_blocking(gfp)) { + size_t align = get_order(size); size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN); } - /* Fallback allocation of normal pages */ - if (!page) - page = alloc_pages_node(node, gfp, align); return page; } diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 795c9b095d75..706113c6bebc 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -85,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { + size_t alloc_size = PAGE_ALIGN(size); + int node = dev_to_node(dev); struct page *page = NULL; u64 phys_mask; @@ -95,8 +97,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, gfp &= ~__GFP_ZERO; gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, &phys_mask); + page = dma_alloc_contiguous(dev, alloc_size, gfp); + if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { + dma_free_contiguous(dev, page, alloc_size); + page = NULL; + } again: - page = dma_alloc_contiguous(dev, size, gfp); + if (!page) + page = alloc_pages_node(node, gfp, get_order(alloc_size)); if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { dma_free_contiguous(dev, page, size); page = NULL; diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 9484e88dabc2..9be995fc3c5a 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -295,6 +295,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc) } } +static void irq_sysfs_del(struct irq_desc *desc) +{ + /* + * If irq_sysfs_init() has not yet been invoked (early boot), then + * irq_kobj_base is NULL and the descriptor was never added. + * kobject_del() complains about a object with no parent, so make + * it conditional. + */ + if (irq_kobj_base) + kobject_del(&desc->kobj); +} + static int __init irq_sysfs_init(void) { struct irq_desc *desc; @@ -325,6 +337,7 @@ static struct kobj_type irq_kobj_type = { }; static void irq_sysfs_add(int irq, struct irq_desc *desc) {} +static void irq_sysfs_del(struct irq_desc *desc) {} #endif /* CONFIG_SYSFS */ @@ -438,7 +451,7 @@ static void free_desc(unsigned int irq) * The sysfs entry must be serialized against a concurrent * irq_sysfs_init() as well. 
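The kernel/bpf/syscall.c hunk above reorders bpf_prog_load() so that kallsyms registration and the perf notification happen before bpf_prog_new_fd(): once the fd exists, a concurrent close() may drop the reference the loader is still relying on. The general pattern is to finish every step that depends on your own reference before publishing a handle, and to unwind with a put rather than a direct free after publication. A schematic, non-kernel sketch of that ordering, with invented names:

#include <stdio.h>

struct object { int refcnt; };

static void obj_put(struct object *o)
{
	if (--o->refcnt == 0)
		printf("object freed\n");
}

/* After this returns, other threads can reach the object through the
 * handle and may drop the reference we transferred at any moment. */
static int publish_handle(struct object *o)
{
	(void)o;
	return 42;                      /* pretend fd/handle number */
}

static int create_and_publish(struct object *o)
{
	int handle;

	/* 1. Work that relies on our reference runs while we still
	 *    unambiguously own the object. */
	printf("register object %p with observers\n", (void *)o);

	/* 2. Only now hand out the handle; on failure the object may
	 *    already be visible elsewhere, so release it with a put. */
	handle = publish_handle(o);
	if (handle < 0) {
		obj_put(o);
		return handle;
	}
	return handle;
}

int main(void)
{
	struct object o = { .refcnt = 1 };
	printf("handle = %d\n", create_and_publish(&o));
	return 0;
}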
*/ - kobject_del(&desc->kobj); + irq_sysfs_del(desc); delete_irq_desc(irq); /* diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 9873fc627d61..d9770a5393c8 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); */ static void do_optimize_kprobes(void) { + lockdep_assert_held(&text_mutex); /* * The optimization/unoptimization refers online_cpus via * stop_machine() and cpu-hotplug modifies online_cpus. @@ -487,9 +488,7 @@ static void do_optimize_kprobes(void) list_empty(&optimizing_list)) return; - mutex_lock(&text_mutex); arch_optimize_kprobes(&optimizing_list); - mutex_unlock(&text_mutex); } /* @@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void) { struct optimized_kprobe *op, *tmp; + lockdep_assert_held(&text_mutex); /* See comment in do_optimize_kprobes() */ lockdep_assert_cpus_held(); @@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void) if (list_empty(&unoptimizing_list)) return; - mutex_lock(&text_mutex); arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); /* Loop free_list for disarming */ list_for_each_entry_safe(op, tmp, &freeing_list, list) { @@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void) } else list_del_init(&op->list); } - mutex_unlock(&text_mutex); } /* Reclaim all kprobes on the free_list */ @@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work) { mutex_lock(&kprobe_mutex); cpus_read_lock(); + mutex_lock(&text_mutex); /* Lock modules while optimizing kprobes */ mutex_lock(&module_mutex); @@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work) do_free_cleaned_kprobes(); mutex_unlock(&module_mutex); + mutex_unlock(&text_mutex); cpus_read_unlock(); mutex_unlock(&kprobe_mutex); diff --git a/kernel/module.c b/kernel/module.c index 5933395af9a0..9ee93421269c 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -65,9 +65,9 @@ /* * Modules' sections will be aligned on page boundaries * to ensure complete separation of code and data, but - * only when CONFIG_STRICT_MODULE_RWX=y + * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y */ -#ifdef CONFIG_STRICT_MODULE_RWX +#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX # define debug_align(X) ALIGN(X, PAGE_SIZE) #else # define debug_align(X) (X) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2b037f195473..010d578118d6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3904,7 +3904,7 @@ void __noreturn do_task_dead(void) static inline void sched_submit_work(struct task_struct *tsk) { - if (!tsk->state || tsk_is_pi_blocked(tsk)) + if (!tsk->state) return; /* @@ -3920,6 +3920,9 @@ static inline void sched_submit_work(struct task_struct *tsk) preempt_enable_no_resched(); } + if (tsk_is_pi_blocked(tsk)) + return; + /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 23fbbcc414d5..6e52b67b420e 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref) * deadlock while waiting for psi_poll_work to acquire trigger_lock */ if (kworker_to_destroy) { + /* + * After the RCU grace period has expired, the worker + * can no longer be found through group->poll_kworker. + * But it might have been already scheduled before + * that - deschedule it cleanly before destroying it. 
+ */ kthread_cancel_delayed_work_sync(&group->poll_work); + atomic_set(&group->poll_scheduled, 0); + kthread_destroy_worker(kworker_to_destroy); } kfree(t); diff --git a/kernel/signal.c b/kernel/signal.c index e667be6907d7..534fec266a33 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -90,6 +90,11 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force) handler == SIG_DFL && !(force && sig_kernel_only(sig))) return true; + /* Only allow kernel generated signals to this kthread */ + if (unlikely((t->flags & PF_KTHREAD) && + (handler == SIG_KTHREAD_KERNEL) && !force)) + return true; + return sig_handler_ignored(handler, sig); } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index d911c8470149..ca69290bee2a 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -146,6 +146,11 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm) static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) { tk->offs_boot = ktime_add(tk->offs_boot, delta); + /* + * Timespec representation for VDSO update to avoid 64bit division + * on every update. + */ + tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot); } /* diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c index 8cf3596a4ce6..4bc37ac3bb05 100644 --- a/kernel/time/vsyscall.c +++ b/kernel/time/vsyscall.c @@ -17,7 +17,7 @@ static inline void update_vdso_data(struct vdso_data *vdata, struct timekeeper *tk) { struct vdso_timestamp *vdso_ts; - u64 nsec; + u64 nsec, sec; vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last; vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; @@ -45,23 +45,27 @@ static inline void update_vdso_data(struct vdso_data *vdata, } vdso_ts->nsec = nsec; - /* CLOCK_MONOTONIC_RAW */ - vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; - vdso_ts->sec = tk->raw_sec; - vdso_ts->nsec = tk->tkr_raw.xtime_nsec; + /* Copy MONOTONIC time for BOOTTIME */ + sec = vdso_ts->sec; + /* Add the boot offset */ + sec += tk->monotonic_to_boot.tv_sec; + nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift; /* CLOCK_BOOTTIME */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME]; - vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; - nsec = tk->tkr_mono.xtime_nsec; - nsec += ((u64)(tk->wall_to_monotonic.tv_nsec + - ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift); + vdso_ts->sec = sec; + while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); vdso_ts->sec++; } vdso_ts->nsec = nsec; + /* CLOCK_MONOTONIC_RAW */ + vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; + vdso_ts->sec = tk->raw_sec; + vdso_ts->nsec = tk->tkr_raw.xtime_nsec; + /* CLOCK_TAI */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 738065f765ab..de1f15969e27 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -32,6 +32,7 @@ #include <linux/shmem_fs.h> #include <linux/oom.h> #include <linux/numa.h> +#include <linux/page_owner.h> #include <asm/tlb.h> #include <asm/pgalloc.h> @@ -2516,6 +2517,9 @@ static void __split_huge_page(struct page *page, struct list_head *list, } ClearPageCompound(head); + + split_page_owner(head, HPAGE_PMD_ORDER); + /* See comment in __split_huge_page_tail() */ if (PageAnon(head)) { /* Additional pin to swap cache */ diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 2277b82902d8..95d16a42db6b 100644 --- a/mm/kasan/common.c +++ 
b/mm/kasan/common.c @@ -407,8 +407,14 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte) if (IS_ENABLED(CONFIG_KASAN_GENERIC)) return shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE; - else - return tag != (u8)shadow_byte; + + /* else CONFIG_KASAN_SW_TAGS: */ + if ((u8)shadow_byte == KASAN_TAG_INVALID) + return true; + if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte)) + return true; + + return false; } static bool __kasan_slab_free(struct kmem_cache *cache, void *object, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6f5c0c517c49..26e2999af608 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3260,6 +3260,60 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, } } +static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) +{ + unsigned long stat[MEMCG_NR_STAT]; + struct mem_cgroup *mi; + int node, cpu, i; + + for (i = 0; i < MEMCG_NR_STAT; i++) + stat[i] = 0; + + for_each_online_cpu(cpu) + for (i = 0; i < MEMCG_NR_STAT; i++) + stat[i] += raw_cpu_read(memcg->vmstats_percpu->stat[i]); + + for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) + for (i = 0; i < MEMCG_NR_STAT; i++) + atomic_long_add(stat[i], &mi->vmstats[i]); + + for_each_node(node) { + struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; + struct mem_cgroup_per_node *pi; + + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + stat[i] = 0; + + for_each_online_cpu(cpu) + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + stat[i] += raw_cpu_read( + pn->lruvec_stat_cpu->count[i]); + + for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + atomic_long_add(stat[i], &pi->lruvec_stat[i]); + } +} + +static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) +{ + unsigned long events[NR_VM_EVENT_ITEMS]; + struct mem_cgroup *mi; + int cpu, i; + + for (i = 0; i < NR_VM_EVENT_ITEMS; i++) + events[i] = 0; + + for_each_online_cpu(cpu) + for (i = 0; i < NR_VM_EVENT_ITEMS; i++) + events[i] += raw_cpu_read( + memcg->vmstats_percpu->events[i]); + + for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) + for (i = 0; i < NR_VM_EVENT_ITEMS; i++) + atomic_long_add(events[i], &mi->vmevents[i]); +} + #ifdef CONFIG_MEMCG_KMEM static int memcg_online_kmem(struct mem_cgroup *memcg) { @@ -4682,6 +4736,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) { int node; + /* + * Flush percpu vmstats and vmevents to guarantee the value correctness + * on parent's and all ancestor levels. + */ + memcg_flush_percpu_vmstats(memcg); + memcg_flush_percpu_vmevents(memcg); for_each_node(node) free_mem_cgroup_per_node_info(memcg, node); free_percpu(memcg->vmstats_percpu); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 272c6de1bf4e..9c9194959271 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2238,27 +2238,12 @@ static int move_freepages(struct zone *zone, unsigned int order; int pages_moved = 0; -#ifndef CONFIG_HOLES_IN_ZONE - /* - * page_zone is not safe to call in this context when - * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant - * anyway as we check zone boundaries in move_freepages_block(). 
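The memcontrol.c hunk above flushes a dying cgroup's per-CPU counters into its own atomic totals and those of every ancestor before the structure is freed, so charged statistics are not lost when a child cgroup disappears. A condensed userspace model of folding per-CPU deltas into hierarchical atomic totals, with invented types standing in for the memcg structures:

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS  4
#define NR_STATS 2

struct group {
	struct group *parent;
	long percpu[NR_CPUS][NR_STATS];     /* per-CPU deltas */
	atomic_long totals[NR_STATS];       /* hierarchical totals */
};

/* Fold this group's per-CPU counters into its own totals and every
 * ancestor's totals, loosely mirroring memcg_flush_percpu_vmstats(). */
static void flush_percpu(struct group *g)
{
	long stat[NR_STATS] = { 0 };
	struct group *p;
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for (i = 0; i < NR_STATS; i++)
			stat[i] += g->percpu[cpu][i];

	for (p = g; p; p = p->parent)
		for (i = 0; i < NR_STATS; i++)
			atomic_fetch_add(&p->totals[i], stat[i]);
}

int main(void)
{
	struct group root = { 0 }, child = { .parent = &root };

	child.percpu[0][0] = 5;
	child.percpu[3][0] = 7;
	flush_percpu(&child);
	printf("root total: %ld\n", atomic_load(&root.totals[0]));
	return 0;
}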
- * Remove at a later date when no bug reports exist related to - * grouping pages by mobility - */ - VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) && - pfn_valid(page_to_pfn(end_page)) && - page_zone(start_page) != page_zone(end_page)); -#endif for (page = start_page; page <= end_page;) { if (!pfn_valid_within(page_to_pfn(page))) { page++; continue; } - /* Make sure we are not inadvertently changing nodes */ - VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); - if (!PageBuddy(page)) { /* * We assume that pages that could be isolated for @@ -2273,6 +2258,10 @@ static int move_freepages(struct zone *zone, continue; } + /* Make sure we are not inadvertently changing nodes */ + VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); + VM_BUG_ON_PAGE(page_zone(page) != zone, page); + order = page_order(page); move_to_free_area(page, &zone->free_area[order], migratetype); page += 1 << order; diff --git a/mm/z3fold.c b/mm/z3fold.c index ed19d98c9dcd..e31cd9bd4ed5 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -41,6 +41,7 @@ #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/wait.h> #include <linux/zpool.h> #include <linux/magic.h> @@ -145,6 +146,8 @@ struct z3fold_header { * @release_wq: workqueue for safe page release * @work: work_struct for safe page release * @inode: inode for z3fold pseudo filesystem + * @destroying: bool to stop migration once we start destruction + * @isolated: int to count the number of pages currently in isolation * * This structure is allocated at pool creation time and maintains metadata * pertaining to a particular z3fold pool. @@ -163,8 +166,11 @@ struct z3fold_pool { const struct zpool_ops *zpool_ops; struct workqueue_struct *compact_wq; struct workqueue_struct *release_wq; + struct wait_queue_head isolate_wait; struct work_struct work; struct inode *inode; + bool destroying; + int isolated; }; /* @@ -769,6 +775,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, goto out_c; spin_lock_init(&pool->lock); spin_lock_init(&pool->stale_lock); + init_waitqueue_head(&pool->isolate_wait); pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); if (!pool->unbuddied) goto out_pool; @@ -808,6 +815,15 @@ out: return NULL; } +static bool pool_isolated_are_drained(struct z3fold_pool *pool) +{ + bool ret; + + spin_lock(&pool->lock); + ret = pool->isolated == 0; + spin_unlock(&pool->lock); + return ret; +} /** * z3fold_destroy_pool() - destroys an existing z3fold pool * @pool: the z3fold pool to be destroyed @@ -817,6 +833,22 @@ out: static void z3fold_destroy_pool(struct z3fold_pool *pool) { kmem_cache_destroy(pool->c_handle); + /* + * We set pool-> destroying under lock to ensure that + * z3fold_page_isolate() sees any changes to destroying. This way we + * avoid the need for any memory barriers. + */ + + spin_lock(&pool->lock); + pool->destroying = true; + spin_unlock(&pool->lock); + + /* + * We need to ensure that no pages are being migrated while we destroy + * these workqueues, as migration can queue work on either of the + * workqueues. + */ + wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool)); /* * We need to destroy pool->compact_wq before pool->release_wq, @@ -1307,6 +1339,28 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool) return atomic64_read(&pool->pages_nr); } +/* + * z3fold_dec_isolated() expects to be called while pool->lock is held. 
+ */ +static void z3fold_dec_isolated(struct z3fold_pool *pool) +{ + assert_spin_locked(&pool->lock); + VM_BUG_ON(pool->isolated <= 0); + pool->isolated--; + + /* + * If we have no more isolated pages, we have to see if + * z3fold_destroy_pool() is waiting for a signal. + */ + if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait)) + wake_up_all(&pool->isolate_wait); +} + +static void z3fold_inc_isolated(struct z3fold_pool *pool) +{ + pool->isolated++; +} + static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) { struct z3fold_header *zhdr; @@ -1333,6 +1387,33 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) spin_lock(&pool->lock); if (!list_empty(&page->lru)) list_del(&page->lru); + /* + * We need to check for destruction while holding pool->lock, as + * otherwise destruction could see 0 isolated pages, and + * proceed. + */ + if (unlikely(pool->destroying)) { + spin_unlock(&pool->lock); + /* + * If this page isn't stale, somebody else holds a + * reference to it. Let't drop our refcount so that they + * can call the release logic. + */ + if (unlikely(kref_put(&zhdr->refcount, + release_z3fold_page_locked))) { + /* + * If we get here we have kref problems, so we + * should freak out. + */ + WARN(1, "Z3fold is experiencing kref problems\n"); + return false; + } + z3fold_page_unlock(zhdr); + return false; + } + + + z3fold_inc_isolated(pool); spin_unlock(&pool->lock); z3fold_page_unlock(zhdr); return true; @@ -1401,6 +1482,10 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); + spin_lock(&pool->lock); + z3fold_dec_isolated(pool); + spin_unlock(&pool->lock); + page_mapcount_reset(page); put_page(page); return 0; @@ -1420,10 +1505,14 @@ static void z3fold_page_putback(struct page *page) INIT_LIST_HEAD(&page->lru); if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { atomic64_dec(&pool->pages_nr); + spin_lock(&pool->lock); + z3fold_dec_isolated(pool); + spin_unlock(&pool->lock); return; } spin_lock(&pool->lock); list_add(&page->lru, &pool->lru); + z3fold_dec_isolated(pool); spin_unlock(&pool->lock); z3fold_page_unlock(zhdr); } diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 57fbb7ced69f..08def3a0d200 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -54,6 +54,7 @@ #include <linux/mount.h> #include <linux/pseudo_fs.h> #include <linux/migrate.h> +#include <linux/wait.h> #include <linux/pagemap.h> #include <linux/fs.h> @@ -268,6 +269,10 @@ struct zs_pool { #ifdef CONFIG_COMPACTION struct inode *inode; struct work_struct free_work; + /* A wait queue for when migration races with async_free_zspage() */ + struct wait_queue_head migration_wait; + atomic_long_t isolated_pages; + bool destroying; #endif }; @@ -1862,6 +1867,31 @@ static void dec_zspage_isolation(struct zspage *zspage) zspage->isolated--; } +static void putback_zspage_deferred(struct zs_pool *pool, + struct size_class *class, + struct zspage *zspage) +{ + enum fullness_group fg; + + fg = putback_zspage(class, zspage); + if (fg == ZS_EMPTY) + schedule_work(&pool->free_work); + +} + +static inline void zs_pool_dec_isolated(struct zs_pool *pool) +{ + VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0); + atomic_long_dec(&pool->isolated_pages); + /* + * There's no possibility of racing, since wait_for_isolated_drain() + * checks the isolated count under &class->lock after enqueuing + * on migration_wait. 
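The z3fold changes above, and the matching zsmalloc changes that follow, share one shutdown protocol: count pages currently isolated for migration, mark the pool as destroying before teardown starts, and make the destroy path sleep until the isolated count drains to zero so migration cannot queue work on a workqueue that is about to go away. A compact pthread model of that drain-before-destroy handshake, purely illustrative:

#include <pthread.h>
#include <stdbool.h>

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t  drained;
	int  isolated;          /* pages currently isolated for migration */
	bool destroying;        /* set once teardown has started */
};

/* Migration path: refuse new isolations once teardown has begun. */
static bool pool_isolate(struct pool *p)
{
	bool ok;

	pthread_mutex_lock(&p->lock);
	ok = !p->destroying;
	if (ok)
		p->isolated++;
	pthread_mutex_unlock(&p->lock);
	return ok;
}

/* Migration finished (or page put back): drop the count and wake a waiter. */
static void pool_unisolate(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	if (--p->isolated == 0 && p->destroying)
		pthread_cond_broadcast(&p->drained);
	pthread_mutex_unlock(&p->lock);
}

/* Teardown: set the flag first, then wait for in-flight isolations to drain. */
static void pool_destroy(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	p->destroying = true;
	while (p->isolated > 0)
		pthread_cond_wait(&p->drained, &p->lock);
	pthread_mutex_unlock(&p->lock);
	/* only now is it safe to destroy workqueues and free the pool */
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };

	if (pool_isolate(&p))
		pool_unisolate(&p);
	pool_destroy(&p);
	return 0;
}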
+ */ + if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying) + wake_up_all(&pool->migration_wait); +} + static void replace_sub_page(struct size_class *class, struct zspage *zspage, struct page *newpage, struct page *oldpage) { @@ -1931,6 +1961,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode) */ if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { get_zspage_mapping(zspage, &class_idx, &fullness); + atomic_long_inc(&pool->isolated_pages); remove_zspage(class, zspage, fullness); } @@ -2030,8 +2061,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage, * Page migration is done so let's putback isolated zspage to * the list if @page is final isolated subpage in the zspage. */ - if (!is_zspage_isolated(zspage)) - putback_zspage(class, zspage); + if (!is_zspage_isolated(zspage)) { + /* + * We cannot race with zs_destroy_pool() here because we wait + * for isolation to hit zero before we start destroying. + * Also, we ensure that everyone can see pool->destroying before + * we start waiting. + */ + putback_zspage_deferred(pool, class, zspage); + zs_pool_dec_isolated(pool); + } reset_page(page); put_page(page); @@ -2077,13 +2116,12 @@ static void zs_page_putback(struct page *page) spin_lock(&class->lock); dec_zspage_isolation(zspage); if (!is_zspage_isolated(zspage)) { - fg = putback_zspage(class, zspage); /* * Due to page_lock, we cannot free zspage immediately * so let's defer. */ - if (fg == ZS_EMPTY) - schedule_work(&pool->free_work); + putback_zspage_deferred(pool, class, zspage); + zs_pool_dec_isolated(pool); } spin_unlock(&class->lock); } @@ -2107,8 +2145,36 @@ static int zs_register_migration(struct zs_pool *pool) return 0; } +static bool pool_isolated_are_drained(struct zs_pool *pool) +{ + return atomic_long_read(&pool->isolated_pages) == 0; +} + +/* Function for resolving migration */ +static void wait_for_isolated_drain(struct zs_pool *pool) +{ + + /* + * We're in the process of destroying the pool, so there are no + * active allocations. zs_page_isolate() fails for completely free + * zspages, so we need only wait for the zs_pool's isolated + * count to hit zero. + */ + wait_event(pool->migration_wait, + pool_isolated_are_drained(pool)); +} + static void zs_unregister_migration(struct zs_pool *pool) { + pool->destroying = true; + /* + * We need a memory barrier here to ensure global visibility of + * pool->destroying. Thus pool->isolated pages will either be 0 in which + * case we don't care, or it will be > 0 and pool->destroying will + * ensure that we wake up once isolation hits 0. + */ + smp_mb(); + wait_for_isolated_drain(pool); /* This can block */ flush_work(&pool->free_work); iput(pool->inode); } @@ -2346,6 +2412,8 @@ struct zs_pool *zs_create_pool(const char *name) if (!pool->name) goto err; + init_waitqueue_head(&pool->migration_wait); + if (create_cache(pool)) goto err; diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 6f08fd122a8d..7e052d6f759b 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype) { struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype); - return attr ? nla_get_u32(attr) : 0; + return (attr && nla_len(attr) == sizeof(u32)) ? 
nla_get_u32(attr) : 0; } /** diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index c8177a89f52c..4096d8a74a2b 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb, return NF_DROP; } - ADD_COUNTER(*(counter_base + i), 1, skb->len); + ADD_COUNTER(*(counter_base + i), skb->len, 1); /* these should only watch: not modify, nor tell us * what to do with the packet @@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters, continue; counter_base = COUNTER_BASE(oldcounters, nentries, cpu); for (i = 0; i < nentries; i++) - ADD_COUNTER(counters[i], counter_base[i].pcnt, - counter_base[i].bcnt); + ADD_COUNTER(counters[i], counter_base[i].bcnt, + counter_base[i].pcnt); } } @@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name, /* we add to the counters of the first cpu */ for (i = 0; i < num_counters; i++) - ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt); + ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt); write_unlock_bh(&t->lock); ret = 0; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 0b2df09b2554..78ae6e8c953d 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1496,7 +1496,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc, struct ceph_osds up, acting; bool force_resend = false; bool unpaused = false; - bool legacy_change; + bool legacy_change = false; bool split = false; bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); bool recovery_deletes = ceph_osdmap_flag(osdc, @@ -1584,15 +1584,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc, t->osd = acting.primary; } - if (unpaused || legacy_change || force_resend || - (split && con && CEPH_HAVE_FEATURE(con->peer_features, - RESEND_ON_SPLIT))) + if (unpaused || legacy_change || force_resend || split) ct_res = CALC_TARGET_NEED_RESEND; else ct_res = CALC_TARGET_NO_ACTION; out: - dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd); + dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused, + legacy_change, force_resend, split, ct_res, t->osd); return ct_res; } diff --git a/net/core/filter.c b/net/core/filter.c index 0c1059cdad3d..b91988f8b94e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8841,13 +8841,13 @@ sk_reuseport_is_valid_access(int off, int size, return size == size_default; /* Fields that allow narrowing */ - case offsetof(struct sk_reuseport_md, eth_protocol): + case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): if (size < FIELD_SIZEOF(struct sk_buff, protocol)) return false; /* fall through */ - case offsetof(struct sk_reuseport_md, ip_protocol): - case offsetof(struct sk_reuseport_md, bind_inany): - case offsetof(struct sk_reuseport_md, len): + case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): + case bpf_ctx_range(struct sk_reuseport_md, bind_inany): + case bpf_ctx_range(struct sk_reuseport_md, len): bpf_ctx_record_field_size(info, size_default); return bpf_ctx_narrow_access_ok(off, size, size_default); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 9741b593ea53..7c09d87d3269 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -142,8 +142,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) mutex_unlock(&flow_dissector_mutex); return -ENOENT; } - bpf_prog_put(attached); 
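The ebtables hunk above is an argument-order fix: ADD_COUNTER() takes bytes before packets, and three call sites passed them the other way round, so per-rule packet and byte counts were swapped. A tiny reproduction of the mix-up, with a local macro modelled on the xtables helper:

#include <stdio.h>

struct counter { unsigned long pcnt, bcnt; };   /* packets, bytes */

/* Same shape as the xtables helper: bytes first, then packets. */
#define ADD_COUNTER(c, b, p) do { (c).bcnt += (b); (c).pcnt += (p); } while (0)

int main(void)
{
	struct counter good = { 0, 0 }, bad = { 0, 0 };
	unsigned long skb_len = 1500;

	ADD_COUNTER(good, skb_len, 1);  /* fixed call: 1500 bytes, 1 packet */
	ADD_COUNTER(bad, 1, skb_len);   /* old call: counts end up swapped */

	printf("good: %lu pkts, %lu bytes\n", good.pcnt, good.bcnt);
	printf("bad:  %lu pkts, %lu bytes\n", bad.pcnt, bad.bcnt);
	return 0;
}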
RCU_INIT_POINTER(net->flow_dissector_prog, NULL); + bpf_prog_put(attached); mutex_unlock(&flow_dissector_mutex); return 0; } diff --git a/net/core/sock.c b/net/core/sock.c index 6d08553f885c..545fac19a711 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3287,16 +3287,17 @@ static __init int net_inuse_init(void) core_initcall(net_inuse_init); -static void assign_proto_idx(struct proto *prot) +static int assign_proto_idx(struct proto *prot) { prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { pr_err("PROTO_INUSE_NR exhausted\n"); - return; + return -ENOSPC; } set_bit(prot->inuse_idx, proto_inuse_idx); + return 0; } static void release_proto_idx(struct proto *prot) @@ -3305,8 +3306,9 @@ static void release_proto_idx(struct proto *prot) clear_bit(prot->inuse_idx, proto_inuse_idx); } #else -static inline void assign_proto_idx(struct proto *prot) +static inline int assign_proto_idx(struct proto *prot) { + return 0; } static inline void release_proto_idx(struct proto *prot) @@ -3355,6 +3357,8 @@ static int req_prot_init(const struct proto *prot) int proto_register(struct proto *prot, int alloc_slab) { + int ret = -ENOBUFS; + if (alloc_slab) { prot->slab = kmem_cache_create_usercopy(prot->name, prot->obj_size, 0, @@ -3391,20 +3395,27 @@ int proto_register(struct proto *prot, int alloc_slab) } mutex_lock(&proto_list_mutex); + ret = assign_proto_idx(prot); + if (ret) { + mutex_unlock(&proto_list_mutex); + goto out_free_timewait_sock_slab_name; + } list_add(&prot->node, &proto_list); - assign_proto_idx(prot); mutex_unlock(&proto_list_mutex); - return 0; + return ret; out_free_timewait_sock_slab_name: - kfree(prot->twsk_prot->twsk_slab_name); + if (alloc_slab && prot->twsk_prot) + kfree(prot->twsk_prot->twsk_slab_name); out_free_request_sock_slab: - req_prot_cleanup(prot->rsk_prot); + if (alloc_slab) { + req_prot_cleanup(prot->rsk_prot); - kmem_cache_destroy(prot->slab); - prot->slab = NULL; + kmem_cache_destroy(prot->slab); + prot->slab = NULL; + } out: - return -ENOBUFS; + return ret; } EXPORT_SYMBOL(proto_register); diff --git a/net/core/stream.c b/net/core/stream.c index e94bb02a5629..4f1d4aa5fb38 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) int err = 0; long vm_wait = 0; long current_timeo = *timeo_p; - bool noblock = (*timeo_p ? false : true); DEFINE_WAIT_FUNC(wait, woken_wake_function); if (sk_stream_memory_free(sk)) @@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto do_error; - if (!*timeo_p) { - if (noblock) - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - goto do_nonblock; - } + if (!*timeo_p) + goto do_eagain; if (signal_pending(current)) goto do_interrupted; sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); @@ -169,7 +165,13 @@ out: do_error: err = -EPIPE; goto out; -do_nonblock: +do_eagain: + /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can + * be generated later. + * When TCP receives ACK packets that make room, tcp_check_space() + * only calls tcp_new_space() if SOCK_NOSPACE is set. 
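The sk_stream_wait_memory() change in the net/core/stream.c hunk makes sure SOCK_NOSPACE is set whenever the function bails out with EAGAIN, because tcp_check_space() only generates a later EPOLLOUT when that bit is armed; returning EAGAIN without it can strand a non-blocking writer waiting for a wakeup that never arrives. A schematic userspace model of the rule "arm the wakeup before failing with EAGAIN", with stand-in names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct sock_model {
	bool nospace;           /* stands in for SOCK_NOSPACE */
	int  free_space;
};

/* Writer side: if we cannot make progress now, arm the wakeup first. */
static int try_send(struct sock_model *sk, int len)
{
	if (sk->free_space < len) {
		sk->nospace = true;     /* must precede the EAGAIN return */
		return -EAGAIN;
	}
	sk->free_space -= len;
	return len;
}

/* Space-freed side: only notifies pollers when the flag was armed. */
static void check_space(struct sock_model *sk)
{
	if (sk->nospace) {
		sk->nospace = false;
		printf("EPOLLOUT generated\n");
	}
}

int main(void)
{
	struct sock_model sk = { .free_space = 10 };

	if (try_send(&sk, 100) == -EAGAIN)
		printf("writer sees EAGAIN\n");
	sk.free_space = 200;    /* room freed, e.g. by an ACK */
	check_space(&sk);       /* wakeup happens because the flag was set */
	return 0;
}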
+ */ + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); err = -EAGAIN; goto out; do_interrupted: diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index dacbd58e1799..badc5cfe4dc6 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -1092,7 +1092,7 @@ static struct packet_type ieee802154_packet_type = { static int __init af_ieee802154_init(void) { - int rc = -EINVAL; + int rc; rc = proto_register(&ieee802154_raw_prot, 1); if (rc) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 2b2b3d291ab0..1ab2fb6bb37d 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -2145,7 +2145,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, if (filter->dump_exceptions) { err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi, - &i_fa, s_fa); + &i_fa, s_fa, flags); if (err < 0) goto stop; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 1510e951f451..4298aae74e0e 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -582,7 +582,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, if (!rt) goto out; - net = dev_net(rt->dst.dev); + + if (rt->dst.dev) + net = dev_net(rt->dst.dev); + else if (skb_in->dev) + net = dev_net(skb_in->dev); + else + goto out; /* * Find the original header. It is expected to be valid, of course. @@ -902,7 +908,7 @@ static bool icmp_redirect(struct sk_buff *skb) return false; } - icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway); + icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway)); return true; } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 180f6896b98b..480d0b22db1a 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1475,7 +1475,7 @@ EXPORT_SYMBOL(__ip_mc_inc_group); void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) { - __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE); + __ip_mc_inc_group(in_dev, addr, GFP_KERNEL); } EXPORT_SYMBOL(ip_mc_inc_group); @@ -2197,7 +2197,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr, iml->sflist = NULL; iml->sfmode = mode; rcu_assign_pointer(inet->mc_list, iml); - __ip_mc_inc_group(in_dev, addr, mode); + ____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL); err = 0; done: return err; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 517300d587a7..b6a6f18c3dd1 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2728,7 +2728,8 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow); /* called with rcu_read_lock held */ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, struct rtable *rt, u32 table_id, struct flowi4 *fl4, - struct sk_buff *skb, u32 portid, u32 seq) + struct sk_buff *skb, u32 portid, u32 seq, + unsigned int flags) { struct rtmsg *r; struct nlmsghdr *nlh; @@ -2736,7 +2737,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 error; u32 metrics[RTAX_MAX]; - nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0); + nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags); if (!nlh) return -EMSGSIZE; @@ -2860,7 +2861,7 @@ nla_put_failure: static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, u32 table_id, struct fnhe_hash_bucket *bucket, int genid, - int *fa_index, int fa_start) + int *fa_index, int fa_start, unsigned int flags) { int i; @@ -2891,7 +2892,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb, err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt, table_id, NULL, skb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq); + cb->nlh->nlmsg_seq, flags); if (err) return err; next: @@ -2904,7 
+2905,7 @@ next: int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, u32 table_id, struct fib_info *fi, - int *fa_index, int fa_start) + int *fa_index, int fa_start, unsigned int flags) { struct net *net = sock_net(cb->skb->sk); int nhsel, genid = fnhe_genid(net); @@ -2922,7 +2923,8 @@ int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, err = 0; if (bucket) err = fnhe_dump_bucket(net, skb, cb, table_id, bucket, - genid, fa_index, fa_start); + genid, fa_index, fa_start, + flags); rcu_read_unlock(); if (err) return err; @@ -3183,7 +3185,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, fl4.flowi4_tos, res.fi, 0); } else { err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb, - NETLINK_CB(in_skb).portid, nlh->nlmsg_seq); + NETLINK_CB(in_skb).portid, + nlh->nlmsg_seq, 0); } if (err < 0) goto errout_rcu; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index dc73888c7859..6a576ff92c39 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -478,7 +478,7 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev) if (!idev) { idev = ipv6_add_dev(dev); if (IS_ERR(idev)) - return NULL; + return idev; } if (dev->flags&IFF_UP) @@ -1045,7 +1045,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg, int err = 0; if (addr_type == IPV6_ADDR_ANY || - addr_type & IPV6_ADDR_MULTICAST || + (addr_type & IPV6_ADDR_MULTICAST && + !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) || (!(idev->dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(idev->dev) && addr_type & IPV6_ADDR_LOOPBACK)) @@ -2465,8 +2466,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) ASSERT_RTNL(); idev = ipv6_find_idev(dev); - if (!idev) - return ERR_PTR(-ENOBUFS); + if (IS_ERR(idev)) + return idev; if (idev->cnf.disable_ipv6) return ERR_PTR(-EACCES); @@ -3158,7 +3159,7 @@ static void init_loopback(struct net_device *dev) ASSERT_RTNL(); idev = ipv6_find_idev(dev); - if (!idev) { + if (IS_ERR(idev)) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -3373,7 +3374,7 @@ static void addrconf_sit_config(struct net_device *dev) */ idev = ipv6_find_idev(dev); - if (!idev) { + if (IS_ERR(idev)) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -3398,7 +3399,7 @@ static void addrconf_gre_config(struct net_device *dev) ASSERT_RTNL(); idev = ipv6_find_idev(dev); - if (!idev) { + if (IS_ERR(idev)) { pr_debug("%s: add_dev failed\n", __func__); return; } @@ -4772,8 +4773,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC; idev = ipv6_find_idev(dev); - if (!idev) - return -ENOBUFS; + if (IS_ERR(idev)) + return PTR_ERR(idev); if (!ipv6_allow_optimistic_dad(net, idev)) cfg.ifa_flags &= ~IFA_F_OPTIMISTIC; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index ed56b0c6fe19..7c6edb7c5f10 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1549,6 +1549,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, if (!is_valid_ether_addr(mac)) return -EINVAL; + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) && + sdata->vif.type == NL80211_IFTYPE_STATION && + !sdata->u.mgd.associated) + return -EINVAL; + sta = sta_info_alloc(sdata, mac, GFP_KERNEL); if (!sta) return -ENOMEM; @@ -1556,10 +1561,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) sta->sta.tdls = true; - if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION && - 
!sdata->u.mgd.associated) - return -EINVAL; - err = sta_apply_parameters(local, sta, params); if (err) { sta_info_free(local, sta); diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index d25e91d7bdc1..44b675016393 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -133,12 +133,12 @@ static int mpls_xmit(struct sk_buff *skb) mpls_stats_inc_outucastpkts(out_dev, skb); if (rt) { - if (rt->rt_gw_family == AF_INET) - err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4, - skb); - else if (rt->rt_gw_family == AF_INET6) + if (rt->rt_gw_family == AF_INET6) err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6, skb); + else + err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4, + skb); } else if (rt6) { if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) { /* 6PE (RFC 4798) */ diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c index 5c3fad8cba57..0187e65176c0 100644 --- a/net/ncsi/ncsi-cmd.c +++ b/net/ncsi/ncsi-cmd.c @@ -54,7 +54,7 @@ static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h, checksum = ncsi_calculate_checksum((unsigned char *)h, sizeof(*h) + nca->payload); pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) + - nca->payload); + ALIGN(nca->payload, 4)); *pchecksum = htonl(checksum); } @@ -309,14 +309,21 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca) int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca) { + struct ncsi_cmd_handler *nch = NULL; struct ncsi_request *nr; + unsigned char type; struct ethhdr *eh; - struct ncsi_cmd_handler *nch = NULL; int i, ret; + /* Use OEM generic handler for Netlink request */ + if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) + type = NCSI_PKT_CMD_OEM; + else + type = nca->type; + /* Search for the handler */ for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) { - if (ncsi_cmd_handlers[i].type == nca->type) { + if (ncsi_cmd_handlers[i].type == type) { if (ncsi_cmd_handlers[i].handler) nch = &ncsi_cmd_handlers[i]; else diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 5254004f2b42..dacabff9c467 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -47,7 +47,8 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr, if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED || ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) { netdev_dbg(nr->ndp->ndev.dev, - "NCSI: non zero response/reason code\n"); + "NCSI: non zero response/reason code %04xh, %04xh\n", + ntohs(h->code), ntohs(h->reason)); return -EPERM; } @@ -55,7 +56,7 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr, * sender doesn't support checksum according to NCSI * specification. 
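The ncsi-cmd.c hunk above (and the matching ncsi-rsp.c change just below) accounts for payload padding: the packet checksum sits after the payload rounded up to a 4-byte boundary, so both the command builder and the response validator now locate it with ALIGN(payload, 4) instead of the raw payload length. A standalone demo of that rounding, with a local copy of the power-of-two align-up computation:

#include <stdio.h>

/* Same rounding the kernel's ALIGN() performs for power-of-two boundaries. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int payload;

	for (payload = 20; payload <= 26; payload += 3)
		printf("payload %u -> checksum at offset %u (unaligned: %u)\n",
		       payload, ALIGN_UP(payload, 4u), payload);
	return 0;
}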
*/ - pchecksum = (__be32 *)((void *)(h + 1) + payload - 4); + pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4); if (ntohl(*pchecksum) == 0) return 0; @@ -63,7 +64,9 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr, sizeof(*h) + payload - 4); if (*pchecksum != htonl(checksum)) { - netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n"); + netdev_dbg(nr->ndp->ndev.dev, + "NCSI: checksum mismatched; recd: %08x calc: %08x\n", + *pchecksum, htonl(checksum)); return -EINVAL; } diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 060a4ed46d5e..01705ad74a9a 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -149,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx, return nft_chain_validate_hooks(ctx->chain, hook_mask); } +static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = { + [NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING, + .len = NFT_NAME_MAXLEN - 1 }, +}; + static int nft_flow_offload_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -207,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = { static struct nft_expr_type nft_flow_offload_type __read_mostly = { .name = "flow_offload", .ops = &nft_flow_offload_ops, + .policy = nft_flow_offload_policy, .maxattr = NFTA_FLOW_MAX, .owner = THIS_MODULE, }; diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c index d0ab1adf5bff..5aab6df74e0f 100644 --- a/net/netfilter/xt_nfacct.c +++ b/net/netfilter/xt_nfacct.c @@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par) nfnl_acct_put(info->nfacct); } -static struct xt_match nfacct_mt_reg __read_mostly = { - .name = "nfacct", - .family = NFPROTO_UNSPEC, - .checkentry = nfacct_mt_checkentry, - .match = nfacct_mt, - .destroy = nfacct_mt_destroy, - .matchsize = sizeof(struct xt_nfacct_match_info), - .usersize = offsetof(struct xt_nfacct_match_info, nfacct), - .me = THIS_MODULE, +static struct xt_match nfacct_mt_reg[] __read_mostly = { + { + .name = "nfacct", + .revision = 0, + .family = NFPROTO_UNSPEC, + .checkentry = nfacct_mt_checkentry, + .match = nfacct_mt, + .destroy = nfacct_mt_destroy, + .matchsize = sizeof(struct xt_nfacct_match_info), + .usersize = offsetof(struct xt_nfacct_match_info, nfacct), + .me = THIS_MODULE, + }, + { + .name = "nfacct", + .revision = 1, + .family = NFPROTO_UNSPEC, + .checkentry = nfacct_mt_checkentry, + .match = nfacct_mt, + .destroy = nfacct_mt_destroy, + .matchsize = sizeof(struct xt_nfacct_match_info_v1), + .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct), + .me = THIS_MODULE, + }, }; static int __init nfacct_mt_init(void) { - return xt_register_match(&nfacct_mt_reg); + return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg)); } static void __exit nfacct_mt_exit(void) { - xt_unregister_match(&nfacct_mt_reg); + xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg)); } module_init(nfacct_mt_init); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 848c6eb55064..d8da6477d6be 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -67,6 +67,7 @@ struct ovs_conntrack_info { struct md_mark mark; struct md_labels labels; char timeout[CTNL_TIMEOUT_NAME_MAX]; + struct nf_ct_timeout *nf_ct_timeout; #if IS_ENABLED(CONFIG_NF_NAT) struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. 
*/ #endif @@ -697,6 +698,14 @@ static bool skb_nfct_cached(struct net *net, if (help && rcu_access_pointer(help->helper) != info->helper) return false; } + if (info->nf_ct_timeout) { + struct nf_conn_timeout *timeout_ext; + + timeout_ext = nf_ct_timeout_find(ct); + if (!timeout_ext || info->nf_ct_timeout != + rcu_dereference(timeout_ext->timeout)) + return false; + } /* Force conntrack entry direction to the current packet? */ if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) { /* Delete the conntrack entry if confirmed, else just release @@ -1565,7 +1574,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, case OVS_CT_ATTR_TIMEOUT: memcpy(info->timeout, nla_data(a), nla_len(a)); if (!memchr(info->timeout, '\0', nla_len(a))) { - OVS_NLERR(log, "Invalid conntrack helper"); + OVS_NLERR(log, "Invalid conntrack timeout"); return -EINVAL; } break; @@ -1657,6 +1666,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr, ct_info.timeout)) pr_info_ratelimited("Failed to associated timeout " "policy `%s'\n", ct_info.timeout); + else + ct_info.nf_ct_timeout = rcu_dereference( + nf_ct_timeout_find(ct_info.ct)->timeout); + } if (helper) { diff --git a/net/rds/ib.c b/net/rds/ib.c index ec05d91aa9a2..45acab2de0cf 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -291,7 +291,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, void *buffer) { struct rds_info_rdma_connection *iinfo = buffer; - struct rds_ib_connection *ic; + struct rds_ib_connection *ic = conn->c_transport_data; /* We will only ever look at IB transports */ if (conn->c_trans != &rds_ib_transport) @@ -301,15 +301,16 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, iinfo->src_addr = conn->c_laddr.s6_addr32[3]; iinfo->dst_addr = conn->c_faddr.s6_addr32[3]; - iinfo->tos = conn->c_tos; + if (ic) { + iinfo->tos = conn->c_tos; + iinfo->sl = ic->i_sl; + } memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); if (rds_conn_state(conn) == RDS_CONN_UP) { struct rds_ib_device *rds_ibdev; - ic = conn->c_transport_data; - rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid, (union ib_gid *)&iinfo->dst_gid); @@ -329,7 +330,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, void *buffer) { struct rds6_info_rdma_connection *iinfo6 = buffer; - struct rds_ib_connection *ic; + struct rds_ib_connection *ic = conn->c_transport_data; /* We will only ever look at IB transports */ if (conn->c_trans != &rds_ib_transport) @@ -337,6 +338,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, iinfo6->src_addr = conn->c_laddr; iinfo6->dst_addr = conn->c_faddr; + if (ic) { + iinfo6->tos = conn->c_tos; + iinfo6->sl = ic->i_sl; + } memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid)); memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid)); @@ -344,7 +349,6 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, if (rds_conn_state(conn) == RDS_CONN_UP) { struct rds_ib_device *rds_ibdev; - ic = conn->c_transport_data; rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, (union ib_gid *)&iinfo6->dst_gid); rds_ibdev = ic->rds_ibdev; diff --git a/net/rds/ib.h b/net/rds/ib.h index 303c6ee8bdb7..f2b558e8b5ea 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -220,6 +220,7 @@ struct rds_ib_connection { /* Send/Recv vectors */ int i_scq_vector; int i_rcq_vector; + u8 i_sl; }; /* This assumes that atomic_t is at least 32 bits */ diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c 
index fddaa09f7b0d..233f1368162b 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -152,6 +152,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even RDS_PROTOCOL_MINOR(conn->c_version), ic->i_flowctl ? ", flow control" : ""); + /* receive sl from the peer */ + ic->i_sl = ic->i_cm_id->route.path_rec->sl; + atomic_set(&ic->i_cq_quiesce, 0); /* Init rings and fill recv. this needs to wait until protocol diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 9986d6065c4d..5f741e51b4ba 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -43,6 +43,9 @@ static struct rdma_cm_id *rds_rdma_listen_id; static struct rdma_cm_id *rds6_rdma_listen_id; #endif +/* Per IB specification 7.7.3, service level is a 4-bit field. */ +#define TOS_TO_SL(tos) ((tos) & 0xF) + static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, struct rdma_cm_event *event, bool isv6) @@ -97,10 +100,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, struct rds_ib_connection *ibic; ibic = conn->c_transport_data; - if (ibic && ibic->i_cm_id == cm_id) + if (ibic && ibic->i_cm_id == cm_id) { + cm_id->route.path_rec[0].sl = + TOS_TO_SL(conn->c_tos); ret = trans->cm_initiate_connect(cm_id, isv6); - else + } else { rds_conn_drop(conn); + } } break; diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index f0de323d15d6..6c8f09c1ce51 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -76,13 +76,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags) DEFINE_WAIT_FUNC(wait, woken_wake_function); struct smc_connection *conn = &smc->conn; struct sock *sk = &smc->sk; - bool noblock; long timeo; int rc = 0; /* similar to sk_stream_wait_memory */ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); - noblock = timeo ? 
false : true; add_wait_queue(sk_sleep(sk), &wait); while (1) { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); @@ -97,8 +95,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags) break; } if (!timeo) { - if (noblock) - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + /* ensure EPOLLOUT is subsequently generated */ + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); rc = -EAGAIN; break; } diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index d8679b6027e9..a07b516e503a 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1970,6 +1970,7 @@ call_bind(struct rpc_task *task) static void call_bind_status(struct rpc_task *task) { + struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; int status = -EIO; if (rpc_task_transmitted(task)) { @@ -1977,14 +1978,15 @@ call_bind_status(struct rpc_task *task) return; } - if (task->tk_status >= 0) { - dprint_status(task); + dprint_status(task); + trace_rpc_bind_status(task); + if (task->tk_status >= 0) + goto out_next; + if (xprt_bound(xprt)) { task->tk_status = 0; - task->tk_action = call_connect; - return; + goto out_next; } - trace_rpc_bind_status(task); switch (task->tk_status) { case -ENOMEM: dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); @@ -2003,6 +2005,9 @@ call_bind_status(struct rpc_task *task) task->tk_rebind_retry--; rpc_delay(task, 3*HZ); goto retry_timeout; + case -ENOBUFS: + rpc_delay(task, HZ >> 2); + goto retry_timeout; case -EAGAIN: goto retry_timeout; case -ETIMEDOUT: @@ -2026,7 +2031,6 @@ call_bind_status(struct rpc_task *task) case -ENETDOWN: case -EHOSTUNREACH: case -ENETUNREACH: - case -ENOBUFS: case -EPIPE: dprintk("RPC: %5u remote rpcbind unreachable: %d\n", task->tk_pid, task->tk_status); @@ -2043,7 +2047,9 @@ call_bind_status(struct rpc_task *task) rpc_call_rpcerror(task, status); return; - +out_next: + task->tk_action = call_connect; + return; retry_timeout: task->tk_status = 0; task->tk_action = call_bind; @@ -2090,6 +2096,7 @@ call_connect(struct rpc_task *task) static void call_connect_status(struct rpc_task *task) { + struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; struct rpc_clnt *clnt = task->tk_client; int status = task->tk_status; @@ -2099,8 +2106,17 @@ call_connect_status(struct rpc_task *task) } dprint_status(task); - trace_rpc_connect_status(task); + + if (task->tk_status == 0) { + clnt->cl_stats->netreconn++; + goto out_next; + } + if (xprt_connected(xprt)) { + task->tk_status = 0; + goto out_next; + } + task->tk_status = 0; switch (status) { case -ECONNREFUSED: @@ -2117,8 +2133,6 @@ call_connect_status(struct rpc_task *task) case -ENETDOWN: case -ENETUNREACH: case -EHOSTUNREACH: - case -EADDRINUSE: - case -ENOBUFS: case -EPIPE: xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, task->tk_rqstp->rq_connect_cookie); @@ -2127,17 +2141,20 @@ call_connect_status(struct rpc_task *task) /* retry with existing socket, after a delay */ rpc_delay(task, 3*HZ); /* fall through */ + case -EADDRINUSE: case -ENOTCONN: case -EAGAIN: case -ETIMEDOUT: goto out_retry; - case 0: - clnt->cl_stats->netreconn++; - task->tk_action = call_transmit; - return; + case -ENOBUFS: + rpc_delay(task, HZ >> 2); + goto out_retry; } rpc_call_rpcerror(task, status); return; +out_next: + task->tk_action = call_transmit; + return; out_retry: /* Check for timeouts before looping back to call_bind */ task->tk_action = call_bind; @@ -2365,7 +2382,7 @@ call_status(struct rpc_task *task) case -ECONNABORTED: case -ENOTCONN: rpc_force_rebind(clnt); - /* fall through */ + break; case -EADDRINUSE: rpc_delay(task, 3*HZ); /* fall through */ diff --git 
a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 783748dc5e6f..2e71f5455c6c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1408,13 +1408,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task) status = -EBADMSG; goto out_dequeue; } - if (task->tk_ops->rpc_call_prepare_transmit) { - task->tk_ops->rpc_call_prepare_transmit(task, - task->tk_calldata); - status = task->tk_status; - if (status < 0) - goto out_dequeue; - } if (RPC_SIGNALLED(task)) { status = -ERESTARTSYS; goto out_dequeue; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 646107af9f41..5311d0ae2454 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2788,7 +2788,7 @@ static void reg_process_pending_hints(void) /* When last_request->processed becomes true this will be rescheduled */ if (lr && !lr->processed) { - reg_process_hint(lr); + pr_debug("Pending regulatory request, waiting for it to be processed...\n"); return; } diff --git a/net/wireless/util.c b/net/wireless/util.c index c99939067bb0..92cb2cbb179b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -242,25 +242,30 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, switch (params->cipher) { case WLAN_CIPHER_SUITE_TKIP: + /* Extended Key ID can only be used with CCMP/GCMP ciphers */ + if ((pairwise && key_idx) || + params->mode != NL80211_KEY_RX_TX) + return -EINVAL; + break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: - /* IEEE802.11-2016 allows only 0 and - when using Extended Key - * ID - 1 as index for pairwise keys. + /* IEEE802.11-2016 allows only 0 and - when supporting + * Extended Key ID - 1 as index for pairwise keys. * @NL80211_KEY_NO_TX is only allowed for pairwise keys when * the driver supports Extended Key ID. * @NL80211_KEY_SET_TX can't be set when installing and * validating a key. 
*/ - if (params->mode == NL80211_KEY_NO_TX) { - if (!wiphy_ext_feature_isset(&rdev->wiphy, - NL80211_EXT_FEATURE_EXT_KEY_ID)) - return -EINVAL; - else if (!pairwise || key_idx < 0 || key_idx > 1) + if ((params->mode == NL80211_KEY_NO_TX && !pairwise) || + params->mode == NL80211_KEY_SET_TX) + return -EINVAL; + if (wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_EXT_KEY_ID)) { + if (pairwise && (key_idx < 0 || key_idx > 1)) return -EINVAL; - } else if ((pairwise && key_idx) || - params->mode == NL80211_KEY_SET_TX) { + } else if (pairwise && key_idx) { return -EINVAL; } break; diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index a0607969f8c0..0e0062127124 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -375,7 +375,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL); if (!umem->pages) { err = -ENOMEM; - goto out_account; + goto out_pin; } for (i = 0; i < umem->npgs; i++) @@ -383,6 +383,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) return 0; +out_pin: + xdp_umem_unpin_pages(umem); out_account: xdp_umem_unaccount_pages(umem); return err; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 8ca637a72697..ec94f5795ea4 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -3269,7 +3269,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse) struct flowi4 *fl4 = &fl->u.ip4; int oif = 0; - if (skb_dst(skb)) + if (skb_dst(skb) && skb_dst(skb)->dev) oif = skb_dst(skb)->dev->ifindex; memset(fl4, 0, sizeof(struct flowi4)); @@ -3387,7 +3387,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse) nexthdr = nh[nhoff]; - if (skb_dst(skb)) + if (skb_dst(skb) && skb_dst(skb)->dev) oif = skb_dst(skb)->dev->ifindex; memset(fl6, 0, sizeof(struct flowi6)); diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 7737b2670064..6d9592f0ae1d 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -1835,8 +1835,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client, if (cptr->type == USER_CLIENT) { info->input_pool = cptr->data.user.fifo_pool_size; info->input_free = info->input_pool; - if (cptr->data.user.fifo) - info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool); + info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo); } else { info->input_pool = 0; info->input_free = 0; diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c index ea69261f269a..eaaa8b5830bb 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c @@ -263,3 +263,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize) return 0; } + +/* get the number of unused cells safely */ +int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f) +{ + unsigned long flags; + int cells; + + if (!f) + return 0; + + snd_use_lock_use(&f->use_lock); + spin_lock_irqsave(&f->lock, flags); + cells = snd_seq_unused_cells(f->pool); + spin_unlock_irqrestore(&f->lock, flags); + snd_use_lock_free(&f->use_lock); + return cells; +} diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h index edc68743943d..b56a7b897c9c 100644 --- a/sound/core/seq/seq_fifo.h +++ b/sound/core/seq/seq_fifo.h @@ -53,5 +53,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table /* resize pool in fifo */ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize); +/* get the number of unused cells safely */ 
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f); #endif diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c index 9ea39348cdf5..7c6d1c277d4d 100644 --- a/sound/firewire/oxfw/oxfw-pcm.c +++ b/sound/firewire/oxfw/oxfw-pcm.c @@ -248,7 +248,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream, unsigned int channels = params_channels(hw_params); mutex_lock(&oxfw->mutex); - err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream, + err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream, rate, channels); if (err >= 0) ++oxfw->substreams_count; diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 0d51823d7270..6d1fb7c11f17 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -1175,6 +1175,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = { SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), + SND_PCI_QUIRK(0x1102, 0x0027, "Sound Blaster Z", QUIRK_SBZ), SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ), SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 14298ef45b21..968d3caab6ac 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -611,18 +611,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec, /* update LED status via GPIO */ static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask, - bool enabled) + bool led_on) { struct conexant_spec *spec = codec->spec; unsigned int oldval = spec->gpio_led; if (spec->mute_led_polarity) - enabled = !enabled; + led_on = !led_on; - if (enabled) - spec->gpio_led &= ~mask; - else + if (led_on) spec->gpio_led |= mask; + else + spec->gpio_led &= ~mask; + codec_dbg(codec, "mask:%d enabled:%d gpio_led:%d\n", + mask, led_on, spec->gpio_led); if (spec->gpio_led != oldval) snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, spec->gpio_led); @@ -633,8 +635,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled) { struct hda_codec *codec = private_data; struct conexant_spec *spec = codec->spec; - - cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled); + /* muted -> LED on */ + cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled); } /* turn on/off mic-mute LED via GPIO per capture hook */ @@ -656,7 +658,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec, { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 }, {} }; - codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led); if (action == HDA_FIXUP_ACT_PRE_PROBE) { spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook; diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c index 2c03e0f6bf72..f70211e6b174 100644 --- a/sound/usb/line6/pcm.c +++ b/sound/usb/line6/pcm.c @@ -550,6 +550,15 @@ int line6_init_pcm(struct usb_line6 *line6, line6pcm->volume_monitor = 255; line6pcm->line6 = line6; + spin_lock_init(&line6pcm->out.lock); + spin_lock_init(&line6pcm->in.lock); + line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD; + + line6->line6pcm = line6pcm; + + pcm->private_data = line6pcm; + pcm->private_free = line6_cleanup_pcm; + line6pcm->max_packet_size_in = usb_maxpacket(line6->usbdev, usb_rcvisocpipe(line6->usbdev, ep_read), 0); @@ -562,15 +571,6 @@ int 
line6_init_pcm(struct usb_line6 *line6, return -EINVAL; } - spin_lock_init(&line6pcm->out.lock); - spin_lock_init(&line6pcm->in.lock); - line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD; - - line6->line6pcm = line6pcm; - - pcm->private_data = line6pcm; - pcm->private_free = line6_cleanup_pcm; - err = line6_create_audio_out_urbs(line6pcm); if (err < 0) return err; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index b5927c3d5bc0..eceab19766db 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -739,7 +739,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, struct uac_mixer_unit_descriptor *desc) { int mu_channels; - void *c; if (desc->bLength < sizeof(*desc)) return -EINVAL; @@ -762,13 +761,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state, break; } - if (!mu_channels) - return 0; - - c = uac_mixer_unit_bmControls(desc, state->mixer->protocol); - if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength) - return 0; /* no bmControls -> skip */ - return mu_channels; } @@ -2009,6 +2001,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, * Mixer Unit */ +/* check whether the given in/out overflows bmMixerControls matrix */ +static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc, + int protocol, int num_ins, int num_outs) +{ + u8 *hdr = (u8 *)desc; + u8 *c = uac_mixer_unit_bmControls(desc, protocol); + size_t rest; /* remaining bytes after bmMixerControls */ + + switch (protocol) { + case UAC_VERSION_1: + default: + rest = 1; /* iMixer */ + break; + case UAC_VERSION_2: + rest = 2; /* bmControls + iMixer */ + break; + case UAC_VERSION_3: + rest = 6; /* bmControls + wMixerDescrStr */ + break; + } + + /* overflow? */ + return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0]; +} + /* * build a mixer unit control * @@ -2137,6 +2154,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, if (err < 0) return err; num_ins += iterm.channels; + if (mixer_bitmap_overflow(desc, state->mixer->protocol, + num_ins, num_outs)) + break; for (; ich < num_ins; ich++) { int och, ich_has_controls = 0; diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 199fa157a411..27dcb3743690 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -1155,17 +1155,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip, { struct usb_mixer_interface *mixer; struct usb_mixer_elem_info *cval; - int unitid = 12; /* SamleRate ExtensionUnit ID */ + int unitid = 12; /* SampleRate ExtensionUnit ID */ list_for_each_entry(mixer, &chip->mixer_list, list) { - cval = mixer_elem_list_to_info(mixer->id_elems[unitid]); - if (cval) { + if (mixer->id_elems[unitid]) { + cval = mixer_elem_list_to_info(mixer->id_elems[unitid]); snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, cval->control << 8, samplerate_id); snd_usb_mixer_notify_id(mixer, unitid); + break; } - break; } } diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 75b96929f76c..e4bbf79de956 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -339,6 +339,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ep = 0x81; ifnum = 2; goto add_sync_ep_from_ifnum; + case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */ case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */ ep = 0x81; ifnum = 1; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 66f04a4846a5..43fdbbfe41bb 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -363,7 +363,9 @@ static int do_show(int 
argc, char **argv) if (fd < 0) return -1; - return show_prog(fd); + err = show_prog(fd); + close(fd); + return err; } if (argc) diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index f5597503c771..e9ef4ca6a655 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op, int sn_offset = 0; int error = 0; char *buffer; - struct hv_kvp_ipaddr_value *ip_buffer; + struct hv_kvp_ipaddr_value *ip_buffer = NULL; char cidr_mask[5]; /* /xyz */ int weight; int i; diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 29001f944db7..d69c541e2039 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -34,6 +34,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) TEST_GEN_FILES = $(BPF_OBJ_FILES) +BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c) +TEST_FILES = $(BTF_C_FILES) + # Also test sub-register code-gen if LLVM has eBPF v3 processor support which # contains both ALU32 and JMP32 instructions. SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \ @@ -68,7 +71,8 @@ TEST_PROGS := test_kmod.sh \ TEST_PROGS_EXTENDED := with_addr.sh \ with_tunnels.sh \ tcp_client.py \ - tcp_server.py + tcp_server.py \ + test_xdp_vlan.sh # Compile but not part of 'make run_tests' TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index f7a0744db31e..5dc109f4c097 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_IPV6_SIT=m +CONFIG_BPF_JIT=y diff --git a/tools/testing/selftests/bpf/test_btf_dump.c b/tools/testing/selftests/bpf/test_btf_dump.c index 8f850823d35f..6e75dd3cb14f 100644 --- a/tools/testing/selftests/bpf/test_btf_dump.c +++ b/tools/testing/selftests/bpf/test_btf_dump.c @@ -97,6 +97,13 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case) } snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name); + if (access(test_file, R_OK) == -1) + /* + * When the test is run with O=, kselftest copies TEST_FILES + * without preserving the directory structure. + */ + snprintf(test_file, sizeof(test_file), "%s.c", + test_case->name); /* * Diff test output and expected test output, contained between * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case. 
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c index 2fc4625c1a15..655729004391 100644 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c @@ -20,9 +20,9 @@ int main(int argc, char **argv) BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */ BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ @@ -30,7 +30,7 @@ int main(int argc, char **argv) BPF_FUNC_get_local_storage), BPF_MOV64_IMM(BPF_REG_1, 1), BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1), BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), BPF_EXIT_INSN(), diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index fb679ac3d4b0..0e6652733462 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c @@ -13,6 +13,7 @@ #include <bpf/bpf.h> #include "cgroup_helpers.h" +#include "bpf_endian.h" #include "bpf_rlimit.h" #include "bpf_util.h" @@ -232,7 +233,8 @@ static struct sock_test tests[] = { /* if (ip == expected && port == expected) */ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, offsetof(struct bpf_sock, src_ip6[3])), - BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, + __bpf_constant_ntohl(0x00000001), 4), BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, offsetof(struct bpf_sock, src_port)), BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2), @@ -261,7 +263,8 @@ static struct sock_test tests[] = { /* if (ip == expected && port == expected) */ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, offsetof(struct bpf_sock, src_ip4)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, + __bpf_constant_ntohl(0x7F000001), 4), BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, offsetof(struct bpf_sock, src_port)), BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2), diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h index 4059014d93ea..4912d23844bc 100644 --- a/tools/testing/selftests/kvm/include/evmcs.h +++ b/tools/testing/selftests/kvm/include/evmcs.h @@ -220,6 +220,8 @@ struct hv_enlightened_vmcs { struct hv_enlightened_vmcs *current_evmcs; struct hv_vp_assist_page *current_vp_assist; +int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id); + static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) { u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 6cb34a0fa200..0a5e487dbc50 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1060,9 +1060,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i", r); - r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs); - TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i", - r); + if (kvm_check_cap(KVM_CAP_XCRS)) { + r = ioctl(vcpu->fd, KVM_GET_XCRS, 
&state->xcrs); + TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i", + r); + } r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs); TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i", @@ -1103,9 +1105,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", r); - r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); - TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", - r); + if (kvm_check_cap(KVM_CAP_XCRS)) { + r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); + TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", + r); + } r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs); TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i", diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c index 204f847bd065..9cef0455b819 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c @@ -12,6 +12,26 @@ bool enable_evmcs; +int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id) +{ + uint16_t evmcs_ver; + + struct kvm_enable_cap enable_evmcs_cap = { + .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, + .args[0] = (unsigned long)&evmcs_ver + }; + + vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap); + + /* KVM should return supported EVMCS version range */ + TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) && + (evmcs_ver & 0xff) > 0, + "Incorrect EVMCS version range: %x:%x\n", + evmcs_ver & 0xff, evmcs_ver >> 8); + + return evmcs_ver; +} + /* Allocate memory regions for nested VMX tests. * * Input Args: diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c index f95c08343b48..92915e6408e7 100644 --- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c +++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c @@ -79,11 +79,6 @@ int main(int argc, char *argv[]) struct kvm_x86_state *state; struct ucall uc; int stage; - uint16_t evmcs_ver; - struct kvm_enable_cap enable_evmcs_cap = { - .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, - .args[0] = (unsigned long)&evmcs_ver - }; /* Create VM */ vm = vm_create_default(VCPU_ID, 0, guest_code); @@ -96,13 +91,7 @@ int main(int argc, char *argv[]) exit(KSFT_SKIP); } - vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); - - /* KVM should return supported EVMCS version range */ - TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) && - (evmcs_ver & 0xff) > 0, - "Incorrect EVMCS version range: %x:%x\n", - evmcs_ver & 0xff, evmcs_ver >> 8); + vcpu_enable_evmcs(vm, VCPU_ID); run = vcpu_state(vm, VCPU_ID); @@ -146,7 +135,7 @@ int main(int argc, char *argv[]) kvm_vm_restart(vm, O_RDWR); vm_vcpu_add(vm, VCPU_ID); vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); - vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); + vcpu_enable_evmcs(vm, VCPU_ID); vcpu_load_state(vm, VCPU_ID, state); run = vcpu_state(vm, VCPU_ID); free(state); diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c index f72b3043db0e..ee59831fbc98 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c @@ -18,6 +18,7 @@ #include "test_util.h" #include "kvm_util.h" #include "processor.h" +#include "vmx.h" #define VCPU_ID 0 @@ -106,12 +107,7 @@ int main(int argc, char *argv[]) { struct kvm_vm *vm; int rv; - uint16_t evmcs_ver; struct kvm_cpuid2 *hv_cpuid_entries; - 
struct kvm_enable_cap enable_evmcs_cap = { - .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS, - .args[0] = (unsigned long)&evmcs_ver - }; /* Tell stdout not to buffer its content */ setbuf(stdout, NULL); @@ -136,14 +132,14 @@ int main(int argc, char *argv[]) free(hv_cpuid_entries); - rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); - - if (rv) { + if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) { fprintf(stderr, "Enlightened VMCS is unsupported, skip related test\n"); goto vm_free; } + vcpu_enable_evmcs(vm, VCPU_ID); + hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm); if (!hv_cpuid_entries) return 1; diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c index 40050e44ec0a..f9334bd3cce9 100644 --- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c +++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c @@ -99,8 +99,8 @@ int main(int argc, char *argv[]) msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); - test_msr_platform_info_disabled(vm); test_msr_platform_info_enabled(vm); + test_msr_platform_info_disabled(vm); vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info); kvm_vm_free(vm); diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c index ed7218d166da..853e370e8a39 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c @@ -25,24 +25,17 @@ #define VMCS12_REVISION 0x11e57ed0 #define VCPU_ID 5 +bool have_evmcs; + void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state) { - volatile struct kvm_run *run; - vcpu_nested_state_set(vm, VCPU_ID, state, false); - run = vcpu_state(vm, VCPU_ID); - vcpu_run(vm, VCPU_ID); - TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, - "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); } void test_nested_state_expect_errno(struct kvm_vm *vm, struct kvm_nested_state *state, int expected_errno) { - volatile struct kvm_run *run; int rv; rv = vcpu_nested_state_set(vm, VCPU_ID, state, true); @@ -50,12 +43,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm, "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)", strerror(expected_errno), expected_errno, rv, strerror(errno), errno); - run = vcpu_state(vm, VCPU_ID); - vcpu_run(vm, VCPU_ID); - TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, - "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); } void test_nested_state_expect_einval(struct kvm_vm *vm, @@ -90,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size) { memset(state, 0, size); state->flags = KVM_STATE_NESTED_GUEST_MODE | - KVM_STATE_NESTED_RUN_PENDING | - KVM_STATE_NESTED_EVMCS; + KVM_STATE_NESTED_RUN_PENDING; + if (have_evmcs) + state->flags |= KVM_STATE_NESTED_EVMCS; state->format = 0; state->size = size; state->hdr.vmx.vmxon_pa = 0x1000; @@ -141,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm) /* * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without * setting the nested state but flags other than eVMCS must be clear. + * The eVMCS flag can be set if the enlightened VMCS capability has + * been enabled. 
*/ set_default_vmx_state(state, state_sz); state->hdr.vmx.vmxon_pa = -1ull; state->hdr.vmx.vmcs12_pa = -1ull; test_nested_state_expect_einval(vm, state); - state->flags = KVM_STATE_NESTED_EVMCS; + state->flags &= KVM_STATE_NESTED_EVMCS; + if (have_evmcs) { + test_nested_state_expect_einval(vm, state); + vcpu_enable_evmcs(vm, VCPU_ID); + } test_nested_state(vm, state); /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */ @@ -232,6 +226,8 @@ int main(int argc, char *argv[]) struct kvm_nested_state state; struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); + have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS); + if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) { printf("KVM_CAP_NESTED_STATE not available, skipping test\n"); exit(KSFT_SKIP); diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c index a8a6a0c883f1..6af5c91337f2 100644 --- a/virt/kvm/arm/mmio.c +++ b/virt/kvm/arm/mmio.c @@ -86,6 +86,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) unsigned int len; int mask; + /* Detect an already handled MMIO return */ + if (unlikely(!vcpu->mmio_needed)) + return 0; + + vcpu->mmio_needed = 0; + if (!run->mmio.is_write) { len = run->mmio.len; if (len > sizeof(unsigned long)) @@ -188,6 +194,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, run->mmio.is_write = is_write; run->mmio.phys_addr = fault_ipa; run->mmio.len = len; + vcpu->mmio_needed = 1; if (!ret) { /* We handled the access successfully in the kernel. */ diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index bdbc297d06fb..e621b5d45b27 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -8,6 +8,7 @@ #include <linux/cpu.h> #include <linux/kvm_host.h> #include <kvm/arm_vgic.h> +#include <asm/kvm_emulate.h> #include <asm/kvm_mmu.h> #include "vgic.h" @@ -164,12 +165,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) irq->vcpu = NULL; irq->target_vcpu = vcpu0; kref_init(&irq->refcount); - if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) { + switch (dist->vgic_model) { + case KVM_DEV_TYPE_ARM_VGIC_V2: irq->targets = 0; irq->group = 0; - } else { + break; + case KVM_DEV_TYPE_ARM_VGIC_V3: irq->mpidr = 0; irq->group = 1; + break; + default: + kfree(dist->spis); + return -EINVAL; } } return 0; @@ -209,7 +216,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) irq->intid = i; irq->vcpu = NULL; irq->target_vcpu = vcpu; - irq->targets = 1U << vcpu->vcpu_id; kref_init(&irq->refcount); if (vgic_irq_is_sgi(i)) { /* SGIs */ @@ -219,11 +225,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) /* PPIs */ irq->config = VGIC_CONFIG_LEVEL; } - - if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) - irq->group = 1; - else - irq->group = 0; } if (!irqchip_in_kernel(vcpu->kvm)) @@ -286,10 +287,19 @@ int vgic_init(struct kvm *kvm) for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; - if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) + switch (dist->vgic_model) { + case KVM_DEV_TYPE_ARM_VGIC_V3: irq->group = 1; - else + irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu); + break; + case KVM_DEV_TYPE_ARM_VGIC_V2: irq->group = 0; + irq->targets = 1U << idx; + break; + default: + ret = -EINVAL; + goto out; + } } } |
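A recurring pattern in the KVM selftest hunks above is probing a capability before touching the associated vcpu state: KVM_CAP_XCRS before KVM_GET_XCRS/KVM_SET_XCRS, and KVM_CAP_HYPERV_ENLIGHTENED_VMCS before enabling eVMCS. A minimal sketch of that probe against /dev/kvm is shown below; it is illustrative only, not part of the patch, and assumes <linux/kvm.h> provides the capability constants:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

/* KVM_CHECK_EXTENSION returns > 0 on the /dev/kvm fd when a cap is present. */
static int kvm_has_cap(int kvm_fd, long cap)
{
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap) > 0;
}

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* Only issue KVM_GET_XCRS/KVM_SET_XCRS when the host supports it. */
	printf("KVM_CAP_XCRS: %d\n", kvm_has_cap(kvm_fd, KVM_CAP_XCRS));
	printf("KVM_CAP_HYPERV_ENLIGHTENED_VMCS: %d\n",
	       kvm_has_cap(kvm_fd, KVM_CAP_HYPERV_ENLIGHTENED_VMCS));

	close(kvm_fd);
	return 0;
}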