453 files changed, 4096 insertions, 2231 deletions
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt index 996ce84352cb..7cccc49b6bea 100644 --- a/Documentation/devicetree/bindings/input/gpio-keys.txt +++ b/Documentation/devicetree/bindings/input/gpio-keys.txt @@ -1,4 +1,4 @@ -Device-Tree bindings for input/gpio_keys.c keyboard driver +Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver Required properties: - compatible = "gpio-keys"; diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index 457d5ae16f23..3e17ac1d5d58 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -10,6 +10,7 @@ Required properties: Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on the Cadence GEM, or the generic form: "cdns,gem". Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs. + Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs. Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs. Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst index 3f4f6c9ffad7..a4222b6cd2d3 100644 --- a/Documentation/media/uapi/dvb/video_function_calls.rst +++ b/Documentation/media/uapi/dvb/video_function_calls.rst @@ -33,4 +33,3 @@ Video Function Calls video-clear-buffer video-set-streamtype video-set-format - video-set-attributes diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index c664064f76fb..647f94128a85 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -4510,7 +4510,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits. Architectures: s390 Parameters: none Returns: 0 on success, -EINVAL if hpage module parameter was not set - or cmma is enabled + or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL + flag set With this capability the KVM support for memory backing with 1m pages through hugetlbfs can be enabled for a VM. After the capability is @@ -4521,6 +4522,15 @@ hpage module parameter is not set to 1, -EINVAL is returned. While it is generally possible to create a huge page backed VM without this capability, the VM will not be able to run. +7.14 KVM_CAP_MSR_PLATFORM_INFO + +Architectures: x86 +Parameters: args[0] whether feature should be enabled or not + +With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise, +a #GP would be raised when the guest tries to access. Currently, this +capability does not enable write permissions of this MSR for the guest. + 8. Other capabilities. 
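For context on the KVM_CAP_MSR_PLATFORM_INFO paragraph added to api.txt above: it is a per-VM capability, so a VMM opts in through the KVM_ENABLE_CAP ioctl on the VM file descriptor. The snippet below is only a minimal userspace sketch of that call under the assumption of a kernel that already carries this change; VM setup is reduced to the bare open/create calls and error handling is mostly elided.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm = ioctl(kvm, KVM_CREATE_VM, 0);  /* machine type 0: plain VM */
            struct kvm_enable_cap cap;

            if (kvm < 0 || vm < 0) {
                    perror("kvm setup");
                    return 1;
            }

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
            cap.args[0] = 1;  /* args[0]: non-zero allows guest reads of MSR_PLATFORM_INFO */

            if (ioctl(vm, KVM_ENABLE_CAP, &cap))
                    perror("KVM_ENABLE_CAP(KVM_CAP_MSR_PLATFORM_INFO)");

            return 0;
    }

In practice a VMM would probe the capability first with KVM_CHECK_EXTENSION rather than relying on the enable ioctl failing on older kernels.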
---------------------- diff --git a/MAINTAINERS b/MAINTAINERS index 075b61bfb1c9..a3743d50f83a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9732,13 +9732,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/ S: Maintained F: drivers/media/dvb-frontends/mn88473* -PCI DRIVER FOR MOBIVEIL PCIE IP -M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in> -L: linux-pci@vger.kernel.org -S: Supported -F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt -F: drivers/pci/controller/pcie-mobiveil.c - MODULE SUPPORT M: Jessica Yu <jeyu@kernel.org> T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next @@ -10965,7 +10958,7 @@ M: Willy Tarreau <willy@haproxy.com> M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com> S: Odd Fixes F: Documentation/auxdisplay/lcd-panel-cgram.txt -F: drivers/misc/panel.c +F: drivers/auxdisplay/panel.c PARALLEL PORT SUBSYSTEM M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> @@ -11153,6 +11146,13 @@ F: include/uapi/linux/switchtec_ioctl.h F: include/linux/switchtec.h F: drivers/ntb/hw/mscc/ +PCI DRIVER FOR MOBIVEIL PCIE IP +M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in> +L: linux-pci@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt +F: drivers/pci/controller/pcie-mobiveil.c + PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> M: Jason Cooper <jason@lakedaemon.net> @@ -11219,8 +11219,14 @@ F: tools/pci/ PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC M: Russell Currey <ruscur@russell.cc> +M: Sam Bobroff <sbobroff@linux.ibm.com> +M: Oliver O'Halloran <oohall@gmail.com> L: linuxppc-dev@lists.ozlabs.org S: Supported +F: Documentation/PCI/pci-error-recovery.txt +F: drivers/pci/pcie/aer.c +F: drivers/pci/pcie/dpc.c +F: drivers/pci/pcie/err.c F: Documentation/powerpc/eeh-pci-error-recovery.txt F: arch/powerpc/kernel/eeh*.c F: arch/powerpc/platforms/*/eeh*.c @@ -12276,6 +12282,7 @@ F: Documentation/networking/rds.txt RDT - RESOURCE ALLOCATION M: Fenghua Yu <fenghua.yu@intel.com> +M: Reinette Chatre <reinette.chatre@intel.com> L: linux-kernel@vger.kernel.org S: Supported F: arch/x86/kernel/cpu/intel_rdt* @@ -13465,9 +13472,8 @@ F: drivers/i2c/busses/i2c-synquacer.c F: Documentation/devicetree/bindings/i2c/i2c-synquacer.txt SOCIONEXT UNIPHIER SOUND DRIVER -M: Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com> L: alsa-devel@alsa-project.org (moderated for non-subscribers) -S: Maintained +S: Orphan F: sound/soc/uniphier/ SOEKRIS NET48XX LED SUPPORT @@ -15929,6 +15935,7 @@ F: net/x25/ X86 ARCHITECTURE (32-BIT AND 64-BIT) M: Thomas Gleixner <tglx@linutronix.de> M: Ingo Molnar <mingo@redhat.com> +M: Borislav Petkov <bp@alien8.de> R: "H. 
Peter Anvin" <hpa@zytor.com> M: x86@kernel.org L: linux-kernel@vger.kernel.org @@ -15957,6 +15964,15 @@ M: Borislav Petkov <bp@alien8.de> S: Maintained F: arch/x86/kernel/cpu/microcode/* +X86 MM +M: Dave Hansen <dave.hansen@linux.intel.com> +M: Andy Lutomirski <luto@kernel.org> +M: Peter Zijlstra <peterz@infradead.org> +L: linux-kernel@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm +S: Maintained +F: arch/x86/mm/ + X86 PLATFORM DRIVERS M: Darren Hart <dvhart@infradead.org> M: Andy Shevchenko <andy@infradead.org> @@ -2,7 +2,7 @@ VERSION = 4 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc6 NAME = Merciless Moray # *DOCUMENTATION* @@ -299,19 +299,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION -# SUBARCH tells the usermode build what the underlying arch is. That is set -# first, and if a usermode build is happening, the "ARCH=um" on the command -# line overrides the setting of ARCH below. If a native build is happening, -# then ARCH is assigned, getting whatever value it gets normally, and -# SUBARCH is subsequently ignored. - -SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ - -e s/sun4u/sparc64/ \ - -e s/arm.*/arm/ -e s/sa110/arm/ \ - -e s/s390x/s390/ -e s/parisc64/parisc/ \ - -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ - -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ - -e s/riscv.*/riscv/) +include scripts/subarch.include # Cross compiling and selecting different set of gcc/bin-utils # --------------------------------------------------------------------------- diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi index 7cb235ef0fb6..6e9e1c2f9def 100644 --- a/arch/arm/boot/dts/sama5d3_emac.dtsi +++ b/arch/arm/boot/dts/sama5d3_emac.dtsi @@ -41,7 +41,7 @@ }; macb1: ethernet@f802c000 { - compatible = "cdns,at91sam9260-macb", "cdns,macb"; + compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb"; reg = <0xf802c000 0x100>; interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>; pinctrl-names = "default"; diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 13a688fc8cd0..2fdc865ca374 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1051,7 +1051,6 @@ static inline void vmemmap_remove_mapping(unsigned long start, return hash__vmemmap_remove_mapping(start, page_size); } #endif -struct page *realmode_pfn_to_page(unsigned long pfn); static inline pte_t pmd_pte(pmd_t pmd) { diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index ab3a4fba38e3..3d4b88cb8599 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -220,8 +220,6 @@ extern void iommu_del_device(struct device *dev); extern int __init tce_iommu_bus_notifier_init(void); extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction); -extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry, - unsigned long *hpa, enum dma_data_direction *direction); #else static inline void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index b2f89b621b15..b694d6af1150 
100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -38,6 +38,7 @@ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa); extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, unsigned long ua, unsigned int pageshift, unsigned long *hpa); +extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua); extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); #endif diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 1a951b00465d..1fffbba8d6a5 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex); extern unsigned int rtas_data; extern unsigned long long memory_limit; +extern bool init_mem_is_free; extern unsigned long klimit; extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index ea04dfb8c092..2d8fc8c9da7a 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100) #ifdef CONFIG_PPC_DENORMALISATION mfspr r10,SPRN_HSRR1 - mfspr r11,SPRN_HSRR0 /* save HSRR0 */ andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */ - addi r11,r11,-4 /* HSRR0 is next instruction */ bne+ denorm_assist #endif @@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) */ XVCPSGNDP32(32) denorm_done: + mfspr r11,SPRN_HSRR0 + subi r11,r11,4 mtspr SPRN_HSRR0,r11 mtcrf 0x80,r9 ld r9,PACA_EXGEN+EX_R9(r13) diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index af7a20dc6e09..19b4c628f3be 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -1013,31 +1013,6 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, } EXPORT_SYMBOL_GPL(iommu_tce_xchg); -#ifdef CONFIG_PPC_BOOK3S_64 -long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry, - unsigned long *hpa, enum dma_data_direction *direction) -{ - long ret; - - ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction); - - if (!ret && ((*direction == DMA_FROM_DEVICE) || - (*direction == DMA_BIDIRECTIONAL))) { - struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT); - - if (likely(pg)) { - SetPageDirty(pg); - } else { - tbl->it_ops->exchange_rm(tbl, entry, hpa, direction); - ret = -EFAULT; - } - } - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm); -#endif - int iommu_take_ownership(struct iommu_table *tbl) { unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 6bffbc5affe7..7716374786bd 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim) std r1, PACATMSCRATCH(r13) ld r1, PACAR1(r13) - /* Store the PPR in r11 and reset to decent value */ std r11, GPR11(r1) /* Temporary stash */ + /* + * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is + * clobbered by an exception once we turn on MSR_RI below. + */ + ld r11, PACATMSCRATCH(r13) + std r11, GPR1(r1) + + /* + * Store r13 away so we can free up the scratch SPR for the SLB fault + * handler (needed once we start accessing the thread_struct). 
+ */ + GET_SCRATCH0(r11) + std r11, GPR13(r1) + /* Reset MSR RI so we can take SLB faults again */ li r11, MSR_RI mtmsrd r11, 1 + /* Store the PPR in r11 and reset to decent value */ mfspr r11, SPRN_PPR HMT_MEDIUM @@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim) SAVE_GPR(8, r7) /* user r8 */ SAVE_GPR(9, r7) /* user r9 */ SAVE_GPR(10, r7) /* user r10 */ - ld r3, PACATMSCRATCH(r13) /* user r1 */ + ld r3, GPR1(r1) /* user r1 */ ld r4, GPR7(r1) /* user r7 */ ld r5, GPR11(r1) /* user r11 */ ld r6, GPR12(r1) /* user r12 */ - GET_SCRATCH0(8) /* user r13 */ + ld r8, GPR13(r1) /* user r13 */ std r3, GPR1(r7) std r4, GPR7(r7) std r5, GPR11(r7) diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index fd6e8c13685f..933c574e1cf7 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -525,8 +525,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned long ea, unsigned long dsisr) { struct kvm *kvm = vcpu->kvm; - unsigned long mmu_seq, pte_size; - unsigned long gpa, gfn, hva, pfn; + unsigned long mmu_seq; + unsigned long gpa, gfn, hva; struct kvm_memory_slot *memslot; struct page *page = NULL; long ret; @@ -623,9 +623,10 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, */ hva = gfn_to_hva_memslot(memslot, gfn); if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) { - pfn = page_to_pfn(page); upgrade_write = true; } else { + unsigned long pfn; + /* Call KVM generic code to do the slow-path check */ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, writing, upgrade_p); @@ -639,63 +640,45 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } } - /* See if we can insert a 1GB or 2MB large PTE here */ - level = 0; - if (page && PageCompound(page)) { - pte_size = PAGE_SIZE << compound_order(compound_head(page)); - if (pte_size >= PUD_SIZE && - (gpa & (PUD_SIZE - PAGE_SIZE)) == - (hva & (PUD_SIZE - PAGE_SIZE))) { - level = 2; - pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1); - } else if (pte_size >= PMD_SIZE && - (gpa & (PMD_SIZE - PAGE_SIZE)) == - (hva & (PMD_SIZE - PAGE_SIZE))) { - level = 1; - pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1); - } - } - /* - * Compute the PTE value that we need to insert. + * Read the PTE from the process' radix tree and use that + * so we get the shift and attribute bits. */ - if (page) { - pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE | - _PAGE_ACCESSED; - if (writing || upgrade_write) - pgflags |= _PAGE_WRITE | _PAGE_DIRTY; - pte = pfn_pte(pfn, __pgprot(pgflags)); + local_irq_disable(); + ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); + pte = *ptep; + local_irq_enable(); + + /* Get pte level from shift/size */ + if (shift == PUD_SHIFT && + (gpa & (PUD_SIZE - PAGE_SIZE)) == + (hva & (PUD_SIZE - PAGE_SIZE))) { + level = 2; + } else if (shift == PMD_SHIFT && + (gpa & (PMD_SIZE - PAGE_SIZE)) == + (hva & (PMD_SIZE - PAGE_SIZE))) { + level = 1; } else { - /* - * Read the PTE from the process' radix tree and use that - * so we get the attribute bits. 
- */ - local_irq_disable(); - ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); - pte = *ptep; - local_irq_enable(); - if (shift == PUD_SHIFT && - (gpa & (PUD_SIZE - PAGE_SIZE)) == - (hva & (PUD_SIZE - PAGE_SIZE))) { - level = 2; - } else if (shift == PMD_SHIFT && - (gpa & (PMD_SIZE - PAGE_SIZE)) == - (hva & (PMD_SIZE - PAGE_SIZE))) { - level = 1; - } else if (shift && shift != PAGE_SHIFT) { - /* Adjust PFN */ - unsigned long mask = (1ul << shift) - PAGE_SIZE; - pte = __pte(pte_val(pte) | (hva & mask)); - } - pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED); - if (writing || upgrade_write) { - if (pte_val(pte) & _PAGE_WRITE) - pte = __pte(pte_val(pte) | _PAGE_DIRTY); - } else { - pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY)); + level = 0; + if (shift > PAGE_SHIFT) { + /* + * If the pte maps more than one page, bring over + * bits from the virtual address to get the real + * address of the specific single page we want. + */ + unsigned long rpnmask = (1ul << shift) - PAGE_SIZE; + pte = __pte(pte_val(pte) | (hva & rpnmask)); } } + pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED); + if (writing || upgrade_write) { + if (pte_val(pte) & _PAGE_WRITE) + pte = __pte(pte_val(pte) | _PAGE_DIRTY); + } else { + pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY)); + } + /* Allocate space in the tree and write the PTE */ ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq); diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 506a4d400458..6821ead4b4eb 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry) +static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, + unsigned long entry, unsigned long *hpa, + enum dma_data_direction *direction) +{ + long ret; + + ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction); + + if (!ret && ((*direction == DMA_FROM_DEVICE) || + (*direction == DMA_BIDIRECTIONAL))) { + __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry); + /* + * kvmppc_rm_tce_iommu_do_map() updates the UA cache after + * calling this so we still get here a valid UA. 
+ */ + if (pua && *pua) + mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua)); + } + + return ret; +} + +static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl, + unsigned long entry) { unsigned long hpa = 0; enum dma_data_direction dir = DMA_NONE; - iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); + iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); } static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, @@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, unsigned long hpa = 0; long ret; - if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir)) + if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir)) /* * real mode xchg can fail if struct page crosses * a page boundary @@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm, ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry); if (ret) - iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); + iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); return ret; } @@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl, if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem))) return H_CLOSED; - ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); + ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir); if (ret) { mm_iommu_mapped_dec(mem); /* @@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, return ret; WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(stit->tbl, entry); + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); } kvmppc_tce_put(stt, entry, tce); @@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, goto unlock_exit; WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(stit->tbl, entry); + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); } kvmppc_tce_put(stt, entry + i, tce); @@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, return ret; WARN_ON_ONCE_RM(1); - kvmppc_rm_clear_tce(stit->tbl, entry); + kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry); } } diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S index 886ed94b9c13..d05c8af4ac51 100644 --- a/arch/powerpc/lib/checksum_64.S +++ b/arch/powerpc/lib/checksum_64.S @@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic) addc r0, r8, r9 ld r10, 0(r4) ld r11, 8(r4) +#ifdef CONFIG_CPU_LITTLE_ENDIAN + rotldi r5, r5, 8 +#endif adde r0, r0, r10 add r5, r5, r7 adde r0, r0, r11 diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 850f3b8f4da5..6ae2777c220d 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr, { int err; + /* Make sure we aren't patching a freed init section */ + if (init_mem_is_free && init_section_contains(exec_addr, 4)) { + pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr); + return 0; + } + __put_user_size(instr, patch_addr, 4, err); if (err) return err; diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 51ce091914f9..7a9886f98b0c 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -308,55 +308,6 @@ void register_page_bootmem_memmap(unsigned long section_nr, { } -/* - * We do not have access to the sparsemem vmemmap, so we fallback to - * walking the list of sparsemem blocks which we already maintain for - * the sake of crashdump. In the long run, we might want to maintain - * a tree if performance of that linear walk becomes a problem. 
- * - * realmode_pfn_to_page functions can fail due to: - * 1) As real sparsemem blocks do not lay in RAM continously (they - * are in virtual address space which is not available in the real mode), - * the requested page struct can be split between blocks so get_page/put_page - * may fail. - * 2) When huge pages are used, the get_page/put_page API will fail - * in real mode as the linked addresses in the page struct are virtual - * too. - */ -struct page *realmode_pfn_to_page(unsigned long pfn) -{ - struct vmemmap_backing *vmem_back; - struct page *page; - unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; - unsigned long pg_va = (unsigned long) pfn_to_page(pfn); - - for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) { - if (pg_va < vmem_back->virt_addr) - continue; - - /* After vmemmap_list entry free is possible, need check all */ - if ((pg_va + sizeof(struct page)) <= - (vmem_back->virt_addr + page_size)) { - page = (struct page *) (vmem_back->phys + pg_va - - vmem_back->virt_addr); - return page; - } - } - - /* Probably that page struct is split between real pages */ - return NULL; -} -EXPORT_SYMBOL_GPL(realmode_pfn_to_page); - -#else - -struct page *realmode_pfn_to_page(unsigned long pfn) -{ - struct page *page = pfn_to_page(pfn); - return page; -} -EXPORT_SYMBOL_GPL(realmode_pfn_to_page); - #endif /* CONFIG_SPARSEMEM_VMEMMAP */ #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 5c8530d0c611..04ccb274a620 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -63,6 +63,7 @@ #endif unsigned long long memory_limit; +bool init_mem_is_free; #ifdef CONFIG_HIGHMEM pte_t *kmap_pte; @@ -396,6 +397,7 @@ void free_initmem(void) { ppc_md.progress = ppc_printk_progress; mark_initmem_nx(); + init_mem_is_free = true; free_initmem_default(POISON_FREE_INITMEM); } diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index c9ee9e23845f..56c2234cc6ae 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -18,11 +18,15 @@ #include <linux/migrate.h> #include <linux/hugetlb.h> #include <linux/swap.h> +#include <linux/sizes.h> #include <asm/mmu_context.h> #include <asm/pte-walk.h> static DEFINE_MUTEX(mem_list_mutex); +#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY 0x1 +#define MM_IOMMU_TABLE_GROUP_PAGE_MASK ~(SZ_4K - 1) + struct mm_iommu_table_group_mem_t { struct list_head next; struct rcu_head rcu; @@ -263,6 +267,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem) if (!page) continue; + if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY) + SetPageDirty(page); + put_page(page); mem->hpas[i] = 0; } @@ -360,7 +367,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm, return ret; } -EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm); struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, unsigned long ua, unsigned long entries) @@ -390,7 +396,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, if (pageshift > mem->pageshift) return -EFAULT; - *hpa = *va | (ua & ~PAGE_MASK); + *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK); return 0; } @@ -413,11 +419,31 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, if (!pa) return -EFAULT; - *hpa = *pa | (ua & ~PAGE_MASK); + *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK); return 0; } -EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm); + +extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, 
unsigned long ua) +{ + struct mm_iommu_table_group_mem_t *mem; + long entry; + void *va; + unsigned long *pa; + + mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE); + if (!mem) + return; + + entry = (ua - mem->ua) >> PAGE_SHIFT; + va = &mem->hpas[entry]; + + pa = (void *) vmalloc_to_phys(va); + if (!pa) + return; + + *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY; +} long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem) { diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 35ac5422903a..59d07bd5374a 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu) int new_nid; /* Use associativity from first thread for all siblings */ - vphn_get_associativity(cpu, associativity); + if (vphn_get_associativity(cpu, associativity)) + return cpu_to_node(cpu); + new_nid = associativity_to_nid(associativity); if (new_nid < 0 || !node_possible(new_nid)) new_nid = first_online_node; @@ -1452,7 +1454,8 @@ static struct timer_list topology_timer; static void reset_topology_timer(void) { - mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ); + if (vphn_enabled) + mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ); } #ifdef CONFIG_SMP diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c index 333b1f80c435..b271b283c785 100644 --- a/arch/powerpc/mm/pkeys.c +++ b/arch/powerpc/mm/pkeys.c @@ -45,7 +45,7 @@ static void scan_pkey_feature(void) * Since any pkey can be used for data or execute, we will just treat * all keys as equal and track them as one entity. */ - pkeys_total = be32_to_cpu(vals[0]); + pkeys_total = vals[0]; pkeys_devtree_defined = true; } diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c index 6c5db1acbe8d..fe9691040f54 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c @@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, level_shift = entries_shift + 3; level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT); - if ((level_shift - 3) * levels + page_shift >= 60) + if ((level_shift - 3) * levels + page_shift >= 55) return -EINVAL; /* Allocate TCE table */ diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..c9fecd120d18 --- /dev/null +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_PROTOTYPES_H + +#include <linux/ftrace.h> +#include <asm-generic/asm-prototypes.h> + +#endif /* _ASM_RISCV_PROTOTYPES_H */ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f69333fd2fa3..ac5da6b0b862 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -481,7 +481,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; case KVM_CAP_S390_HPAGE_1M: r = 0; - if (hpage) + if (hpage && !kvm_is_ucontrol(kvm)) r = 1; break; case KVM_CAP_S390_MEM_OP: @@ -691,7 +691,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) mutex_lock(&kvm->lock); if (kvm->created_vcpus) r = -EBUSY; - else if (!hpage || kvm->arch.use_cmma) + else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) r = -EINVAL; else { r = 0; diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index bb44990c8212..911c7ded35f1 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, 
unsigned long to) vmaddr |= gaddr & ~PMD_MASK; /* Find vma in the parent mm */ vma = find_vma(gmap->mm, vmaddr); + if (!vma) + continue; /* * We do not discard pages that are backed by * hugetlbfs, so we don't have to refault them. */ - if (vma && is_vm_hugetlb_page(vma)) + if (is_vm_hugetlb_page(vma)) continue; size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK)); zap_page_range(vma, vmaddr, size); diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S index eaa843a52907..a480356e0ed8 100644 --- a/arch/x86/boot/compressed/mem_encrypt.S +++ b/arch/x86/boot/compressed/mem_encrypt.S @@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit) push %ebx push %ecx push %edx - push %edi - - /* - * RIP-relative addressing is needed to access the encryption bit - * variable. Since we are running in 32-bit mode we need this call/pop - * sequence to get the proper relative addressing. - */ - call 1f -1: popl %edi - subl $1b, %edi - - movl enc_bit(%edi), %eax - cmpl $0, %eax - jge .Lsev_exit /* Check if running under a hypervisor */ movl $1, %eax @@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit) movl %ebx, %eax andl $0x3f, %eax /* Return the encryption bit location */ - movl %eax, enc_bit(%edi) jmp .Lsev_exit .Lno_sev: xor %eax, %eax - movl %eax, enc_bit(%edi) .Lsev_exit: - pop %edi pop %edx pop %ecx pop %ebx @@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask) ENDPROC(set_sev_encryption_mask) .data -enc_bit: - .int 0xffffffff #ifdef CONFIG_AMD_MEM_ENCRYPT .balign 8 diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index acd11b3bf639..2a356b948720 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || !boot_cpu_has(X86_FEATURE_AES) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c index 2071c3d1ae07..dbe8bb980da1 100644 --- a/arch/x86/crypto/aegis128l-aesni-glue.c +++ b/arch/x86/crypto/aegis128l-aesni-glue.c @@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || !boot_cpu_has(X86_FEATURE_AES) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c index b5f2a8fd5a71..8bebda2de92f 100644 --- a/arch/x86/crypto/aegis256-aesni-glue.c +++ b/arch/x86/crypto/aegis256-aesni-glue.c @@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || !boot_cpu_has(X86_FEATURE_AES) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c index 95cf857d2cbb..f40244eaf14d 100644 --- a/arch/x86/crypto/morus1280-sse2-glue.c +++ b/arch/x86/crypto/morus1280-sse2-glue.c @@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350); static int __init crypto_morus1280_sse2_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c index 615fb7bc9a32..9afaf8f8565a 100644 --- 
a/arch/x86/crypto/morus640-sse2-glue.c +++ b/arch/x86/crypto/morus640-sse2-glue.c @@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400); static int __init crypto_morus640_sse2_module_init(void) { if (!boot_cpu_has(X86_FEATURE_XMM2) || - !boot_cpu_has(X86_FEATURE_OSXSAVE) || !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) return -ENODEV; diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 5b0f613428c2..2c43e3055948 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -95,8 +95,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val) */ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) { - struct ipi_arg_ex **arg; - struct ipi_arg_ex *ipi_arg; + struct hv_send_ipi_ex **arg; + struct hv_send_ipi_ex *ipi_arg; unsigned long flags; int nr_bank = 0; int ret = 1; @@ -105,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) return false; local_irq_save(flags); - arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); + arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); ipi_arg = *arg; if (unlikely(!ipi_arg)) @@ -135,7 +135,7 @@ ipi_mask_ex_done: static bool __send_ipi_mask(const struct cpumask *mask, int vector) { int cur_cpu, vcpu; - struct ipi_arg_non_ex ipi_arg; + struct hv_send_ipi ipi_arg; int ret = 1; trace_hyperv_send_ipi_mask(mask, vector); diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index e203169931c7..6390bd8c141b 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -14,6 +14,16 @@ #ifndef _ASM_X86_FIXMAP_H #define _ASM_X86_FIXMAP_H +/* + * Exposed to assembly code for setting up initial page tables. Cannot be + * calculated in assembly code (fixmap entries are an enum), but is sanity + * checked in the actual fixmap C code to make sure that the fixmap is + * covered fully. 
+ */ +#define FIXMAP_PMD_NUM 2 +/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */ +#define FIXMAP_PMD_TOP 507 + #ifndef __ASSEMBLY__ #include <linux/kernel.h> #include <asm/acpi.h> diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index e977b6b3a538..00e01d215f74 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -726,19 +726,21 @@ struct hv_enlightened_vmcs { #define HV_STIMER_AUTOENABLE (1ULL << 3) #define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F) -struct ipi_arg_non_ex { - u32 vector; - u32 reserved; - u64 cpu_mask; -}; - struct hv_vpset { u64 format; u64 valid_bank_mask; u64 bank_contents[]; }; -struct ipi_arg_ex { +/* HvCallSendSyntheticClusterIpi hypercall */ +struct hv_send_ipi { + u32 vector; + u32 reserved; + u64 cpu_mask; +}; + +/* HvCallSendSyntheticClusterIpiEx hypercall */ +struct hv_send_ipi_ex { u32 vector; u32 reserved; struct hv_vpset vp_set; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8e90488c3d56..09b2e3e2cf1b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -869,6 +869,8 @@ struct kvm_arch { bool x2apic_format; bool x2apic_broadcast_quirk_disabled; + + bool guest_can_read_msr_platform_info; }; struct kvm_vm_stat { @@ -1022,6 +1024,7 @@ struct kvm_x86_ops { void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); + bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu); void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); @@ -1055,6 +1058,7 @@ struct kvm_x86_ops { bool (*umip_emulated)(void); int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); + void (*request_immediate_exit)(struct kvm_vcpu *vcpu); void (*sched_in)(struct kvm_vcpu *kvm, int cpu); @@ -1482,6 +1486,7 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); +void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu); int kvm_is_in_guest(void); diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index c0643831706e..616f8e637bc3 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void); +void __init mem_encrypt_free_decrypted_mem(void); bool sme_active(void); bool sev_active(void); +#define __bss_decrypted __attribute__((__section__(".bss..decrypted"))) + #else /* !CONFIG_AMD_MEM_ENCRYPT */ #define sme_me_mask 0ULL @@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; static inline int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; } +#define __bss_decrypted + #endif /* CONFIG_AMD_MEM_ENCRYPT */ /* @@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; #define __sme_pa(x) (__pa(x) | sme_me_mask) #define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask) +extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; + 
#endif /* __ASSEMBLY__ */ #endif /* __X86_MEM_ENCRYPT_H__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index ce2b59047cb8..9c85b54bf03c 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -14,6 +14,7 @@ #include <asm/processor.h> #include <linux/bitops.h> #include <linux/threads.h> +#include <asm/fixmap.h> extern p4d_t level4_kernel_pgt[512]; extern p4d_t level4_ident_pgt[512]; @@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pmd_t level2_fixmap_pgt[512]; extern pmd_t level2_ident_pgt[512]; -extern pte_t level1_fixmap_pgt[512]; +extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM]; extern pgd_t init_top_pgt[]; #define swapper_pg_dir init_top_pgt diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 86299efa804a..fd23d5778ea1 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -377,6 +377,7 @@ struct kvm_sync_regs { #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) +#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h index 4e588f36228f..285eb3ec4200 100644 --- a/arch/x86/kernel/cpu/intel_rdt.h +++ b/arch/x86/kernel/cpu/intel_rdt.h @@ -382,6 +382,11 @@ static inline bool is_mbm_event(int e) e <= QOS_L3_MBM_LOCAL_EVENT_ID); } +struct rdt_parse_data { + struct rdtgroup *rdtgrp; + char *buf; +}; + /** * struct rdt_resource - attributes of an RDT resource * @rid: The index of the resource @@ -423,16 +428,19 @@ struct rdt_resource { struct rdt_cache cache; struct rdt_membw membw; const char *format_str; - int (*parse_ctrlval) (void *data, struct rdt_resource *r, - struct rdt_domain *d); + int (*parse_ctrlval)(struct rdt_parse_data *data, + struct rdt_resource *r, + struct rdt_domain *d); struct list_head evt_list; int num_rmid; unsigned int mon_scale; unsigned long fflags; }; -int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d); -int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d); +int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d); +int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d); extern struct mutex rdtgroup_mutex; @@ -536,6 +544,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); int update_domains(struct rdt_resource *r, int closid); +int closids_supported(void); void closid_free(int closid); int alloc_rmid(void); void free_rmid(u32 rmid); diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c index af358ca05160..0f53049719cd 100644 --- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c +++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c @@ -64,19 +64,19 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) return true; } -int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d) +int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d) { - unsigned long data; - char *buf = _buf; + unsigned long bw_val; if (d->have_new_ctrl) { rdt_last_cmd_printf("duplicate domain %d\n", d->id); return -EINVAL; } - if (!bw_validate(buf, 
&data, r)) + if (!bw_validate(data->buf, &bw_val, r)) return -EINVAL; - d->new_ctrl = data; + d->new_ctrl = bw_val; d->have_new_ctrl = true; return 0; @@ -123,18 +123,13 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) return true; } -struct rdt_cbm_parse_data { - struct rdtgroup *rdtgrp; - char *buf; -}; - /* * Read one cache bit mask (hex). Check that it is valid for the current * resource type. */ -int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d) +int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r, + struct rdt_domain *d) { - struct rdt_cbm_parse_data *data = _data; struct rdtgroup *rdtgrp = data->rdtgrp; u32 cbm_val; @@ -195,11 +190,17 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d) static int parse_line(char *line, struct rdt_resource *r, struct rdtgroup *rdtgrp) { - struct rdt_cbm_parse_data data; + struct rdt_parse_data data; char *dom = NULL, *id; struct rdt_domain *d; unsigned long dom_id; + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + r->rid == RDT_RESOURCE_MBA) { + rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); + return -EINVAL; + } + next: if (!line || line[0] == '\0') return 0; diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index b799c00bef09..1b8e86a5d5e1 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -97,6 +97,12 @@ void rdt_last_cmd_printf(const char *fmt, ...) * limited as the number of resources grows. */ static int closid_free_map; +static int closid_free_map_len; + +int closids_supported(void) +{ + return closid_free_map_len; +} static void closid_init(void) { @@ -111,6 +117,7 @@ static void closid_init(void) /* CLOSID 0 is always reserved for the default group */ closid_free_map &= ~1; + closid_free_map_len = rdt_min_closid; } static int closid_alloc(void) @@ -802,7 +809,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, sw_shareable = 0; exclusive = 0; seq_printf(seq, "%d=", dom->id); - for (i = 0; i < r->num_closid; i++, ctrl++) { + for (i = 0; i < closids_supported(); i++, ctrl++) { if (!closid_allocated(i)) continue; mode = rdtgroup_mode_by_closid(i); @@ -989,7 +996,7 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, /* Check for overlap with other resource groups */ ctrl = d->ctrl_val; - for (i = 0; i < r->num_closid; i++, ctrl++) { + for (i = 0; i < closids_supported(); i++, ctrl++) { ctrl_b = (unsigned long *)ctrl; mode = rdtgroup_mode_by_closid(i); if (closid_allocated(i) && i != closid && @@ -1024,16 +1031,27 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) { int closid = rdtgrp->closid; struct rdt_resource *r; + bool has_cache = false; struct rdt_domain *d; for_each_alloc_enabled_rdt_resource(r) { + if (r->rid == RDT_RESOURCE_MBA) + continue; + has_cache = true; list_for_each_entry(d, &r->domains, list) { if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], - rdtgrp->closid, false)) + rdtgrp->closid, false)) { + rdt_last_cmd_puts("schemata overlaps\n"); return false; + } } } + if (!has_cache) { + rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n"); + return false; + } + return true; } @@ -1085,7 +1103,6 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, rdtgrp->mode = RDT_MODE_SHAREABLE; } else if (!strcmp(buf, "exclusive")) { if (!rdtgroup_mode_test_exclusive(rdtgrp)) { - rdt_last_cmd_printf("schemata overlaps\n"); ret = -EINVAL; goto out; } @@ -1155,8 +1172,8 @@ 
static int rdtgroup_size_show(struct kernfs_open_file *of, struct rdt_resource *r; struct rdt_domain *d; unsigned int size; - bool sep = false; - u32 cbm; + bool sep; + u32 ctrl; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { @@ -1174,6 +1191,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, } for_each_alloc_enabled_rdt_resource(r) { + sep = false; seq_printf(s, "%*s:", max_name_width, r->name); list_for_each_entry(d, &r->domains, list) { if (sep) @@ -1181,8 +1199,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { size = 0; } else { - cbm = d->ctrl_val[rdtgrp->closid]; - size = rdtgroup_cbm_to_size(r, d, cbm); + ctrl = (!is_mba_sc(r) ? + d->ctrl_val[rdtgrp->closid] : + d->mbps_val[rdtgrp->closid]); + if (r->rid == RDT_RESOURCE_MBA) + size = ctrl; + else + size = rdtgroup_cbm_to_size(r, d, ctrl); } seq_printf(s, "%d=%u", d->id, size); sep = true; @@ -2336,12 +2359,18 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) u32 *ctrl; for_each_alloc_enabled_rdt_resource(r) { + /* + * Only initialize default allocations for CBM cache + * resources + */ + if (r->rid == RDT_RESOURCE_MBA) + continue; list_for_each_entry(d, &r->domains, list) { d->have_new_ctrl = false; d->new_ctrl = r->cache.shareable_bits; used_b = r->cache.shareable_bits; ctrl = d->ctrl_val; - for (i = 0; i < r->num_closid; i++, ctrl++) { + for (i = 0; i < closids_supported(); i++, ctrl++) { if (closid_allocated(i) && i != closid) { mode = rdtgroup_mode_by_closid(i); if (mode == RDT_MODE_PSEUDO_LOCKSETUP) @@ -2373,6 +2402,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) } for_each_alloc_enabled_rdt_resource(r) { + /* + * Only initialize default allocations for CBM cache + * resources + */ + if (r->rid == RDT_RESOURCE_MBA) + continue; ret = update_domains(r, rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("failed to initialize allocations\n"); diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 8047379e575a..ddee1f0870c4 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -35,6 +35,7 @@ #include <asm/bootparam_utils.h> #include <asm/microcode.h> #include <asm/kasan.h> +#include <asm/fixmap.h> /* * Manage page tables very early on. @@ -112,6 +113,7 @@ static bool __head check_la57_support(unsigned long physaddr) unsigned long __head __startup_64(unsigned long physaddr, struct boot_params *bp) { + unsigned long vaddr, vaddr_end; unsigned long load_delta, *p; unsigned long pgtable_flags; pgdval_t *pgd; @@ -165,7 +167,8 @@ unsigned long __head __startup_64(unsigned long physaddr, pud[511] += load_delta; pmd = fixup_pointer(level2_fixmap_pgt, physaddr); - pmd[506] += load_delta; + for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--) + pmd[i] += load_delta; /* * Set up the identity mapping for the switchover. These @@ -235,6 +238,21 @@ unsigned long __head __startup_64(unsigned long physaddr, sme_encrypt_kernel(bp); /* + * Clear the memory encryption mask from the .bss..decrypted section. + * The bss section will be memset to zero later in the initialization so + * there is no need to zero it after changing the memory encryption + * attribute. 
+ */ + if (mem_encrypt_active()) { + vaddr = (unsigned long)__start_bss_decrypted; + vaddr_end = (unsigned long)__end_bss_decrypted; + for (; vaddr < vaddr_end; vaddr += PMD_SIZE) { + i = pmd_index(vaddr); + pmd[i] -= sme_get_me_mask(); + } + } + + /* * Return the SME encryption mask (if SME is active) to be used as a * modifier for the initial pgdir entry programmed into CR3. */ diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 15ebc2fc166e..a3618cf04cf6 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -24,6 +24,7 @@ #include "../entry/calling.h" #include <asm/export.h> #include <asm/nospec-branch.h> +#include <asm/fixmap.h> #ifdef CONFIG_PARAVIRT #include <asm/asm-offsets.h> @@ -445,13 +446,20 @@ NEXT_PAGE(level2_kernel_pgt) KERNEL_IMAGE_SIZE/PMD_SIZE) NEXT_PAGE(level2_fixmap_pgt) - .fill 506,8,0 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ - .fill 5,8,0 + .fill (512 - 4 - FIXMAP_PMD_NUM),8,0 + pgtno = 0 + .rept (FIXMAP_PMD_NUM) + .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \ + + _PAGE_TABLE_NOENC; + pgtno = pgtno + 1 + .endr + /* 6 MB reserved space + a 2MB hole */ + .fill 4,8,0 NEXT_PAGE(level1_fixmap_pgt) + .rept (FIXMAP_PMD_NUM) .fill 512,8,0 + .endr #undef PMDS diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 1e6764648af3..013fe3d21dbb 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -28,6 +28,7 @@ #include <linux/sched/clock.h> #include <linux/mm.h> #include <linux/slab.h> +#include <linux/set_memory.h> #include <asm/hypervisor.h> #include <asm/mem_encrypt.h> @@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall); (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info)) static struct pvclock_vsyscall_time_info - hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE); -static struct pvclock_wall_clock wall_clock; + hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE); +static struct pvclock_wall_clock wall_clock __bss_decrypted; static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu); +static struct pvclock_vsyscall_time_info *hvclock_mem; static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void) { @@ -236,6 +238,45 @@ static void kvm_shutdown(void) native_machine_shutdown(); } +static void __init kvmclock_init_mem(void) +{ + unsigned long ncpus; + unsigned int order; + struct page *p; + int r; + + if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus()) + return; + + ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE; + order = get_order(ncpus * sizeof(*hvclock_mem)); + + p = alloc_pages(GFP_KERNEL, order); + if (!p) { + pr_warn("%s: failed to alloc %d pages", __func__, (1U << order)); + return; + } + + hvclock_mem = page_address(p); + + /* + * hvclock is shared between the guest and the hypervisor, must + * be mapped decrypted. + */ + if (sev_active()) { + r = set_memory_decrypted((unsigned long) hvclock_mem, + 1UL << order); + if (r) { + __free_pages(p, order); + hvclock_mem = NULL; + pr_warn("kvmclock: set_memory_decrypted() failed. 
Disabling\n"); + return; + } + } + + memset(hvclock_mem, 0, PAGE_SIZE << order); +} + static int __init kvm_setup_vsyscall_timeinfo(void) { #ifdef CONFIG_X86_64 @@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void) kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK; #endif + + kvmclock_init_mem(); + return 0; } early_initcall(kvm_setup_vsyscall_timeinfo); @@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu) /* Use the static page for the first CPUs, allocate otherwise */ if (cpu < HVC_BOOT_ARRAY_SIZE) p = &hv_clock_boot[cpu]; + else if (hvclock_mem) + p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE; else - p = kzalloc(sizeof(*p), GFP_KERNEL); + return -ENOMEM; per_cpu(hv_clock_per_cpu, cpu) = p; return p ? 0 : -ENOMEM; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index afdb303285f8..8dc69d82567e 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -91,7 +91,7 @@ unsigned paravirt_patch_call(void *insnbuf, if (len < 5) { #ifdef CONFIG_RETPOLINE - WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr); + WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr); #endif return len; /* call too long for patch site */ } @@ -111,7 +111,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target, if (len < 5) { #ifdef CONFIG_RETPOLINE - WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr); + WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr); #endif return len; /* call too long for patch site */ } diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 8bde0a419f86..5dd3317d761f 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -65,6 +65,23 @@ jiffies_64 = jiffies; #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); #define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); +/* + * This section contains data which will be mapped as decrypted. Memory + * encryption operates on a page basis. Make this section PMD-aligned + * to avoid splitting the pages while mapping the section early. + * + * Note: We use a separate section so that only this section gets + * decrypted to avoid exposing more than we wish. + */ +#define BSS_DECRYPTED \ + . = ALIGN(PMD_SIZE); \ + __start_bss_decrypted = .; \ + *(.bss..decrypted); \ + . = ALIGN(PAGE_SIZE); \ + __start_bss_decrypted_unused = .; \ + . = ALIGN(PMD_SIZE); \ + __end_bss_decrypted = .; \ + #else #define X86_ALIGN_RODATA_BEGIN @@ -74,6 +91,7 @@ jiffies_64 = jiffies; #define ALIGN_ENTRY_TEXT_BEGIN #define ALIGN_ENTRY_TEXT_END +#define BSS_DECRYPTED #endif @@ -355,6 +373,7 @@ SECTIONS __bss_start = .; *(.bss..page_aligned) *(.bss) + BSS_DECRYPTED . 
= ALIGN(PAGE_SIZE); __bss_stop = .; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 17c0472c5b34..fbb0e6df121b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1344,9 +1344,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read); static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) { - return kvm_apic_hw_enabled(apic) && - addr >= apic->base_address && - addr < apic->base_address + LAPIC_MMIO_LENGTH; + return addr >= apic->base_address && + addr < apic->base_address + LAPIC_MMIO_LENGTH; } static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, @@ -1358,6 +1357,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, if (!apic_mmio_in_range(apic, address)) return -EOPNOTSUPP; + if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) { + if (!kvm_check_has_quirk(vcpu->kvm, + KVM_X86_QUIRK_LAPIC_MMIO_HOLE)) + return -EOPNOTSUPP; + + memset(data, 0xff, len); + return 0; + } + kvm_lapic_reg_read(apic, offset, len, data); return 0; @@ -1917,6 +1925,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, if (!apic_mmio_in_range(apic, address)) return -EOPNOTSUPP; + if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) { + if (!kvm_check_has_quirk(vcpu->kvm, + KVM_X86_QUIRK_LAPIC_MMIO_HOLE)) + return -EOPNOTSUPP; + + return 0; + } + /* * APIC register must be aligned on 128-bits boundary. * 32/64/128 bits registers must be accessed thru 32 bits. diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e24ea7067373..d7e9bce6ff61 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -899,7 +899,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) { /* * Make sure the write to vcpu->mode is not reordered in front of - * reads to sptes. If it does, kvm_commit_zap_page() can see us + * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. */ smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); @@ -5417,7 +5417,12 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu) { MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); - kvm_init_mmu(vcpu, true); + /* + * kvm_mmu_setup() is called only on vCPU initialization. + * Therefore, no need to reset mmu roots as they are not yet + * initialized. 
+ */ + kvm_init_mmu(vcpu, false); } static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 89c4c5aa15f1..d96092b35936 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1226,8 +1226,7 @@ static __init int sev_hardware_setup(void) min_sev_asid = cpuid_edx(0x8000001F); /* Initialize SEV ASID bitmap */ - sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid), - sizeof(unsigned long), GFP_KERNEL); + sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); if (!sev_asid_bitmap) return 1; @@ -1405,7 +1404,7 @@ static __exit void svm_hardware_unsetup(void) int cpu; if (svm_sev_enabled()) - kfree(sev_asid_bitmap); + bitmap_free(sev_asid_bitmap); for_each_possible_cpu(cpu) svm_cpu_uninit(cpu); @@ -7149,6 +7148,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .check_intercept = svm_check_intercept, .handle_external_intr = svm_handle_external_intr, + .request_immediate_exit = __kvm_request_immediate_exit, + .sched_in = svm_sched_in, .pmu_ops = &amd_pmu_ops, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 533a327372c8..06412ba46aa3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -397,6 +397,7 @@ struct loaded_vmcs { int cpu; bool launched; bool nmi_known_unmasked; + bool hv_timer_armed; /* Support for vnmi-less CPUs */ int soft_vnmi_blocked; ktime_t entry_time; @@ -1019,6 +1020,8 @@ struct vcpu_vmx { int ple_window; bool ple_window_dirty; + bool req_immediate_exit; + /* Support for PML */ #define PML_ENTITY_NUM 512 struct page *pml_pg; @@ -2864,6 +2867,8 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) u16 fs_sel, gs_sel; int i; + vmx->req_immediate_exit = false; + if (vmx->loaded_cpu_state) return; @@ -5393,9 +5398,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) * To use VMXON (and later other VMX instructions), a guest * must first be able to turn on cr4.VMXE (see handle_vmon()). * So basically the check on whether to allow nested VMX - * is here. + * is here. We operate under the default treatment of SMM, + * so VMX cannot be enabled under SMM. 
*/ - if (!nested_vmx_allowed(vcpu)) + if (!nested_vmx_allowed(vcpu) || is_smm(vcpu)) return 1; } @@ -6183,6 +6189,27 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) nested_mark_vmcs12_pages_dirty(vcpu); } +static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + void *vapic_page; + u32 vppr; + int rvi; + + if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || + !nested_cpu_has_vid(get_vmcs12(vcpu)) || + WARN_ON_ONCE(!vmx->nested.virtual_apic_page)) + return false; + + rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff; + + vapic_page = kmap(vmx->nested.virtual_apic_page); + vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); + kunmap(vmx->nested.virtual_apic_page); + + return ((rvi & 0xf0) > (vppr & 0xf0)); +} + static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, bool nested) { @@ -7966,6 +7993,9 @@ static __init int hardware_setup(void) kvm_x86_ops->enable_log_dirty_pt_masked = NULL; } + if (!cpu_has_vmx_preemption_timer()) + kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit; + if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { u64 vmx_msr; @@ -9208,7 +9238,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu) static int handle_preemption_timer(struct kvm_vcpu *vcpu) { - kvm_lapic_expired_hv_timer(vcpu); + if (!to_vmx(vcpu)->req_immediate_exit) + kvm_lapic_expired_hv_timer(vcpu); return 1; } @@ -10595,24 +10626,43 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) msrs[i].host, false); } -static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) +static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val) +{ + vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val); + if (!vmx->loaded_vmcs->hv_timer_armed) + vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, + PIN_BASED_VMX_PREEMPTION_TIMER); + vmx->loaded_vmcs->hv_timer_armed = true; +} + +static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); u64 tscl; u32 delta_tsc; - if (vmx->hv_deadline_tsc == -1) + if (vmx->req_immediate_exit) { + vmx_arm_hv_timer(vmx, 0); return; + } - tscl = rdtsc(); - if (vmx->hv_deadline_tsc > tscl) - /* sure to be 32 bit only because checked on set_hv_timer */ - delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> - cpu_preemption_timer_multi); - else - delta_tsc = 0; + if (vmx->hv_deadline_tsc != -1) { + tscl = rdtsc(); + if (vmx->hv_deadline_tsc > tscl) + /* set_hv_timer ensures the delta fits in 32-bits */ + delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> + cpu_preemption_timer_multi); + else + delta_tsc = 0; - vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); + vmx_arm_hv_timer(vmx, delta_tsc); + return; + } + + if (vmx->loaded_vmcs->hv_timer_armed) + vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, + PIN_BASED_VMX_PREEMPTION_TIMER); + vmx->loaded_vmcs->hv_timer_armed = false; } static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) @@ -10672,7 +10722,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) atomic_switch_perf_msrs(vmx); - vmx_arm_hv_timer(vcpu); + vmx_update_hv_timer(vcpu); /* * If this vCPU has touched SPEC_CTRL, restore the guest's value if @@ -11427,16 +11477,18 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vcpu->arch.virtual_tsc_khz == 0) - return; - - /* Make sure short timeouts reliably trigger an immediate vmexit. - * hrtimer_start does not guarantee this. 
*/ - if (preemption_timeout <= 1) { + /* + * A timer value of zero is architecturally guaranteed to cause + * a VMExit prior to executing any instructions in the guest. + */ + if (preemption_timeout == 0) { vmx_preemption_timer_fn(&vmx->nested.preemption_timer); return; } + if (vcpu->arch.virtual_tsc_khz == 0) + return; + preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; preemption_timeout *= 1000000; do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); @@ -11646,11 +11698,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, * bits 15:8 should be zero in posted_intr_nv, * the descriptor address has been already checked * in nested_get_vmcs12_pages. + * + * bits 5:0 of posted_intr_desc_addr should be zero. */ if (nested_cpu_has_posted_intr(vmcs12) && (!nested_cpu_has_vid(vmcs12) || !nested_exit_intr_ack_set(vcpu) || - vmcs12->posted_intr_nv & 0xff00)) + (vmcs12->posted_intr_nv & 0xff00) || + (vmcs12->posted_intr_desc_addr & 0x3f) || + (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr)))) return -EINVAL; /* tpr shadow is needed by all apicv features. */ @@ -12076,11 +12132,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, exec_control = vmcs12->pin_based_vm_exec_control; - /* Preemption timer setting is only taken from vmcs01. */ - exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + /* Preemption timer setting is computed directly in vmx_vcpu_run. */ exec_control |= vmcs_config.pin_based_exec_ctrl; - if (vmx->hv_deadline_tsc == -1) - exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + vmx->loaded_vmcs->hv_timer_armed = false; /* Posted interrupts setting is only taken from vmcs12. */ if (nested_cpu_has_posted_intr(vmcs12)) { @@ -12318,6 +12373,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; @@ -12863,6 +12921,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) return 0; } +static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) +{ + to_vmx(vcpu)->req_immediate_exit = true; +} + static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) { ktime_t remaining = @@ -13253,12 +13316,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); - if (vmx->hv_deadline_tsc == -1) - vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); - else - vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); + if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); @@ -13462,18 +13520,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) return -ERANGE; vmx->hv_deadline_tsc = tscl + delta_tsc; - vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); - return delta_tsc == 0; } static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - vmx->hv_deadline_tsc = -1; - vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL, - PIN_BASED_VMX_PREEMPTION_TIMER); + to_vmx(vcpu)->hv_deadline_tsc = -1; } #endif @@ 
-13954,6 +14006,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) return -EINVAL; + /* + * SMM temporarily disables VMX, so we cannot be in guest mode, + * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags + * must be zero. + */ + if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags) + return -EINVAL; + if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) return -EINVAL; @@ -14097,6 +14157,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .apicv_post_state_restore = vmx_apicv_post_state_restore, .hwapic_irr_update = vmx_hwapic_irr_update, .hwapic_isr_update = vmx_hwapic_isr_update, + .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, .sync_pir_to_irr = vmx_sync_pir_to_irr, .deliver_posted_interrupt = vmx_deliver_posted_interrupt, @@ -14130,6 +14191,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .umip_emulated = vmx_umip_emulated, .check_nested_events = vmx_check_nested_events, + .request_immediate_exit = vmx_request_immediate_exit, .sched_in = vmx_sched_in, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 542f6315444d..edbf00ec56b3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -628,7 +628,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu) gfn_t gfn; int r; - if (is_long_mode(vcpu) || !is_pae(vcpu)) + if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, @@ -2537,7 +2537,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_PLATFORM_INFO: if (!msr_info->host_initiated || - data & ~MSR_PLATFORM_INFO_CPUID_FAULT || (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && cpuid_fault_enabled(vcpu))) return 1; @@ -2780,6 +2779,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vcpu->arch.osvw.status; break; case MSR_PLATFORM_INFO: + if (!msr_info->host_initiated && + !vcpu->kvm->arch.guest_can_read_msr_platform_info) + return 1; msr_info->data = vcpu->arch.msr_platform_info; break; case MSR_MISC_FEATURES_ENABLES: @@ -2927,6 +2929,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SPLIT_IRQCHIP: case KVM_CAP_IMMEDIATE_EXIT: case KVM_CAP_GET_MSR_FEATURES: + case KVM_CAP_MSR_PLATFORM_INFO: r = 1; break; case KVM_CAP_SYNC_REGS: @@ -4007,19 +4010,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp, break; BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); + r = -EFAULT; if (get_user(user_data_size, &user_kvm_nested_state->size)) - return -EFAULT; + break; r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state, user_data_size); if (r < 0) - return r; + break; if (r > user_data_size) { if (put_user(r, &user_kvm_nested_state->size)) - return -EFAULT; - return -E2BIG; + r = -EFAULT; + else + r = -E2BIG; + break; } + r = 0; break; } @@ -4031,19 +4038,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (!kvm_x86_ops->set_nested_state) break; + r = -EFAULT; if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) - return -EFAULT; + break; + r = -EINVAL; if (kvm_state.size < sizeof(kvm_state)) - return -EINVAL; + break; if (kvm_state.flags & ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE)) - return -EINVAL; + break; /* nested_run_pending implies guest_mode. 
*/ if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING) - return -EINVAL; + break; r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); break; @@ -4350,6 +4359,10 @@ split_irqchip_unlock: kvm->arch.pause_in_guest = true; r = 0; break; + case KVM_CAP_MSR_PLATFORM_INFO: + kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; + r = 0; + break; default: r = -EINVAL; break; @@ -7361,6 +7374,12 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); +void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) +{ + smp_send_reschedule(vcpu->cpu); +} +EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit); + /* * Returns 1 to let vcpu_run() continue the guest execution loop without * exiting to the userspace. Otherwise, the value will be returned to the @@ -7565,7 +7584,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (req_immediate_exit) { kvm_make_request(KVM_REQ_EVENT, vcpu); - smp_send_reschedule(vcpu->cpu); + kvm_x86_ops->request_immediate_exit(vcpu); } trace_kvm_entry(vcpu->vcpu_id); @@ -7829,6 +7848,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) return 0; } +/* Swap (qemu) user FPU context for the guest FPU context. */ +static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + copy_fpregs_to_fpstate(&vcpu->arch.user_fpu); + /* PKRU is separately restored in kvm_x86_ops->run. */ + __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, + ~XFEATURE_MASK_PKRU); + preempt_enable(); + trace_kvm_fpu(1); +} + +/* When vcpu_run ends, restore user space FPU context. */ +static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); + copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state); + preempt_enable(); + ++vcpu->stat.fpu_reload; + trace_kvm_fpu(0); +} + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; @@ -8177,7 +8219,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) kvm_update_cpuid(vcpu); idx = srcu_read_lock(&vcpu->kvm->srcu); - if (!is_long_mode(vcpu) && is_pae(vcpu)) { + if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) { load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); mmu_reset_needed = 1; } @@ -8406,29 +8448,6 @@ static void fx_init(struct kvm_vcpu *vcpu) vcpu->arch.cr0 |= X86_CR0_ET; } -/* Swap (qemu) user FPU context for the guest FPU context. */ -void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) -{ - preempt_disable(); - copy_fpregs_to_fpstate(&vcpu->arch.user_fpu); - /* PKRU is separately restored in kvm_x86_ops->run. */ - __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, - ~XFEATURE_MASK_PKRU); - preempt_enable(); - trace_kvm_fpu(1); -} - -/* When vcpu_run ends, restore user space FPU context. 
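The KVM_CAP_MSR_PLATFORM_INFO plumbing above is driven from userspace through KVM_ENABLE_CAP on the VM file descriptor. A hedged sketch of that call; vm_fd and the helper name are assumptions, error handling is omitted, and the capability defaults to enabled in kvm_arch_init_vm() further below:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* args[0] = 0 restores the #GP on guest reads of MSR_PLATFORM_INFO that
     * the kvm_get_msr_common() check above enforces; args[0] = 1 allows the
     * read. Writes by the guest remain disallowed either way. */
    static int set_platform_info_readable(int vm_fd, int readable)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
            cap.args[0] = readable;

            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }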
*/ -void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) -{ - preempt_disable(); - copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); - copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state); - preempt_enable(); - ++vcpu->stat.fpu_reload; - trace_kvm_fpu(0); -} - void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; @@ -8852,6 +8871,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.kvmclock_offset = -ktime_get_boot_ns(); pvclock_update_vm_gtod_copy(kvm); + kvm->arch.guest_can_read_msr_platform_info = true; + INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); @@ -9200,6 +9221,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, kvm_page_track_flush_slot(kvm, slot); } +static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) +{ + return (is_guest_mode(vcpu) && + kvm_x86_ops->guest_apic_has_interrupt && + kvm_x86_ops->guest_apic_has_interrupt(vcpu)); +} + static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) { if (!list_empty_careful(&vcpu->async_pf.done)) @@ -9224,7 +9252,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) return true; if (kvm_arch_interrupt_allowed(vcpu) && - kvm_cpu_has_interrupt(vcpu)) + (kvm_cpu_has_interrupt(vcpu) || + kvm_guest_apic_has_interrupt(vcpu))) return true; if (kvm_hv_has_stimer_pending(vcpu)) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 7a8fc26c1115..faca978ebf9d 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end) set_memory_np_noalias(begin_ul, len_pages); } +void __weak mem_encrypt_free_decrypted_mem(void) { } + void __ref free_initmem(void) { e820__reallocate_tables(); + mem_encrypt_free_decrypted_mem(); + free_kernel_image_pages(&__init_begin, &__init_end); } diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index b2de398d1fd3..006f373f54ab 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -348,6 +348,30 @@ bool sev_active(void) EXPORT_SYMBOL(sev_active); /* Architecture __weak replacement functions */ +void __init mem_encrypt_free_decrypted_mem(void) +{ + unsigned long vaddr, vaddr_end, npages; + int r; + + vaddr = (unsigned long)__start_bss_decrypted_unused; + vaddr_end = (unsigned long)__end_bss_decrypted; + npages = (vaddr_end - vaddr) >> PAGE_SHIFT; + + /* + * The unused memory range was mapped decrypted, change the encryption + * attribute from decrypted to encrypted before freeing it. + */ + if (mem_encrypt_active()) { + r = set_memory_encrypted(vaddr, npages); + if (r) { + pr_warn("failed to free unused decrypted pages\n"); + return; + } + } + + free_init_pages("unused decrypted", vaddr, vaddr_end); +} + void __init mem_encrypt_init(void) { if (!sme_me_mask) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index ae394552fb94..089e78c4effd 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -637,6 +637,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) { unsigned long address = __fix_to_virt(idx); +#ifdef CONFIG_X86_64 + /* + * Ensure that the static initial page tables are covering the + * fixmap completely. 
+ */ + BUILD_BUG_ON(__end_of_permanent_fixed_addresses > + (FIXMAP_PMD_NUM * PTRS_PER_PTE)); +#endif + if (idx >= __end_of_fixed_addresses) { BUG(); return; diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 2fe5c9b1816b..dd461c0167ef 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1907,7 +1907,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) /* L3_k[511] -> level2_fixmap_pgt */ convert_pfn_mfn(level3_kernel_pgt); - /* L3_k[511][506] -> level1_fixmap_pgt */ + /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */ convert_pfn_mfn(level2_fixmap_pgt); /* We get [511][511] and have Xen's version of level2_kernel_pgt */ @@ -1952,7 +1952,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); - set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); + + for (i = 0; i < FIXMAP_PMD_NUM; i++) { + set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE, + PAGE_KERNEL_RO); + } /* Pin down new L4 */ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index 7d00d4ad44d4..95997e6c0696 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs, irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) { int err, ret = IRQ_NONE; - struct pt_regs regs; + struct pt_regs regs = {0}; const struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); uint8_t xenpmu_flags = get_xenpmu_flags(); diff --git a/block/bio.c b/block/bio.c index 8c680a776171..0093bed81c0e 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1684,7 +1684,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op, const int sgrp = op_stat_group(req_op); int cpu = part_stat_lock(); - part_stat_add(cpu, part, ticks[sgrp], duration); + part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration)); part_round_stats(q, cpu, part); part_dec_in_flight(q, part, op_is_write(req_op)); diff --git a/block/blk-core.c b/block/blk-core.c index 4dbc93f43b38..cff0a60ee200 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2733,17 +2733,15 @@ void blk_account_io_done(struct request *req, u64 now) * containing request is enough. */ if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { - unsigned long duration; const int sgrp = op_stat_group(req_op(req)); struct hd_struct *part; int cpu; - duration = nsecs_to_jiffies(now - req->start_time_ns); cpu = part_stat_lock(); part = req->part; part_stat_inc(cpu, part, ios[sgrp]); - part_stat_add(cpu, part, ticks[sgrp], duration); + part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns); part_round_stats(req->q, cpu, part); part_dec_in_flight(req->q, part, rq_data_dir(req)); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 94e1ed667b6e..41317c50a446 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, /* * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and - * queue_hw_ctx after freeze the queue. So we could use q_usage_counter - * to avoid race with it. __blk_mq_update_nr_hw_queues will users - * synchronize_rcu to ensure all of the users go out of the critical - * section below and see zeroed q_usage_counter. + * queue_hw_ctx after freeze the queue, so we use q_usage_counter + * to avoid race with it. 
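The blk_mq_queue_tag_busy_iter() hunk above replaces the RCU and percpu_ref_is_zero() dance with a plain try-get on the queue usage counter. Condensed into an illustrative guard; the per-hctx tag walk is elided, and blk_queue_exit() is block-layer internal (declared in block/blk.h), so this sketch assumes block-layer context:

    #include <linux/blkdev.h>
    #include <linux/percpu-refcount.h>

    static void example_busy_iter_guarded(struct request_queue *q)
    {
            /* A frozen queue has a dead usage counter, so the tryget fails
             * and we bail out; otherwise the reference pins queue_hw_ctx
             * for the duration of the walk. */
            if (!percpu_ref_tryget(&q->q_usage_counter))
                    return;

            /* ... queue_for_each_hw_ctx() / bt_for_each() walk goes here ... */

            blk_queue_exit(q);      /* drops the reference taken above */
    }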
*/ - rcu_read_lock(); - if (percpu_ref_is_zero(&q->q_usage_counter)) { - rcu_read_unlock(); + if (!percpu_ref_tryget(&q->q_usage_counter)) return; - } queue_for_each_hw_ctx(q, hctx, i) { struct blk_mq_tags *tags = hctx->tags; @@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); } - rcu_read_unlock(); + blk_queue_exit(q); } static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, diff --git a/block/blk-mq.c b/block/blk-mq.c index 85a1c1a59c72..e3c39ea8e17b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) BUG_ON(!rq->q); if (rq->mq_ctx != this_ctx) { if (this_ctx) { - trace_block_unplug(this_q, depth, from_schedule); + trace_block_unplug(this_q, depth, !from_schedule); blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, from_schedule); @@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) * on 'ctx_list'. Do those. */ if (this_ctx) { - trace_block_unplug(this_q, depth, from_schedule); + trace_block_unplug(this_q, depth, !from_schedule); blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, from_schedule); } diff --git a/block/elevator.c b/block/elevator.c index 6a06b5d040e5..fae58b2f906f 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q) while (e->type->ops.sq.elevator_dispatch_fn(q, 1)) ; - if (q->nr_sorted && printed++ < 10) { + if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) { printk(KERN_ERR "%s: forced dispatching is broken " "(nr_sorted=%u), please report this\n", q->elevator->type->elevator_name, q->nr_sorted); diff --git a/block/genhd.c b/block/genhd.c index 8cc719a37b32..be5bab20b2ab 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1343,18 +1343,18 @@ static int diskstats_show(struct seq_file *seqf, void *v) part_stat_read(hd, ios[STAT_READ]), part_stat_read(hd, merges[STAT_READ]), part_stat_read(hd, sectors[STAT_READ]), - jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])), + (unsigned int)part_stat_read_msecs(hd, STAT_READ), part_stat_read(hd, ios[STAT_WRITE]), part_stat_read(hd, merges[STAT_WRITE]), part_stat_read(hd, sectors[STAT_WRITE]), - jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])), + (unsigned int)part_stat_read_msecs(hd, STAT_WRITE), inflight[0], jiffies_to_msecs(part_stat_read(hd, io_ticks)), jiffies_to_msecs(part_stat_read(hd, time_in_queue)), part_stat_read(hd, ios[STAT_DISCARD]), part_stat_read(hd, merges[STAT_DISCARD]), part_stat_read(hd, sectors[STAT_DISCARD]), - jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD])) + (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD) ); } disk_part_iter_exit(&piter); diff --git a/block/partition-generic.c b/block/partition-generic.c index 5a8975a1201c..d3d14e81fb12 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -136,18 +136,18 @@ ssize_t part_stat_show(struct device *dev, part_stat_read(p, ios[STAT_READ]), part_stat_read(p, merges[STAT_READ]), (unsigned long long)part_stat_read(p, sectors[STAT_READ]), - jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])), + (unsigned int)part_stat_read_msecs(p, STAT_READ), part_stat_read(p, ios[STAT_WRITE]), part_stat_read(p, merges[STAT_WRITE]), (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]), - jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])), + 
(unsigned int)part_stat_read_msecs(p, STAT_WRITE), inflight[0], jiffies_to_msecs(part_stat_read(p, io_ticks)), jiffies_to_msecs(part_stat_read(p, time_in_queue)), part_stat_read(p, ios[STAT_DISCARD]), part_stat_read(p, merges[STAT_DISCARD]), (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]), - jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD]))); + (unsigned int)part_stat_read_msecs(p, STAT_DISCARD)); } ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 599e01bcdef2..a9dd4ea7467d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc) */ int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) { + u64 done_mask, ap_qc_active = ap->qc_active; int nr_done = 0; - u64 done_mask; - done_mask = ap->qc_active ^ qc_active; + /* + * If the internal tag is set on ap->qc_active, then we care about + * bit0 on the passed in qc_active mask. Move that bit up to match + * the internal tag. + */ + if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) { + qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL; + qc_active ^= qc_active & 0x01; + } + + done_mask = ap_qc_active ^ qc_active; if (unlikely(done_mask & qc_active)) { ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 48f622728ce6..f2b6f4da1034 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3467,6 +3467,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int (struct floppy_struct **)&outparam); if (ret) return ret; + memcpy(&inparam.g, outparam, + offsetof(struct floppy_struct, name)); + outparam = &inparam.g; break; case FDMSGON: UDP->flags |= FTD_MSG; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a71d817e900d..429d20131c7e 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info) list_del(&gnt_list_entry->node); gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); rinfo->persistent_gnts_c--; - __free_page(gnt_list_entry->page); - kfree(gnt_list_entry); + gnt_list_entry->gref = GRANT_INVALID_REF; + list_add_tail(&gnt_list_entry->node, &rinfo->grants); } spin_unlock_irqrestore(&rinfo->ring_lock, flags); diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 963bb0309e25..ea6238ed5c0e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty) } clear_bit(HCI_UART_PROTO_SET, &hu->flags); + percpu_free_rwsem(&hu->proto_lock); + kfree(hu); } diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index 08ef69945ffb..d977193842df 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c @@ -55,6 +55,7 @@ struct clk_plt_data { u8 nparents; struct clk_plt *clks[PMC_CLK_NUM]; struct clk_lookup *mclk_lookup; + struct clk_lookup *ether_clk_lookup; }; /* Return an index in parent table */ @@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; spin_lock_init(&pclk->lock); - /* - * If the clock was already enabled by the firmware mark it as critical - * to avoid it being gated by the clock framework if no driver owns it. 
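The accounting hunks above (bio.c, blk-core.c, genhd.c, partition-generic.c) move the per-partition I/O duration counters from jiffies-based ticks[] to nanosecond nsecs[]. The part_stat_read_msecs() reporting they rely on is added elsewhere in the series and is not shown here, so the conversion it implies is sketched with a hypothetical helper:

    #include <linux/math64.h>
    #include <linux/time64.h>

    /* div_u64() keeps the 64-bit nanosecond total cheap to divide on 32-bit
     * architectures; the truncated result is what diskstats and the sysfs
     * stat files report as milliseconds. */
    static inline unsigned int example_io_nsecs_to_msecs(u64 nsecs)
    {
            return (unsigned int)div_u64(nsecs, NSEC_PER_MSEC);
    }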
- */ - if (plt_clk_is_enabled(&pclk->hw)) - init.flags |= CLK_IS_CRITICAL; - ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); if (ret) { pclk = ERR_PTR(ret); @@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev) goto err_unreg_clk_plt; } + data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw, + "ether_clk", NULL); + if (!data->ether_clk_lookup) { + err = -ENOMEM; + goto err_drop_mclk; + } + plt_clk_free_parent_names_loop(parent_names, data->nparents); platform_set_drvdata(pdev, data); return 0; +err_drop_mclk: + clkdev_drop(data->mclk_lookup); err_unreg_clk_plt: plt_clk_unregister_loop(data, i); plt_clk_unregister_parents(data); @@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev) data = platform_get_drvdata(pdev); + clkdev_drop(data->ether_clk_lookup); clkdev_drop(data->mclk_lookup); plt_clk_unregister_loop(data, PMC_CLK_NUM); plt_clk_unregister_parents(data); diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c index ec8a4376f74f..2fab18fae4fc 100644 --- a/drivers/clocksource/timer-atmel-pit.c +++ b/drivers/clocksource/timer-atmel-pit.c @@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node) data->base = of_iomap(node, 0); if (!data->base) { pr_err("Could not map PIT address\n"); - return -ENXIO; + ret = -ENXIO; + goto exit; } data->mck = of_clk_get(node, 0); if (IS_ERR(data->mck)) { pr_err("Unable to get mck clk\n"); - return PTR_ERR(data->mck); + ret = PTR_ERR(data->mck); + goto exit; } ret = clk_prepare_enable(data->mck); if (ret) { pr_err("Unable to enable mck\n"); - return ret; + goto exit; } /* Get the interrupts property */ data->irq = irq_of_parse_and_map(node, 0); if (!data->irq) { pr_err("Unable to get IRQ from DT\n"); - return -EINVAL; + ret = -EINVAL; + goto exit; } /* @@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node) ret = clocksource_register_hz(&data->clksrc, pit_rate); if (ret) { pr_err("Failed to register clocksource\n"); - return ret; + goto exit; } /* Set up irq handler */ @@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node) "at91_tick", data); if (ret) { pr_err("Unable to setup IRQ\n"); - return ret; + clocksource_unregister(&data->clksrc); + goto exit; } /* Set up and register clockevents */ @@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node) clockevents_register_device(&data->clkevt); return 0; + +exit: + kfree(data); + return ret; } TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit", at91sam926x_pit_dt_init); diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c index c020038ebfab..cf93f6419b51 100644 --- a/drivers/clocksource/timer-fttmr010.c +++ b/drivers/clocksource/timer-fttmr010.c @@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles, cr &= ~fttmr010->t1_enable_val; writel(cr, fttmr010->base + TIMER_CR); - /* Setup the match register forward/backward in time */ - cr = readl(fttmr010->base + TIMER1_COUNT); - if (fttmr010->count_down) - cr -= cycles; - else - cr += cycles; - writel(cr, fttmr010->base + TIMER1_MATCH1); + if (fttmr010->count_down) { + /* + * ASPEED Timer Controller will load TIMER1_LOAD register + * into TIMER1_COUNT register when the timer is re-enabled. 
+ */ + writel(cycles, fttmr010->base + TIMER1_LOAD); + } else { + /* Setup the match register forward in time */ + cr = readl(fttmr010->base + TIMER1_COUNT); + writel(cr + cycles, fttmr010->base + TIMER1_MATCH1); + } /* Start */ cr = readl(fttmr010->base + TIMER_CR); diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c index 29e2e1a78a43..6949a9113dbb 100644 --- a/drivers/clocksource/timer-ti-32k.c +++ b/drivers/clocksource/timer-ti-32k.c @@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np) return -ENXIO; } + if (!of_machine_is_compatible("ti,am43")) + ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; + ti_32k_timer.counter = ti_32k_timer.base; /* diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c index a1830fa25fc5..2a3675c24032 100644 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c @@ -44,7 +44,7 @@ enum _msm8996_version { struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; -static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) +static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) { size_t len; u32 *msm_id; @@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void) } module_init(qcom_cpufreq_kryo_init); -static void __init qcom_cpufreq_kryo_exit(void) +static void __exit qcom_cpufreq_kryo_exit(void) { platform_device_unregister(kryo_cpufreq_pdev); platform_driver_unregister(&qcom_cpufreq_kryo_driver); diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 218739b961fe..72790d88236d 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex); static struct sev_misc_dev *misc_dev; static struct psp_device *psp_master; +static int psp_cmd_timeout = 100; +module_param(psp_cmd_timeout, int, 0644); +MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); + +static int psp_probe_timeout = 5; +module_param(psp_probe_timeout, int, 0644); +MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); + +static bool psp_dead; +static int psp_timeout; + static struct psp_device *psp_alloc_struct(struct sp_device *sp) { struct device *dev = sp->dev; @@ -82,10 +93,19 @@ done: return IRQ_HANDLED; } -static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg) +static int sev_wait_cmd_ioc(struct psp_device *psp, + unsigned int *reg, unsigned int timeout) { - wait_event(psp->sev_int_queue, psp->sev_int_rcvd); + int ret; + + ret = wait_event_timeout(psp->sev_int_queue, + psp->sev_int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); + + return 0; } static int sev_cmd_buffer_len(int cmd) @@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) if (!psp) return -ENODEV; + if (psp_dead) + return -EBUSY; + /* Get the physical address of the command buffer */ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; phys_msb = data ? 
upper_32_bits(__psp_pa(data)) : 0; - dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n", - cmd, phys_msb, phys_lsb); + dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, psp_timeout); print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, sev_cmd_buffer_len(cmd), false); @@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg); /* wait for command completion */ - sev_wait_cmd_ioc(psp, ®); + ret = sev_wait_cmd_ioc(psp, ®, psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd); + psp_dead = true; + + return ret; + } + + psp_timeout = psp_cmd_timeout; if (psp_ret) *psp_ret = reg & PSP_CMDRESP_ERR_MASK; @@ -888,6 +922,8 @@ void psp_pci_init(void) psp_master = sp->psp_data; + psp_timeout = psp_probe_timeout; + if (sev_get_api_version()) goto err; diff --git a/drivers/dax/device.c b/drivers/dax/device.c index bbe4d72ca105..948806e57cee 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp, return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); } +static const struct address_space_operations dev_dax_aops = { + .set_page_dirty = noop_set_page_dirty, + .invalidatepage = noop_invalidatepage, +}; + static int dax_open(struct inode *inode, struct file *filp) { struct dax_device *dax_dev = inode_dax(inode); @@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp) dev_dbg(&dev_dax->dev, "trace\n"); inode->i_mapping = __dax_inode->i_mapping; inode->i_mapping->host = __dax_inode; + inode->i_mapping->a_ops = &dev_dax_aops; filp->f_mapping = inode->i_mapping; filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); filp->private_data = dev_dax; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index d8e159feb573..89110dfc7127 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -90,14 +90,17 @@ config EFI_ARMSTUB config EFI_ARMSTUB_DTB_LOADER bool "Enable the DTB loader" depends on EFI_ARMSTUB + default y help Select this config option to add support for the dtb= command line parameter, allowing a device tree blob to be loaded into memory from the EFI System Partition by the stub. - The device tree is typically provided by the platform or by - the bootloader, so this option is mostly for development - purposes only. + If the device tree is provided by the platform or by + the bootloader this option may not be needed. + But, for various development reasons and to maintain existing + functionality for bootloaders that do not have such support + this option is necessary. 
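The psp-dev.c hunks above bound SEV command completion with wait_event_timeout() and the new psp_cmd_timeout/psp_probe_timeout module parameters. A small kernel-context sketch of that wait, with an illustrative structure and helper name; only the wait_event_timeout() semantics are the point:

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct example_psp {
            wait_queue_head_t int_queue;
            unsigned int int_rcvd;
    };

    /* wait_event_timeout() returns 0 if the timeout elapses before the
     * condition turns true; the driver above maps that to -ETIMEDOUT and a
     * sticky psp_dead state so later commands fail fast with -EBUSY. */
    static int example_wait_cmd(struct example_psp *psp, unsigned int secs)
    {
            if (!wait_event_timeout(psp->int_queue, psp->int_rcvd, secs * HZ))
                    return -ETIMEDOUT;

            return 0;
    }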
config EFI_BOOTLOADER_CONTROL tristate "EFI Bootloader Control" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index f8bbbb3a9504..0c791e35acf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr) + void **cpu_ptr, bool mqd_gfx9) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; @@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; bp.type = ttm_bo_type_kernel; bp.resv = NULL; + + if (mqd_gfx9) + bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9; + r = amdgpu_bo_create(adev, &bp, &bo); if (r) { dev_err(adev->dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 2f379c183ed2..cc9aeab5468c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd); /* Shared API */ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr); + void **cpu_ptr, bool mqd_gfx9); void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); void get_local_mem_info(struct kgd_dev *kgd, struct kfd_local_mem_info *mem_info); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index ea3f698aef5e..9803b91f3e77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, while (true) { temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); - if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT) + if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; if (time_after(jiffies, end_jiffies)) return -ETIME; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 693ec5ea4950..8816c697b205 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, break; case CHIP_POLARIS10: if (type == CGS_UCODE_ID_SMU) { - if ((adev->pdev->device == 0x67df) && - ((adev->pdev->revision == 0xe0) || - (adev->pdev->revision == 0xe3) || - (adev->pdev->revision == 0xe4) || - (adev->pdev->revision == 0xe5) || - (adev->pdev->revision == 0xe7) || + if (((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe3) || + (adev->pdev->revision == 0xe4) || + (adev->pdev->revision == 0xe5) || + (adev->pdev->revision == 0xe7) || + (adev->pdev->revision == 0xef))) || + ((adev->pdev->device == 0x6fdf) && (adev->pdev->revision == 0xef))) { info->is_kicker = true; strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8843a06360fa..0f41d8647376 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, {0x1002, 0x67CF, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, + {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, /* Polaris12 */ {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 0cc5190f4f36..5f3f54073818 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) { int i; + cancel_delayed_work_sync(&adev->vce.idle_work); + if (adev->vce.vcpu_bo == NULL) return 0; @@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) if (i == AMDGPU_MAX_VCE_HANDLES) return 0; - cancel_delayed_work_sync(&adev->vce.idle_work); /* TODO: suspending running encoding sessions isn't supported */ return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index fd654a4406db..400fc74bbae2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) unsigned size; void *ptr; + cancel_delayed_work_sync(&adev->vcn.idle_work); + if (adev->vcn.vcpu_bo == NULL) return 0; - cancel_delayed_work_sync(&adev->vcn.idle_work); - size = amdgpu_bo_size(adev->vcn.vcpu_bo); ptr = adev->vcn.cpu_addr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 1b048715ab8a..29ac74f40dce 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (kfd->kfd2kgd->init_gtt_mem_allocation( kfd->kgd, size, &kfd->gtt_mem, - &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){ + &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, + false)) { dev_err(kfd_device, "Could not allocate %d bytes\n", size); goto out; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 7a61f38c09e6..01494752c36a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd) struct amd_iommu_device_info iommu_info; unsigned int pasid_limit; int err; + struct kfd_topology_device *top_dev; - if (!kfd->device_info->needs_iommu_device) + top_dev = kfd_topology_device_by_id(kfd->id); + + /* + * Overwrite ATS capability according to needs_iommu_device to fix + * potential missing corresponding bit in CRAT of BIOS. 
+ */ + if (!kfd->device_info->needs_iommu_device) { + top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; return 0; + } + + top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT; iommu_info.flags = 0; err = amd_iommu_device_info(kfd->pdev, &iommu_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index f5fc3675f21e..0cedb37cf513 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), &((*mqd_mem_obj)->gtt_mem), &((*mqd_mem_obj)->gpu_addr), - (void *)&((*mqd_mem_obj)->cpu_ptr)); + (void *)&((*mqd_mem_obj)->cpu_ptr), true); } else retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), mqd_mem_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f971710f1c91..92b285ca73aa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu); int kfd_topology_remove_device(struct kfd_dev *gpu); struct kfd_topology_device *kfd_topology_device_by_proximity_domain( uint32_t proximity_domain); +struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index bc95d4dfee2e..80f5db4ef75f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain( return device; } -struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) +struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id) { - struct kfd_topology_device *top_dev; - struct kfd_dev *device = NULL; + struct kfd_topology_device *top_dev = NULL; + struct kfd_topology_device *ret = NULL; down_read(&topology_lock); list_for_each_entry(top_dev, &topology_device_list, list) if (top_dev->gpu_id == gpu_id) { - device = top_dev->gpu; + ret = top_dev; break; } up_read(&topology_lock); - return device; + return ret; +} + +struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) +{ + struct kfd_topology_device *top_dev; + + top_dev = kfd_topology_device_by_id(gpu_id); + if (!top_dev) + return NULL; + + return top_dev->gpu; } struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 800f481a6995..96875950845a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, return NULL; } +static void emulated_link_detect(struct dc_link *link) +{ + struct dc_sink_init_data sink_init_data = { 0 }; + struct display_sink_capability sink_caps = { 0 }; + enum dc_edid_status edid_status; + struct dc_context *dc_ctx = link->ctx; + struct dc_sink *sink = NULL; + struct dc_sink *prev_sink = NULL; + + link->type = dc_connection_none; + prev_sink = link->local_sink; + + if (prev_sink != NULL) + dc_sink_retain(prev_sink); + + switch (link->connector_signal) { + case 
SIGNAL_TYPE_HDMI_TYPE_A: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; + break; + } + + case SIGNAL_TYPE_DVI_SINGLE_LINK: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + } + + case SIGNAL_TYPE_DVI_DUAL_LINK: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; + break; + } + + case SIGNAL_TYPE_LVDS: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_LVDS; + break; + } + + case SIGNAL_TYPE_EDP: { + sink_caps.transaction_type = + DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.signal = SIGNAL_TYPE_EDP; + break; + } + + case SIGNAL_TYPE_DISPLAY_PORT: { + sink_caps.transaction_type = + DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.signal = SIGNAL_TYPE_VIRTUAL; + break; + } + + default: + DC_ERROR("Invalid connector type! signal:%d\n", + link->connector_signal); + return; + } + + sink_init_data.link = link; + sink_init_data.sink_signal = sink_caps.signal; + + sink = dc_sink_create(&sink_init_data); + if (!sink) { + DC_ERROR("Failed to create sink!\n"); + return; + } + + link->local_sink = sink; + + edid_status = dm_helpers_read_local_edid( + link->ctx, + link, + sink); + + if (edid_status != EDID_OK) + DC_ERROR("Failed to read EDID"); + +} + static int dm_resume(void *handle) { struct amdgpu_device *adev = handle; @@ -654,6 +735,7 @@ static int dm_resume(void *handle) struct drm_plane *plane; struct drm_plane_state *new_plane_state; struct dm_plane_state *dm_new_plane_state; + enum dc_connection_type new_connection_type = dc_connection_none; int ret; int i; @@ -684,7 +766,13 @@ static int dm_resume(void *handle) continue; mutex_lock(&aconnector->hpd_lock); - dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) + emulated_link_detect(aconnector->dc_link); + else + dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); if (aconnector->fake_enable && aconnector->dc_link->local_sink) aconnector->fake_enable = false; @@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param) struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; + enum dc_connection_type new_connection_type = dc_connection_none; /* In case of failure or MST no need to update connector status or notify the OS * since (for MST case) MST does this in it's own context. 
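The emulated_link_detect() path introduced above is applied with the same test in dm_resume(), in the hpd handlers that follow, and at device init. Condensed here into one illustrative helper; this is a sketch in driver context, not actual amdgpu_dm code:

    static void example_detect_link(struct amdgpu_dm_connector *aconnector)
    {
            enum dc_connection_type type = dc_connection_none;

            if (!dc_link_detect_sink(aconnector->dc_link, &type))
                    DRM_ERROR("KMS: Failed to detect connector\n");

            /* A user-forced connector with no physical sink gets an
             * emulated sink so a mode can still be set on it. */
            if (aconnector->base.force && type == dc_connection_none)
                    emulated_link_detect(aconnector->dc_link);
            else
                    dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
    }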
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param) if (aconnector->fake_enable) aconnector->fake_enable = false; - if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { + if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) { + emulated_link_detect(aconnector->dc_link); + + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) + drm_kms_helper_hotplug_event(dev); + + } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { amdgpu_dm_update_connector_after_detect(aconnector); @@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param) struct drm_device *dev = connector->dev; struct dc_link *dc_link = aconnector->dc_link; bool is_mst_root_connector = aconnector->mst_mgr.mst_state; + enum dc_connection_type new_connection_type = dc_connection_none; /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio * conflict, after implement i2c helper, this mutex should be @@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param) if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && !is_mst_root_connector) { /* Downstream Port status changed. */ - if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { + if (!dc_link_detect_sink(dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) { + emulated_link_detect(dc_link); + + if (aconnector->fake_enable) + aconnector->fake_enable = false; + + amdgpu_dm_update_connector_after_detect(aconnector); + + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + drm_kms_helper_hotplug_event(dev); + } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { if (aconnector->fake_enable) aconnector->fake_enable = false; @@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) struct amdgpu_mode_info *mode_info = &adev->mode_info; uint32_t link_cnt; int32_t total_overlay_planes, total_primary_planes; + enum dc_connection_type new_connection_type = dc_connection_none; link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { @@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) link = dc_get_link_at_index(dm->dc, i); - if (dc_link_detect(link, DETECT_REASON_BOOT)) { + if (!dc_link_detect_sink(link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) { + emulated_link_detect(link); + amdgpu_dm_update_connector_after_detect(aconnector); + + } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); } @@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (dm_state && dm_state->freesync_capable) stream->ignore_msa_timing_param = true; finish: - if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL) + if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON) dc_sink_release(sink); return stream; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 37eaf72ace54..fced3c1c2ef5 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -195,7 +195,7 @@ static bool program_hpd_filter( return result; } -static bool detect_sink(struct dc_link *link, enum dc_connection_type *type) +bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) { uint32_t is_hpd_high = 0; struct gpio *hpd_pin; @@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) return false; - if (false == detect_sink(link, &new_connection_type)) { + if (false == dc_link_detect_sink(link, &new_connection_type)) { BREAK_TO_DEBUGGER(); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index d43cefbc43d3..1b48ab9aea89 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable); bool dc_link_is_dp_sink_present(struct dc_link *link); +bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type); /* * DPCD access interfaces */ diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 14384d9675a8..b2f308766a9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements( dc->prev_display_config = *pp_display_cfg; } -void dce110_set_bandwidth( +static void dce110_set_bandwidth( struct dc *dc, struct dc_state *context, bool decrease_allowed) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index e4c5db75c4c6..d6db3dbd9015 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -68,11 +68,6 @@ void dce110_fill_display_configs( const struct dc_state *context, struct dm_pp_display_configuration *pp_display_cfg); -void dce110_set_bandwidth( - struct dc *dc, - struct dc_state *context, - bool decrease_allowed); - uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context); void dp_receiver_power_ctrl(struct dc_link *link, bool on); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index 5853522a6182..eb0f5f9a973b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c @@ -244,17 +244,6 @@ static void dce120_update_dchub( dh_data->dchub_info_valid = false; } -static void dce120_set_bandwidth( - struct dc *dc, - struct dc_state *context, - bool decrease_allowed) -{ - if (context->stream_count <= 0) - return; - - dce110_set_bandwidth(dc, context, decrease_allowed); -} - void dce120_hw_sequencer_construct(struct dc *dc) { /* All registers used by dce11.2 match those in dce11 in offset and @@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc) dce110_hw_sequencer_construct(dc); dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; dc->hwss.update_dchub = dce120_update_dchub; - dc->hwss.set_bandwidth = dce120_set_bandwidth; } diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 14391b06080c..43b82e14007e 
100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -292,7 +292,7 @@ struct tile_config { struct kfd2kgd_calls { int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr); + void **cpu_ptr, bool mqd_gfx9); void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 08b5bb219816..94d6dabec2dc 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev) drm->irq_enabled = true; ret = drm_vblank_init(drm, drm->mode_config.num_crtc); + drm_crtc_vblank_reset(&malidp->crtc); if (ret < 0) { DRM_ERROR("failed to initialise vblank\n"); goto vblank_fail; diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index c94a4422e0e9..2781e462c1ed 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev, static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, dma_addr_t *addrs, s32 *pitches, - int num_planes, u16 w, u16 h, u32 fmt_id) + int num_planes, u16 w, u16 h, u32 fmt_id, + const s16 *rgb2yuv_coeffs) { u32 base = MALIDP500_SE_MEMWRITE_BASE; u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); @@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h), MALIDP500_SE_MEMWRITE_OUT_SIZE); + + if (rgb2yuv_coeffs) { + int i; + + for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) { + malidp_hw_write(hwdev, rgb2yuv_coeffs[i], + MALIDP500_SE_RGB_YUV_COEFFS + i * 4); + } + } + malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL); return 0; @@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev, static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, dma_addr_t *addrs, s32 *pitches, - int num_planes, u16 w, u16 h, u32 fmt_id) + int num_planes, u16 w, u16 h, u32 fmt_id, + const s16 *rgb2yuv_coeffs) { u32 base = MALIDP550_SE_MEMWRITE_BASE; u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); @@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN, MALIDP550_SE_CONTROL); + if (rgb2yuv_coeffs) { + int i; + + for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) { + malidp_hw_write(hwdev, rgb2yuv_coeffs[i], + MALIDP550_SE_RGB_YUV_COEFFS + i * 4); + } + } + return 0; } diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index ad2e96915d44..9fc94c08190f 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -191,7 +191,8 @@ struct malidp_hw { * @param fmt_id - internal format ID of output buffer */ int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs, - s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id); + s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id, + const s16 *rgb2yuv_coeffs); /* * Disable the writing to memory of the next frame's content. 
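For the writeback colour conversion threaded through malidp_hw.c above, the twelve signed coefficients (read here, as an assumption, as a 3x3 matrix followed by three offsets) are simply written to consecutive 32-bit register slots. An illustrative condensation of the per-SoC loops, using the MALIDP*_SE_RGB_YUV_COEFFS bases added in malidp_regs.h further below:

    static void example_write_rgb2yuv(struct malidp_hw_device *hwdev,
                                      u32 coeffs_base, const s16 *coeffs)
    {
            int i;

            /* MALIDP_COLORADJ_NUM_COEFFS (12) values, one register slot
             * each, starting at the SoC-specific RGB_YUV_COEFFS base. */
            for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++)
                    malidp_hw_write(hwdev, coeffs[i], coeffs_base + i * 4);
    }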
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index ba6ae66387c9..91472e5e0c8b 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -26,6 +26,8 @@ struct malidp_mw_connector_state { s32 pitches[2]; u8 format; u8 n_planes; + bool rgb2yuv_initialized; + const s16 *rgb2yuv_coeffs; }; static int malidp_mw_connector_get_modes(struct drm_connector *connector) @@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector) static struct drm_connector_state * malidp_mw_connector_duplicate_state(struct drm_connector *connector) { - struct malidp_mw_connector_state *mw_state; + struct malidp_mw_connector_state *mw_state, *mw_current_state; if (WARN_ON(!connector->state)) return NULL; @@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector) if (!mw_state) return NULL; - /* No need to preserve any of our driver-local data */ + mw_current_state = to_mw_state(connector->state); + mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs; + mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized; + __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base); return &mw_state->base; @@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; +static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = { + 47, 157, 16, + -26, -87, 112, + 112, -102, -10, + 16, 128, 128 +}; + static int malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, } mw_state->n_planes = n_planes; + if (fb->format->is_yuv) + mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited; + return 0; } @@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm, drm_writeback_queue_job(mw_conn, conn_state->writeback_job); conn_state->writeback_job = NULL; - hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, mw_state->pitches, mw_state->n_planes, - fb->width, fb->height, mw_state->format); + fb->width, fb->height, mw_state->format, + !mw_state->rgb2yuv_initialized ? 
+ mw_state->rgb2yuv_coeffs : NULL); + mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs; } else { DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n"); hwdev->hw->disable_memwrite(hwdev); diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index 3579d36b2a71..6ffe849774f2 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -205,6 +205,7 @@ #define MALIDP500_SE_BASE 0x00c00 #define MALIDP500_SE_CONTROL 0x00c0c #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c +#define MALIDP500_SE_RGB_YUV_COEFFS 0x00C74 #define MALIDP500_SE_MEMWRITE_BASE 0x00e00 #define MALIDP500_DC_IRQ_BASE 0x00f00 #define MALIDP500_CONFIG_VALID 0x00f00 @@ -238,6 +239,7 @@ #define MALIDP550_SE_CONTROL 0x08010 #define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7) #define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030 +#define MALIDP550_SE_RGB_YUV_COEFFS 0x08078 #define MALIDP550_SE_MEMWRITE_BASE 0x08100 #define MALIDP550_DC_BASE 0x0c000 #define MALIDP550_DC_CONTROL 0x0c010 diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3eb061e11e2e..018fcdb353d2 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) + if (!drm_drv_uses_atomic_modeset(dev)) return; list_for_each_entry(plane, &config->plane_list, head) { diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 6f28fe58f169..373bd4c2b698 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, return ret; } - if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { + if (drm_drv_uses_atomic_modeset(dev)) { ret = drm_atomic_debugfs_init(minor); if (ret) { DRM_ERROR("Failed to create atomic debugfs files\n"); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 4b0dd20bccb8..16ec93b75dbf 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, { int c, o; struct drm_connector *connector; - const struct drm_connector_helper_funcs *connector_funcs; int my_score, best_score, score; struct drm_fb_helper_crtc **crtcs, *crtc; struct drm_fb_helper_connector *fb_helper_conn; @@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, if (drm_has_preferred_mode(fb_helper_conn, width, height)) my_score++; - connector_funcs = connector->helper_private; - /* * select a crtc for this connector and then attempt to configure * remaining connectors diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index b902361dee6e..1d9a9d2fe0e0 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -24,7 +24,6 @@ #include <linux/err.h> #include <linux/module.h> -#include <drm/drm_device.h> #include <drm/drm_crtc.h> #include <drm/drm_panel.h> @@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector) if (panel->connector) return -EBUSY; - panel->link = device_link_add(connector->dev->dev, panel->dev, 0); - if (!panel->link) { - dev_err(panel->dev, "failed to link panel to %s\n", - dev_name(connector->dev->dev)); - return -EINVAL; - } - panel->connector = connector; panel->drm = connector->dev; @@ -133,8 +125,6 @@ 
EXPORT_SYMBOL(drm_panel_attach); */ int drm_panel_detach(struct drm_panel *panel) { - device_link_del(panel->link); - panel->connector = NULL; panel->drm = NULL; diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index adb3cb27d31e..759278fef35a 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj, { int ret; + WARN_ON(*fence); + *fence = drm_syncobj_fence_get(syncobj); if (*fence) return 1; @@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { for (i = 0; i < count; ++i) { + if (entries[i].fence) + continue; + drm_syncobj_fence_get_or_add_callback(syncobjs[i], &entries[i].fence, &entries[i].syncobj_cb, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 9b2720b41571..83c1f46670bf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct component_match *match = NULL; - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (!dev->platform_data) { struct device_node *core_node; @@ -655,13 +653,30 @@ static int __init etnaviv_init(void) for_each_compatible_node(np, NULL, "vivante,gc") { if (!of_device_is_available(np)) continue; - pdev = platform_device_register_simple("etnaviv", -1, - NULL, 0); - if (IS_ERR(pdev)) { - ret = PTR_ERR(pdev); + + pdev = platform_device_alloc("etnaviv", -1); + if (!pdev) { + ret = -ENOMEM; + of_node_put(np); + goto unregister_platform_driver; + } + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40); + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + + /* + * Apply the same DMA configuration to the virtual etnaviv + * device as the GPU we found. This assumes that all Vivante + * GPUs in the system share the same DMA constraints. 
+ */ + of_dma_configure(&pdev->dev, np, true); + + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); of_node_put(np); goto unregister_platform_driver; } + etnaviv_drm = pdev; of_node_put(np); break; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 72afa518edd9..94c1089ecf59 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3210,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); + MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT); MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index c7afee37b2b8..9ad89e38f6c0 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1833,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) { struct kvmgt_guest_info *info; struct kvm *kvm; + int idx; + bool ret; if (!handle_valid(handle)) return false; @@ -1840,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) info = (struct kvmgt_guest_info *)handle; kvm = info->kvm; - return kvm_is_visible_gfn(kvm, gfn); + idx = srcu_read_lock(&kvm->srcu); + ret = kvm_is_visible_gfn(kvm, gfn); + srcu_read_unlock(&kvm->srcu, idx); + return ret; } struct intel_gvt_mpt kvmgt_mpt = { diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 994366035364..9bb9a85c992c 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) /* set the bit 0:2(Core C-State ) to C0 */ vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; + + if (IS_BROXTON(vgpu->gvt->dev_priv)) { + vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= + ~(BIT(0) | BIT(1)); + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= + ~PHY_POWER_GOOD; + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= + ~BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= + ~BIT(30); + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &= + ~BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |= + BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &= + ~BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |= + BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &= + ~BXT_PHY_LANE_ENABLED; + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |= + BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK; + } } else { #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) /* only reset the engine related, so starting with 0x44200 diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index a4e8e3cf74fd..c628be05fbfe 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_submission(vgpu); intel_vgpu_clean_display(vgpu); intel_vgpu_clean_opregion(vgpu); + intel_vgpu_reset_ggtt(vgpu, true); intel_vgpu_clean_gtt(vgpu); intel_gvt_hypervisor_detach_vgpu(vgpu); intel_vgpu_free_resource(vgpu); diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c index a534b225e31b..5fa0441bb6df 100644 --- 
a/drivers/gpu/drm/pl111/pl111_vexpress.c +++ b/drivers/gpu/drm/pl111/pl111_vexpress.c @@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev) } static const struct of_device_id vexpress_muxfpga_match[] = { - { .compatible = "arm,vexpress-muxfpga", } + { .compatible = "arm,vexpress-muxfpga", }, + {} }; static struct platform_driver vexpress_muxfpga_driver = { diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index dd19d674055c..8b0cd08034e0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = { { .compatible = "allwinner,sun8i-a33-display-engine" }, { .compatible = "allwinner,sun8i-a83t-display-engine" }, { .compatible = "allwinner,sun8i-h3-display-engine" }, - { .compatible = "allwinner,sun8i-r40-display-engine" }, { .compatible = "allwinner,sun8i-v3s-display-engine" }, { .compatible = "allwinner,sun9i-a80-display-engine" }, { } diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 82502b351aec..a564b5dfe082 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = { static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = { .has_phy_clk = true, - .has_second_pll = true, .phy_init = &sun8i_hdmi_phy_init_h3, .phy_disable = &sun8i_hdmi_phy_disable_h3, .phy_config = &sun8i_hdmi_phy_config_h3, diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index fc3713608f78..cb65b0ed53fd 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = { .vi_num = 1, }; -static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = { - .ccsc = 0, - .mod_rate = 297000000, - .scaler_mask = 0xf, - .ui_num = 3, - .vi_num = 1, -}; - -static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = { - .ccsc = 1, - .mod_rate = 297000000, - .scaler_mask = 0x3, - .ui_num = 1, - .vi_num = 1, -}; - static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { .vi_num = 2, .ui_num = 1, @@ -583,14 +567,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = { .data = &sun8i_h3_mixer0_cfg, }, { - .compatible = "allwinner,sun8i-r40-de2-mixer-0", - .data = &sun8i_r40_mixer0_cfg, - }, - { - .compatible = "allwinner,sun8i-r40-de2-mixer-1", - .data = &sun8i_r40_mixer1_cfg, - }, - { .compatible = "allwinner,sun8i-v3s-de2-mixer", .data = &sun8i_v3s_mixer_cfg, }, diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c index 55fe398d8290..d5240b777a8f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c +++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c @@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev) /* sun4i_drv uses this list to check if a device node is a TCON TOP */ const struct of_device_id sun8i_tcon_top_of_table[] = { - { .compatible = "allwinner,sun8i-r40-tcon-top" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index dbb62f6eb48a..dd9ffded223b 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev, { drm_fb_helper_unregister_fbi(&ufbdev->helper); drm_fb_helper_fini(&ufbdev->helper); - 
drm_framebuffer_unregister_private(&ufbdev->ufb.base); - drm_framebuffer_cleanup(&ufbdev->ufb.base); - drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); + if (ufbdev->ufb.obj) { + drm_framebuffer_unregister_private(&ufbdev->ufb.base); + drm_framebuffer_cleanup(&ufbdev->ufb.base); + drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); + } } int udl_fbdev_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index cfb50fedfa2b..a3275fa66b7b 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], vc4_state->crtc_h); + vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && + vc4_state->y_scaling[0] == VC4_SCALING_NONE); + if (num_planes > 1) { vc4_state->is_yuv = true; @@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) vc4_get_scaling_mode(vc4_state->src_h[1], vc4_state->crtc_h); - /* YUV conversion requires that scaling be enabled, - * even on a plane that's otherwise 1:1. Choose TPZ - * for simplicity. + /* YUV conversion requires that horizontal scaling be enabled, + * even on a plane that's otherwise 1:1. Looks like only PPF + * works in that case, so let's pick that one. */ - if (vc4_state->x_scaling[0] == VC4_SCALING_NONE) - vc4_state->x_scaling[0] = VC4_SCALING_TPZ; - if (vc4_state->y_scaling[0] == VC4_SCALING_NONE) - vc4_state->y_scaling[0] = VC4_SCALING_TPZ; + if (vc4_state->is_unity) + vc4_state->x_scaling[0] = VC4_SCALING_PPF; } else { vc4_state->x_scaling[1] = VC4_SCALING_NONE; vc4_state->y_scaling[1] = VC4_SCALING_NONE; } - vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE && - vc4_state->y_scaling[0] == VC4_SCALING_NONE && - vc4_state->x_scaling[1] == VC4_SCALING_NONE && - vc4_state->y_scaling[1] == VC4_SCALING_NONE); - /* No configuring scaling on the cursor plane, since it gets non-vblank-synced updates, and scaling requires LBM changes which have to be vblank-synced. @@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane, vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); } - if (!vc4_state->is_unity) { + if (vc4_state->x_scaling[0] != VC4_SCALING_NONE || + vc4_state->x_scaling[1] != VC4_SCALING_NONE || + vc4_state->y_scaling[0] != VC4_SCALING_NONE || + vc4_state->y_scaling[1] != VC4_SCALING_NONE) { /* LBM Base Address.
*/ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || vc4_state->y_scaling[1] != VC4_SCALING_NONE) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 1f134570b759..f0ab6b2313bb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv, { struct vmw_buffer_object *vbo = container_of(bo, struct vmw_buffer_object, base); - struct ttm_operation_ctx ctx = { interruptible, true }; + struct ttm_operation_ctx ctx = { interruptible, false }; int ret; if (vbo->pin_count > 0) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 23beff5d8e3c..6a712a8d59e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, struct drm_rect *rects) { struct vmw_private *dev_priv = vmw_priv(dev); - struct drm_mode_config *mode_config = &dev->mode_config; struct drm_rect bounding_box = {0}; u64 total_pixels = 0, pixel_mem, bb_mem; int i; for (i = 0; i < num_rects; i++) { /* - * Currently this check is limiting the topology within max - * texture/screentarget size. This should change in future when - * user-space support multiple fb with topology. + * For STDU only individual screen (screen target) is limited by + * SCREENTARGET_MAX_WIDTH/HEIGHT registers. */ - if (rects[i].x1 < 0 || rects[i].y1 < 0 || - rects[i].x2 > mode_config->max_width || - rects[i].y2 > mode_config->max_height) { - DRM_ERROR("Invalid GUI layout.\n"); + if (dev_priv->active_display_unit == vmw_du_screen_target && + (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width || + drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) { + DRM_ERROR("Screen size not supported.\n"); return -EINVAL; } @@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev, struct drm_connector_state *conn_state; struct vmw_connector_state *vmw_conn_state; - if (!new_crtc_state->enable && old_crtc_state->enable) { + if (!new_crtc_state->enable) { rects[i].x1 = 0; rects[i].y1 = 0; rects[i].x2 = 0; @@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, if (dev_priv->assume_16bpp) assumed_bpp = 2; + max_width = min(max_width, dev_priv->texture_max_width); + max_height = min(max_height, dev_priv->texture_max_height); + + /* + * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/ + * HEIGHT registers. 
+ */ if (dev_priv->active_display_unit == vmw_du_screen_target) { max_width = min(max_width, dev_priv->stdu_max_width); - max_width = min(max_width, dev_priv->texture_max_width); - max_height = min(max_height, dev_priv->stdu_max_height); - max_height = min(max_height, dev_priv->texture_max_height); } /* Add preferred mode */ @@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_mode_config *mode_config = &dev->mode_config; struct drm_vmw_update_layout_arg *arg = (struct drm_vmw_update_layout_arg *)data; void __user *user_rects; @@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, drm_rects[i].y1 = curr_rect.y; drm_rects[i].x2 = curr_rect.x + curr_rect.w; drm_rects[i].y2 = curr_rect.y + curr_rect.h; + + /* + * Currently this check is limiting the topology within + * mode_config->max (which actually is max texture size + * supported by virtual device). This limit is here to address + * window managers that create a big framebuffer for whole + * topology. + */ + if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 || + drm_rects[i].x2 > mode_config->max_width || + drm_rects[i].y2 > mode_config->max_height) { + DRM_ERROR("Invalid GUI layout.\n"); + ret = -EINVAL; + goto out_free; + } } ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 93f6b96ca7bb..f30e839f7bfd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) dev_priv->active_display_unit = vmw_du_screen_target; - if (dev_priv->capabilities & SVGA_CAP_3D) { - /* - * For 3D VMs, display (scanout) buffer size is the smaller of - * max texture and max STDU - */ - uint32_t max_width, max_height; - - max_width = min(dev_priv->texture_max_width, - dev_priv->stdu_max_width); - max_height = min(dev_priv->texture_max_height, - dev_priv->stdu_max_height); - - dev->mode_config.max_width = max_width; - dev->mode_config.max_height = max_height; - } else { - /* - * Given various display aspect ratios, there's no way to - * estimate these using prim_bb_mem. 
So just set these to - * something arbitrarily large and we will reject any layout - * that doesn't fit prim_bb_mem later - */ - dev->mode_config.max_width = 8192; - dev->mode_config.max_height = 8192; - } - vmw_kms_create_implicit_placement_property(dev_priv, false); for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index e125233e074b..80a01cd4c051 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, *srf_out = NULL; if (for_scanout) { - uint32_t max_width, max_height; - if (!svga3dsurface_is_screen_target_format(format)) { DRM_ERROR("Invalid Screen Target surface format."); return -EINVAL; } - max_width = min(dev_priv->texture_max_width, - dev_priv->stdu_max_width); - max_height = min(dev_priv->texture_max_height, - dev_priv->stdu_max_height); - - if (size.width > max_width || size.height > max_height) { + if (size.width > dev_priv->texture_max_width || + size.height > dev_priv->texture_max_height) { DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u", size.width, size.height, - max_width, max_height); + dev_priv->texture_max_width, + dev_priv->texture_max_height); return -EINVAL; } } else { @@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev, if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) srf->res.backup_size += sizeof(SVGA3dDXSOState); + /* + * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with + * size greater than STDU max width/height. This is really a workaround + * to support creation of big framebuffer requested by some user-space + * for whole topology. That big framebuffer won't really be used for + * binding with screen target as during prepare_fb a separate surface is + * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag. 
+ */ if (dev_priv->active_display_unit == vmw_du_screen_target && - for_scanout) + for_scanout && size.width <= dev_priv->stdu_max_width && + size.height <= dev_priv->stdu_max_height) srf->flags |= SVGA3D_SURFACE_SCREENTARGET; /* diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index a96bf46bc483..cf2a18571d48 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void) return; client->id = ret | ID_BIT_AUDIO; + if (client->ops->gpu_bound) + client->ops->gpu_bound(client->pdev, ret); } vga_switcheroo_debugfs_init(&vgasr_priv); diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 944f5b63aecd..78603b78cf41 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -207,8 +207,6 @@ superio_exit(int ioreg) #define NUM_FAN 7 -#define TEMP_SOURCE_VIRTUAL 0x1f - /* Common and NCT6775 specific data */ /* Voltage min/max registers for nr=7..14 are in bank 5 */ @@ -299,8 +297,9 @@ static const u16 NCT6775_REG_PWM_READ[] = { static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d }; -static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 }; -static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 }; +static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = { + 0x641, 0x642, 0x643, 0x644 }; +static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { }; static const u16 NCT6775_REG_TEMP[] = { 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; @@ -373,6 +372,7 @@ static const char *const nct6775_temp_label[] = { }; #define NCT6775_TEMP_MASK 0x001ffffe +#define NCT6775_VIRT_TEMP_MASK 0x00000000 static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = { [13] = 0x661, @@ -425,8 +425,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 }; static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; -static const u16 NCT6776_REG_FAN_PULSES[] = { - 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; +static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = { + 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = { 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e }; @@ -461,6 +461,7 @@ static const char *const nct6776_temp_label[] = { }; #define NCT6776_TEMP_MASK 0x007ffffe +#define NCT6776_VIRT_TEMP_MASK 0x00000000 static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = { [14] = 0x401, @@ -501,9 +502,9 @@ static const s8 NCT6779_BEEP_BITS[] = { 30, 31 }; /* intrusion0, intrusion1 */ static const u16 NCT6779_REG_FAN[] = { - 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 }; -static const u16 NCT6779_REG_FAN_PULSES[] = { - 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; + 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce }; +static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = { + 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 }; static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = { 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 }; @@ -559,7 +560,9 @@ static const char *const nct6779_temp_label[] = { }; #define NCT6779_TEMP_MASK 0x07ffff7e +#define NCT6779_VIRT_TEMP_MASK 0x00000000 #define NCT6791_TEMP_MASK 0x87ffff7e +#define NCT6791_VIRT_TEMP_MASK 0x80000000 static const u16 NCT6779_REG_TEMP_ALTERNATE[32] = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0, @@ -638,6 +641,7 @@ static const char *const nct6792_temp_label[] = { }; #define NCT6792_TEMP_MASK 0x9fffff7e +#define NCT6792_VIRT_TEMP_MASK 0x80000000 static const char *const 
nct6793_temp_label[] = { "", @@ -675,6 +679,7 @@ static const char *const nct6793_temp_label[] = { }; #define NCT6793_TEMP_MASK 0xbfff037e +#define NCT6793_VIRT_TEMP_MASK 0x80000000 static const char *const nct6795_temp_label[] = { "", @@ -712,6 +717,7 @@ static const char *const nct6795_temp_label[] = { }; #define NCT6795_TEMP_MASK 0xbfffff7e +#define NCT6795_VIRT_TEMP_MASK 0x80000000 static const char *const nct6796_temp_label[] = { "", @@ -724,8 +730,8 @@ static const char *const nct6796_temp_label[] = { "AUXTIN4", "SMBUSMASTER 0", "SMBUSMASTER 1", - "", - "", + "Virtual_TEMP", + "Virtual_TEMP", "", "", "", @@ -748,7 +754,8 @@ static const char *const nct6796_temp_label[] = { "Virtual_TEMP" }; -#define NCT6796_TEMP_MASK 0xbfff03fe +#define NCT6796_TEMP_MASK 0xbfff0ffe +#define NCT6796_VIRT_TEMP_MASK 0x80000c00 /* NCT6102D/NCT6106D specific data */ @@ -779,8 +786,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = { static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 }; static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 }; -static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 }; -static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 }; +static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 }; +static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 }; static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 }; static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 }; @@ -917,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg) return 1350000U / (reg << divreg); } +static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg) +{ + return reg; +} + static u16 fan_to_reg(u32 fan, unsigned int divreg) { if (!fan) @@ -969,6 +981,7 @@ struct nct6775_data { u16 reg_temp_config[NUM_TEMP]; const char * const *temp_label; u32 temp_mask; + u32 virt_temp_mask; u16 REG_CONFIG; u16 REG_VBAT; @@ -1276,11 +1289,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg) case nct6795: case nct6796: return reg == 0x150 || reg == 0x153 || reg == 0x155 || - ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || + (reg & 0xfff0) == 0x4c0 || reg == 0x402 || reg == 0x63a || reg == 0x63c || reg == 0x63e || reg == 0x640 || reg == 0x642 || reg == 0x64a || - reg == 0x64c || reg == 0x660 || + reg == 0x64c || reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 || reg == 0x7b || reg == 0x7d; } @@ -1558,7 +1571,7 @@ static void nct6775_update_pwm(struct device *dev) reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); data->pwm_weight_temp_sel[i] = reg & 0x1f; /* If weight is disabled, report weight source as 0 */ - if (j == 1 && !(reg & 0x80)) + if (!(reg & 0x80)) data->pwm_weight_temp_sel[i] = 0; /* Weight temp data */ @@ -1682,9 +1695,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev) if (data->has_fan_min & BIT(i)) data->fan_min[i] = nct6775_read_value(data, data->REG_FAN_MIN[i]); - data->fan_pulses[i] = - (nct6775_read_value(data, data->REG_FAN_PULSES[i]) - >> data->FAN_PULSE_SHIFT[i]) & 0x03; + + if (data->REG_FAN_PULSES[i]) { + data->fan_pulses[i] = + (nct6775_read_value(data, + data->REG_FAN_PULSES[i]) + >> data->FAN_PULSE_SHIFT[i]) & 0x03; + } nct6775_select_fan_div(dev, data, i, reg); } @@ -3639,6 +3656,7 @@ static int nct6775_probe(struct platform_device *pdev) data->temp_label = nct6776_temp_label; data->temp_mask = NCT6776_TEMP_MASK; + data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK; data->REG_VBAT = NCT6106_REG_VBAT; data->REG_DIODE = NCT6106_REG_DIODE; @@ -3717,6 +3735,7 @@ static 
int nct6775_probe(struct platform_device *pdev) data->temp_label = nct6775_temp_label; data->temp_mask = NCT6775_TEMP_MASK; + data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK; data->REG_CONFIG = NCT6775_REG_CONFIG; data->REG_VBAT = NCT6775_REG_VBAT; @@ -3789,6 +3808,7 @@ static int nct6775_probe(struct platform_device *pdev) data->temp_label = nct6776_temp_label; data->temp_mask = NCT6776_TEMP_MASK; + data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK; data->REG_CONFIG = NCT6775_REG_CONFIG; data->REG_VBAT = NCT6775_REG_VBAT; @@ -3853,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev) data->ALARM_BITS = NCT6779_ALARM_BITS; data->BEEP_BITS = NCT6779_BEEP_BITS; - data->fan_from_reg = fan_from_reg13; + data->fan_from_reg = fan_from_reg_rpm; data->fan_from_reg_min = fan_from_reg13; data->target_temp_mask = 0xff; data->tolerance_mask = 0x07; @@ -3861,6 +3881,7 @@ static int nct6775_probe(struct platform_device *pdev) data->temp_label = nct6779_temp_label; data->temp_mask = NCT6779_TEMP_MASK; + data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK; data->REG_CONFIG = NCT6775_REG_CONFIG; data->REG_VBAT = NCT6775_REG_VBAT; @@ -3933,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev) data->ALARM_BITS = NCT6791_ALARM_BITS; data->BEEP_BITS = NCT6779_BEEP_BITS; - data->fan_from_reg = fan_from_reg13; + data->fan_from_reg = fan_from_reg_rpm; data->fan_from_reg_min = fan_from_reg13; data->target_temp_mask = 0xff; data->tolerance_mask = 0x07; @@ -3944,22 +3965,27 @@ static int nct6775_probe(struct platform_device *pdev) case nct6791: data->temp_label = nct6779_temp_label; data->temp_mask = NCT6791_TEMP_MASK; + data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK; break; case nct6792: data->temp_label = nct6792_temp_label; data->temp_mask = NCT6792_TEMP_MASK; + data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK; break; case nct6793: data->temp_label = nct6793_temp_label; data->temp_mask = NCT6793_TEMP_MASK; + data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK; break; case nct6795: data->temp_label = nct6795_temp_label; data->temp_mask = NCT6795_TEMP_MASK; + data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK; break; case nct6796: data->temp_label = nct6796_temp_label; data->temp_mask = NCT6796_TEMP_MASK; + data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK; break; } @@ -4143,7 +4169,7 @@ static int nct6775_probe(struct platform_device *pdev) * for each fan reflects a different temperature, and there * are no duplicates. 
*/ - if (src != TEMP_SOURCE_VIRTUAL) { + if (!(data->virt_temp_mask & BIT(src))) { if (mask & BIT(src)) continue; mask |= BIT(src); diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index da962aa2cef5..fc6b7f8b62fb 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev) th->thdev[i] = NULL; } - th->num_thdevs = lowest; + if (lowest >= 0) + th->num_thdevs = lowest; } if (thdrv->attr_group) @@ -487,7 +488,7 @@ static const struct intel_th_subdevice { .flags = IORESOURCE_MEM, }, { - .start = TH_MMIO_SW, + .start = 1, /* use resource[1] */ .end = 0, .flags = IORESOURCE_MEM, }, @@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th, struct intel_th_device *thdev; struct resource res[3]; unsigned int req = 0; + bool is64bit = false; int r, err; thdev = intel_th_device_alloc(th, subdev->type, subdev->name, @@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th, thdev->drvdata = th->drvdata; + for (r = 0; r < th->num_resources; r++) + if (th->resource[r].flags & IORESOURCE_MEM_64) { + is64bit = true; + break; + } + memcpy(res, subdev->res, sizeof(struct resource) * subdev->nres); for (r = 0; r < subdev->nres; r++) { struct resource *devres = th->resource; - int bar = TH_MMIO_CONFIG; + int bar = 0; /* cut subdevices' MMIO from resource[0] */ /* * Take .end == 0 to mean 'take the whole bar', @@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th, */ if (!res[r].end && res[r].flags == IORESOURCE_MEM) { bar = res[r].start; + if (is64bit) + bar *= 2; res[r].start = 0; res[r].end = resource_size(&devres[bar]) - 1; } diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index c2e55e5d97f6..1cf6290d6435 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Ice Lake PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 0bee1f4b914e..3208ad6ad540 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -338,6 +338,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry) } /** + * del_gid - Delete GID table entry + * + * @ib_dev: IB device whose GID entry to be deleted + * @port: Port number of the IB device + * @table: GID table of the IB device for a port + * @ix: GID entry index to delete + * + */ +static void del_gid(struct ib_device *ib_dev, u8 port, + struct ib_gid_table *table, int ix) +{ + struct ib_gid_table_entry *entry; + + lockdep_assert_held(&table->lock); + + pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__, + ib_dev->name, port, ix, + table->data_vec[ix]->attr.gid.raw); + + write_lock_irq(&table->rwlock); + entry = table->data_vec[ix]; + entry->state = GID_TABLE_ENTRY_PENDING_DEL; + /* + * For non RoCE protocol, GID entry slot is ready to use. + */ + if (!rdma_protocol_roce(ib_dev, port)) + table->data_vec[ix] = NULL; + write_unlock_irq(&table->rwlock); + + put_gid_entry_locked(entry); +} + +/** * add_modify_gid - Add or modify GID table entry * * @table: GID table in which GID to be added or modified @@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table, * this index. 
*/ if (is_gid_entry_valid(table->data_vec[attr->index])) - put_gid_entry(table->data_vec[attr->index]); + del_gid(attr->device, attr->port_num, table, attr->index); /* * Some HCA's report multiple GID entries with only one valid GID, and @@ -386,39 +419,6 @@ done: return ret; } -/** - * del_gid - Delete GID table entry - * - * @ib_dev: IB device whose GID entry to be deleted - * @port: Port number of the IB device - * @table: GID table of the IB device for a port - * @ix: GID entry index to delete - * - */ -static void del_gid(struct ib_device *ib_dev, u8 port, - struct ib_gid_table *table, int ix) -{ - struct ib_gid_table_entry *entry; - - lockdep_assert_held(&table->lock); - - pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__, - ib_dev->name, port, ix, - table->data_vec[ix]->attr.gid.raw); - - write_lock_irq(&table->rwlock); - entry = table->data_vec[ix]; - entry->state = GID_TABLE_ENTRY_PENDING_DEL; - /* - * For non RoCE protocol, GID entry slot is ready to use. - */ - if (!rdma_protocol_roce(ib_dev, port)) - table->data_vec[ix] = NULL; - write_unlock_irq(&table->rwlock); - - put_gid_entry_locked(entry); -} - /* rwlock should be read locked, or lock should be held */ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, const struct ib_gid_attr *val, bool default_gid, diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5f437d1570fb..21863ddde63e 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1759,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp) mutex_lock(&mut); if (!ctx->closing) { mutex_unlock(&mut); + ucma_put_ctx(ctx); + wait_for_completion(&ctx->comp); /* rdma_destroy_id ensures that no event handlers are * inflight for that id before releasing it. 
*/ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index a21d5214afc3..e012ca80f9d1 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file, if ((cmd->base.attr_mask & IB_QP_CUR_STATE && cmd->base.cur_qp_state > IB_QPS_ERR) || - cmd->base.qp_state > IB_QPS_ERR) { + (cmd->base.attr_mask & IB_QP_STATE && + cmd->base.qp_state > IB_QPS_ERR)) { ret = -EINVAL; goto release_qp; } - attr->qp_state = cmd->base.qp_state; - attr->cur_qp_state = cmd->base.cur_qp_state; - attr->path_mtu = cmd->base.path_mtu; - attr->path_mig_state = cmd->base.path_mig_state; - attr->qkey = cmd->base.qkey; - attr->rq_psn = cmd->base.rq_psn; - attr->sq_psn = cmd->base.sq_psn; - attr->dest_qp_num = cmd->base.dest_qp_num; - attr->qp_access_flags = cmd->base.qp_access_flags; - attr->pkey_index = cmd->base.pkey_index; - attr->alt_pkey_index = cmd->base.alt_pkey_index; - attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; - attr->max_rd_atomic = cmd->base.max_rd_atomic; - attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; - attr->min_rnr_timer = cmd->base.min_rnr_timer; - attr->port_num = cmd->base.port_num; - attr->timeout = cmd->base.timeout; - attr->retry_cnt = cmd->base.retry_cnt; - attr->rnr_retry = cmd->base.rnr_retry; - attr->alt_port_num = cmd->base.alt_port_num; - attr->alt_timeout = cmd->base.alt_timeout; - attr->rate_limit = cmd->rate_limit; + if (cmd->base.attr_mask & IB_QP_STATE) + attr->qp_state = cmd->base.qp_state; + if (cmd->base.attr_mask & IB_QP_CUR_STATE) + attr->cur_qp_state = cmd->base.cur_qp_state; + if (cmd->base.attr_mask & IB_QP_PATH_MTU) + attr->path_mtu = cmd->base.path_mtu; + if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE) + attr->path_mig_state = cmd->base.path_mig_state; + if (cmd->base.attr_mask & IB_QP_QKEY) + attr->qkey = cmd->base.qkey; + if (cmd->base.attr_mask & IB_QP_RQ_PSN) + attr->rq_psn = cmd->base.rq_psn; + if (cmd->base.attr_mask & IB_QP_SQ_PSN) + attr->sq_psn = cmd->base.sq_psn; + if (cmd->base.attr_mask & IB_QP_DEST_QPN) + attr->dest_qp_num = cmd->base.dest_qp_num; + if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS) + attr->qp_access_flags = cmd->base.qp_access_flags; + if (cmd->base.attr_mask & IB_QP_PKEY_INDEX) + attr->pkey_index = cmd->base.pkey_index; + if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) + attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; + if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC) + attr->max_rd_atomic = cmd->base.max_rd_atomic; + if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; + if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER) + attr->min_rnr_timer = cmd->base.min_rnr_timer; + if (cmd->base.attr_mask & IB_QP_PORT) + attr->port_num = cmd->base.port_num; + if (cmd->base.attr_mask & IB_QP_TIMEOUT) + attr->timeout = cmd->base.timeout; + if (cmd->base.attr_mask & IB_QP_RETRY_CNT) + attr->retry_cnt = cmd->base.retry_cnt; + if (cmd->base.attr_mask & IB_QP_RNR_RETRY) + attr->rnr_retry = cmd->base.rnr_retry; + if (cmd->base.attr_mask & IB_QP_ALT_PATH) { + attr->alt_port_num = cmd->base.alt_port_num; + attr->alt_timeout = cmd->base.alt_timeout; + attr->alt_pkey_index = cmd->base.alt_pkey_index; + } + if (cmd->base.attr_mask & IB_QP_RATE_LIMIT) + attr->rate_limit = cmd->rate_limit; if (cmd->base.attr_mask & IB_QP_AV) copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr, diff --git a/drivers/infiniband/core/uverbs_main.c 
b/drivers/infiniband/core/uverbs_main.c index 6d974e2363df..50152c1b1004 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp) list_del(&entry->obj_list); kfree(entry); } + file->ev_queue.is_closed = 1; spin_unlock_irq(&file->ev_queue.lock); uverbs_close_fd(filp); diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c index 73ea6f0db88f..be854628a7c6 100644 --- a/drivers/infiniband/core/uverbs_uapi.c +++ b/drivers/infiniband/core/uverbs_uapi.c @@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi) kfree(rcu_dereference_protected(*slot, true)); radix_tree_iter_delete(&uapi->radix, &iter, slot); } + kfree(uapi); } struct uverbs_api *uverbs_alloc_api( diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 20b9f31052bf..85cd1a3593d6 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list); /* Mutex to protect the list of bnxt_re devices added */ static DEFINE_MUTEX(bnxt_re_dev_lock); static struct workqueue_struct *bnxt_re_wq; -static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait); +static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev); /* SR-IOV helper functions */ @@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p) if (!rdev) return; - bnxt_re_ib_unreg(rdev, false); + bnxt_re_ib_unreg(rdev); } static void bnxt_re_stop_irq(void *handle) @@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = { /* Driver registration routines used to let the networking driver (bnxt_en) * to know that the RoCE driver is now installed */ -static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait) +static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev) { struct bnxt_en_dev *en_dev; int rc; @@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait) return -EINVAL; en_dev = rdev->en_dev; - /* Acquire rtnl lock if it is not invokded from netdev event */ - if (lock_wait) - rtnl_lock(); rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev, BNXT_ROCE_ULP); - if (lock_wait) - rtnl_unlock(); return rc; } @@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev) en_dev = rdev->en_dev; - rtnl_lock(); rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP, &bnxt_re_ulp_ops, rdev); - rtnl_unlock(); return rc; } -static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait) +static int bnxt_re_free_msix(struct bnxt_re_dev *rdev) { struct bnxt_en_dev *en_dev; int rc; @@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait) en_dev = rdev->en_dev; - if (lock_wait) - rtnl_lock(); rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP); - if (lock_wait) - rtnl_unlock(); return rc; } @@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev) num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus()); - rtnl_lock(); num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP, rdev->msix_entries, num_msix_want); @@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev) } rdev->num_msix = num_msix_got; done: - rtnl_unlock(); return rc; } @@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg, 
fw_msg->timeout = timeout; } -static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id, - bool lock_wait) +static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id) { struct bnxt_en_dev *en_dev = rdev->en_dev; struct hwrm_ring_free_input req = {0}; struct hwrm_ring_free_output resp; struct bnxt_fw_msg fw_msg; - bool do_unlock = false; int rc = -EINVAL; if (!en_dev) return rc; memset(&fw_msg, 0, sizeof(fw_msg)); - if (lock_wait) { - rtnl_lock(); - do_unlock = true; - } bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1); req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; @@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id, if (rc) dev_err(rdev_to_dev(rdev), "Failed to free HW ring:%d :%#x", req.ring_id, rc); - if (do_unlock) - rtnl_unlock(); return rc; } @@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr, return rc; memset(&fw_msg, 0, sizeof(fw_msg)); - rtnl_lock(); bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); req.enables = 0; req.page_tbl_addr = cpu_to_le64(dma_arr[0]); @@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr, if (!rc) *fw_ring_id = le16_to_cpu(resp.ring_id); - rtnl_unlock(); return rc; } static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, - u32 fw_stats_ctx_id, bool lock_wait) + u32 fw_stats_ctx_id) { struct bnxt_en_dev *en_dev = rdev->en_dev; struct hwrm_stat_ctx_free_input req = {0}; struct bnxt_fw_msg fw_msg; - bool do_unlock = false; int rc = -EINVAL; if (!en_dev) return rc; memset(&fw_msg, 0, sizeof(fw_msg)); - if (lock_wait) { - rtnl_lock(); - do_unlock = true; - } bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); @@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, dev_err(rdev_to_dev(rdev), "Failed to free HW stats context %#x", rc); - if (do_unlock) - rtnl_unlock(); return rc; } @@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, return rc; memset(&fw_msg, 0, sizeof(fw_msg)); - rtnl_lock(); bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); req.update_period_ms = cpu_to_le32(1000); @@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, if (!rc) *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id); - rtnl_unlock(); return rc; } @@ -929,19 +897,19 @@ fail: return rc; } -static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait) +static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) { int i; for (i = 0; i < rdev->num_msix - 1; i++) { - bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait); + bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id); bnxt_qplib_free_nq(&rdev->nq[i]); } } -static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait) +static void bnxt_re_free_res(struct bnxt_re_dev *rdev) { - bnxt_re_free_nq_res(rdev, lock_wait); + bnxt_re_free_nq_res(rdev); if (rdev->qplib_res.dpi_tbl.max) { bnxt_qplib_dealloc_dpi(&rdev->qplib_res, @@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev) return 0; } -static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait) +static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) { int i, rc; @@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait) cancel_delayed_work(&rdev->worker); bnxt_re_cleanup_res(rdev); - 
bnxt_re_free_res(rdev, lock_wait); + bnxt_re_free_res(rdev); if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); if (rc) dev_warn(rdev_to_dev(rdev), "Failed to deinitialize RCFW: %#x", rc); - bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, - lock_wait); + bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); - bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait); + bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id); bnxt_qplib_free_rcfw_channel(&rdev->rcfw); } if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { - rc = bnxt_re_free_msix(rdev, lock_wait); + rc = bnxt_re_free_msix(rdev); if (rc) dev_warn(rdev_to_dev(rdev), "Failed to free MSI-X vectors: %#x", rc); } if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { - rc = bnxt_re_unregister_netdev(rdev, lock_wait); + rc = bnxt_re_unregister_netdev(rdev); if (rc) dev_warn(rdev_to_dev(rdev), "Failed to unregister with netdev: %#x", rc); @@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) { int i, j, rc; + bool locked; + + /* Acquire rtnl lock throughout this function */ + rtnl_lock(); + locked = true; + /* Registered a new RoCE device instance to netdev */ rc = bnxt_re_register_netdev(rdev); if (rc) { @@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); } + rtnl_unlock(); + locked = false; + /* Register ib dev */ rc = bnxt_re_register_ib(rdev); if (rc) { pr_err("Failed to register with IB: %#x\n", rc); goto fail; } + set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); dev_info(rdev_to_dev(rdev), "Device registered successfully"); for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) { rc = device_create_file(&rdev->ibdev.dev, @@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) goto fail; } } - set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, &rdev->active_width); set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); @@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) return 0; free_sctx: - bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true); + bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); free_ctx: bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); disable_rcfw: bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); free_ring: - bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true); + bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id); free_rcfw: bnxt_qplib_free_rcfw_channel(&rdev->rcfw); fail: - bnxt_re_ib_unreg(rdev, true); + if (!locked) + rtnl_lock(); + bnxt_re_ib_unreg(rdev); + rtnl_unlock(); + return rc; } @@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, */ if (atomic_read(&rdev->sched_count) > 0) goto exit; - bnxt_re_ib_unreg(rdev, false); + bnxt_re_ib_unreg(rdev); bnxt_re_remove_one(rdev); bnxt_re_dev_unreg(rdev); break; @@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void) */ flush_workqueue(bnxt_re_wq); bnxt_re_dev_stop(rdev); - bnxt_re_ib_unreg(rdev, true); + /* Acquire the rtnl_lock as the L2 resources are freed here */ + rtnl_lock(); + bnxt_re_ib_unreg(rdev); + rtnl_unlock(); bnxt_re_remove_one(rdev); bnxt_re_dev_unreg(rdev); } diff --git a/drivers/infiniband/hw/hfi1/chip.c
b/drivers/infiniband/hw/hfi1/chip.c index 2c19bf772451..e1668bcc2d13 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) struct hfi1_devdata *dd = ppd->dd; struct send_context *sc; int i; + int sc_flags; if (flags & FREEZE_SELF) write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); @@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) /* notify all SDMA engines that they are going into a freeze */ sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); + sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ? + SCF_LINK_DOWN : 0); /* do halt pre-handling on all enabled send contexts */ for (i = 0; i < dd->num_send_contexts; i++) { sc = dd->send_contexts[i].sc; if (sc && (sc->flags & SCF_ENABLED)) - sc_stop(sc, SCF_FROZEN | SCF_HALTED); + sc_stop(sc, sc_flags); } /* Send context are frozen. Notify user space */ @@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); handle_linkup_change(dd, 1); + pio_kernel_linkup(dd); /* * After link up, a new link width will have been set. diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index c2c1cba5b23b..752057647f09 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op) unsigned long flags; int write = 1; /* write sendctrl back */ int flush = 0; /* re-read sendctrl to make sure it is flushed */ + int i; spin_lock_irqsave(&dd->sendctrl_lock, flags); @@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op) reg |= SEND_CTRL_SEND_ENABLE_SMASK; /* Fall through */ case PSC_DATA_VL_ENABLE: + mask = 0; + for (i = 0; i < ARRAY_SIZE(dd->vld); i++) + if (!dd->vld[i].mtu) + mask |= BIT_ULL(i); /* Disallow sending on VLs not enabled */ - mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) << - SEND_CTRL_UNSUPPORTED_VL_SHIFT; + mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) << + SEND_CTRL_UNSUPPORTED_VL_SHIFT; reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; break; case PSC_GLOBAL_DISABLE: @@ -921,20 +926,18 @@ void sc_free(struct send_context *sc) void sc_disable(struct send_context *sc) { u64 reg; - unsigned long flags; struct pio_buf *pbuf; if (!sc) return; /* do all steps, even if already disabled */ - spin_lock_irqsave(&sc->alloc_lock, flags); + spin_lock_irq(&sc->alloc_lock); reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); sc->flags &= ~SCF_ENABLED; sc_wait_for_packet_egress(sc, 1); write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); - spin_unlock_irqrestore(&sc->alloc_lock, flags); /* * Flush any waiters. Once the context is disabled, @@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc) * proceed with the flush. 
*/ udelay(1); - spin_lock_irqsave(&sc->release_lock, flags); + spin_lock(&sc->release_lock); if (sc->sr) { /* this context has a shadow ring */ while (sc->sr_tail != sc->sr_head) { pbuf = &sc->sr[sc->sr_tail].pbuf; @@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc) sc->sr_tail = 0; } } - spin_unlock_irqrestore(&sc->release_lock, flags); + spin_unlock(&sc->release_lock); + spin_unlock_irq(&sc->alloc_lock); } /* return SendEgressCtxtStatus.PacketOccupancy */ @@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd) sc = dd->send_contexts[i].sc; if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) continue; + if (sc->flags & SCF_LINK_DOWN) + continue; sc_enable(sc); /* will clear the sc frozen flag */ } } +/** + * pio_kernel_linkup() - Re-enable send contexts after linkup event + * @dd: valid device data + * + * When the link goes down, the freeze path is taken. However, a link down + * event is different from a freeze because if the send context is re-enabled + * whoever is sending data will start sending data again, which will hang + * any QP that is sending data. + * + * The freeze path now looks at the type of event that occurs and takes this + * path for a link down event. + */ +void pio_kernel_linkup(struct hfi1_devdata *dd) +{ + struct send_context *sc; + int i; + + for (i = 0; i < dd->num_send_contexts; i++) { + sc = dd->send_contexts[i].sc; + if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER) + continue; + + sc_enable(sc); /* will clear the sc link down flag */ + } +} + /* * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear. * Returns: @@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag) { unsigned long flags; - /* mark the context */ - sc->flags |= flag; - /* stop buffer allocations */ spin_lock_irqsave(&sc->alloc_lock, flags); + /* mark the context */ + sc->flags |= flag; sc->flags &= ~SCF_ENABLED; spin_unlock_irqrestore(&sc->alloc_lock, flags); wake_up(&sc->halt_wait); diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h index 058b08f459ab..aaf372c3e5d6 100644 --- a/drivers/infiniband/hw/hfi1/pio.h +++ b/drivers/infiniband/hw/hfi1/pio.h @@ -139,6 +139,7 @@ struct send_context { #define SCF_IN_FREE 0x02 #define SCF_HALTED 0x04 #define SCF_FROZEN 0x08 +#define SCF_LINK_DOWN 0x10 struct send_context_info { struct send_context *sc; /* allocated working context */ @@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc); void pio_reset_all(struct hfi1_devdata *dd); void pio_freeze(struct hfi1_devdata *dd); void pio_kernel_unfreeze(struct hfi1_devdata *dd); +void pio_kernel_linkup(struct hfi1_devdata *dd); /* global PIO send control operations */ #define PSC_GLOBAL_ENABLE 0 diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index a3a7b33196d6..5c88706121c1 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { if (++req->iov_idx == req->data_iovs) { ret = -EFAULT; - goto free_txreq; + goto free_tx; } iovec = &req->iovs[req->iov_idx]; WARN_ON(iovec->offset); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 13374c727b14..a7c586a5589d 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct
rdma_ah_attr *ah_attr) struct hfi1_pportdata *ppd; struct hfi1_devdata *dd; u8 sc5; + u8 sl; if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) @@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) /* test the mapping for validity */ ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); ppd = ppd_from_ibp(ibp); - sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)]; dd = dd_from_ppd(ppd); + + sl = rdma_ah_get_sl(ah_attr); + if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) + return -EINVAL; + + sc5 = ibp->sl_to_sc[sl]; if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) return -EINVAL; return 0; diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index ac116d63e466..f2f11e652dcd 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; struct devx_obj *obj; int err; @@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); if (err) - goto obj_free; + goto obj_destroy; return 0; +obj_destroy: + mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); obj_free: kfree(obj); return err; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 444d16520506..0b34e909505f 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_rdma_ch *ch; - int i; + int i, j; u8 status; shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); @@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - for (i = 0; i < target->req_ring_size; ++i) { - struct srp_request *req = &ch->req_ring[i]; + for (j = 0; j < target->req_ring_size; ++j) { + struct srp_request *req = &ch->req_ring[j]; srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); } diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c index 6f62da2909ec..6caee807cafa 100644 --- a/drivers/input/keyboard/atakbd.c +++ b/drivers/input/keyboard/atakbd.c @@ -75,8 +75,7 @@ MODULE_LICENSE("GPL"); */ -static unsigned char atakbd_keycode[0x72] = { /* American layout */ - [0] = KEY_GRAVE, +static unsigned char atakbd_keycode[0x73] = { /* American layout */ [1] = KEY_ESC, [2] = KEY_1, [3] = KEY_2, @@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */ [38] = KEY_L, [39] = KEY_SEMICOLON, [40] = KEY_APOSTROPHE, - [41] = KEY_BACKSLASH, /* FIXME, '#' */ + [41] = KEY_GRAVE, [42] = KEY_LEFTSHIFT, - [43] = KEY_GRAVE, /* FIXME: '~' */ + [43] = KEY_BACKSLASH, [44] = KEY_Z, [45] = KEY_X, [46] = KEY_C, @@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */ [66] = KEY_F8, [67] = KEY_F9, [68] = KEY_F10, - [69] = KEY_ESC, - [70] = KEY_DELETE, - [71] = KEY_KP7, - [72] = KEY_KP8, - [73] = KEY_KP9, + [71] = KEY_HOME, + [72] = KEY_UP, [74] = KEY_KPMINUS, - [75] = KEY_KP4, - [76] = KEY_KP5, - [77] = KEY_KP6, + [75] = 
KEY_LEFT, + [77] = KEY_RIGHT, [78] = KEY_KPPLUS, - [79] = KEY_KP1, - [80] = KEY_KP2, - [81] = KEY_KP3, - [82] = KEY_KP0, - [83] = KEY_KPDOT, - [90] = KEY_KPLEFTPAREN, - [91] = KEY_KPRIGHTPAREN, - [92] = KEY_KPASTERISK, /* FIXME */ - [93] = KEY_KPASTERISK, - [94] = KEY_KPPLUS, - [95] = KEY_HELP, + [80] = KEY_DOWN, + [82] = KEY_INSERT, + [83] = KEY_DELETE, [96] = KEY_102ND, - [97] = KEY_KPASTERISK, /* FIXME */ - [98] = KEY_KPSLASH, + [97] = KEY_UNDO, + [98] = KEY_HELP, [99] = KEY_KPLEFTPAREN, [100] = KEY_KPRIGHTPAREN, [101] = KEY_KPSLASH, [102] = KEY_KPASTERISK, - [103] = KEY_UP, - [104] = KEY_KPASTERISK, /* FIXME */ - [105] = KEY_LEFT, - [106] = KEY_RIGHT, - [107] = KEY_KPASTERISK, /* FIXME */ - [108] = KEY_DOWN, - [109] = KEY_KPASTERISK, /* FIXME */ - [110] = KEY_KPASTERISK, /* FIXME */ - [111] = KEY_KPASTERISK, /* FIXME */ - [112] = KEY_KPASTERISK, /* FIXME */ - [113] = KEY_KPASTERISK /* FIXME */ + [103] = KEY_KP7, + [104] = KEY_KP8, + [105] = KEY_KP9, + [106] = KEY_KP4, + [107] = KEY_KP5, + [108] = KEY_KP6, + [109] = KEY_KP1, + [110] = KEY_KP2, + [111] = KEY_KP3, + [112] = KEY_KP0, + [113] = KEY_KPDOT, + [114] = KEY_KPENTER, }; static struct input_dev *atakbd_dev; @@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev; static void atakbd_interrupt(unsigned char scancode, char down) { - if (scancode < 0x72) { /* scancodes < 0xf2 are keys */ + if (scancode < 0x73) { /* scancodes < 0xf3 are keys */ // report raw events here? scancode = atakbd_keycode[scancode]; - if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */ - input_report_key(atakbd_dev, scancode, 1); - input_report_key(atakbd_dev, scancode, 0); - input_sync(atakbd_dev); - } else { - input_report_key(atakbd_dev, scancode, down); - input_sync(atakbd_dev); - } - } else /* scancodes >= 0xf2 are mouse data, most likely */ + input_report_key(atakbd_dev, scancode, down); + input_sync(atakbd_dev); + } else /* scancodes >= 0xf3 are mouse data, most likely */ printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); return; diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 96a887f33698..eb14ddf69346 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, min = abs->minimum; max = abs->maximum; - if ((min != 0 || max != 0) && max <= min) { + if ((min != 0 || max != 0) && max < min) { printk(KERN_DEBUG "%s: invalid abs[%02x] min:%d max:%d\n", UINPUT_NAME, code, min, max); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 44f57cf6675b..2d95e8d93cc7 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { static const char * const middle_button_pnp_ids[] = { "LEN2131", /* ThinkPad P52 w/ NFC */ "LEN2132", /* ThinkPad P52 */ + "LEN2133", /* ThinkPad P72 w/ NFC */ + "LEN2134", /* ThinkPad P72 */ NULL }; diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c index 80e69bb8283e..83ac8c128192 100644 --- a/drivers/input/touchscreen/egalax_ts.c +++ b/drivers/input/touchscreen/egalax_ts.c @@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev) struct i2c_client *client = to_i2c_client(dev); int ret; + if (device_may_wakeup(dev)) + return enable_irq_wake(client->irq); + ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN); return ret > 0 ? 
0 : ret; } @@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); + if (device_may_wakeup(dev)) + return disable_irq_wake(client->irq); + return egalax_wake_up_device(client); } diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 4e04fff23977..73e47d93e7a0 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev) /* The callers make sure that get_device_id() does not fail here */ devid = get_device_id(dev); + + /* For ACPI HID devices, we simply return the devid as such */ + if (!dev_is_pci(dev)) + return devid; + ivrs_alias = amd_iommu_alias_table[devid]; + pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); if (ivrs_alias == pci_alias) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 5f3f10cf9d9d..bedc801b06a0 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, if (dev && dev_is_pci(dev) && info->pasid_supported) { ret = intel_pasid_alloc_table(dev); if (ret) { - __dmar_remove_one_dev_info(info); - spin_unlock_irqrestore(&device_domain_lock, flags); - return NULL; + pr_warn("No pasid table for %s, pasid disabled\n", + dev_name(dev)); + info->pasid_supported = 0; } } spin_unlock_irqrestore(&device_domain_lock, flags); diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h index 1c05ed6fc5a5..1fb5e12b029a 100644 --- a/drivers/iommu/intel-pasid.h +++ b/drivers/iommu/intel-pasid.h @@ -11,7 +11,7 @@ #define __INTEL_PASID_H #define PASID_MIN 0x1 -#define PASID_MAX 0x100000 +#define PASID_MAX 0x20000 struct pasid_entry { u64 val; diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 258115b10fa9..ad3e2b97469e 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1241,6 +1241,12 @@ err_unprepare_clocks: static void rk_iommu_shutdown(struct platform_device *pdev) { + struct rk_iommu *iommu = platform_get_drvdata(pdev); + int i = 0, irq; + + while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) + devm_free_irq(iommu->dev, irq, iommu); + pm_runtime_force_suspend(&pdev->dev); } diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 83504dd8100a..954dad29e6e8 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca); void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); extern struct workqueue_struct *bcache_wq; +extern struct workqueue_struct *bch_journal_wq; extern struct mutex bch_register_lock; extern struct list_head bch_cache_sets; diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 6116bbf870d8..522c7426f3a0 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca) closure_get(&ca->set->cl); INIT_WORK(&ja->discard_work, journal_discard_work); - schedule_work(&ja->discard_work); + queue_work(bch_journal_wq, &ja->discard_work); } } @@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl) : &j->w[0]; __closure_wake_up(&w->wait); - continue_at_nobarrier(cl, journal_write, system_wq); + continue_at_nobarrier(cl, journal_write, bch_journal_wq); } static void journal_write_unlock(struct closure *cl) @@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure 
*cl) spin_unlock(&c->journal.lock); btree_flush_write(c); - continue_at(cl, journal_write, system_wq); + continue_at(cl, journal_write, bch_journal_wq); return; } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 94c756c66bd7..30ba9aeb5ee8 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -47,6 +47,7 @@ static int bcache_major; static DEFINE_IDA(bcache_device_idx); static wait_queue_head_t unregister_wait; struct workqueue_struct *bcache_wq; +struct workqueue_struct *bch_journal_wq; #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) /* limitation of partitions number on single bcache device */ @@ -2341,6 +2342,9 @@ static void bcache_exit(void) kobject_put(bcache_kobj); if (bcache_wq) destroy_workqueue(bcache_wq); + if (bch_journal_wq) + destroy_workqueue(bch_journal_wq); + if (bcache_major) unregister_blkdev(bcache_major, "bcache"); unregister_reboot_notifier(&reboot); @@ -2370,6 +2374,10 @@ static int __init bcache_init(void) if (!bcache_wq) goto err; + bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0); + if (!bch_journal_wq) + goto err; + bcache_kobj = kobject_create_and_add("bcache", fs_kobj); if (!bcache_kobj) goto err; diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index b5410aeb5fe2..bb41bea950ac 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c @@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client) V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, V4L2_WHITE_BALANCE_AUTO); - if (IS_ERR_OR_NULL(mt9v111->auto_awb)) { - ret = PTR_ERR(mt9v111->auto_awb); - goto error_free_ctrls; - } - mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, &mt9v111_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO); - if (IS_ERR_OR_NULL(mt9v111->auto_exp)) { - ret = PTR_ERR(mt9v111->auto_exp); - goto error_free_ctrls; - } - - /* Initialize timings */ mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, V4L2_CID_HBLANK, MT9V111_CORE_R05_MIN_HBLANK, MT9V111_CORE_R05_MAX_HBLANK, 1, MT9V111_CORE_R05_DEF_HBLANK); - if (IS_ERR_OR_NULL(mt9v111->hblank)) { - ret = PTR_ERR(mt9v111->hblank); - goto error_free_ctrls; - } - mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, V4L2_CID_VBLANK, MT9V111_CORE_R06_MIN_VBLANK, MT9V111_CORE_R06_MAX_VBLANK, 1, MT9V111_CORE_R06_DEF_VBLANK); - if (IS_ERR_OR_NULL(mt9v111->vblank)) { - ret = PTR_ERR(mt9v111->vblank); - goto error_free_ctrls; - } /* PIXEL_RATE is fixed: just expose it to user space. */ v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, @@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client) DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); + if (mt9v111->ctrls.error) { + ret = mt9v111->ctrls.error; + goto error_free_ctrls; + } mt9v111->sd.ctrl_handler = &mt9v111->ctrls; /* Start with default configuration: 640x480 UYVY. 
*/ @@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client) mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); if (ret) - goto error_free_ctrls; + goto error_free_entity; #endif ret = mt9v111_chip_probe(mt9v111); if (ret) - goto error_free_ctrls; + goto error_free_entity; ret = v4l2_async_register_subdev(&mt9v111->sd); if (ret) - goto error_free_ctrls; + goto error_free_entity; return 0; -error_free_ctrls: - v4l2_ctrl_handler_free(&mt9v111->ctrls); - +error_free_entity: #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) media_entity_cleanup(&mt9v111->sd.entity); #endif +error_free_ctrls: + v4l2_ctrl_handler_free(&mt9v111->ctrls); + mutex_destroy(&mt9v111->pwr_mutex); mutex_destroy(&mt9v111->stream_mutex); @@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client) v4l2_async_unregister_subdev(sd); - v4l2_ctrl_handler_free(&mt9v111->ctrls); - #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) media_entity_cleanup(&sd->entity); #endif + v4l2_ctrl_handler_free(&mt9v111->ctrls); + mutex_destroy(&mt9v111->pwr_mutex); mutex_destroy(&mt9v111->stream_mutex); diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 94c1fe0e9787..54fe90acb5b2 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC depends on MFD_CROS_EC select CEC_CORE select CEC_NOTIFIER + select CHROME_PLATFORMS + select CROS_EC_PROTO ---help--- If you say yes here you will get support for the ChromeOS Embedded Controller's CEC. diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c index 729b31891466..a5ae85674ffb 100644 --- a/drivers/media/platform/qcom/camss/camss-csid.c +++ b/drivers/media/platform/qcom/camss/camss-csid.c @@ -10,6 +10,7 @@ #include <linux/clk.h> #include <linux/completion.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/platform_device.h> diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c index c832539397d7..12bce391d71f 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c @@ -12,6 +12,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/io.h> #define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) #define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c index bcd0dfd33618..2e65caf1ecae 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c @@ -12,6 +12,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/io.h> #define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c index 4559f3b1b38c..008afb85023b 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy.c @@ -10,6 +10,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/platform_device.h> diff --git 
a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c index 7f269021d08c..1f33b4eb198c 100644 --- a/drivers/media/platform/qcom/camss/camss-ispif.c +++ b/drivers/media/platform/qcom/camss/camss-ispif.c @@ -10,6 +10,7 @@ #include <linux/clk.h> #include <linux/completion.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/mutex.h> @@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif, else return -EINVAL; - ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line), - GFP_KERNEL); + ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line), + GFP_KERNEL); if (!ispif->line) return -ENOMEM; diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c index da3a9fed9f2d..174a36be6f5d 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c @@ -9,6 +9,7 @@ */ #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/iopoll.h> #include "camss-vfe.h" diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c index 4c584bffd179..0dca8bf9281e 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c @@ -9,6 +9,7 @@ */ #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/iopoll.h> #include "camss-vfe.h" diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c index dcc0c30ef1b1..669615fff6a0 100644 --- a/drivers/media/platform/qcom/camss/camss.c +++ b/drivers/media/platform/qcom/camss/camss.c @@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev) return -EINVAL; } - camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy), - GFP_KERNEL); + camss->csiphy = devm_kcalloc(dev, camss->csiphy_num, + sizeof(*camss->csiphy), GFP_KERNEL); if (!camss->csiphy) return -ENOMEM; - camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid), - GFP_KERNEL); + camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid), + GFP_KERNEL); if (!camss->csid) return -ENOMEM; - camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL); + camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe), + GFP_KERNEL); if (!camss->vfe) return -ENOMEM; @@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = { MODULE_DEVICE_TABLE(of, camss_dt_match); -static int camss_runtime_suspend(struct device *dev) +static int __maybe_unused camss_runtime_suspend(struct device *dev) { return 0; } -static int camss_runtime_resume(struct device *dev) +static int __maybe_unused camss_runtime_resume(struct device *dev) { return 0; } diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 666d319d3d1a..1f6c1eefe389 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, if (msg[0].addr == state->af9033_i2c_addr[1]) reg |= 0x100000; - ret = af9035_wr_regs(d, reg, &msg[0].buf[3], - msg[0].len - 3); + ret = (msg[0].len >= 3) ? 
af9035_wr_regs(d, reg, + &msg[0].buf[3], + msg[0].len - 3) + : -EOPNOTSUPP; } else { /* I2C write */ u8 buf[MAX_XFER_SIZE]; diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index e11ab12fbdf2..800986a79704 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev, } static const struct of_device_id usbhs_child_match_table[] = { - { .compatible = "ti,omap-ehci", }, - { .compatible = "ti,omap-ohci", }, + { .compatible = "ti,ehci-omap", }, + { .compatible = "ti,ohci-omap3", }, { } }; @@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = { .pm = &usbhsomap_dev_pm_ops, .of_match_table = usbhs_omap_dt_ids, }, + .probe = usbhs_omap_probe, .remove = usbhs_omap_remove, }; @@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI"); -static int __init omap_usbhs_drvinit(void) +static int omap_usbhs_drvinit(void) { - return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe); + return platform_driver_register(&usbhs_omap_driver); } /* @@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void) */ fs_initcall_sync(omap_usbhs_drvinit); -static void __exit omap_usbhs_drvexit(void) +static void omap_usbhs_drvexit(void) { platform_driver_unregister(&usbhs_omap_driver); } diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index cbfafc453274..270d3c9580c5 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len) struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_IN(len, val, 1)); + SPI_MEM_OP_DATA_IN(len, NULL, 1)); + void *scratchbuf; int ret; + scratchbuf = kmalloc(len, GFP_KERNEL); + if (!scratchbuf) + return -ENOMEM; + + op.data.buf.in = scratchbuf; ret = spi_mem_exec_op(flash->spimem, &op); if (ret < 0) dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret, code); + else + memcpy(val, scratchbuf, len); + + kfree(scratchbuf); return ret; } @@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1), SPI_MEM_OP_NO_ADDR, SPI_MEM_OP_NO_DUMMY, - SPI_MEM_OP_DATA_OUT(len, buf, 1)); + SPI_MEM_OP_DATA_OUT(len, NULL, 1)); + void *scratchbuf; + int ret; - return spi_mem_exec_op(flash->spimem, &op); + scratchbuf = kmemdup(buf, len, GFP_KERNEL); + if (!scratchbuf) + return -ENOMEM; + + op.data.buf.out = scratchbuf; + ret = spi_mem_exec_op(flash->spimem, &op); + kfree(scratchbuf); + + return ret; } static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 52e2cb35fc79..99c460facd5e 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master, int ret, err = 0; np = mtd_get_of_node(master); - if (!mtd_is_partition(master)) + if (mtd_is_partition(master)) + of_node_get(np); + else np = of_get_child_by_name(np, "partitions"); + of_property_for_each_string(np, "compatible", prop, compat) { parser = mtd_part_get_compatible_parser(compat); if (!parser) diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 67b2065e7a19..b864b93dd289 100644 --- a/drivers/mtd/nand/raw/denali.c +++ 
b/drivers/mtd/nand/raw/denali.c @@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, } iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); + /* + * The ->setup_dma() hook kicks DMA by using the data/command + * interface, which belongs to a different AXI port from the + * register interface. Read back the register to avoid a race. + */ + ioread32(denali->reg + DMA_ENABLE); denali_reset_irq(denali); denali->setup_dma(denali, dma_addr, page, write); diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 7af4d6213ee5..bc2ef5209783 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, for (op_id = 0; op_id < subop->ninstrs; op_id++) { unsigned int offset, naddrs; const u8 *addrs; - int len = nand_subop_get_data_len(subop, op_id); + int len; instr = &subop->instrs[op_id]; @@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, nfc_op->ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | NDCB0_LEN_OVRD; + len = nand_subop_get_data_len(subop, op_id); nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); } nfc_op->data_delay_ns = instr->delay_ns; @@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip, nfc_op->ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | NDCB0_LEN_OVRD; + len = nand_subop_get_data_len(subop, op_id); nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); } nfc_op->data_delay_ns = instr->delay_ns; diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index 9375cef22420..3d27616d9c85 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCFINDIPDDPRT: spin_lock_bh(&ipddp_route_lock); rp = __ipddp_find_route(&rcp); - if (rp) - memcpy(&rcp2, rp, sizeof(rcp2)); + if (rp) { + memset(&rcp2, 0, sizeof(rcp2)); + rcp2.ip = rp->ip; + rcp2.at = rp->at; + rcp2.flags = rp->flags; + } spin_unlock_bh(&ipddp_route_lock); if (rp) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a764a83f99da..0d87e11e7f1d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -971,16 +971,13 @@ static void bond_poll_controller(struct net_device *bond_dev) struct slave *slave = NULL; struct list_head *iter; struct ad_info ad_info; - struct netpoll_info *ni; - const struct net_device_ops *ops; if (BOND_MODE(bond) == BOND_MODE_8023AD) if (bond_3ad_get_active_agg_info(bond, &ad_info)) return; bond_for_each_slave_rcu(bond, slave, iter) { - ops = slave->dev->netdev_ops; - if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller) + if (!bond_slave_is_up(slave)) continue; if (BOND_MODE(bond) == BOND_MODE_8023AD) { @@ -992,11 +989,7 @@ static void bond_poll_controller(struct net_device *bond_dev) continue; } - ni = rcu_dereference_bh(slave->dev->npinfo); - if (down_trylock(&ni->dev_lock)) - continue; - ops->ndo_poll_controller(slave->dev); - up(&ni->dev_lock); + netpoll_poll_dev(slave->dev); } } diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 7c791c1da4b9..bef01331266f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -128,7 +128,7 @@ #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) #define 
MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) -#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5) +#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5) #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) /* Offset 0x0C: ATU Data Register */ diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 307410898fc9..5200e4bdce93 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) chip->ports[entry.portvec].atu_member_violation++; } - if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { + if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { dev_err_ratelimited(chip->dev, "ATU miss violation for %pM portvec %x\n", entry.mac, entry.portvec); diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 024998d6d8c6..6a8e2567f2bd 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); static void bmac_set_timeout(struct net_device *dev); static void bmac_tx_timeout(struct timer_list *t); -static int bmac_output(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev); static void bmac_start(struct net_device *dev); #define DBDMA_SET(x) ( ((x) | (x) << 16) ) @@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev) spin_unlock_irqrestore(&bp->lock, flags); } -static int +static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 0b5429d76bcf..68b9ee489489 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -78,7 +78,7 @@ struct mace_data { static int mace_open(struct net_device *dev); static int mace_close(struct net_device *dev); -static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev); static void mace_set_multicast(struct net_device *dev); static void mace_reset(struct net_device *dev); static int mace_set_address(struct net_device *dev, void *addr); @@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev) mp->timeout_active = 1; } -static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev) { struct mace_data *mp = netdev_priv(dev); volatile struct dbdma_regs __iomem *td = mp->tx_dma; diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index 137cbb470af2..376f2c2613e7 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c @@ -89,7 +89,7 @@ struct mace_frame { static int mace_open(struct net_device *dev); static int mace_close(struct net_device *dev); -static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev); static void mace_set_multicast(struct net_device *dev); static int mace_set_address(struct net_device *dev, void *addr); static void mace_reset(struct net_device *dev); @@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev) * Transmit a frame */ -static int mace_xmit_start(struct sk_buff *skb, 
struct net_device *dev) +static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev) { struct mace_data *mp = netdev_priv(dev); unsigned long flags; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index b5f1f62e8e25..d1e1a0ba8615 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, } /* for single fragment packets use build_skb() */ - if (buff->is_eop) { + if (buff->is_eop && + buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) { skb = build_skb(page_address(buff->page), - buff->len + AQ_SKB_ALIGN); + AQ_CFG_RX_FRAME_MAX); if (unlikely(!skb)) { err = -ENOMEM; goto err_exit; @@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self, buff->len - ETH_HLEN, SKB_TRUESIZE(buff->len - ETH_HLEN)); - for (i = 1U, next_ = buff->next, - buff_ = &self->buff_ring[next_]; true; - next_ = buff_->next, - buff_ = &self->buff_ring[next_], ++i) { - skb_add_rx_frag(skb, i, buff_->page, 0, - buff_->len, - SKB_TRUESIZE(buff->len - - ETH_HLEN)); - buff_->is_cleaned = 1; - - if (buff_->is_eop) - break; + if (!buff->is_eop) { + for (i = 1U, next_ = buff->next, + buff_ = &self->buff_ring[next_]; + true; next_ = buff_->next, + buff_ = &self->buff_ring[next_], ++i) { + skb_add_rx_frag(skb, i, + buff_->page, 0, + buff_->len, + SKB_TRUESIZE(buff->len - + ETH_HLEN)); + buff_->is_cleaned = 1; + + if (buff_->is_eop) + break; + } } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 71362b7f6040..fcc2328bb0d9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void poll_bnx2x(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - int i; - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); - } -} -#endif - static int bnx2x_validate_addr(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_tx_timeout = bnx2x_tx_timeout, .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = poll_bnx2x, -#endif .ndo_setup_tc = __bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index cecbb1d1f587..61957b0bbd8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7672,21 +7672,6 @@ static void bnxt_tx_timeout(struct net_device *dev) bnxt_queue_sp_work(bp); } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void bnxt_poll_controller(struct net_device *dev) -{ - struct bnxt *bp = netdev_priv(dev); - int i; - - /* Only process tx rings/combined rings in netpoll mode. 
*/ - for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; - - napi_schedule(&txr->bnapi->napi); - } -} -#endif - static void bnxt_timer(struct timer_list *t) { struct bnxt *bp = from_timer(bp, t, timer); @@ -8027,7 +8012,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) if (ether_addr_equal(addr->sa_data, dev->dev_addr)) return 0; - rc = bnxt_approve_mac(bp, addr->sa_data); + rc = bnxt_approve_mac(bp, addr->sa_data, true); if (rc) return rc; @@ -8520,9 +8505,6 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, .ndo_set_vf_trust = bnxt_set_vf_trust, #endif -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = bnxt_poll_controller, -#endif .ndo_setup_tc = bnxt_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = bnxt_rx_flow_steer, @@ -8827,14 +8809,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp) } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; + bool strict_approval = true; if (is_valid_ether_addr(vf->mac_addr)) { /* overwrite netdev dev_addr with admin VF MAC */ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); + /* Older PF driver or firmware may not approve this + * correctly. + */ + strict_approval = false; } else { eth_hw_addr_random(bp->dev); } - rc = bnxt_approve_mac(bp, bp->dev->dev_addr); + rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); #endif } return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index f3b9fbcc705b..790c684f08ab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } } + if (i == ARRAY_SIZE(nvm_params)) + return -EOPNOTSUPP; + if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) idx = bp->pf.port_id; else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index fcd085a9853a..3962f6fd543c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1104,7 +1104,7 @@ update_vf_mac_exit: mutex_unlock(&bp->hwrm_cmd_lock); } -int bnxt_approve_mac(struct bnxt *bp, u8 *mac) +int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) { struct hwrm_func_vf_cfg_input req = {0}; int rc = 0; @@ -1122,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac) memcpy(req.dflt_mac_addr, mac, ETH_ALEN); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); mac_done: - if (rc) { + if (rc && strict) { rc = -EADDRNOTAVAIL; netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", mac); + return rc; } - return rc; + return 0; } #else @@ -1144,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) { } -int bnxt_approve_mac(struct bnxt *bp, u8 *mac) +int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) { return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index e9b20cd19881..2eed9eda1195 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); void bnxt_update_vf_mac(struct bnxt *); -int bnxt_approve_mac(struct bnxt *, u8 *); +int 
bnxt_approve_mac(struct bnxt *, u8 *, bool); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 092c817f8f11..e1594c9df4c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, return 0; } -static void bnxt_tc_parse_vlan(struct bnxt *bp, - struct bnxt_tc_actions *actions, - const struct tc_action *tc_act) +static int bnxt_tc_parse_vlan(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) { - if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) { + switch (tcf_vlan_action(tc_act)) { + case TCA_VLAN_ACT_POP: actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; - } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) { + break; + case TCA_VLAN_ACT_PUSH: actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); + break; + default: + return -EOPNOTSUPP; } + return 0; } static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, @@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, /* Push/pop VLAN */ if (is_tcf_vlan(tc_act)) { - bnxt_tc_parse_vlan(bp, actions, tc_act); + rc = bnxt_tc_parse_vlan(bp, actions, tc_act); + if (rc) + return rc; continue; } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 16e4ef7d7185..f1a86b422617 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3837,6 +3837,13 @@ static const struct macb_config at91sam9260_config = { .init = macb_init, }; +static const struct macb_config sama5d3macb_config = { + .caps = MACB_CAPS_SG_DISABLED + | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, + .clk_init = macb_clk_init, + .init = macb_init, +}; + static const struct macb_config pc302gem_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, .dma_burst_length = 16, @@ -3904,6 +3911,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,gem", .data = &pc302gem_config }, { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, + { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, { .compatible = "cdns,emac", .data = &emac_config }, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index b8f75a22fb6c..f152da1ce046 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -753,7 +753,6 @@ struct cpl_abort_req_rss { }; struct cpl_abort_req_rss6 { - WR_HDR; union opcode_tid ot; __be32 srqidx_status; }; diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index e2a702996db4..13dfdfca49fc 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget) return rx; } -static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); struct ep93xx_tdesc *txd; diff --git 
a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 3f8fe8fd79cc..6324e80960c3 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -113,7 +113,7 @@ struct net_local { /* Index to functions, as function prototypes. */ static int net_open(struct net_device *dev); -static int net_send_packet(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t net_interrupt(int irq, void *dev_id); static void set_multicast_list(struct net_device *dev); static void net_rx(struct net_device *dev); @@ -324,7 +324,7 @@ net_open(struct net_device *dev) return 0; } -static int +static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *lp = netdev_priv(dev); diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index c8c7ad2eff77..9b5a68b65432 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) /* Wait for link to drop */ time = jiffies + (HZ / 10); do { - if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) + if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) break; if (!in_interrupt()) schedule_timeout_interruptible(1); diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c index dc983450354b..35f6291a3672 100644 --- a/drivers/net/ethernet/i825xx/ether1.c +++ b/drivers/net/ethernet/i825xx/ether1.c @@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG; #define RX_AREA_END 0x0fc00 static int ether1_open(struct net_device *dev); -static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t ether1_sendpacket(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t ether1_interrupt(int irq, void *dev_id); static int ether1_close(struct net_device *dev); static void ether1_setmulticastlist(struct net_device *dev); @@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int +static netdev_tx_t ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) { int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index f00a1dc2128c..2f7ae118217f 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -347,7 +347,7 @@ static const char init_setup[] = 0x7f /* *multi IA */ }; static int i596_open(struct net_device *dev); -static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t i596_interrupt(int irq, void *dev_id); static int i596_close(struct net_device *dev); static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); @@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev) } -static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct i596_private *lp = netdev_priv(dev); struct tx_cmd *tx_cmd; diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 8bb15a8c2a40..1a86184d44c0 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -121,7 +121,8 @@ static int 
sun3_82586_probe1(struct net_device *dev,int ioaddr); static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); static int sun3_82586_open(struct net_device *dev); static int sun3_82586_close(struct net_device *dev); -static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); +static netdev_tx_t sun3_82586_send_packet(struct sk_buff *, + struct net_device *); static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static void sun3_82586_timeout(struct net_device *dev); @@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev) * send frame */ -static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) { int len,i; #ifndef NO_NOPCOMMANDS diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 372664686309..129f4e9f38da 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev) if (of_phy_is_fixed_link(np)) { int res = emac_dt_mdio_probe(dev); - if (!res) { - res = of_phy_register_fixed_link(np); - if (res) - mdiobus_unregister(dev->mii_bus); + if (res) + return res; + + res = of_phy_register_fixed_link(np); + dev->phy_dev = of_phy_find_device(np); + if (res || !dev->phy_dev) { + mdiobus_unregister(dev->mii_bus); + return res ? res : -EINVAL; } - return res; + emac_adjust_link(dev->ndev); + put_device(&dev->phy_dev->mdio.dev); } return 0; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index a903a0ba45e1..7d42582ed48d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface); void fm10k_service_event_schedule(struct fm10k_intfc *interface); void fm10k_macvlan_schedule(struct fm10k_intfc *interface); void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); -#ifdef CONFIG_NET_POLL_CONTROLLER -void fm10k_netpoll(struct net_device *netdev); -#endif /* Netdev */ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 929f538d28bc..538a8467f434 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, .ndo_dfwd_add_station = fm10k_dfwd_add_station, .ndo_dfwd_del_station = fm10k_dfwd_del_station, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = fm10k_netpoll, -#endif .ndo_features_check = fm10k_features_check, }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 15071e4adb98..c859ababeed5 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * fm10k_netpoll - A Polling 'interrupt' handler - * @netdev: network interface device structure - * - * This is used by netconsole to send skbs without having to re-enable - * interrupts. 
It's not called while the normal interrupt routine is executing. - **/ -void fm10k_netpoll(struct net_device *netdev) -{ - struct fm10k_intfc *interface = netdev_priv(netdev); - int i; - - /* if interface is down do nothing */ - if (test_bit(__FM10K_DOWN, interface->state)) - return; - - for (i = 0; i < interface->num_q_vectors; i++) - fm10k_msix_clean_rings(0, interface->q_vector[i]); -} - -#endif #define FM10K_ERR_MSG(type) case (type): error = #type; break static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, struct fm10k_fault *fault) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5906c1c1d19d..fef6d892ed4c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * i40evf_netpoll - A Polling 'interrupt' handler - * @netdev: network interface device structure - * - * This is used by netconsole to send skbs without having to re-enable - * interrupts. It's not called while the normal interrupt routine is executing. - **/ -static void i40evf_netpoll(struct net_device *netdev) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int q_vectors = adapter->num_msix_vectors - NONQ_VECS; - int i; - - /* if interface is down do nothing */ - if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state)) - return; - - for (i = 0; i < q_vectors; i++) - i40evf_msix_clean_rings(0, &adapter->q_vectors[i]); -} - -#endif /** * i40evf_irq_affinity_notify - Callback for affinity changes * @notify: context as to what irq was changed @@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = { .ndo_features_check = i40evf_features_check, .ndo_fix_features = i40evf_fix_features, .ndo_set_features = i40evf_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = i40evf_netpoll, -#endif .ndo_setup_tc = i40evf_setup_tc, }; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f1e80eed2fd6..3f047bb43348 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4806,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) stats->rx_length_errors = vsi_stats->rx_length_errors; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * ice_netpoll - polling "interrupt" handler - * @netdev: network interface device structure - * - * Used by netconsole to send skbs without having to re-enable interrupts. - * This is not called in the normal interrupt path. 
- */ -static void ice_netpoll(struct net_device *netdev) -{ - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; - int i; - - if (test_bit(__ICE_DOWN, vsi->state) || - !test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return; - - for (i = 0; i < vsi->num_q_vectors; i++) - ice_msix_clean_rings(0, vsi->q_vectors[i]); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - /** * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI * @vsi: VSI having NAPI disabled @@ -5497,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ice_netpoll, -#endif /* CONFIG_NET_POLL_CONTROLLER */ .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, .ndo_set_features = ice_set_features, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a32c576c1e65..0796cef96fa3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = { .priority = 0 }; #endif -#ifdef CONFIG_NET_POLL_CONTROLLER -/* for netdump / net console */ -static void igb_netpoll(struct net_device *); -#endif #ifdef CONFIG_PCI_IOV static unsigned int max_vfs; module_param(max_vfs, uint, 0); @@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, .ndo_set_vf_trust = igb_ndo_set_vf_trust, .ndo_get_vf_config = igb_ndo_get_vf_config, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igb_netpoll, -#endif .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, .ndo_fdb_add = igb_ndo_fdb_add, @@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) return 0; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. 
- */ -static void igb_netpoll(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct igb_q_vector *q_vector; - int i; - - for (i = 0; i < adapter->num_q_vectors; i++) { - q_vector = adapter->q_vector[i]; - if (adapter->flags & IGB_FLAG_HAS_MSIX) - wr32(E1000_EIMC, q_vector->eims_value); - else - igb_irq_disable(adapter); - napi_schedule(&q_vector->napi); - } -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - /** * igb_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index d3e72d0f66ef..7722153c4ac2 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); static void ixgb_restore_vlan(struct ixgb_adapter *adapter); -#ifdef CONFIG_NET_POLL_CONTROLLER -/* for netdump / net console */ -static void ixgb_netpoll(struct net_device *dev); -#endif - static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, enum pci_channel_state state); static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); @@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = { .ndo_tx_timeout = ixgb_tx_timeout, .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgb_netpoll, -#endif .ndo_fix_features = ixgb_fix_features, .ndo_set_features = ixgb_set_features, }; @@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ - -static void ixgb_netpoll(struct net_device *dev) -{ - struct ixgb_adapter *adapter = netdev_priv(dev); - - disable_irq(adapter->pdev->irq); - ixgb_intr(adapter->pdev->irq, dev); - enable_irq(adapter->pdev->irq); -} -#endif - /** * ixgb_io_error_detected - called when PCI error is detected * @pdev: pointer to pci device with error diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9a23d33a47ed..f27d73a7bf16 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8768,28 +8768,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) return err; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. 
- */ -static void ixgbe_netpoll(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - - /* if interface is down do nothing */ - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - /* loop through and schedule all active queues */ - for (i = 0; i < adapter->num_q_vectors; i++) - ixgbe_msix_clean_rings(0, adapter->q_vector[i]); -} - -#endif - static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, struct ixgbe_ring *ring) { @@ -10251,9 +10229,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, .ndo_setup_tc = __ixgbe_setup_tc, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgbe_netpoll, -#endif #ifdef IXGBE_FCOE .ndo_select_queue = ixgbe_select_queue, .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d86446d202d5..5a228582423b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ -static void ixgbevf_netpoll(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - int i; - - /* if interface is down do nothing */ - if (test_bit(__IXGBEVF_DOWN, &adapter->state)) - return; - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgbevf_netpoll, -#endif .ndo_features_check = ixgbevf_features_check, .ndo_bpf = ixgbevf_xdp, }; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index bc80a678abc3..b4ed7d394d07 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, if (!data || !(rx_desc->buf_phys_addr)) continue; - dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, - MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); + dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE); __free_page(data); } } @@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, skb_add_rx_frag(rxq->skb, frag_num, page, frag_offset, frag_size, PAGE_SIZE); - dma_unmap_single(dev->dev.parent, phys_addr, - PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(dev->dev.parent, phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE); rxq->left_size -= frag_size; } } else { @@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, frag_offset, frag_size, PAGE_SIZE); - dma_unmap_single(dev->dev.parent, phys_addr, - PAGE_SIZE, - DMA_FROM_DEVICE); + dma_unmap_page(dev->dev.parent, phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE); rxq->left_size -= frag_size; } diff 
--git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 28500417843e..38cc01beea79 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -58,6 +58,8 @@ static struct { */ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, const struct phylink_link_state *state); +static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy); /* Queue modes */ #define MVPP2_QDIST_SINGLE_MODE 0 @@ -3053,10 +3055,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } - cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - if (cause_tx) { - cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; - mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + if (port->has_tx_irqs) { + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } } /* Process RX packets */ @@ -3142,6 +3146,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port) mvpp22_mode_reconfigure(port); if (port->phylink) { + netif_carrier_off(port->dev); phylink_start(port->phylink); } else { /* Phylink isn't used as of now for ACPI, so the MAC has to be @@ -3150,9 +3155,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port) */ struct phylink_link_state state = { .interface = port->phy_interface, - .link = 1, }; mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); + mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface, + NULL); } netif_tx_start_all_queues(port->dev); @@ -4495,10 +4501,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, return; } - netif_tx_stop_all_queues(port->dev); - if (!port->has_phy) - netif_carrier_off(port->dev); - /* Make sure the port is disabled when reconfiguring the mode */ mvpp2_port_disable(port); @@ -4523,16 +4525,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) mvpp2_port_loopback_set(port, state); - /* If the port already was up, make sure it's still in the same state */ - if (state->link || !port->has_phy) { - mvpp2_port_enable(port); - - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - if (!port->has_phy) - netif_carrier_on(dev); - netif_tx_wake_all_queues(dev); - } + mvpp2_port_enable(port); } static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6785661d1a72..fe49384eba48 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1286,20 +1286,6 @@ out: mutex_unlock(&mdev->state_lock); } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void mlx4_en_netpoll(struct net_device *dev) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_cq *cq; - int i; - - for (i = 0; i < priv->tx_ring_num[TX]; i++) { - cq = priv->tx_cq[TX][i]; - napi_schedule(&cq->napi); - } -} -#endif - static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) { u64 reg_id; @@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_tx_timeout = mlx4_en_tx_timeout, .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, -#ifdef 
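Editor's note: several drivers in this series (ixgbe, ixgbevf, mlx4, mlx5e, nfp, tun) drop their .ndo_poll_controller callbacks. The removed callbacks did nothing driver-specific: they only rescheduled NAPI instances the driver had already registered, which the netpoll core can now drive on its own. A hedged sketch of the shape being deleted, with hypothetical names:

#include <linux/netdevice.h>

/* Hypothetical private state: one NAPI context per queue. */
struct example_priv {
	struct { struct napi_struct napi; } q[8];
	int num_queues;
};

#ifdef CONFIG_NET_POLL_CONTROLLER
/* The pattern being removed: blindly kick every NAPI context so
 * netconsole can make progress with interrupts disabled. The core
 * can achieve the same effect by polling the registered NAPI
 * instances itself, so the per-driver callback is redundant.
 */
static void example_netpoll(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->num_queues; i++)
		napi_schedule(&priv->q[i].napi);
}
#endif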
CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx4_en_netpoll, -#endif .ndo_set_features = mlx4_en_set_features, .ndo_fix_features = mlx4_en_fix_features, .ndo_setup_tc = __mlx4_en_setup_tc, @@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, .ndo_get_vf_stats = mlx4_en_get_vf_stats, .ndo_get_vf_config = mlx4_en_get_vf_config, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx4_en_netpoll, -#endif .ndo_set_features = mlx4_en_set_features, .ndo_fix_features = mlx4_en_fix_features, .ndo_setup_tc = __mlx4_en_setup_tc, diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 1f3372c1802e..2df92dbd38e1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) struct mlx4_dev *dev = &priv->dev; struct mlx4_eq *eq = &priv->eq_table.eq[vec]; - if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) + if (!cpumask_available(eq->affinity_mask) || + cpumask_empty(eq->affinity_mask)) return; hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3ce14d42ddc8..a53736c26c0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent) u8 own; do { - own = ent->lay->status_own; + own = READ_ONCE(ent->lay->status_own); if (!(own & CMD_OWNER_HW)) { ent->ret = 0; return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index eddd7702680b..e88340e196f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = { void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { - u32 caps = mlx5_accel_tls_device_caps(priv->mdev); struct net_device *netdev = priv->netdev; + u32 caps; if (!mlx5_accel_is_tls_device(priv->mdev)) return; + caps = mlx5_accel_tls_device_caps(priv->mdev); if (caps & MLX5_ACCEL_TLS_TX) { netdev->features |= NETIF_F_HW_TLS_TX; netdev->hw_features |= NETIF_F_HW_TLS_TX; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a7939e70190..54118b77dc1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4315,22 +4315,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } -#ifdef CONFIG_NET_POLL_CONTROLLER -/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without - * reenabling interrupts. 
- */ -static void mlx5e_netpoll(struct net_device *dev) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_channels *chs = &priv->channels; - - int i; - - for (i = 0; i < chs->num; i++) - napi_schedule(&chs->c[i]->napi); -} -#endif - static const struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, @@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = { #ifdef CONFIG_MLX5_EN_ARFS .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx5e_netpoll, -#endif #ifdef CONFIG_MLX5_ESWITCH /* SRIOV E-Switch NDOs */ .ndo_set_vf_mac = mlx5e_set_vf_mac, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index dae1c5c5d27c..d2f76070ea7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -509,7 +509,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn, sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); - if (next_state == MLX5_RQC_STATE_RDY) { + if (next_state == MLX5_SQC_STATE_RDY) { MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 930700413b1d..b492152c8881 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -44,8 +44,8 @@ #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) #define MLXSW_SP1_FWREV_MAJOR 13 -#define MLXSW_SP1_FWREV_MINOR 1702 -#define MLXSW_SP1_FWREV_SUBMINOR 6 +#define MLXSW_SP1_FWREV_MINOR 1703 +#define MLXSW_SP1_FWREV_SUBMINOR 4 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index e7dce79ff2c9..001b5f714c1b 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev) lan743x_hardware_cleanup(adapter); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) { return bitrev16(crc16(0xFFFF, buf, len)); @@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev) static const struct dev_pm_ops lan743x_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) }; -#endif /*CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ static const struct pci_device_id lan743x_pcidev_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, @@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = { .id_table = lan743x_pcidev_tbl, .probe = lan743x_pcidev_probe, .remove = lan743x_pcidev_remove, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .driver.pm = &lan743x_pm_ops, #endif .shutdown = lan743x_pcidev_shutdown, diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 26bb3b18f3be..3cdf63e35b53 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) struct sk_buff *skb; struct net_device *dev; u32 *buf; - int sz, len; + int sz, len, buf_len; u32 ifh[4]; u32 val; struct frame_info info; @@ -116,14 +116,20 @@ static irqreturn_t 
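Editor's note on the mlx5 cmd hunk above: wrapping the ownership-bit read in READ_ONCE() stops the compiler from caching a stale value across iterations of the polling loop, since the field is written by firmware rather than by the polling CPU. A generic sketch of the idiom; the timeout value and field name are illustrative only.

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define OWNER_HW 0x1

/* Poll a descriptor flag that another agent (firmware/DMA) clears.
 * READ_ONCE() forces a fresh load on every iteration; without it the
 * compiler may legally hoist the load out of the loop.
 */
static int wait_for_ownership(const u8 *status_own)
{
	unsigned long end = jiffies + msecs_to_jiffies(100); /* arbitrary */

	do {
		if (!(READ_ONCE(*status_own) & OWNER_HW))
			return 0;
		udelay(10);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}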
ocelot_xtr_irq_handler(int irq, void *arg) err = -ENOMEM; break; } - buf = (u32 *)skb_put(skb, info.len); + buf_len = info.len - ETH_FCS_LEN; + buf = (u32 *)skb_put(skb, buf_len); len = 0; do { sz = ocelot_rx_frame_word(ocelot, grp, false, &val); *buf++ = val; len += sz; - } while ((sz == 4) && (len < info.len)); + } while (len < buf_len); + + /* Read the FCS and discard it */ + sz = ocelot_rx_frame_word(ocelot, grp, false, &val); + /* Update the statistics if part of the FCS was read before */ + len -= ETH_FCS_LEN - sz; if (sz < 0) { err = sz; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 253bdaef1505..8ed38fd5a852 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3146,21 +3146,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void nfp_net_netpoll(struct net_device *netdev) -{ - struct nfp_net *nn = netdev_priv(netdev); - int i; - - /* nfp_net's NAPIs are statically allocated so even if there is a race - * with reconfig path this will simply try to schedule some disabled - * NAPI instances. - */ - for (i = 0; i < nn->dp.num_stack_tx_rings; i++) - napi_schedule_irqoff(&nn->r_vecs[i].napi); -} -#endif - static void nfp_net_stat64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -3519,9 +3504,6 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_get_stats64 = nfp_net_stat64, .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = nfp_net_netpoll, -#endif .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 6bb76e6d3c14..f5459de6d60a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) static void qed_dcbx_set_params(struct qed_dcbx_results *p_data, - struct qed_hw_info *p_info, - bool enable, - u8 prio, - u8 tc, + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type, enum qed_pci_personality personality) { @@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, else p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; + /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */ + if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) || + test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits))) + p_data->arr[type].dont_add_vlan0 = true; + /* QM reconf data */ - if (p_info->personality == personality) - qed_hw_info_set_offload_tc(p_info, tc); + if (p_hwfn->hw_info.personality == personality) + qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); + + /* Configure dcbx vlan priority in doorbell block for roce EDPM */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + type == DCBX_PROTOCOL_ROCE) { + qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1); + } } /* Update app protocol data and hw_info fields with the TLV info */ static void qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, - 
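Editor's note on the ocelot extraction hunk above: the frame length reported by the extraction header includes the 4-byte FCS, but the stack expects skb->len to cover only the Ethernet frame proper, so the FCS is now read out of the FIFO and discarded instead of being copied into the skb. A condensed sketch of the idea; the word-reader callback is a stand-in for the device-specific FIFO accessor, and the skb is assumed to have word-aligned tailroom.

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Copy a received frame into an skb, excluding the 4-byte FCS that the
 * hardware reports as part of the frame length.
 */
static int copy_frame(struct sk_buff *skb, int frame_len,
		      u32 (*read_word)(void *ctx), void *ctx)
{
	int buf_len = frame_len - ETH_FCS_LEN;
	u32 *buf = (u32 *)skb_put(skb, buf_len);
	int len;

	for (len = 0; len < buf_len; len += 4)
		*buf++ = read_word(ctx);

	/* Drain the FCS so the FIFO stays aligned, but do not keep it. */
	read_word(ctx);
	return buf_len;
}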
struct qed_hwfn *p_hwfn, - bool enable, - u8 prio, u8 tc, enum dcbx_protocol_type type) + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool enable, u8 prio, u8 tc, + enum dcbx_protocol_type type) { - struct qed_hw_info *p_info = &p_hwfn->hw_info; enum qed_pci_personality personality; enum dcbx_protocol_type id; int i; @@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, personality = qed_dcbx_app_update[i].personality; - qed_dcbx_set_params(p_data, p_info, enable, + qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, prio, tc, type, personality); } } @@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, * reconfiguring QM. Get protocol specific data for PF update ramrod command. */ static int -qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, +qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_dcbx_results *p_data, struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, int count, u8 dcbx_version) @@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, enable = true; } - qed_dcbx_update_app_info(p_data, p_hwfn, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, priority, tc, type); } } @@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, continue; enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; - qed_dcbx_update_app_info(p_data, p_hwfn, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, priority, tc, type); } @@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, /* Parse app TLV's to update TC information in hw_info structure for * reconfiguring QM. Get protocol specific data for PF update ramrod command. */ -static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) +static int +qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dcbx_app_priority_feature *p_app; struct dcbx_app_priority_entry *p_tbl; @@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) p_info = &p_hwfn->hw_info; num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); - rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, + rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, num_entries, dcbx_version); if (rc) return rc; @@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, return rc; if (type == QED_DCBX_OPERATIONAL_MIB) { - rc = qed_dcbx_process_mib_info(p_hwfn); + rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt); if (!rc) { /* reconfigure tcs of QM queues according * to negotiation results @@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, p_data->dcb_enable_flag = p_src->arr[type].enable; p_data->dcb_priority = p_src->arr[type].priority; p_data->dcb_tc = p_src->arr[type].tc; + p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; } /* Set pf update ramrod command params */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index a4d688c04e18..01f253ea4b22 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -55,6 +55,7 @@ struct qed_dcbx_app_data { u8 update; /* Update indication */ u8 priority; /* Priority */ u8 tc; /* Traffic Class */ + bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */ }; #define QED_DCBX_VERSION_DISABLED 0 diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 016ca8a7ec8a..97f073fd3725 100644 --- 
a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn, int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) { struct qed_load_req_params load_req_params; - u32 load_code, param, drv_mb_param; + u32 load_code, resp, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; int rc = 0, mfw_rc, i; @@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) if (IS_PF(cdev)) { p_hwfn = QED_LEADING_HWFN(cdev); + + /* Get pre-negotiated values for stag, bandwidth etc. */ + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); + drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_GET_OEM_UPDATES, + drv_mb_param, &resp, ¶m); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to send GET_OEM_UPDATES attention request\n"); + drv_mb_param = STORM_FW_VERSION; rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 8faceb691657..9b3ef00e5782 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12414,6 +12414,7 @@ struct public_drv_mb { #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 +#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000 #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 @@ -12541,6 +12542,9 @@ struct public_drv_mb { #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 + #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 5d37ec7e9b0b..58c7eb9d8e1b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1581,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK; p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; - if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) && - (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) { - qed_wr(p_hwfn, p_ptt, - NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) { + if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) { + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, + p_hwfn->hw_info.ovlan); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1); + + /* Configure DB to add external vlan to EDPM packets */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, + p_hwfn->hw_info.ovlan); + } else { + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0); + qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0); + } + qed_sp_pf_update_stag(p_hwfn); } + DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", + p_hwfn->mcp_info->func_info.ovlan, 
p_hwfn->hw_info.hw_mode); + /* Acknowledge the MFW */ qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, &resp, ¶m); diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index f736f70956fd..2440970882c4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -216,6 +216,12 @@ 0x00c000UL #define DORQ_REG_IFEN \ 0x100040UL +#define DORQ_REG_TAG1_OVRD_MODE \ + 0x1008b4UL +#define DORQ_REG_PF_PCP_BB_K2 \ + 0x1008c4UL +#define DORQ_REG_PF_EXT_VID_BB_K2 \ + 0x1008c8UL #define DORQ_REG_DB_DROP_REASON \ 0x100a2cUL #define DORQ_REG_DB_DROP_DETAILS \ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 1d8631303b53..ab30aaeac6d3 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -13,6 +13,7 @@ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/clk.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/phy.h> @@ -665,6 +666,7 @@ struct rtl8169_private { u16 event_slow; const struct rtl_coalesce_info *coalesce_info; + struct clk *clk; struct mdio_ops { void (*write)(struct rtl8169_private *, int, int); @@ -4069,6 +4071,15 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) phy_speed_up(dev->phydev); genphy_soft_reset(dev->phydev); + + /* It was reported that chip version 33 ends up with 10MBit/Half on a + * 1GBit link after resuming from S3. For whatever reason the PHY on + * this chip doesn't properly start a renegotiation when soft-reset. + * Explicitly requesting a renegotiation fixes this. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_33 && + dev->phydev->autoneg == AUTONEG_ENABLE) + phy_restart_aneg(dev->phydev); } static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@ -4775,12 +4786,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) { if (enable) { - RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); } else { RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); } + + udelay(10); } static void rtl_hw_start_8168bb(struct rtl8169_private *tp) @@ -5625,6 +5638,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) static void rtl_hw_start_8106(struct rtl8169_private *tp) { + rtl_hw_aspm_clkreq_enable(tp, false); + /* Force LAN exit from ASPM if Rx/Tx are not idle */ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); @@ -5633,6 +5648,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); rtl_pcie_state_l2l3_enable(tp, false); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8101(struct rtl8169_private *tp) @@ -7257,6 +7273,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp) } } +static void rtl_disable_clk(void *data) +{ + clk_disable_unprepare(data); +} + static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; @@ -7277,6 +7298,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); tp->supports_gmii = cfg->has_gmii; + /* Get the 
*optional* external "ether_clk" used on some boards */ + tp->clk = devm_clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(tp->clk)) { + rc = PTR_ERR(tp->clk); + if (rc == -ENOENT) { + /* clk-core allows NULL (for suspend / resume) */ + tp->clk = NULL; + } else if (rc == -EPROBE_DEFER) { + return rc; + } else { + dev_err(&pdev->dev, "failed to get clk: %d\n", rc); + return rc; + } + } else { + rc = clk_prepare_enable(tp->clk); + if (rc) { + dev_err(&pdev->dev, "failed to enable clk: %d\n", rc); + return rc; + } + + rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk, + tp->clk); + if (rc) + return rc; + } + /* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 1470fc12282b..9b6bf557a2f5 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -428,6 +428,7 @@ enum EIS_BIT { EIS_CULF1 = 0x00000080, EIS_TFFF = 0x00000100, EIS_QFS = 0x00010000, + EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)), }; /* RIC0 */ @@ -472,6 +473,7 @@ enum RIS0_BIT { RIS0_FRF15 = 0x00008000, RIS0_FRF16 = 0x00010000, RIS0_FRF17 = 0x00020000, + RIS0_RESERVED = GENMASK(31, 18), }; /* RIC1 */ @@ -528,6 +530,7 @@ enum RIS2_BIT { RIS2_QFF16 = 0x00010000, RIS2_QFF17 = 0x00020000, RIS2_RFFF = 0x80000000, + RIS2_RESERVED = GENMASK(30, 18), }; /* TIC */ @@ -544,6 +547,7 @@ enum TIS_BIT { TIS_FTF1 = 0x00000002, /* Undocumented? */ TIS_TFUF = 0x00000100, TIS_TFWF = 0x00000200, + TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4)) }; /* ISS */ @@ -617,6 +621,7 @@ enum GIC_BIT { enum GIS_BIT { GIS_PTCF = 0x00000001, /* Undocumented? */ GIS_PTMF = 0x00000004, + GIS_RESERVED = GENMASK(15, 10), }; /* GIE (R-Car Gen3 only) */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index aff5516b781e..d6f753925352 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -739,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev) u32 eis, ris2; eis = ravb_read(ndev, EIS); - ravb_write(ndev, ~EIS_QFS, EIS); + ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); if (eis & EIS_QFS) { ris2 = ravb_read(ndev, RIS2); - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), + RIS2); /* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF0) @@ -795,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev) u32 tis = ravb_read(ndev, TIS); if (tis & TIS_TFUF) { - ravb_write(ndev, ~TIS_TFUF, TIS); + ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); ravb_get_tx_tstamp(ndev); return true; } @@ -930,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Processing RX Descriptor Ring */ if (ris0 & mask) { /* Clear RX interrupt */ - ravb_write(ndev, ~mask, RIS0); + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); if (ravb_rx(ndev, "a, q)) goto out; } @@ -938,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) if (tis & mask) { spin_lock_irqsave(&priv->lock, flags); /* Clear TX interrupt */ - ravb_write(ndev, ~mask, TIS); + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); ravb_tx_free(ndev, q, true); netif_wake_subqueue(ndev, q); mmiowb(); diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 0721b5c35d91..dce2a40a31e3 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ 
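Editor's note on the r8169 probe hunk above: the driver learns about an optional external "ether_clk". -ENOENT from devm_clk_get() is treated as "no clock" (the clk API accepts a NULL handle), -EPROBE_DEFER is propagated, and a devm action guarantees the clock is disabled again on unbind. A sketch of that probe-time pattern, assuming a generic device and hypothetical helper names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

static void example_disable_clk(void *data)
{
	clk_disable_unprepare(data);
}

/* Acquire and enable an *optional* clock; returns 0 with *clk == NULL
 * when the clock simply is not described for this board.
 */
static int example_get_optional_clk(struct device *dev, const char *id,
				    struct clk **clk)
{
	int ret;

	*clk = devm_clk_get(dev, id);
	if (IS_ERR(*clk)) {
		ret = PTR_ERR(*clk);
		if (ret == -ENOENT) {
			*clk = NULL;	/* clk_* helpers accept NULL */
			return 0;
		}
		return ret;		/* including -EPROBE_DEFER */
	}

	ret = clk_prepare_enable(*clk);
	if (ret)
		return ret;

	/* Undo the enable automatically when the device is unbound. */
	return devm_add_action_or_reset(dev, example_disable_clk, *clk);
}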
b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -315,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev) } } - ravb_write(ndev, ~gis, GIS); + ravb_write(ndev, ~(gis | GIS_RESERVED), GIS); } void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index c5bc124b41a9..d1bb73bf9914 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c @@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev); static int ether3_rx(struct net_device *dev, unsigned int maxcnt); static void ether3_tx(struct net_device *dev); static int ether3_open (struct net_device *dev); -static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t ether3_sendpacket(struct sk_buff *skb, + struct net_device *dev); static irqreturn_t ether3_interrupt (int irq, void *dev_id); static int ether3_close (struct net_device *dev); static void ether3_setmulticastlist (struct net_device *dev); @@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev) /* * Transmit a packet */ -static int +static netdev_tx_t ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 573691bc3b71..70cce63a6081 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev) return 0; } -static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t +sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct hpc3_ethregs *hregs = sp->hregs; diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 18d533fdf14c..3140999642ba 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -99,7 +99,7 @@ struct ioc3_private { static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void ioc3_set_multicast_list(struct net_device *dev); -static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); static void ioc3_timeout(struct net_device *dev); static inline unsigned int ioc3_hash(const unsigned char *addr); static inline void ioc3_stop(struct ioc3_private *ip); @@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = { .remove = ioc3_remove_one, }; -static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long data; struct ioc3_private *ip = netdev_priv(dev); diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index ea55abd62ec7..703fbbefea44 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb) /* * Transmit a packet (called by the kernel) */ -static int meth_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev) { struct meth_private *priv = netdev_priv(dev); unsigned long flags; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 1854f270ad66..b1b305f8f414 100644 --- 
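Editor's note: the ether3, sgiseeq, ioc3 and meth hunks (and the w5100/w5300 ones later in this series) convert transmit handlers from int to netdev_tx_t, matching the .ndo_start_xmit prototype so the compiler can catch mismatched return values. The expected shape, with a hypothetical driver name and a placeholder body:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* .ndo_start_xmit must return netdev_tx_t (NETDEV_TX_OK / NETDEV_TX_BUSY),
 * not a plain int.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (!netif_carrier_ok(dev)) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;	/* the skb was consumed */
	}

	/* ... queue the skb to hardware here ... */
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit = example_start_xmit,
};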
a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -258,10 +258,10 @@ struct stmmac_safety_stats { #define MAX_DMA_RIWT 0xff #define MIN_DMA_RIWT 0x20 /* Tx coalesce parameters */ -#define STMMAC_COAL_TX_TIMER 40000 +#define STMMAC_COAL_TX_TIMER 1000 #define STMMAC_MAX_COAL_TX_TICK 100000 #define STMMAC_TX_MAX_FRAMES 256 -#define STMMAC_TX_FRAMES 64 +#define STMMAC_TX_FRAMES 25 /* Packets types */ enum packets_types { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index c0a855b7ab3b..63e1064b27a2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -48,6 +48,8 @@ struct stmmac_tx_info { /* Frequently used values are kept adjacent for cache effect */ struct stmmac_tx_queue { + u32 tx_count_frames; + struct timer_list txtimer; u32 queue_index; struct stmmac_priv *priv_data; struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; @@ -73,7 +75,14 @@ struct stmmac_rx_queue { u32 rx_zeroc_thresh; dma_addr_t dma_rx_phy; u32 rx_tail_addr; +}; + +struct stmmac_channel { struct napi_struct napi ____cacheline_aligned_in_smp; + struct stmmac_priv *priv_data; + u32 index; + int has_rx; + int has_tx; }; struct stmmac_tc_entry { @@ -109,14 +118,12 @@ struct stmmac_pps_cfg { struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ - u32 tx_count_frames; u32 tx_coal_frames; u32 tx_coal_timer; int tx_coalesce; int hwts_tx_en; bool tx_path_in_lpi_mode; - struct timer_list txtimer; bool tso; unsigned int dma_buf_sz; @@ -137,6 +144,9 @@ struct stmmac_priv { /* TX Queue */ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; + /* Generic channel for NAPI */ + struct stmmac_channel channel[STMMAC_CH_MAX]; + bool oldlink; int speed; int oldduplex; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9f458bb16f2a..75896d6ba6e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -148,12 +148,14 @@ static void stmmac_verify_args(void) static void stmmac_disable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue; - for (queue = 0; queue < rx_queues_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; - napi_disable(&rx_q->napi); + napi_disable(&ch->napi); } } @@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) static void stmmac_enable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue; - for (queue = 0; queue < rx_queues_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; - napi_enable(&rx_q->napi); + napi_enable(&ch->napi); } } @@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) * @queue: TX queue index * Description: it reclaims the transmit resources after transmission completes. 
*/ -static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) +static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry; + unsigned int entry, count = 0; - netif_tx_lock(priv->dev); + __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); priv->xstats.tx_clean++; entry = tx_q->dirty_tx; - while (entry != tx_q->cur_tx) { + while ((entry != tx_q->cur_tx) && (count < budget)) { struct sk_buff *skb = tx_q->tx_skbuff[entry]; struct dma_desc *p; int status; @@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) if (unlikely(status & tx_dma_own)) break; + count++; + /* Make sure descriptor fields are read after reading * the own bit. */ @@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) stmmac_enable_eee_mode(priv); mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } - netif_tx_unlock(priv->dev); + + __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); + + return count; } /** @@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) return false; } +static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) +{ + int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan); + struct stmmac_channel *ch = &priv->channel[chan]; + bool needs_work = false; + + if ((status & handle_rx) && ch->has_rx) { + needs_work = true; + } else { + status &= ~handle_rx; + } + + if ((status & handle_tx) && ch->has_tx) { + needs_work = true; + } else { + status &= ~handle_tx; + } + + if (needs_work && napi_schedule_prep(&ch->napi)) { + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + __napi_schedule(&ch->napi); + } + + return status; +} + /** * stmmac_dma_interrupt - DMA ISR * @priv: driver private structure @@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) u32 channels_to_check = tx_channel_count > rx_channel_count ? tx_channel_count : rx_channel_count; u32 chan; - bool poll_scheduled = false; int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; /* Make sure we never check beyond our status buffer. */ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) channels_to_check = ARRAY_SIZE(status); - /* Each DMA channel can be used for rx and tx simultaneously, yet - * napi_struct is embedded in struct stmmac_rx_queue rather than in a - * stmmac_channel struct. - * Because of this, stmmac_poll currently checks (and possibly wakes) - * all tx queues rather than just a single tx queue. - */ for (chan = 0; chan < channels_to_check; chan++) - status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, - &priv->xstats, chan); - - for (chan = 0; chan < rx_channel_count; chan++) { - if (likely(status[chan] & handle_rx)) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; - - if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); - __napi_schedule(&rx_q->napi); - poll_scheduled = true; - } - } - } - - /* If we scheduled poll, we already know that tx queues will be checked. - * If we didn't schedule poll, see if any DMA channel (used by tx) has a - * completed transmission, if so, call stmmac_poll (once). - */ - if (!poll_scheduled) { - for (chan = 0; chan < tx_channel_count; chan++) { - if (status[chan] & handle_tx) { - /* It doesn't matter what rx queue we choose - * here. We use 0 since it always exists. 
- */ - struct stmmac_rx_queue *rx_q = - &priv->rx_queue[0]; - - if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, - priv->ioaddr, chan); - __napi_schedule(&rx_q->napi); - } - break; - } - } - } + status[chan] = stmmac_napi_check(priv, chan); for (chan = 0; chan < tx_channel_count; chan++) { if (unlikely(status[chan] & tx_hard_error_bump_tc)) { @@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan); - tx_q->tx_tail_addr = tx_q->dma_tx_phy + - (DMA_TX_SIZE * sizeof(struct dma_desc)); + tx_q->tx_tail_addr = tx_q->dma_tx_phy; stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, chan); } @@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) return ret; } +static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); +} + /** * stmmac_tx_timer - mitigation sw timer for tx. * @data: data pointer @@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) */ static void stmmac_tx_timer(struct timer_list *t) { - struct stmmac_priv *priv = from_timer(priv, t, txtimer); - u32 tx_queues_count = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); + struct stmmac_priv *priv = tx_q->priv_data; + struct stmmac_channel *ch; + + ch = &priv->channel[tx_q->queue_index]; - /* let's scan all the tx queues */ - for (queue = 0; queue < tx_queues_count; queue++) - stmmac_tx_clean(priv, queue); + if (likely(napi_schedule_prep(&ch->napi))) + __napi_schedule(&ch->napi); } /** @@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t) */ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) { + u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 chan; + priv->tx_coal_frames = STMMAC_TX_FRAMES; priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; - timer_setup(&priv->txtimer, stmmac_tx_timer, 0); - priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); - add_timer(&priv->txtimer); + + for (chan = 0; chan < tx_channel_count; chan++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + + timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); + } } static void stmmac_set_rings_length(struct stmmac_priv *priv) @@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev) static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; int ret; stmmac_check_ether_addr(priv); @@ -2688,7 +2695,9 @@ irq_error: if (dev->phydev) phy_stop(dev->phydev); - del_timer_sync(&priv->txtimer); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer); + stmmac_hw_teardown(dev); init_error: free_dma_desc_resources(priv); @@ -2708,6 +2717,7 @@ dma_desc_error: static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; if (priv->eee_enabled) del_timer_sync(&priv->eee_ctrl_timer); @@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev) stmmac_disable_all_queues(priv); - del_timer_sync(&priv->txtimer); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer); /* Free the IRQ lines */ free_irq(dev->irq, dev); @@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct 
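Editor's note on the stmmac rework above: the TX coalescing timer and frame counter move from the driver-global private struct into each TX queue, and the timer callback now only schedules the owning channel's NAPI instead of cleaning every queue. A stripped-down sketch of a per-queue timer built with timer_setup()/from_timer(); the structure and helper names are hypothetical.

#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/timer.h>

struct example_txq {
	struct timer_list txtimer;
	struct napi_struct *napi;	/* the channel servicing this queue */
	u32 index;
};

static void example_tx_timer(struct timer_list *t)
{
	/* Recover the queue that owns this timer. */
	struct example_txq *txq = from_timer(txq, t, txtimer);

	/* Defer the actual cleanup to NAPI context. */
	napi_schedule(txq->napi);
}

static void example_txq_init(struct example_txq *txq, struct napi_struct *napi,
			     u32 index, unsigned int usecs)
{
	txq->napi = napi;
	txq->index = index;
	timer_setup(&txq->txtimer, example_tx_timer, 0);
	mod_timer(&txq->txtimer, jiffies + usecs_to_jiffies(usecs));
}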
net_device *dev) priv->xstats.tx_tso_nfrags += nfrags; /* Manage tx mitigation */ - priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else { - priv->tx_count_frames = 0; + tx_q->tx_count_frames += nfrags + 1; + if (priv->tx_coal_frames <= tx_q->tx_count_frames) { stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; + tx_q->tx_count_frames = 0; + } else { + stmmac_tx_timer_arm(priv, queue); } skb_tx_timestamp(skb); @@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); return NETDEV_TX_OK; @@ -3146,14 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) * This approach takes care about the fragments: desc is the first * element in case of no SG. */ - priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else { - priv->tx_count_frames = 0; + tx_q->tx_count_frames += nfrags + 1; + if (priv->tx_coal_frames <= tx_q->tx_count_frames) { stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; + tx_q->tx_count_frames = 0; + } else { + stmmac_tx_timer_arm(priv, queue); } skb_tx_timestamp(skb); @@ -3199,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); stmmac_enable_dma_transmission(priv, priv->ioaddr); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); return NETDEV_TX_OK; @@ -3319,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; unsigned int entry = rx_q->cur_rx; int coe = priv->hw->rx_csum; unsigned int next_entry; @@ -3491,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) else skb->ip_summed = CHECKSUM_UNNECESSARY; - napi_gro_receive(&rx_q->napi, skb); + napi_gro_receive(&ch->napi, skb); priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; @@ -3514,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) * Description : * To look at the incoming frames and clear the tx resources. 
*/ -static int stmmac_poll(struct napi_struct *napi, int budget) +static int stmmac_napi_poll(struct napi_struct *napi, int budget) { - struct stmmac_rx_queue *rx_q = - container_of(napi, struct stmmac_rx_queue, napi); - struct stmmac_priv *priv = rx_q->priv_data; - u32 tx_count = priv->plat->tx_queues_to_use; - u32 chan = rx_q->queue_index; - int work_done = 0; - u32 queue; + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, napi); + struct stmmac_priv *priv = ch->priv_data; + int work_done = 0, work_rem = budget; + u32 chan = ch->index; priv->xstats.napi_poll++; - /* check all the queues */ - for (queue = 0; queue < tx_count; queue++) - stmmac_tx_clean(priv, queue); + if (ch->has_tx) { + int done = stmmac_tx_clean(priv, work_rem, chan); - work_done = stmmac_rx(priv, budget, rx_q->queue_index); - if (work_done < budget) { - napi_complete_done(napi, work_done); - stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + work_done += done; + work_rem -= done; + } + + if (ch->has_rx) { + int done = stmmac_rx(priv, work_rem, chan); + + work_done += done; + work_rem -= done; } + + if (work_done < budget && napi_complete_done(napi, work_done)) + stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + return work_done; } @@ -4198,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device, { struct net_device *ndev = NULL; struct stmmac_priv *priv; + u32 queue, maxq; int ret = 0; - u32 queue; ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), MTL_MAX_TX_QUEUES, @@ -4322,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device, "Enable RX Mitigation via HW Watchdog Timer\n"); } - for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + /* Setup channels NAPI */ + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); - netif_napi_add(ndev, &rx_q->napi, stmmac_poll, - (8 * priv->plat->rx_queues_to_use)); + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + ch->priv_data = priv; + ch->index = queue; + + if (queue < priv->plat->rx_queues_to_use) + ch->has_rx = true; + if (queue < priv->plat->tx_queues_to_use) + ch->has_tx = true; + + netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, + NAPI_POLL_WEIGHT); } mutex_init(&priv->lock); @@ -4372,10 +4402,10 @@ error_netdev_register: priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); error_mdio_register: - for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; - netif_napi_del(&rx_q->napi); + netif_napi_del(&ch->napi); } error_hw_init: destroy_workqueue(priv->wq); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3609c7b696c7..2b800ce1d5bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins) * Description: * This function validates the number of Unicast address entries supported * by a particular Synopsys 10/100/1000 controller. The Synopsys controller - * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter + * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter * logic. 
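Editor's note: the new stmmac_napi_poll() above services TX completion and RX from a single NAPI context, spending the budget on TX first and only re-enabling the DMA interrupt once napi_complete_done() agrees the context can go idle. A generic sketch of that shape; the clean/rx/irq helpers are placeholders, not real stmmac calls.

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct example_channel {
	struct napi_struct napi;
	void *priv;
	u32 index;
	bool has_rx, has_tx;
};

/* Placeholder work functions: each returns how much budget it used. */
static int example_tx_clean(void *priv, int budget, u32 chan) { return 0; }
static int example_rx(void *priv, int budget, u32 chan) { return 0; }
static void example_enable_irq(void *priv, u32 chan) { }

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_channel *ch =
		container_of(napi, struct example_channel, napi);
	int work_done = 0, work_rem = budget;

	if (ch->has_tx) {
		int done = example_tx_clean(ch->priv, work_rem, ch->index);

		work_done += done;
		work_rem -= done;
	}

	if (ch->has_rx)
		work_done += example_rx(ch->priv, work_rem, ch->index);

	/* Only re-arm the interrupt when we are genuinely done and NAPI
	 * agrees to go idle.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_enable_irq(ch->priv, ch->index);

	return work_done;
}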
This function validates a valid, supported configuration is * selected, and defaults to 1 Unicast address if an unsupported * configuration is selected. @@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) int x = ucast_entries; switch (x) { - case 1: - case 32: + case 1 ... 32: case 64: case 128: break; diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 9263d638bd6d..f932923f7d56 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO config TI_DAVINCI_CPDMA tristate "TI DaVinci CPDMA Support" depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST + select GENERIC_ALLOCATOR ---help--- This driver supports TI's DaVinci CPDMA dma engine. diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 2bdfb39215e9..d8ba512f166a 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work) w5100_tx_skb(priv->ndev, skb); } -static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) { struct w5100_priv *priv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 56ae573001e8..80fdbff67d82 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev) netif_wake_queue(ndev); } -static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) { struct w5300_priv *priv = netdev_priv(ndev); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 31c3d77b4733..fe01e141c8f8 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev, net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; + netdev_info(ndev, "VF slot %u %s\n", + net_device_ctx->vf_serial, + net_device_ctx->vf_alloc ? "added" : "removed"); } static void netvsc_receive_inband(struct net_device *ndev, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 70921bbe0e28..3af6d8d15233 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1894,20 +1894,6 @@ out_unlock: rtnl_unlock(); } -static struct net_device *get_netvsc_bymac(const u8 *mac) -{ - struct net_device_context *ndev_ctx; - - list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { - struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx); - - if (ether_addr_equal(mac, dev->perm_addr)) - return dev; - } - - return NULL; -} - static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) { struct net_device_context *net_device_ctx; @@ -2036,26 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w) rtnl_unlock(); } +/* Find netvsc by VMBus serial number. + * The PCI hyperv controller records the serial number as the slot. 
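Editor's note: the stmmac_platform change collapses the separate "case 1:" and "case 32:" labels into the GNU C case-range extension "case 1 ... 32:", which is what the updated comment ("supports 1..32, 64, or 128 entries") actually describes. For reference, the construct looks like this; kernel code relies on the GNU extension, so this is not portable ISO C.

/* Accept 1..32, 64 or 128 unicast filter entries; fall back to 1 otherwise. */
static int validate_ucast_entries(int entries)
{
	switch (entries) {
	case 1 ... 32:	/* GNU C case-range extension */
	case 64:
	case 128:
		return entries;
	default:
		return 1;
	}
}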
+ */ +static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) +{ + struct device *parent = vf_netdev->dev.parent; + struct net_device_context *ndev_ctx; + struct pci_dev *pdev; + + if (!parent || !dev_is_pci(parent)) + return NULL; /* not a PCI device */ + + pdev = to_pci_dev(parent); + if (!pdev->slot) { + netdev_notice(vf_netdev, "no PCI slot information\n"); + return NULL; + } + + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { + if (!ndev_ctx->vf_alloc) + continue; + + if (ndev_ctx->vf_serial == pdev->slot->number) + return hv_get_drvdata(ndev_ctx->device_ctx); + } + + netdev_notice(vf_netdev, + "no netdev found for slot %u\n", pdev->slot->number); + return NULL; +} + static int netvsc_register_vf(struct net_device *vf_netdev) { - struct net_device *ndev; struct net_device_context *net_device_ctx; - struct device *pdev = vf_netdev->dev.parent; struct netvsc_device *netvsc_dev; + struct net_device *ndev; int ret; if (vf_netdev->addr_len != ETH_ALEN) return NOTIFY_DONE; - if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev)) - return NOTIFY_DONE; - - /* - * We will use the MAC address to locate the synthetic interface to - * associate with the VF interface. If we don't find a matching - * synthetic interface, move on. - */ - ndev = get_netvsc_bymac(vf_netdev->perm_addr); + ndev = get_netvsc_byslot(vf_netdev); if (!ndev) return NOTIFY_DONE; @@ -2272,17 +2280,15 @@ static int netvsc_remove(struct hv_device *dev) cancel_delayed_work_sync(&ndev_ctx->dwork); - rcu_read_lock(); - nvdev = rcu_dereference(ndev_ctx->nvdev); - - if (nvdev) + rtnl_lock(); + nvdev = rtnl_dereference(ndev_ctx->nvdev); + if (nvdev) cancel_work_sync(&nvdev->subchan_work); /* * Call to the vsc driver to let it know that the device is being * removed. Also blocks mtu and channel changes. 
*/ - rtnl_lock(); vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); if (vf_netdev) netvsc_unregister_vf(vf_netdev); @@ -2294,7 +2300,6 @@ static int netvsc_remove(struct hv_device *dev) list_del(&ndev_ctx->list); rtnl_unlock(); - rcu_read_unlock(); hv_set_drvdata(dev, NULL); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 740655261e5b..83060fb349f4 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus) } if (bus->started) bus->socket_ops->start(bus->sfp); + bus->netdev->sfp_bus = bus; bus->registered = true; return 0; } @@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) { const struct sfp_upstream_ops *ops = bus->upstream_ops; + bus->netdev->sfp_bus = NULL; if (bus->registered) { if (bus->started) bus->socket_ops->stop(bus->sfp); @@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus) { bus->upstream_ops = NULL; bus->upstream = NULL; - bus->netdev->sfp_bus = NULL; bus->netdev = NULL; } @@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, bus->upstream_ops = ops; bus->upstream = upstream; bus->netdev = ndev; - ndev->sfp_bus = bus; if (bus->sfp) { ret = sfp_register_bus(bus); diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index ce61231e96ea..62dc564b251d 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, if (!skb) goto out; + if (skb_mac_header_len(skb) < ETH_HLEN) + goto drop; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto drop; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ebd07ad82431..e2648b5a3861 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1153,43 +1153,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev, return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void tun_poll_controller(struct net_device *dev) -{ - /* - * Tun only receives frames when: - * 1) the char device endpoint gets data from user space - * 2) the tun socket gets a sendmsg call from user space - * If NAPI is not enabled, since both of those are synchronous - * operations, we are guaranteed never to have pending data when we poll - * for it so there is nothing to do here but return. - * We need this though so netpoll recognizes us as an interface that - * supports polling, which enables bridge devices in virt setups to - * still use netconsole - * If NAPI is enabled, however, we need to schedule polling for all - * queues unless we are using napi_gro_frags(), which we call in - * process context and not in NAPI context. 
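Editor's note on the pppoe_rcv hunk above: it checks that the frame actually carries a full Ethernet header before the PPPoE header is parsed, because skb_mac_header_len() can be shorter than ETH_HLEN for crafted frames. A defensive-parsing sketch in the same spirit; the handler name and payload struct are illustrative, not the real PPPoE definitions.

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

struct example_hdr {
	u8 ver_type;
	u8 code;
	__be16 sid;
	__be16 length;
} __packed;

static int example_rcv(struct sk_buff *skb)
{
	struct example_hdr *hdr;

	/* Reject frames whose MAC header is shorter than a full Ethernet
	 * header before trusting anything behind it.
	 */
	if (skb_mac_header_len(skb) < ETH_HLEN)
		goto drop;

	/* Make sure our own header is linear before dereferencing it. */
	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto drop;

	hdr = (struct example_hdr *)skb_network_header(skb);
	/* ... dispatch on hdr->code ... */
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}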
- */ - struct tun_struct *tun = netdev_priv(dev); - - if (tun->flags & IFF_NAPI) { - struct tun_file *tfile; - int i; - - if (tun_napi_frags_enabled(tun)) - return; - - rcu_read_lock(); - for (i = 0; i < tun->numqueues; i++) { - tfile = rcu_dereference(tun->tfiles[i]); - if (tfile->napi_enabled) - napi_schedule(&tfile->napi); - } - rcu_read_unlock(); - } - return; -} -#endif static void tun_set_headroom(struct net_device *dev, int new_hr) { @@ -1283,9 +1246,6 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_select_queue = tun_select_queue, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tun_poll_controller, -#endif .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, }; @@ -1365,9 +1325,6 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_select_queue = tun_select_queue, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tun_poll_controller, -#endif .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index e3270deecec2..533b6fb8d923 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1213,13 +1213,13 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ - {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ - {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ - {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ - {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 8d679c8b7f25..41a00cd76955 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, int mac_len, delta, off; struct xdp_buff xdp; + skb_orphan(skb); + rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (unlikely(!xdp_prog)) { @@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, skb_copy_header(nskb, skb); head_off = skb_headroom(nskb) - skb_headroom(skb); skb_headers_offset_update(nskb, head_off); - if (skb->sk) - skb_set_owner_w(nskb, skb->sk); consume_skb(skb); skb = 
nskb; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 9407acbd19a9..f17f602e6171 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -908,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, BUG_ON(pull_to <= skb_headlen(skb)); __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } - BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); + if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { + queue->rx.rsp_cons = ++cons; + kfree_skb(nskb); + return ~0U; + } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(nfrag), @@ -1045,6 +1049,8 @@ err: skb->len += rx->status; i = xennet_fill_frags(queue, skb, &tmpq); + if (unlikely(i == ~0U)) + goto err; if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 5a9562881d4e..9fe3fff818b8 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) INIT_WORK(&ctrl->ana_work, nvme_ana_work); ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL); - if (!ctrl->ana_log_buf) + if (!ctrl->ana_log_buf) { + error = -ENOMEM; goto out; + } error = nvme_read_ana_log(ctrl, true); if (error) @@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) out_free_ana_log_buf: kfree(ctrl->ana_log_buf); out: - return -ENOMEM; + return error; } void nvme_mpath_uninit(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index a21caea1e080..2008fa62a373 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) offset += len; ngrps++; } + for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) { + if (nvmet_ana_group_enabled[grpid]) + ngrps++; + } hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); hdr.ngrps = cpu_to_le16(ngrps); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 778c4f76a884..2153956a0b20 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, if (val & PCIE_ATU_ENABLE) return; - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Outbound iATU is not being enabled\n"); } @@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, if (val & PCIE_ATU_ENABLE) return; - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Outbound iATU is not being enabled\n"); } @@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, if (val & PCIE_ATU_ENABLE) return 0; - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Inbound iATU is not being enabled\n"); @@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, if (val & PCIE_ATU_ENABLE) return 0; - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Inbound iATU is not being enabled\n"); diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 96126fd8403c..9f1a5e399b70 100644 --- 
a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -26,8 +26,7 @@ /* Parameters for the waiting for iATU enabled routine */ #define LINK_WAIT_MAX_IATU_RETRIES 5 -#define LINK_WAIT_IATU_MIN 9000 -#define LINK_WAIT_IATU_MAX 10000 +#define LINK_WAIT_IATU 9 /* Synopsys-specific PCIe configuration registers */ #define PCIE_PORT_LINK_CONTROL 0x710 diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index c00f82cc54aa..9ba4d12c179c 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version; #define STATUS_REVISION_MISMATCH 0xC0000059 +/* space for 32bit serial number as string */ +#define SLOT_NAME_SIZE 11 + /* * Message Types */ @@ -494,6 +497,7 @@ struct hv_pci_dev { struct list_head list_entry; refcount_t refs; enum hv_pcichild_state state; + struct pci_slot *pci_slot; struct pci_function_description desc; bool reported_missing; struct hv_pcibus_device *hbus; @@ -1457,6 +1461,36 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus) spin_unlock_irqrestore(&hbus->device_list_lock, flags); } +/* + * Assign entries in sysfs pci slot directory. + * + * Note that this function does not need to lock the children list + * because it is called from pci_devices_present_work which + * is serialized with hv_eject_device_work because they are on the + * same ordered workqueue. Therefore hbus->children list will not change + * even when pci_create_slot sleeps. + */ +static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) +{ + struct hv_pci_dev *hpdev; + char name[SLOT_NAME_SIZE]; + int slot_nr; + + list_for_each_entry(hpdev, &hbus->children, list_entry) { + if (hpdev->pci_slot) + continue; + + slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot)); + snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser); + hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr, + name, NULL); + if (IS_ERR(hpdev->pci_slot)) { + pr_warn("pci_create slot %s failed\n", name); + hpdev->pci_slot = NULL; + } + } +} + /** * create_root_hv_pci_bus() - Expose a new root PCI bus * @hbus: Root PCI bus, as understood by this driver @@ -1480,6 +1514,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); pci_bus_assign_resources(hbus->pci_bus); + hv_pci_assign_slots(hbus); pci_bus_add_devices(hbus->pci_bus); pci_unlock_rescan_remove(); hbus->state = hv_pcibus_installed; @@ -1742,6 +1777,7 @@ static void pci_devices_present_work(struct work_struct *work) */ pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); + hv_pci_assign_slots(hbus); pci_unlock_rescan_remove(); break; @@ -1858,6 +1894,9 @@ static void hv_eject_device_work(struct work_struct *work) list_del(&hpdev->list_entry); spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); + if (hpdev->pci_slot) + pci_destroy_slot(hpdev->pci_slot); + memset(&ctxt, 0, sizeof(ctxt)); ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index ef0b1b6ba86f..12afa7fdf77e 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge) /** * enable_slot - enable, configure a slot * @slot: slot to be enabled + * @bridge: true if enable is for the 
whole bridge (not a single slot) * * This function should be called per *physical slot*, * not per each slot object in ACPI namespace. */ -static void enable_slot(struct acpiphp_slot *slot) +static void enable_slot(struct acpiphp_slot *slot, bool bridge) { struct pci_dev *dev; struct pci_bus *bus = slot->bus; struct acpiphp_func *func; - if (bus->self && hotplug_is_native(bus->self)) { + if (bridge && bus->self && hotplug_is_native(bus->self)) { /* * If native hotplug is used, it will take care of hotplug * slot management and resource allocation for hotplug @@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) trim_stale_devices(dev); /* configure all functions */ - enable_slot(slot); + enable_slot(slot, true); } else { disable_slot(slot); } @@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context) if (bridge) acpiphp_check_bridge(bridge); else if (!(slot->flags & SLOT_IS_GOING_AWAY)) - enable_slot(slot); + enable_slot(slot, false); break; @@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot) /* configure all functions */ if (!(slot->flags & SLOT_ENABLED)) - enable_slot(slot); + enable_slot(slot, false); pci_unlock_rescan_remove(); return 0; diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c index fb1afe55bf53..e7f45d96b0cb 100644 --- a/drivers/pinctrl/intel/pinctrl-cannonlake.c +++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c @@ -15,10 +15,11 @@ #include "pinctrl-intel.h" -#define CNL_PAD_OWN 0x020 -#define CNL_PADCFGLOCK 0x080 -#define CNL_HOSTSW_OWN 0x0b0 -#define CNL_GPI_IE 0x120 +#define CNL_PAD_OWN 0x020 +#define CNL_PADCFGLOCK 0x080 +#define CNL_LP_HOSTSW_OWN 0x0b0 +#define CNL_H_HOSTSW_OWN 0x0c0 +#define CNL_GPI_IE 0x120 #define CNL_GPP(r, s, e, g) \ { \ @@ -30,12 +31,12 @@ #define CNL_NO_GPIO -1 -#define CNL_COMMUNITY(b, s, e, g) \ +#define CNL_COMMUNITY(b, s, e, o, g) \ { \ .barno = (b), \ .padown_offset = CNL_PAD_OWN, \ .padcfglock_offset = CNL_PADCFGLOCK, \ - .hostown_offset = CNL_HOSTSW_OWN, \ + .hostown_offset = (o), \ .ie_offset = CNL_GPI_IE, \ .pin_base = (s), \ .npins = ((e) - (s) + 1), \ @@ -43,6 +44,12 @@ .ngpps = ARRAY_SIZE(g), \ } +#define CNLLP_COMMUNITY(b, s, e, g) \ + CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g) + +#define CNLH_COMMUNITY(b, s, e, g) \ + CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g) + /* Cannon Lake-H */ static const struct pinctrl_pin_desc cnlh_pins[] = { /* GPP_A */ @@ -379,7 +386,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = { static const struct intel_padgroup cnlh_community3_gpps[] = { CNL_GPP(0, 155, 178, 192), /* GPP_K */ CNL_GPP(1, 179, 202, 224), /* GPP_H */ - CNL_GPP(2, 203, 215, 258), /* GPP_E */ + CNL_GPP(2, 203, 215, 256), /* GPP_E */ CNL_GPP(3, 216, 239, 288), /* GPP_F */ CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */ }; @@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = { }; static const struct intel_community cnlh_communities[] = { - CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps), - CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps), - CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps), - CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps), + CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps), + CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps), + CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps), + CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps), }; static const struct intel_pinctrl_soc_data cnlh_soc_data = { @@ -803,9 +810,9 @@ static const struct intel_padgroup 
cnllp_community4_gpps[] = { }; static const struct intel_community cnllp_communities[] = { - CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps), - CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps), - CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps), + CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps), + CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps), + CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps), }; static const struct intel_pinctrl_soc_data cnllp_soc_data = { diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 62b009b27eda..1ea3438ea67e 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -747,13 +747,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = { .owner = THIS_MODULE, }; +/** + * intel_gpio_to_pin() - Translate from GPIO offset to pin number + * @pctrl: Pinctrl structure + * @offset: GPIO offset from gpiolib + * @commmunity: Community is filled here if not %NULL + * @padgrp: Pad group is filled here if not %NULL + * + * When coming through gpiolib irqchip, the GPIO offset is not + * automatically translated to pinctrl pin number. This function can be + * used to find out the corresponding pinctrl pin. + */ +static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset, + const struct intel_community **community, + const struct intel_padgroup **padgrp) +{ + int i; + + for (i = 0; i < pctrl->ncommunities; i++) { + const struct intel_community *comm = &pctrl->communities[i]; + int j; + + for (j = 0; j < comm->ngpps; j++) { + const struct intel_padgroup *pgrp = &comm->gpps[j]; + + if (pgrp->gpio_base < 0) + continue; + + if (offset >= pgrp->gpio_base && + offset < pgrp->gpio_base + pgrp->size) { + int pin; + + pin = pgrp->base + offset - pgrp->gpio_base; + if (community) + *community = comm; + if (padgrp) + *padgrp = pgrp; + + return pin; + } + } + } + + return -EINVAL; +} + static int intel_gpio_get(struct gpio_chip *chip, unsigned offset) { struct intel_pinctrl *pctrl = gpiochip_get_data(chip); void __iomem *reg; u32 padcfg0; + int pin; - reg = intel_get_padcfg(pctrl, offset, PADCFG0); + pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); + if (pin < 0) + return -EINVAL; + + reg = intel_get_padcfg(pctrl, pin, PADCFG0); if (!reg) return -EINVAL; @@ -770,8 +820,13 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value) unsigned long flags; void __iomem *reg; u32 padcfg0; + int pin; - reg = intel_get_padcfg(pctrl, offset, PADCFG0); + pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); + if (pin < 0) + return; + + reg = intel_get_padcfg(pctrl, pin, PADCFG0); if (!reg) return; @@ -790,8 +845,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) struct intel_pinctrl *pctrl = gpiochip_get_data(chip); void __iomem *reg; u32 padcfg0; + int pin; - reg = intel_get_padcfg(pctrl, offset, PADCFG0); + pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL); + if (pin < 0) + return -EINVAL; + + reg = intel_get_padcfg(pctrl, pin, PADCFG0); if (!reg) return -EINVAL; @@ -827,81 +887,6 @@ static const struct gpio_chip intel_gpio_chip = { .set_config = gpiochip_generic_config, }; -/** - * intel_gpio_to_pin() - Translate from GPIO offset to pin number - * @pctrl: Pinctrl structure - * @offset: GPIO offset from gpiolib - * @commmunity: Community is filled here if not %NULL - * @padgrp: Pad group is filled here if not %NULL - * - * When coming through gpiolib irqchip, the GPIO offset is not - * automatically translated to pinctrl pin 
number. This function can be - * used to find out the corresponding pinctrl pin. - */ -static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset, - const struct intel_community **community, - const struct intel_padgroup **padgrp) -{ - int i; - - for (i = 0; i < pctrl->ncommunities; i++) { - const struct intel_community *comm = &pctrl->communities[i]; - int j; - - for (j = 0; j < comm->ngpps; j++) { - const struct intel_padgroup *pgrp = &comm->gpps[j]; - - if (pgrp->gpio_base < 0) - continue; - - if (offset >= pgrp->gpio_base && - offset < pgrp->gpio_base + pgrp->size) { - int pin; - - pin = pgrp->base + offset - pgrp->gpio_base; - if (community) - *community = comm; - if (padgrp) - *padgrp = pgrp; - - return pin; - } - } - } - - return -EINVAL; -} - -static int intel_gpio_irq_reqres(struct irq_data *d) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct intel_pinctrl *pctrl = gpiochip_get_data(gc); - int pin; - int ret; - - pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); - if (pin >= 0) { - ret = gpiochip_lock_as_irq(gc, pin); - if (ret) { - dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n", - pin); - return ret; - } - } - return 0; -} - -static void intel_gpio_irq_relres(struct irq_data *d) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - struct intel_pinctrl *pctrl = gpiochip_get_data(gc); - int pin; - - pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); - if (pin >= 0) - gpiochip_unlock_as_irq(gc, pin); -} - static void intel_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -1117,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data) static struct irq_chip intel_gpio_irqchip = { .name = "intel-gpio", - .irq_request_resources = intel_gpio_irq_reqres, - .irq_release_resources = intel_gpio_irq_relres, .irq_enable = intel_gpio_irq_enable, .irq_ack = intel_gpio_irq_ack, .irq_mask = intel_gpio_irq_mask, diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 41ccc759b8b8..1425c2874d40 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d) unsigned long flags; struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct amd_gpio *gpio_dev = gpiochip_get_data(gc); - u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF); raw_spin_lock_irqsave(&gpio_dev->lock, flags); pin_reg = readl(gpio_dev->base + (d->hwirq)*4); pin_reg |= BIT(INTERRUPT_ENABLE_OFF); pin_reg |= BIT(INTERRUPT_MASK_OFF); writel(pin_reg, gpio_dev->base + (d->hwirq)*4); - /* - * When debounce logic is enabled it takes ~900 us before interrupts - * can be enabled. During this "debounce warm up" period the - * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it - * reads back as 1, signaling that interrupts are now enabled. 
- */ - while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask) - continue; raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); } @@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d) static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) { int ret = 0; - u32 pin_reg; + u32 pin_reg, pin_reg_irq_en, mask; unsigned long flags, irq_flags; struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct amd_gpio *gpio_dev = gpiochip_get_data(gc); @@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) } pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF; + /* + * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the + * debounce registers of any GPIO will block wake/interrupt status + * generation for *all* GPIOs for a length of time that depends on + * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the + * INTERRUPT_ENABLE bit will read as 0. + * + * We temporarily enable irq for the GPIO whose configuration is + * changing, and then wait for it to read back as 1 to know when + * debounce has settled and then disable the irq again. + * We do this polling with the spinlock held to ensure other GPIO + * access routines do not read an incorrect value for the irq enable + * bit of other GPIOs. We keep the GPIO masked while polling to avoid + * spurious irqs, and disable the irq again after polling. + */ + mask = BIT(INTERRUPT_ENABLE_OFF); + pin_reg_irq_en = pin_reg; + pin_reg_irq_en |= mask; + pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF); + writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4); + while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask) + continue; writel(pin_reg, gpio_dev->base + (d->hwirq)*4); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index d975462a4c57..f10af5c383c5 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c @@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args, if (obj && obj->type == ACPI_TYPE_INTEGER) *out_data = (u32) obj->integer.value; } + kfree(output.pointer); return status; } diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index 88afe5651d24..cf2229ece9ff 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c @@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev) dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n", priv->buf->std.output[0], priv->buf->std.output[1], priv->buf->std.output[2], priv->buf->std.output[3]); + kfree(output.pointer); return 0; } diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c index 0f8ac8dec3e1..a1bd8aaf4d98 100644 --- a/drivers/regulator/bd71837-regulator.c +++ b/drivers/regulator/bd71837-regulator.c @@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev) BD71837_REG_REGLOCK); } + /* + * There is a HW quirk in BD71837. The shutdown sequence timings for + * bucks/LDOs which are controlled via register interface are changed. + * At PMIC poweroff the voltage for BUCK6/7 is cut immediately at the + * beginning of shut-down sequence. As bucks 6 and 7 are parent + * supplies for LDO5 and LDO6 - this causes LDO5/6 voltage + * monitoring to erroneously detect under voltage and force PMIC to + * emergency state instead of poweroff. 
In order to avoid this we + * disable voltage monitoring for LDO5 and LDO6 + */ + err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2, + BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80, + BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80); + if (err) { + dev_err(&pmic->pdev->dev, + "Failed to disable voltage monitoring\n"); + goto err; + } + for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) { struct regulator_desc *desc; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index bb1324f93143..9577d8941846 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev, if (!rstate->changeable) return -EPERM; - rstate->enabled = en; + rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND; return 0; } @@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc, !rdev->desc->fixed_uV) rdev->is_switch = true; + dev_set_drvdata(&rdev->dev, rdev); ret = device_register(&rdev->dev); if (ret != 0) { put_device(&rdev->dev); goto unset_supplies; } - dev_set_drvdata(&rdev->dev, rdev); rdev_init_debugfs(rdev); /* try to resolve regulators supply since a new one was registered */ diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 638f17d4c848..210fc20f7de7 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np, else if (of_property_read_bool(suspend_np, "regulator-off-in-suspend")) suspend_state->enabled = DISABLE_IN_SUSPEND; - else - suspend_state->enabled = DO_NOTHING_IN_SUSPEND; if (!of_property_read_u32(np, "regulator-suspend-min-microvolt", &pval)) diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index fac377320158..f42a619198c4 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev, vscsi->dds.window[LOCAL].liobn, vscsi->dds.window[REMOTE].liobn); - strcpy(vscsi->eye, "VSCSI "); - strncat(vscsi->eye, vdev->name, MAX_EYE); + snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name); vscsi->dds.unit_id = vdev->unit_address; - strncpy(vscsi->dds.partition_name, partition_name, + strscpy(vscsi->dds.partition_name, partition_name, sizeof(vscsi->dds.partition_name)); vscsi->dds.partition_num = partition_number; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f2ec80b0ffc0..271990bc065b 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref) LEAVE; } +static void ipr_add_remove_thread(struct work_struct *work) +{ + unsigned long lock_flags; + struct ipr_resource_entry *res; + struct scsi_device *sdev; + struct ipr_ioa_cfg *ioa_cfg = + container_of(work, struct ipr_ioa_cfg, scsi_add_work_q); + u8 bus, target, lun; + int did_work; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + +restart: + do { + did_work = 0; + if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->del_from_ml && res->sdev) { + did_work = 1; + sdev = res->sdev; + if (!scsi_device_get(sdev)) { + if (!res->add_to_ml) + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + else + res->del_from_ml = 0; + 
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_remove_device(sdev); + scsi_device_put(sdev); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + break; + } + } + } while (did_work); + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->add_to_ml) { + bus = res->bus; + target = res->target; + lun = res->lun; + res->add_to_ml = 0; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_add_device(ioa_cfg->host, bus, target, lun); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + goto restart; + } + } + + ioa_cfg->scan_done = 1; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); + LEAVE; +} + /** * ipr_worker_thread - Worker thread * @work: ioa config struct @@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref) static void ipr_worker_thread(struct work_struct *work) { unsigned long lock_flags; - struct ipr_resource_entry *res; - struct scsi_device *sdev; struct ipr_dump *dump; struct ipr_ioa_cfg *ioa_cfg = container_of(work, struct ipr_ioa_cfg, work_q); - u8 bus, target, lun; - int did_work; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); @@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work) return; } -restart: - do { - did_work = 0; - if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return; - } + schedule_work(&ioa_cfg->scsi_add_work_q); - list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { - if (res->del_from_ml && res->sdev) { - did_work = 1; - sdev = res->sdev; - if (!scsi_device_get(sdev)) { - if (!res->add_to_ml) - list_move_tail(&res->queue, &ioa_cfg->free_res_q); - else - res->del_from_ml = 0; - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - scsi_remove_device(sdev); - scsi_device_put(sdev); - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - } - break; - } - } - } while (did_work); - - list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { - if (res->add_to_ml) { - bus = res->bus; - target = res->target; - lun = res->lun; - res->add_to_ml = 0; - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - scsi_add_device(ioa_cfg->host, bus, target, lun); - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - goto restart; - } - } - - ioa_cfg->scan_done = 1; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); LEAVE; } @@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, INIT_LIST_HEAD(&ioa_cfg->free_res_q); INIT_LIST_HEAD(&ioa_cfg->used_res_q); INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); + INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); init_waitqueue_head(&ioa_cfg->reset_wait_q); init_waitqueue_head(&ioa_cfg->msi_wait_q); init_waitqueue_head(&ioa_cfg->eeh_wait_q); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 68afbbde54d3..f6baa2351313 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg { u8 saved_mode_page_len; struct work_struct work_q; + struct work_struct scsi_add_work_q; struct workqueue_struct *reset_work_q; wait_queue_head_t reset_wait_q; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 057a60abe664..1a6ed9b0a249 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct 
device_attribute *attr, goto buffer_done; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { + nrport = NULL; + spin_lock(&vport->phba->hbalock); rport = lpfc_ndlp_get_nrport(ndlp); - if (!rport) - continue; - - /* local short-hand pointer. */ - nrport = rport->remoteport; + if (rport) + nrport = rport->remoteport; + spin_unlock(&vport->phba->hbalock); if (!nrport) continue; @@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) struct lpfc_nodelist *ndlp; #if (IS_ENABLED(CONFIG_NVME_FC)) struct lpfc_nvme_rport *rport; + struct nvme_fc_remote_port *remoteport = NULL; #endif shost = lpfc_shost_from_vport(vport); @@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) if (ndlp->rport) ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; #if (IS_ENABLED(CONFIG_NVME_FC)) + spin_lock(&vport->phba->hbalock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) + remoteport = rport->remoteport; + spin_unlock(&vport->phba->hbalock); + if (remoteport) nvme_fc_set_remoteport_devloss(rport->remoteport, vport->cfg_devloss_tmo); #endif diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 9df0c051349f..aec5b10a8c85 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) unsigned char *statep; struct nvme_fc_local_port *localport; struct lpfc_nvmet_tgtport *tgtp; - struct nvme_fc_remote_port *nrport; + struct nvme_fc_remote_port *nrport = NULL; struct lpfc_nvme_rport *rport; cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); @@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) len += snprintf(buf + len, size - len, "\tRport List:\n"); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { /* local short-hand pointer. 
*/ + spin_lock(&phba->hbalock); rport = lpfc_ndlp_get_nrport(ndlp); - if (!rport) - continue; - - nrport = rport->remoteport; + if (rport) + nrport = rport->remoteport; + spin_unlock(&phba->hbalock); if (!nrport) continue; diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 028462e5994d..918ae18ef8a8 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + spin_lock_irq(&vport->phba->hbalock); oldrport = lpfc_ndlp_get_nrport(ndlp); + spin_unlock_irq(&vport->phba->hbalock); if (!oldrport) lpfc_nlp_get(ndlp); @@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; struct lpfc_nvme_rport *rport; - struct nvme_fc_remote_port *remoteport; + struct nvme_fc_remote_port *remoteport = NULL; localport = vport->localport; @@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (!lport) goto input_err; + spin_lock_irq(&vport->phba->hbalock); rport = lpfc_ndlp_get_nrport(ndlp); - if (!rport) + if (rport) + remoteport = rport->remoteport; + spin_unlock_irq(&vport->phba->hbalock); + if (!remoteport) goto input_err; - remoteport = rport->remoteport; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6033 Unreg nvme remoteport %p, portname x%llx, " "port_id x%06x, portstate x%x port type x%x\n", diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index fecf96f0225c..199d3ba1916d 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -374,8 +374,8 @@ struct atio_from_isp { static inline int fcpcmd_is_corrupted(struct atio *atio) { if (atio->entry_type == ATIO_TYPE7 && - (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < - FCP_CMD_LENGTH_MIN)) + ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) < + FCP_CMD_LENGTH_MIN)) return 1; else return 0; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b79b366a94f7..4a57ffecc7e6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd) case REQ_OP_ZONE_RESET: return sd_zbc_setup_reset_cmnd(cmd); default: - BUG(); + WARN_ON_ONCE(1); + return BLKPREP_KILL; } } @@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) if (rot == 1) { blk_queue_flag_set(QUEUE_FLAG_NONROT, q); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); + } else { + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); } if (sdkp->device->type == TYPE_ZBC) { diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9d5d2ca7fc4f..c55f38ec391c 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) err = -ENOMEM; goto out_error; } + + /* + * Do not use blk-mq at this time because blk-mq does not support + * runtime pm. 
+ */ + host->use_blk_mq = false; + hba = shost_priv(host); hba->host = host; hba->dev = dev; diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 4b5e250e8615..e5c7e1ef6318 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream) struct sdw_master_runtime *m_rt = stream->m_rt; struct sdw_slave_runtime *s_rt, *_s_rt; - list_for_each_entry_safe(s_rt, _s_rt, - &m_rt->slave_rt_list, m_rt_node) - sdw_stream_remove_slave(s_rt->slave, stream); + list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) { + sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream); + sdw_release_slave_stream(s_rt->slave, stream); + } list_del(&m_rt->bus_node); } @@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus, "Master runtime config failed for stream:%s", stream->name); ret = -ENOMEM; - goto error; + goto unlock; } ret = sdw_config_stream(bus->dev, stream, stream_config, false); @@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus, if (ret) goto stream_error; - stream->state = SDW_STREAM_CONFIGURED; + goto unlock; stream_error: sdw_release_master_stream(stream); -error: +unlock: mutex_unlock(&bus->bus_lock); return ret; } @@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master); * @stream: SoundWire stream * @port_config: Port configuration for audio stream * @num_ports: Number of ports + * + * It is expected that Slave is added before adding Master + * to the Stream. + * */ int sdw_stream_add_slave(struct sdw_slave *slave, struct sdw_stream_config *stream_config, @@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave, if (ret) goto stream_error; + /* + * Change stream state to CONFIGURED on first Slave add. + * Bus is not aware of number of Slave(s) in a stream at this + * point so cannot depend on all Slave(s) to be added in order to + * change stream state to CONFIGURED. 
+ */ stream->state = SDW_STREAM_CONFIGURED; goto error; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 7cb3ab0a35a0..3082e72e4f6c 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -30,7 +30,11 @@ #define DRIVER_NAME "fsl-dspi" +#ifdef CONFIG_M5441x +#define DSPI_FIFO_SIZE 16 +#else #define DSPI_FIFO_SIZE 4 +#endif #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) #define SPI_MCR 0x00 @@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi) static void dspi_eoq_write(struct fsl_dspi *dspi) { int fifo_size = DSPI_FIFO_SIZE; + u16 xfer_cmd = dspi->tx_cmd; /* Fill TX FIFO with as many transfers as possible */ while (dspi->len && fifo_size--) { + dspi->tx_cmd = xfer_cmd; /* Request EOQF for last transfer in FIFO */ if (dspi->len == dspi->bytes_per_word || fifo_size == 0) dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 0626e6e3ea0c..421bfc7dda67 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev, *mflags |= SPI_MASTER_NO_RX; spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); - if (IS_ERR(spi_gpio->mosi)) - return PTR_ERR(spi_gpio->mosi); + if (IS_ERR(spi_gpio->sck)) + return PTR_ERR(spi_gpio->sck); for (i = 0; i < num_chipselects; i++) { spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 95dc4d78618d..b37de1d991d6 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, ret = wait_event_interruptible_timeout(rspi->wait, rspi->dma_callbacked, HZ); - if (ret > 0 && rspi->dma_callbacked) + if (ret > 0 && rspi->dma_callbacked) { ret = 0; - else if (!ret) { - dev_err(&rspi->master->dev, "DMA timeout\n"); - ret = -ETIMEDOUT; + } else { + if (!ret) { + dev_err(&rspi->master->dev, "DMA timeout\n"); + ret = -ETIMEDOUT; + } if (tx) dmaengine_terminate_all(rspi->master->dma_tx); if (rx) @@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = { MODULE_DEVICE_TABLE(platform, spi_driver_ids); +#ifdef CONFIG_PM_SLEEP +static int rspi_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct rspi_data *rspi = platform_get_drvdata(pdev); + + return spi_master_suspend(rspi->master); +} + +static int rspi_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct rspi_data *rspi = platform_get_drvdata(pdev); + + return spi_master_resume(rspi->master); +} + +static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume); +#define DEV_PM_OPS &rspi_pm_ops +#else +#define DEV_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + static struct platform_driver rspi_driver = { .probe = rspi_probe, .remove = rspi_remove, .id_table = spi_driver_ids, .driver = { .name = "renesas_spi", + .pm = DEV_PM_OPS, .of_match_table = of_match_ptr(rspi_of_match), }, }; diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 539d6d1a277a..101cd6aae2ea 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) { - sh_msiof_write(p, STR, sh_msiof_read(p, STR)); + sh_msiof_write(p, STR, + sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ)); } static void sh_msiof_spi_write_fifo_8(struct 
sh_msiof_spi_priv *p, @@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = { }; MODULE_DEVICE_TABLE(platform, spi_driver_ids); +#ifdef CONFIG_PM_SLEEP +static int sh_msiof_spi_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); + + return spi_master_suspend(p->master); +} + +static int sh_msiof_spi_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); + + return spi_master_resume(p->master); +} + +static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, + sh_msiof_spi_resume); +#define DEV_PM_OPS &sh_msiof_spi_pm_ops +#else +#define DEV_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + static struct platform_driver sh_msiof_spi_drv = { .probe = sh_msiof_spi_probe, .remove = sh_msiof_spi_remove, .id_table = spi_driver_ids, .driver = { .name = "spi_sh_msiof", + .pm = DEV_PM_OPS, .of_match_table = of_match_ptr(sh_msiof_match), }, }; diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 6f7b946b5ced..1427f343b39a 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev) goto exit_free_master; } + /* disabled clock may cause interrupt storm upon request */ + tspi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(tspi->clk)) { + ret = PTR_ERR(tspi->clk); + dev_err(&pdev->dev, "Can not get clock %d\n", ret); + goto exit_free_master; + } + ret = clk_prepare(tspi->clk); + if (ret < 0) { + dev_err(&pdev->dev, "Clock prepare failed %d\n", ret); + goto exit_free_master; + } + ret = clk_enable(tspi->clk); + if (ret < 0) { + dev_err(&pdev->dev, "Clock enable failed %d\n", ret); + goto exit_free_master; + } + spi_irq = platform_get_irq(pdev, 0); tspi->irq = spi_irq; ret = request_threaded_irq(tspi->irq, tegra_slink_isr, @@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", tspi->irq); - goto exit_free_master; - } - - tspi->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(tspi->clk)) { - dev_err(&pdev->dev, "can not get clock\n"); - ret = PTR_ERR(tspi->clk); - goto exit_free_irq; + goto exit_clk_disable; } tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); @@ -1138,6 +1149,8 @@ exit_rx_dma_free: tegra_slink_deinit_dma_param(tspi, true); exit_free_irq: free_irq(spi_irq, tspi); +exit_clk_disable: + clk_disable(tspi->clk); exit_free_master: spi_master_put(master); return ret; @@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev) free_irq(tspi->irq, tspi); + clk_disable(tspi->clk); + if (tspi->tx_dma_chan) tegra_slink_deinit_dma_param(tspi, false); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ec395a6baf9c..9da0bc5a036c 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr) */ if (ctlr->num_chipselect == 0) return -EINVAL; - /* allocate dynamic bus number using Linux idr */ - if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { + if (ctlr->bus_num >= 0) { + /* devices with a fixed bus num must check-in with the num */ + mutex_lock(&board_lock); + id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, + ctlr->bus_num + 1, GFP_KERNEL); + mutex_unlock(&board_lock); + if (WARN(id < 0, "couldn't get idr")) + return id == -ENOSPC ? 
-EBUSY : id; + ctlr->bus_num = id; + } else if (ctlr->dev.of_node) { + /* allocate dynamic bus number using Linux idr */ id = of_alias_get_id(ctlr->dev.of_node, "spi"); if (id >= 0) { ctlr->bus_num = id; diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig index f48e06a03cdb..9a58aaf72edd 100644 --- a/drivers/staging/media/mt9t031/Kconfig +++ b/drivers/staging/media/mt9t031/Kconfig @@ -1,9 +1,3 @@ -config SOC_CAMERA_IMX074 - tristate "imx074 support (DEPRECATED)" - depends on SOC_CAMERA && I2C - help - This driver supports IMX074 cameras from Sony - config SOC_CAMERA_MT9T031 tristate "mt9t031 support (DEPRECATED)" depends on SOC_CAMERA && I2C diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 9cdfccbdd06f..cc756a123fd8 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash, sg_init_table(sg, ARRAY_SIZE(sg)); sg_set_buf(sg, buf, payload_length); - sg_set_buf(sg + 1, pad_bytes, padding); + if (padding) + sg_set_buf(sg + 1, pad_bytes, padding); ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding); @@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn) static void iscsit_get_rx_pdu(struct iscsi_conn *conn) { int ret; - u8 buffer[ISCSI_HDR_LEN], opcode; + u8 *buffer, opcode; u32 checksum = 0, digest = 0; struct kvec iov; + buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL); + if (!buffer) + return; + while (!kthread_should_stop()) { /* * Ensure that both TX and RX per connection kthreads @@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) */ iscsit_thread_check_cpumask(conn, current, 0); - memset(buffer, 0, ISCSI_HDR_LEN); memset(&iov, 0, sizeof(struct kvec)); iov.iov_base = buffer; @@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); if (ret != ISCSI_HDR_LEN) { iscsit_rx_thread_wait_for_tcp(conn); - return; + break; } if (conn->conn_ops->HeaderDigest) { @@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); if (ret != ISCSI_CRC_LEN) { iscsit_rx_thread_wait_for_tcp(conn); - return; + break; } iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer, @@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) } if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) - return; + break; opcode = buffer[0] & ISCSI_OPCODE_MASK; @@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn) " while in Discovery Session, rejecting.\n", opcode); iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buffer); - return; + break; } ret = iscsi_target_rx_opcode(conn, buffer); if (ret < 0) - return; + break; } + + kfree(buffer); } int iscsi_target_rx_thread(void *arg) diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 9518ffd8b8ba..4e680d753941 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -26,27 +26,6 @@ #include "iscsi_target_nego.h" #include "iscsi_target_auth.h" -static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) -{ - int j = DIV_ROUND_UP(len, 2), rc; - - rc = hex2bin(dst, src, j); - if (rc < 0) - pr_debug("CHAP string contains non hex digit symbols\n"); - - dst[j] = '\0'; - return j; -} - -static void 
chap_binaryhex_to_asciihex(char *dst, char *src, int src_len) -{ - int i; - - for (i = 0; i < src_len; i++) { - sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff); - } -} - static int chap_gen_challenge( struct iscsi_conn *conn, int caller, @@ -62,7 +41,7 @@ static int chap_gen_challenge( ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); if (unlikely(ret)) return ret; - chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, + bin2hex(challenge_asciihex, chap->challenge, CHAP_CHALLENGE_LENGTH); /* * Set CHAP_C, and copy the generated challenge into c_str. @@ -248,9 +227,16 @@ static int chap_server_compute_md5( pr_err("Could not find CHAP_R.\n"); goto out; } + if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) { + pr_err("Malformed CHAP_R\n"); + goto out; + } + if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) { + pr_err("Malformed CHAP_R\n"); + goto out; + } pr_debug("[server] Got CHAP_R=%s\n", chap_r); - chap_string_to_hex(client_digest, chap_r, strlen(chap_r)); tfm = crypto_alloc_shash("md5", 0, 0); if (IS_ERR(tfm)) { @@ -294,7 +280,7 @@ static int chap_server_compute_md5( goto out; } - chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE); + bin2hex(response, server_digest, MD5_SIGNATURE_SIZE); pr_debug("[server] MD5 Server Digest: %s\n", response); if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) { @@ -349,9 +335,7 @@ static int chap_server_compute_md5( pr_err("Could not find CHAP_C.\n"); goto out; } - pr_debug("[server] Got CHAP_C=%s\n", challenge); - challenge_len = chap_string_to_hex(challenge_binhex, challenge, - strlen(challenge)); + challenge_len = DIV_ROUND_UP(strlen(challenge), 2); if (!challenge_len) { pr_err("Unable to convert incoming challenge\n"); goto out; @@ -360,6 +344,11 @@ static int chap_server_compute_md5( pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); goto out; } + if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) { + pr_err("Malformed CHAP_C\n"); + goto out; + } + pr_debug("[server] Got CHAP_C=%s\n", challenge); /* * During mutual authentication, the CHAP_C generated by the * initiator must not match the original CHAP_C generated by @@ -413,7 +402,7 @@ static int chap_server_compute_md5( /* * Convert response from binary hex to ascii hext. */ - chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE); + bin2hex(response, digest, MD5_SIGNATURE_SIZE); *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", response); *nr_out_len += 1; diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index ea7204d75022..79ad30d34949 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo) /* Get the address of the host memory buffer. */ bdp = pinfo->rx_cur; - while (bdp->cbd_sc & BD_SC_EMPTY) - ; + if (bdp->cbd_sc & BD_SC_EMPTY) + return NO_POLL_CHAR; /* If the buffer address is in the CPM DPRAM, don't * convert it. 
@@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port) poll_chars = 0; } if (poll_chars <= 0) { - poll_chars = poll_wait_key(poll_buf, pinfo); + int ret = poll_wait_key(poll_buf, pinfo); + + if (ret == NO_POLL_CHAR) + return ret; + poll_chars = ret; pollp = poll_buf; } poll_chars--; diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 51e47a63d61a..3f8d1274fc85 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport) struct circ_buf *ring = &sport->rx_ring; int ret, nent; int bits, baud; - struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port); + struct tty_port *port = &sport->port.state->port; + struct tty_struct *tty = port->tty; struct ktermios *termios = &tty->termios; baud = tty_get_baud_rate(tty); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 4341589e0aab..d4e051b578f6 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -2347,6 +2347,14 @@ static int imx_uart_probe(struct platform_device *pdev) ret); return ret; } + + ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0, + dev_name(&pdev->dev), sport); + if (ret) { + dev_err(&pdev->dev, "failed to request rts irq: %d\n", + ret); + return ret; + } } else { ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, dev_name(&pdev->dev), sport); diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index d04b5eeea3c6..170e446a2f62 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port, termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); termios->c_cflag &= CREAD | CBAUD; termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); + termios->c_cflag |= CS8; } spin_unlock_irqrestore(&port->lock, flags); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 32bc3e3fe4d3..5e5da9acaf0a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct * static int tty_reopen(struct tty_struct *tty) { struct tty_driver *driver = tty->driver; + int retval; if (driver->type == TTY_DRIVER_TYPE_PTY && driver->subtype == PTY_TYPE_MASTER) @@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty) tty->count++; - if (!tty->ldisc) - return tty_ldisc_reinit(tty, tty->termios.c_line); + if (tty->ldisc) + return 0; - return 0; + retval = tty_ldisc_reinit(tty, tty->termios.c_line); + if (retval) + tty->count--; + + return retval; } /** diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index a78ad10a119b..73cdc0d633dd 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -32,6 +32,8 @@ #include <asm/io.h> #include <linux/uaccess.h> +#include <linux/nospec.h> + #include <linux/kbd_kern.h> #include <linux/vt_kern.h> #include <linux/kbd_diacr.h> @@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty, if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) ret = -ENXIO; else { + vsa.console = array_index_nospec(vsa.console, + MAX_NR_CONSOLES + 1); vsa.console--; console_lock(); ret = vc_allocate(vsa.console); diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 656d247819c9..bec581fb7c63 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct 
wdm_device *desc) set_bit(WDM_RESPONDING, &desc->flags); spin_unlock_irq(&desc->iuspin); - rv = usb_submit_urb(desc->response, GFP_ATOMIC); + rv = usb_submit_urb(desc->response, GFP_KERNEL); spin_lock_irq(&desc->iuspin); if (rv) { dev_err(&desc->intf->dev, diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c index 15cc76e22123..99116af07f1d 100644 --- a/drivers/usb/common/roles.c +++ b/drivers/usb/common/roles.c @@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep, */ struct usb_role_switch *usb_role_switch_get(struct device *dev) { - return device_connection_find_match(dev, "usb-role-switch", NULL, - usb_role_switch_match); + struct usb_role_switch *sw; + + sw = device_connection_find_match(dev, "usb-role-switch", NULL, + usb_role_switch_match); + + if (!IS_ERR_OR_NULL(sw)) + WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); + + return sw; } EXPORT_SYMBOL_GPL(usb_role_switch_get); @@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get); */ void usb_role_switch_put(struct usb_role_switch *sw) { - if (!IS_ERR_OR_NULL(sw)) + if (!IS_ERR_OR_NULL(sw)) { put_device(&sw->dev); + module_put(sw->dev.parent->driver->owner); + } } EXPORT_SYMBOL_GPL(usb_role_switch_put); diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6ce77b33da61..244417d0dfd1 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb struct async *as = NULL; struct usb_ctrlrequest *dr = NULL; unsigned int u, totlen, isofrmlen; - int i, ret, is_in, num_sgs = 0, ifnum = -1; + int i, ret, num_sgs = 0, ifnum = -1; int number_of_packets = 0; unsigned int stream_id = 0; void *buf; + bool is_in; + bool allow_short = false; + bool allow_zero = false; unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_NO_FSBR | @@ -1471,6 +1474,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb u = 0; switch (uurb->type) { case USBDEVFS_URB_TYPE_CONTROL: + if (is_in) + allow_short = true; if (!usb_endpoint_xfer_control(&ep->desc)) return -EINVAL; /* min 8 byte setup packet */ @@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb break; case USBDEVFS_URB_TYPE_BULK: + if (!is_in) + allow_zero = true; + else + allow_short = true; switch (usb_endpoint_type(&ep->desc)) { case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_ISOC: @@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb if (!usb_endpoint_xfer_int(&ep->desc)) return -EINVAL; interrupt_urb: + if (!is_in) + allow_zero = true; + else + allow_short = true; break; case USBDEVFS_URB_TYPE_ISO: @@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb u = (is_in ? 
URB_DIR_IN : URB_DIR_OUT); if (uurb->flags & USBDEVFS_URB_ISO_ASAP) u |= URB_ISO_ASAP; - if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in) + if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) u |= URB_SHORT_NOT_OK; - if (uurb->flags & USBDEVFS_URB_ZERO_PACKET) + if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) u |= URB_ZERO_PACKET; if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) u |= URB_NO_INTERRUPT; as->urb->transfer_flags = u; + if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) + dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n"); + if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) + dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n"); + as->urb->transfer_buffer_length = uurb->buffer_length; as->urb->setup_packet = (unsigned char *)dr; dr = NULL; diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index e76e95f62f76..a1f225f077cd 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, struct device *dev; struct usb_device *udev; int retval = 0; - int lpm_disable_error = -ENODEV; if (!iface) return -ENODEV; @@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, iface->condition = USB_INTERFACE_BOUND; - /* See the comment about disabling LPM in usb_probe_interface(). */ - if (driver->disable_hub_initiated_lpm) { - lpm_disable_error = usb_unlocked_disable_lpm(udev); - if (lpm_disable_error) { - dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n", - __func__, driver->name); - return -ENOMEM; - } - } - /* Claimed interfaces are initially inactive (suspended) and * runtime-PM-enabled, but only if the driver has autosuspend * support. Otherwise they are marked active, to prevent the @@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver, if (device_is_registered(dev)) retval = device_bind_driver(dev); - /* Attempt to re-enable USB3 LPM, if the disable was successful. */ - if (!lpm_disable_error) - usb_unlocked_enable_lpm(udev); + if (retval) { + dev->driver = NULL; + usb_set_intfdata(iface, NULL); + iface->needs_remote_wakeup = 0; + iface->condition = USB_INTERFACE_UNBOUND; + + /* + * Unbound interfaces are always runtime-PM-disabled + * and runtime-PM-suspended + */ + if (driver->supports_autosuspend) + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + } return retval; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index e77dfe5ed5ec..178d6c6063c0 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry), GFP_KERNEL); if (!quirk_list) { + quirk_count = 0; mutex_unlock(&quirk_mutex); return -ENOMEM; } @@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = { .string = quirks_param, }; -module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644); +device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644); MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks"); /* Lists of quirky USB devices, split in device quirks and interface quirks. 
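The usb/common/roles.c hunk above and the usb/typec/mux.c hunk below apply the same fix pattern: once device_connection_find_match() returns a provider, the consumer pins the provider driver's module with try_module_get() in addition to taking a device reference, and releases both in the matching put helper, so the driver cannot be unloaded while the handle is in use. The sketch below is a simplified illustration of that pairing, not kernel code; the struct provider type, its field, and the provider_get/provider_put names are hypothetical stand-ins.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

/* Hypothetical handle wrapping a looked-up provider device. */
struct provider {
	struct device *dev;
};

/* Pin both the device and the driver module backing the handle. */
static struct provider *provider_get(struct provider *p)
{
	if (!IS_ERR_OR_NULL(p)) {
		/* module reference keeps the provider driver loaded */
		WARN_ON(!try_module_get(p->dev->driver->owner));
		get_device(p->dev);
	}
	return p;
}

/* Drop both references taken in provider_get(). */
static void provider_put(struct provider *p)
{
	if (!IS_ERR_OR_NULL(p)) {
		module_put(p->dev->driver->owner);
		put_device(p->dev);
	}
}

The two hunks release the references in slightly different orders; the essential point is that every reference taken in the get path has a matching release in the put path.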
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 623be3174fb3..79d8bd7a612e 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting( struct usb_interface_cache *intf_cache = NULL; int i; + if (!config) + return NULL; for (i = 0; i < config->desc.bNumInterfaces; i++) { if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber == iface_num) { diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index df827ff57b0d..23a0df79ef21 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base) return controller; } -static void dsps_dma_controller_destroy(struct dma_controller *c) -{ - struct musb *musb = c->musb; - struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent); - void __iomem *usbss_base = glue->usbss_base; - - musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP); - cppi41_dma_controller_destroy(c); -} - #ifdef CONFIG_PM_SLEEP static void dsps_dma_controller_suspend(struct dsps_glue *glue) { @@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = { #ifdef CONFIG_USB_TI_CPPI41_DMA .dma_init = dsps_dma_controller_create, - .dma_exit = dsps_dma_controller_destroy, + .dma_exit = cppi41_dma_controller_destroy, #endif .enable = dsps_musb_enable, .disable = dsps_musb_disable, diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index ddaac63ecf12..d990aa510fab 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c @@ -9,6 +9,7 @@ #include <linux/device.h> #include <linux/list.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/usb/typec_mux.h> @@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev) mutex_lock(&switch_lock); sw = device_connection_find_match(dev, "typec-switch", NULL, typec_switch_match); - if (!IS_ERR_OR_NULL(sw)) + if (!IS_ERR_OR_NULL(sw)) { + WARN_ON(!try_module_get(sw->dev->driver->owner)); get_device(sw->dev); + } mutex_unlock(&switch_lock); return sw; @@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get); */ void typec_switch_put(struct typec_switch *sw) { - if (!IS_ERR_OR_NULL(sw)) + if (!IS_ERR_OR_NULL(sw)) { + module_put(sw->dev->driver->owner); put_device(sw->dev); + } } EXPORT_SYMBOL_GPL(typec_switch_put); @@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name) mutex_lock(&mux_lock); mux = device_connection_find_match(dev, name, NULL, typec_mux_match); - if (!IS_ERR_OR_NULL(mux)) + if (!IS_ERR_OR_NULL(mux)) { + WARN_ON(!try_module_get(mux->dev->driver->owner)); get_device(mux->dev); + } mutex_unlock(&mux_lock); return mux; @@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get); */ void typec_mux_put(struct typec_mux *mux) { - if (!IS_ERR_OR_NULL(mux)) + if (!IS_ERR_OR_NULL(mux)) { + module_put(mux->dev->driver->owner); put_device(mux->dev); + } } EXPORT_SYMBOL_GPL(typec_mux_put); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7bafa703a992..84575baceebc 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, return ret; for (i = 0; i < count; i++) { - /* Retry eagain maps */ - if (map_ops[i].status == GNTST_eagain) - gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, - &map_ops[i].status, __func__); - - if (map_ops[i].status == GNTST_okay) { + switch (map_ops[i].status) { + case GNTST_okay: + { 
struct xen_page_foreign *foreign; SetPageForeign(pages[i]); foreign = xen_page_foreign(pages[i]); foreign->domid = map_ops[i].dom; foreign->gref = map_ops[i].ref; + break; + } + + case GNTST_no_device_space: + pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n"); + break; + + case GNTST_eagain: + /* Retry eagain maps */ + gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, + map_ops + i, + &map_ops[i].status, __func__); + /* Test status in next loop iteration. */ + i--; + break; + + default: + break; } } @@ -447,6 +447,7 @@ bool dax_lock_mapping_entry(struct page *page) xa_unlock_irq(&mapping->i_pages); break; } else if (IS_ERR(entry)) { + xa_unlock_irq(&mapping->i_pages); WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN); continue; } @@ -1120,21 +1121,12 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry, { struct inode *inode = mapping->host; unsigned long vaddr = vmf->address; - vm_fault_t ret = VM_FAULT_NOPAGE; - struct page *zero_page; - pfn_t pfn; - - zero_page = ZERO_PAGE(0); - if (unlikely(!zero_page)) { - ret = VM_FAULT_OOM; - goto out; - } + pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); + vm_fault_t ret; - pfn = page_to_pfn_t(zero_page); dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, false); ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); -out: trace_dax_load_hole(inode, vmf, ret); return ret; } diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 7f7ee18fe179..e4bb9386c045 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) } inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); ei->i_flags = le32_to_cpu(raw_inode->i_flags); + ext2_set_inode_flags(inode); ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); ei->i_frag_no = raw_inode->i_frag; ei->i_frag_size = raw_inode->i_fsize; @@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); } brelse (bh); - ext2_set_inode_flags(inode); unlock_new_inode(inode); return inode; diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index e2902d394f1b..f93f9881ec18 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) error_msg = "rec_len is too small for name_len"; else if (unlikely(((char *) de - buf) + rlen > size)) - error_msg = "directory entry across range"; + error_msg = "directory entry overrun"; else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; @@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, if (filp) ext4_error_file(filp, function, line, bh->b_blocknr, - "bad entry in directory: %s - offset=%u(%u), " - "inode=%u, rec_len=%d, name_len=%d", - error_msg, (unsigned) (offset % size), - offset, le32_to_cpu(de->inode), - rlen, de->name_len); + "bad entry in directory: %s - offset=%u, " + "inode=%u, rec_len=%d, name_len=%d, size=%d", + error_msg, offset, le32_to_cpu(de->inode), + rlen, de->name_len, size); else ext4_error_inode(dir, function, line, bh->b_blocknr, - "bad entry in directory: %s - offset=%u(%u), " - "inode=%u, rec_len=%d, name_len=%d", - error_msg, (unsigned) (offset % size), - offset, le32_to_cpu(de->inode), - rlen, de->name_len); + "bad entry in directory: %s - offset=%u, " + "inode=%u, rec_len=%d, name_len=%d, size=%d", + error_msg, offset, 
le32_to_cpu(de->inode), + rlen, de->name_len, size); return 1; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0f0edd1cd0cd..caff935fbeb8 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -43,6 +43,17 @@ #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION) #include <linux/fscrypt.h> +#include <linux/compiler.h> + +/* Until this gets included into linux/compiler-gcc.h */ +#ifndef __nonstring +#if defined(GCC_VERSION) && (GCC_VERSION >= 80000) +#define __nonstring __attribute__((nonstring)) +#else +#define __nonstring +#endif +#endif + /* * The fourth extended filesystem constants/structures */ @@ -675,6 +686,9 @@ enum { /* Max physical block we can address w/o extents */ #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF +/* Max logical block we can support */ +#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFF + /* * Structure of an inode on the disk */ @@ -1226,7 +1240,7 @@ struct ext4_super_block { __le32 s_feature_ro_compat; /* readonly-compatible feature set */ /*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ /*78*/ char s_volume_name[16]; /* volume name */ -/*88*/ char s_last_mounted[64]; /* directory where last mounted */ +/*88*/ char s_last_mounted[64] __nonstring; /* directory where last mounted */ /*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */ /* * Performance hints. Directory preallocation should only @@ -1277,13 +1291,13 @@ struct ext4_super_block { __le32 s_first_error_time; /* first time an error happened */ __le32 s_first_error_ino; /* inode involved in first error */ __le64 s_first_error_block; /* block involved of first error */ - __u8 s_first_error_func[32]; /* function where the error happened */ + __u8 s_first_error_func[32] __nonstring; /* function where the error happened */ __le32 s_first_error_line; /* line number where error happened */ __le32 s_last_error_time; /* most recent time of an error */ __le32 s_last_error_ino; /* inode involved in last error */ __le32 s_last_error_line; /* line number where error happened */ __le64 s_last_error_block; /* block involved of last error */ - __u8 s_last_error_func[32]; /* function where the error happened */ + __u8 s_last_error_func[32] __nonstring; /* function where the error happened */ #define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts) __u8 s_mount_opts[64]; __le32 s_usr_quota_inum; /* inode for tracking user quota */ diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 3543fe80a3c4..7b4736022761 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) { int err, inline_size; struct ext4_iloc iloc; + size_t inline_len; void *inline_pos; unsigned int offset; struct ext4_dir_entry_2 *de; @@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) goto out; } + inline_len = ext4_get_inline_size(dir); offset = EXT4_INLINE_DOTDOT_SIZE; - while (offset < dir->i_size) { + while (offset < inline_len) { de = ext4_get_inline_entry(dir, &iloc, offset, &inline_pos, &inline_size); if (ext4_check_dir_entry(dir, NULL, de, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index d0dd585add6a..d767e993591d 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3413,12 +3413,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length, { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); unsigned int blkbits = inode->i_blkbits; - unsigned long first_block = offset >> blkbits; - unsigned long last_block = (offset + length - 1) >> blkbits; + unsigned long first_block, 
last_block; struct ext4_map_blocks map; bool delalloc = false; int ret; + if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK) + return -EINVAL; + first_block = offset >> blkbits; + last_block = min_t(loff_t, (offset + length - 1) >> blkbits, + EXT4_MAX_LOGICAL_BLOCK); if (flags & IOMAP_REPORT) { if (ext4_has_inline_data(inode)) { @@ -3948,6 +3952,7 @@ static const struct address_space_operations ext4_dax_aops = { .writepages = ext4_dax_writepages, .direct_IO = noop_direct_IO, .set_page_dirty = noop_set_page_dirty, + .bmap = ext4_bmap, .invalidatepage = noop_invalidatepage, }; @@ -4192,9 +4197,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, return 0; } -static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock) +static void ext4_wait_dax_page(struct ext4_inode_info *ei) { - *did_unlock = true; up_write(&ei->i_mmap_sem); schedule(); down_write(&ei->i_mmap_sem); @@ -4204,14 +4208,12 @@ int ext4_break_layouts(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct page *page; - bool retry; int error; if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem))) return -EINVAL; do { - retry = false; page = dax_layout_busy_page(inode->i_mapping); if (!page) return 0; @@ -4219,8 +4221,8 @@ int ext4_break_layouts(struct inode *inode) error = ___wait_var_event(&page->_refcount, atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE, 0, 0, - ext4_wait_dax_page(ei, &retry)); - } while (error == 0 && retry); + ext4_wait_dax_page(ei)); + } while (error == 0); return error; } @@ -4895,6 +4897,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) * not initialized on a new filesystem. */ } ei->i_flags = le32_to_cpu(raw_inode->i_flags); + ext4_set_inode_flags(inode); inode->i_blocks = ext4_inode_blocks(raw_inode, ei); ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); if (ext4_has_feature_64bit(sb)) @@ -5041,7 +5044,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) goto bad_inode; } brelse(iloc.bh); - ext4_set_inode_flags(inode); unlock_new_inode(inode); return inode; diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index 39b07c2d3384..2305b4374fd3 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) */ sb_start_write(sb); ext4_mmp_csum_set(sb, mmp); - mark_buffer_dirty(bh); lock_buffer(bh); bh->b_end_io = end_buffer_write_sync; get_bh(bh); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 116ff68c5bd4..377d516c475f 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, int credits; u8 old_file_type; + if (new.inode && new.inode->i_nlink == 0) { + EXT4_ERROR_INODE(new.inode, + "target of rename is already freed"); + return -EFSCORRUPTED; + } + if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(new_dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index e5fb38451a73..ebbc663d0798 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -19,6 +19,7 @@ int ext4_resize_begin(struct super_block *sb) { + struct ext4_sb_info *sbi = EXT4_SB(sb); int ret = 0; if (!capable(CAP_SYS_RESOURCE)) @@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb) * because the user tools have no way of handling this. Probably a * bad time to do it anyways. 
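For context on the ext4_iomap_begin() hunk earlier in this ext4 run: the fix rejects mappings that begin past EXT4_MAX_LOGICAL_BLOCK and clamps the end of the request to that limit instead of letting the shifted range wrap. A minimal standalone sketch of that clamping, assuming a 4 KiB block size; map_range() is an invented helper, not the kernel function.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define BLKBITS           12                   /* assume 4 KiB blocks */
#define MAX_LOGICAL_BLOCK 0xFFFFFFFFULL

static int map_range(uint64_t offset, uint64_t length,
		     uint64_t *first_blk, uint64_t *last_blk)
{
	uint64_t first = offset >> BLKBITS;
	uint64_t last  = (offset + length - 1) >> BLKBITS;

	if (first > MAX_LOGICAL_BLOCK)
		return -EINVAL;            /* starts past the addressable range */

	if (last > MAX_LOGICAL_BLOCK)
		last = MAX_LOGICAL_BLOCK;  /* clamp the tail instead of wrapping */

	*first_blk = first;
	*last_blk  = last;
	return 0;
}

int main(void)
{
	uint64_t first, last;

	/* A request that straddles the limit gets its tail clamped. */
	if (!map_range(0xFFFFFFFE000ULL, 1ULL << 24, &first, &last))
		printf("mapping blocks %llu..%llu\n",
		       (unsigned long long)first, (unsigned long long)last);
	return 0;
}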
*/ - if (EXT4_SB(sb)->s_sbh->b_blocknr != + if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) != le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { ext4_warning(sb, "won't resize using backup superblock at %llu", (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); @@ -1986,6 +1987,26 @@ retry: } } + /* + * Make sure the last group has enough space so that it's + * guaranteed to have enough space for all metadata blocks + * that it might need to hold. (We might not need to store + * the inode table blocks in the last block group, but there + * will be cases where this might be needed.) + */ + if ((ext4_group_first_block_no(sb, n_group) + + ext4_group_overhead_blocks(sb, n_group) + 2 + + sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) { + n_blocks_count = ext4_group_first_block_no(sb, n_group); + n_group--; + n_blocks_count_retry = 0; + if (resize_inode) { + iput(resize_inode); + resize_inode = NULL; + } + goto retry; + } + /* extend the last group */ if (n_group == o_group) add = n_blocks_count - o_blocks_count; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5863fd22e90b..1145109968ef 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2145,6 +2145,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); if (test_opt(sb, DATA_ERR_ABORT)) SEQ_OPTS_PUTS("data_err=abort"); + if (DUMMY_ENCRYPTION_ENABLED(sbi)) + SEQ_OPTS_PUTS("test_dummy_encryption"); ext4_show_quota_options(seq, sb); return 0; @@ -4378,11 +4380,13 @@ no_journal: block = ext4_count_free_clusters(sb); ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block)); + ext4_superblock_csum_set(sb); err = percpu_counter_init(&sbi->s_freeclusters_counter, block, GFP_KERNEL); if (!err) { unsigned long freei = ext4_count_free_inodes(sb); sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); + ext4_superblock_csum_set(sb); err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, GFP_KERNEL); } diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index d9ebe11c8990..1d098c3c00e0 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, * for this bh as it's not marked locally * uptodate. 
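The ext4_resize_fs() hunk above trims the requested size back to the previous group boundary whenever the last group could not hold its own metadata, then retries with the smaller size. The loop below models that goto-retry flow in plain C; BLOCKS_PER_GROUP and OVERHEAD_BLOCKS are invented placeholder values, not real ext4 geometry.

#include <stdint.h>
#include <stdio.h>

#define BLOCKS_PER_GROUP  32768u
#define OVERHEAD_BLOCKS   600u      /* bitmaps, inode table, padding, ... */

static uint64_t trim_block_count(uint64_t n_blocks)
{
	for (;;) {
		uint64_t n_group, group_start;

		if (n_blocks <= OVERHEAD_BLOCKS)
			return 0;               /* nothing sensible left */

		n_group = (n_blocks - 1) / BLOCKS_PER_GROUP;
		group_start = n_group * BLOCKS_PER_GROUP;

		/* Enough room left in the last group for its metadata? */
		if (group_start + OVERHEAD_BLOCKS < n_blocks)
			return n_blocks;

		/* Otherwise drop the partial group and retry the smaller size. */
		n_blocks = group_start;
	}
}

int main(void)
{
	uint64_t req = 3u * BLOCKS_PER_GROUP + 10u;

	printf("requested %llu blocks -> resizing to %llu\n",
	       (unsigned long long)req,
	       (unsigned long long)trim_block_count(req));
	return 0;
}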
*/ status = -EIO; + clear_buffer_needs_validate(bh); put_bh(bh); bhs[i] = NULL; continue; diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index ad72261ee3fe..d297fe4472a9 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -464,6 +464,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) ret = -EFAULT; goto out; } + m = NULL; /* skip the list anchor */ } else if (m->type == KCORE_VMALLOC) { vread(buf, (char *)start, tsz); /* we have to zero-fill user buffer even if no read */ diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 23e7042666a7..bf000c8aeffb 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1912,7 +1912,9 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) mutex_unlock(&c->bu_mutex); } - ubifs_assert(c, c->lst.taken_empty_lebs > 0); + if (!c->need_recovery) + ubifs_assert(c, c->lst.taken_empty_lebs > 0); + return 0; } @@ -1954,6 +1956,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode) int dev, vol; char *endptr; + if (!name || !*name) + return ERR_PTR(-EINVAL); + /* First, try to open using the device node path method */ ubi = ubi_open_volume_path(name, mode); if (!IS_ERR(ubi)) diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 61afdfee4b28..f5ad1ede7990 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host, ui->data_len = size; mutex_lock(&host_ui->ui_mutex); - - if (!host->i_nlink) { - err = -ENOENT; - goto out_noent; - } - host->i_ctime = current_time(host); host_ui->xattr_cnt += 1; host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); @@ -190,7 +184,6 @@ out_cancel: host_ui->xattr_size -= CALC_XATTR_BYTES(size); host_ui->xattr_names -= fname_len(nm); host_ui->flags &= ~UBIFS_CRYPT_FL; -out_noent: mutex_unlock(&host_ui->ui_mutex); out_free: make_bad_inode(inode); @@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, mutex_unlock(&ui->ui_mutex); mutex_lock(&host_ui->ui_mutex); - - if (!host->i_nlink) { - err = -ENOENT; - goto out_noent; - } - host->i_ctime = current_time(host); host_ui->xattr_size -= CALC_XATTR_BYTES(old_size); host_ui->xattr_size += CALC_XATTR_BYTES(size); @@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, out_cancel: host_ui->xattr_size -= CALC_XATTR_BYTES(size); host_ui->xattr_size += CALC_XATTR_BYTES(old_size); -out_noent: mutex_unlock(&host_ui->ui_mutex); make_bad_inode(inode); out_free: @@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host, return err; mutex_lock(&host_ui->ui_mutex); - - if (!host->i_nlink) { - err = -ENOENT; - goto out_noent; - } - host->i_ctime = current_time(host); host_ui->xattr_cnt -= 1; host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm)); @@ -521,7 +501,6 @@ out_cancel: host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); host_ui->xattr_names += fname_len(nm); -out_noent: mutex_unlock(&host_ui->ui_mutex); ubifs_release_budget(c, &req); make_bad_inode(inode); @@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name) ubifs_assert(c, inode_is_locked(host)); - if (!host->i_nlink) - return -ENOENT; - if (fname_len(&nm) > UBIFS_MAX_NLEN) return -ENAMETOOLONG; diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 46a8009784df..152b3055e9e1 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -675,7 +675,7 @@ static inline bool 
drm_core_check_feature(struct drm_device *dev, int feature) static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_ATOMIC) || - dev->mode_config.funcs->atomic_commit != NULL; + (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); } diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 582a0ec0aa70..777814755fa6 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -89,7 +89,6 @@ struct drm_panel { struct drm_device *drm; struct drm_connector *connector; struct device *dev; - struct device_link *link; const struct drm_panel_funcs *funcs; diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 763bbad1e258..4d36b27214fd 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -79,20 +79,6 @@ #define __noretpoline __attribute__((indirect_branch("keep"))) #endif -/* - * it doesn't make sense on ARM (currently the only user of __naked) - * to trace naked functions because then mcount is called without - * stack and frame pointer being set up and there is no chance to - * restore the lr register to the value before mcount was called. - * - * The asm() bodies of naked functions often depend on standard calling - * conventions, therefore they must be noinline and noclone. - * - * GCC 4.[56] currently fail to enforce this, so we must do so ourselves. - * See GCC PR44290. - */ -#define __naked __attribute__((naked)) noinline __noclone notrace - #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) #define __optimize(level) __attribute__((__optimize__(level))) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3525c179698c..db192becfec4 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -226,6 +226,14 @@ struct ftrace_likely_data { #define notrace __attribute__((no_instrument_function)) #endif +/* + * it doesn't make sense on ARM (currently the only user of __naked) + * to trace naked functions because then mcount is called without + * stack and frame pointer being set up and there is no chance to + * restore the lr register to the value before mcount was called. 
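The genhd.h hunk above replaces the jiffies-based ticks[] array with nsecs[] and adds part_stat_read_msecs(), so I/O time is accumulated in nanoseconds and only converted to milliseconds when read. A small userspace sketch of that read-side conversion, with ordinary 64-bit division standing in for div_u64(); the struct here is a cut-down illustration, not the kernel definition.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

enum stat_group { STAT_READ, STAT_WRITE, STAT_DISCARD, NR_STAT_GROUPS };

struct disk_stats {
	uint64_t nsecs[NR_STAT_GROUPS];    /* accumulated in nanoseconds */
	unsigned long ios[NR_STAT_GROUPS];
};

static uint64_t stat_read_msecs(const struct disk_stats *s, enum stat_group g)
{
	return s->nsecs[g] / NSEC_PER_MSEC;  /* export as milliseconds */
}

int main(void)
{
	struct disk_stats s = { .nsecs = { [STAT_READ] = 2500000000ULL } };

	printf("read time: %" PRIu64 " ms\n", stat_read_msecs(&s, STAT_READ));
	return 0;
}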
+ */ +#define __naked __attribute__((naked)) notrace + #define __compiler_offsetof(a, b) __builtin_offsetof(a, b) /* diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 57864422a2c8..25c08c6c7f99 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -83,10 +83,10 @@ struct partition { } __attribute__((packed)); struct disk_stats { + u64 nsecs[NR_STAT_GROUPS]; unsigned long sectors[NR_STAT_GROUPS]; unsigned long ios[NR_STAT_GROUPS]; unsigned long merges[NR_STAT_GROUPS]; - unsigned long ticks[NR_STAT_GROUPS]; unsigned long io_ticks; unsigned long time_in_queue; }; @@ -354,6 +354,9 @@ static inline void free_part_stats(struct hd_struct *part) #endif /* CONFIG_SMP */ +#define part_stat_read_msecs(part, which) \ + div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC) + #define part_stat_read_accum(part, field) \ (part_stat_read(part, field[STAT_READ]) + \ part_stat_read(part, field[STAT_WRITE]) + \ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0205aee44ded..c926698040e0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); -void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); -void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h index 8a125701ef7b..50bed4f89c1a 100644 --- a/include/linux/mfd/da9063/pdata.h +++ b/include/linux/mfd/da9063/pdata.h @@ -21,7 +21,7 @@ /* * Regulator configuration */ -/* DA9063 regulator IDs */ +/* DA9063 and DA9063L regulator IDs */ enum { /* BUCKs */ DA9063_ID_BCORE1, @@ -37,18 +37,20 @@ enum { DA9063_ID_BMEM_BIO_MERGED, /* When two BUCKs are merged, they cannot be reused separately */ - /* LDOs */ + /* LDOs on both DA9063 and DA9063L */ + DA9063_ID_LDO3, + DA9063_ID_LDO7, + DA9063_ID_LDO8, + DA9063_ID_LDO9, + DA9063_ID_LDO11, + + /* DA9063-only LDOs */ DA9063_ID_LDO1, DA9063_ID_LDO2, - DA9063_ID_LDO3, DA9063_ID_LDO4, DA9063_ID_LDO5, DA9063_ID_LDO6, - DA9063_ID_LDO7, - DA9063_ID_LDO8, - DA9063_ID_LDO9, DA9063_ID_LDO10, - DA9063_ID_LDO11, }; /* Regulators platform data */ diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h index a528747f8aed..e8338e5dc10b 100644 --- a/include/linux/mfd/rohm-bd718x7.h +++ b/include/linux/mfd/rohm-bd718x7.h @@ -78,9 +78,9 @@ enum { BD71837_REG_TRANS_COND0 = 0x1F, BD71837_REG_TRANS_COND1 = 0x20, BD71837_REG_VRFAULTEN = 0x21, - BD71837_REG_MVRFLTMASK0 = 0x22, - BD71837_REG_MVRFLTMASK1 = 0x23, - BD71837_REG_MVRFLTMASK2 = 0x24, + BD718XX_REG_MVRFLTMASK0 = 0x22, + BD718XX_REG_MVRFLTMASK1 = 0x23, + BD718XX_REG_MVRFLTMASK2 = 0x24, BD71837_REG_RCVCFG = 0x25, BD71837_REG_RCVNUM = 0x26, BD71837_REG_PWRONCONFIG0 = 0x27, @@ -159,6 +159,33 @@ enum { #define BUCK8_MASK 0x3F #define BUCK8_DEFAULT 0x1E +/* BD718XX Voltage monitoring masks */ +#define BD718XX_BUCK1_VRMON80 0x1 +#define BD718XX_BUCK1_VRMON130 0x2 +#define BD718XX_BUCK2_VRMON80 0x4 +#define BD718XX_BUCK2_VRMON130 0x8 +#define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1 +#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2 +#define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4 +#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8 +#define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10 +#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20 +#define 
BD718XX_4TH_NODVS_BUCK_VRMON80 0x40 +#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80 +#define BD718XX_LDO1_VRMON80 0x1 +#define BD718XX_LDO2_VRMON80 0x2 +#define BD718XX_LDO3_VRMON80 0x4 +#define BD718XX_LDO4_VRMON80 0x8 +#define BD718XX_LDO5_VRMON80 0x10 +#define BD718XX_LDO6_VRMON80 0x20 + +/* BD71837 specific voltage monitoring masks */ +#define BD71837_BUCK3_VRMON80 0x10 +#define BD71837_BUCK3_VRMON130 0x20 +#define BD71837_BUCK4_VRMON80 0x40 +#define BD71837_BUCK4_VRMON130 0x80 +#define BD71837_LDO7_VRMON80 0x40 + /* BD71837_REG_IRQ bits */ #define IRQ_SWRST 0x40 #define IRQ_PWRON_S 0x20 diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 67662d01130a..3ef82d3a78db 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -49,8 +49,9 @@ struct netpoll_info { }; #ifdef CONFIG_NETPOLL -extern void netpoll_poll_disable(struct net_device *dev); -extern void netpoll_poll_enable(struct net_device *dev); +void netpoll_poll_dev(struct net_device *dev); +void netpoll_poll_disable(struct net_device *dev); +void netpoll_poll_enable(struct net_device *dev); #else static inline void netpoll_poll_disable(struct net_device *dev) { return; } static inline void netpoll_poll_enable(struct net_device *dev) { return; } diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 3468703d663a..a459a5e973a7 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -48,9 +48,9 @@ struct regulator; * DISABLE_IN_SUSPEND - turn off regulator in suspend states * ENABLE_IN_SUSPEND - keep regulator on in suspend states */ -#define DO_NOTHING_IN_SUSPEND (-1) -#define DISABLE_IN_SUSPEND 0 -#define ENABLE_IN_SUSPEND 1 +#define DO_NOTHING_IN_SUSPEND 0 +#define DISABLE_IN_SUSPEND 1 +#define ENABLE_IN_SUSPEND 2 /* Regulator active discharge flags */ enum regulator_active_discharge { diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index b2bd4b4127c4..69ee30456864 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -81,8 +81,10 @@ enum spi_mem_data_dir { * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes * @data.buswidth: number of IO lanes used to send/receive the data * @data.dir: direction of the transfer - * @data.buf.in: input buffer - * @data.buf.out: output buffer + * @data.nbytes: number of data bytes to send/receive. Can be zero if the + * operation does not involve transferring data + * @data.buf.in: input buffer (must be DMA-able) + * @data.buf.out: output buffer (must be DMA-able) */ struct spi_mem_op { struct { @@ -105,7 +107,6 @@ struct spi_mem_op { u8 buswidth; enum spi_mem_data_dir dir; unsigned int nbytes; - /* buf.{in,out} must be DMA-able. 
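One way to read the regulator/machine.h renumbering above: the suspend-state constants are shifted so that 0 now means DO_NOTHING_IN_SUSPEND, i.e. a zero-initialised constraint defaults to the least intrusive behaviour. A tiny illustration of that zero-is-the-safe-default convention with a hypothetical config struct (regulator_suspend_cfg is not a real kernel type).

#include <stdio.h>
#include <string.h>

enum suspend_action {
	SUSPEND_DO_NOTHING = 0,   /* the zeroed default must be the safe one */
	SUSPEND_DISABLE    = 1,
	SUSPEND_ENABLE     = 2,
};

struct regulator_suspend_cfg {
	enum suspend_action action;
	int uV;                   /* 0 means "leave the voltage alone" */
};

int main(void)
{
	struct regulator_suspend_cfg cfg;

	/* A caller that never filled this in gets "do nothing", not "disable". */
	memset(&cfg, 0, sizeof(cfg));
	printf("default action = %d\n", cfg.action);
	return 0;
}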
*/ union { void *in; const void *out; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index c43e9a01b892..7ddfc65586b0 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -30,6 +30,7 @@ #define MTL_MAX_RX_QUEUES 8 #define MTL_MAX_TX_QUEUES 8 +#define STMMAC_CH_MAX 8 #define STMMAC_RX_COE_NONE 0 #define STMMAC_RX_COE_TYPE1 1 diff --git a/include/linux/uio.h b/include/linux/uio.h index 409c845d4cd3..422b1c01ee0d 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) static __always_inline __must_check size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, false))) + if (unlikely(!check_copy_size(addr, bytes, true))) return 0; else return _copy_to_iter_mcsafe(addr, bytes, i); diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index a34539b7f750..7e6ac0114d55 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -133,15 +133,18 @@ struct vga_switcheroo_handler { * @can_switch: check if the device is in a position to switch now. * Mandatory. The client should return false if a user space process * has one of its device files open + * @gpu_bound: notify the client id to audio client when the GPU is bound. * * Client callbacks. A client can be either a GPU or an audio device on a GPU. * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be * set to NULL. For audio clients, the @reprobe member is bogus. + * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients. */ struct vga_switcheroo_client_ops { void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); void (*reprobe)(struct pci_dev *dev); bool (*can_switch)(struct pci_dev *dev); + void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id); }; #if defined(CONFIG_VGA_SWITCHEROO) diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 316694dafa5b..008f466d1da7 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -87,7 +87,7 @@ struct nfc_hci_pipe { * According to specification 102 622 chapter 4.4 Pipes, * the pipe identifier is 7 bits long. 
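The hci.h comment quoted just above says the pipe identifier is 7 bits wide; the change that follows bumps NFC_HCI_MAX_PIPES from 127 to 128 accordingly, since a 7-bit field has 128 possible values (0..127) and an array indexed by the pipe id needs one slot per value. A quick standalone check of that off-by-one reasoning, with made-up names:

#include <assert.h>
#include <stdio.h>

#define PIPE_ID_BITS  7
#define MAX_PIPES     (1 << PIPE_ID_BITS)   /* 128 */

int main(void)
{
	int pipes[MAX_PIPES];
	int highest_id = MAX_PIPES - 1;         /* 127 must still be a valid index */

	assert(highest_id < (int)(sizeof(pipes) / sizeof(pipes[0])));
	pipes[highest_id] = 1;
	printf("largest pipe id: %d, table size: %d\n", highest_id, MAX_PIPES);
	return 0;
}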
*/ -#define NFC_HCI_MAX_PIPES 127 +#define NFC_HCI_MAX_PIPES 128 struct nfc_hci_init_data { u8 gate_count; struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES]; diff --git a/include/net/tls.h b/include/net/tls.h index d5c683e8bb22..0a769cf2f5f3 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -171,15 +171,14 @@ struct cipher_context { char *rec_seq; }; +union tls_crypto_context { + struct tls_crypto_info info; + struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; +}; + struct tls_context { - union { - struct tls_crypto_info crypto_send; - struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128; - }; - union { - struct tls_crypto_info crypto_recv; - struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128; - }; + union tls_crypto_context crypto_send; + union tls_crypto_context crypto_recv; struct list_head list; struct net_device *netdev; @@ -367,8 +366,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx, * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE */ buf[0] = record_type; - buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version); - buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version); + buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version); + buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version); /* we can use IV for nonce explicit according to spec */ buf[3] = pkt_len >> 8; buf[4] = pkt_len & 0xFF; diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 6f1e1f3b3063..cd1773d0e08f 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -412,6 +412,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus); void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus); void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus); void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus); +int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset); void snd_hdac_bus_update_rirb(struct hdac_bus *bus); int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index af9ef16cc34d..fdaaafdc7a00 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -407,6 +407,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card); void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card); int snd_soc_dapm_new_pcm(struct snd_soc_card *card, + struct snd_soc_pcm_runtime *rtd, const struct snd_soc_pcm_stream *params, unsigned int num_params, struct snd_soc_dapm_widget *source, diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index 910cc4334b21..7b8c9e19bad1 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h @@ -65,7 +65,7 @@ /* keyctl structures */ struct keyctl_dh_params { - __s32 dh_private; + __s32 private; __s32 prime; __s32 base; }; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 07548de5c988..251be353f950 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -952,6 +952,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_HPAGE_1M 156 #define KVM_CAP_NESTED_STATE 157 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 +#define KVM_CAP_MSR_PLATFORM_INFO 159 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index f58cafa42f18..f39352cef382 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h @@ -10,6 +10,8 @@ #ifndef __HDA_TPLG_INTERFACE_H__ #define 
__HDA_TPLG_INTERFACE_H__ +#include <linux/types.h> + /* * Default types range from 0~12. type can range from 0 to 0xff * SST types start at higher to avoid any overlapping in future @@ -143,10 +145,10 @@ enum skl_module_param_type { }; struct skl_dfw_algo_data { - u32 set_params:2; - u32 rsvd:30; - u32 param_id; - u32 max; + __u32 set_params:2; + __u32 rsvd:30; + __u32 param_id; + __u32 max; char params[0]; } __packed; @@ -163,68 +165,68 @@ enum skl_tuple_type { /* v4 configuration data */ struct skl_dfw_v4_module_pin { - u16 module_id; - u16 instance_id; + __u16 module_id; + __u16 instance_id; } __packed; struct skl_dfw_v4_module_fmt { - u32 channels; - u32 freq; - u32 bit_depth; - u32 valid_bit_depth; - u32 ch_cfg; - u32 interleaving_style; - u32 sample_type; - u32 ch_map; + __u32 channels; + __u32 freq; + __u32 bit_depth; + __u32 valid_bit_depth; + __u32 ch_cfg; + __u32 interleaving_style; + __u32 sample_type; + __u32 ch_map; } __packed; struct skl_dfw_v4_module_caps { - u32 set_params:2; - u32 rsvd:30; - u32 param_id; - u32 caps_size; - u32 caps[HDA_SST_CFG_MAX]; + __u32 set_params:2; + __u32 rsvd:30; + __u32 param_id; + __u32 caps_size; + __u32 caps[HDA_SST_CFG_MAX]; } __packed; struct skl_dfw_v4_pipe { - u8 pipe_id; - u8 pipe_priority; - u16 conn_type:4; - u16 rsvd:4; - u16 memory_pages:8; + __u8 pipe_id; + __u8 pipe_priority; + __u16 conn_type:4; + __u16 rsvd:4; + __u16 memory_pages:8; } __packed; struct skl_dfw_v4_module { char uuid[SKL_UUID_STR_SZ]; - u16 module_id; - u16 instance_id; - u32 max_mcps; - u32 mem_pages; - u32 obs; - u32 ibs; - u32 vbus_id; - - u32 max_in_queue:8; - u32 max_out_queue:8; - u32 time_slot:8; - u32 core_id:4; - u32 rsvd1:4; - - u32 module_type:8; - u32 conn_type:4; - u32 dev_type:4; - u32 hw_conn_type:4; - u32 rsvd2:12; - - u32 params_fixup:8; - u32 converter:8; - u32 input_pin_type:1; - u32 output_pin_type:1; - u32 is_dynamic_in_pin:1; - u32 is_dynamic_out_pin:1; - u32 is_loadable:1; - u32 rsvd3:11; + __u16 module_id; + __u16 instance_id; + __u32 max_mcps; + __u32 mem_pages; + __u32 obs; + __u32 ibs; + __u32 vbus_id; + + __u32 max_in_queue:8; + __u32 max_out_queue:8; + __u32 time_slot:8; + __u32 core_id:4; + __u32 rsvd1:4; + + __u32 module_type:8; + __u32 conn_type:4; + __u32 dev_type:4; + __u32 hw_conn_type:4; + __u32 rsvd2:12; + + __u32 params_fixup:8; + __u32 converter:8; + __u32 input_pin_type:1; + __u32 output_pin_type:1; + __u32 is_dynamic_in_pin:1; + __u32 is_dynamic_out_pin:1; + __u32 is_loadable:1; + __u32 rsvd3:11; struct skl_dfw_v4_pipe pipe; struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE]; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 2590700237c1..138f0302692e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env) hdr = &btf->hdr; cur = btf->nohdr_data + hdr->type_off; - end = btf->nohdr_data + hdr->type_len; + end = cur + hdr->type_len; env->log_type_id = 1; while (cur < end) { diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 488ef9663c01..0a0f2ec75370 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -132,6 +132,7 @@ struct smap_psock { struct work_struct gc_work; struct proto *sk_proto; + void (*save_unhash)(struct sock *sk); void (*save_close)(struct sock *sk, long timeout); void (*save_data_ready)(struct sock *sk); void (*save_write_space)(struct sock *sk); @@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, 
size_t size); static int bpf_tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); +static void bpf_tcp_unhash(struct sock *sk); static void bpf_tcp_close(struct sock *sk, long timeout); static inline struct smap_psock *smap_psock_sk(const struct sock *sk) @@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], struct proto *base) { prot[SOCKMAP_BASE] = *base; + prot[SOCKMAP_BASE].unhash = bpf_tcp_unhash; prot[SOCKMAP_BASE].close = bpf_tcp_close; prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; @@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk) return -EBUSY; } + psock->save_unhash = sk->sk_prot->unhash; psock->save_close = sk->sk_prot->close; psock->sk_proto = sk->sk_prot; @@ -305,30 +309,12 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk, return e; } -static void bpf_tcp_close(struct sock *sk, long timeout) +static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock) { - void (*close_fun)(struct sock *sk, long timeout); struct smap_psock_map_entry *e; struct sk_msg_buff *md, *mtmp; - struct smap_psock *psock; struct sock *osk; - lock_sock(sk); - rcu_read_lock(); - psock = smap_psock_sk(sk); - if (unlikely(!psock)) { - rcu_read_unlock(); - release_sock(sk); - return sk->sk_prot->close(sk, timeout); - } - - /* The psock may be destroyed anytime after exiting the RCU critial - * section so by the time we use close_fun the psock may no longer - * be valid. However, bpf_tcp_close is called with the sock lock - * held so the close hook and sk are still valid. - */ - close_fun = psock->save_close; - if (psock->cork) { free_start_sg(psock->sock, psock->cork, true); kfree(psock->cork); @@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout) kfree(e); e = psock_map_pop(sk, psock); } +} + +static void bpf_tcp_unhash(struct sock *sk) +{ + void (*unhash_fun)(struct sock *sk); + struct smap_psock *psock; + + rcu_read_lock(); + psock = smap_psock_sk(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); + if (sk->sk_prot->unhash) + sk->sk_prot->unhash(sk); + return; + } + unhash_fun = psock->save_unhash; + bpf_tcp_remove(sk, psock); + rcu_read_unlock(); + unhash_fun(sk); +} + +static void bpf_tcp_close(struct sock *sk, long timeout) +{ + void (*close_fun)(struct sock *sk, long timeout); + struct smap_psock *psock; + + lock_sock(sk); + rcu_read_lock(); + psock = smap_psock_sk(sk); + if (unlikely(!psock)) { + rcu_read_unlock(); + release_sock(sk); + return sk->sk_prot->close(sk, timeout); + } + close_fun = psock->save_close; + bpf_tcp_remove(sk, psock); rcu_read_unlock(); release_sock(sk); close_fun(sk, timeout); @@ -2097,8 +2119,12 @@ static int sock_map_update_elem(struct bpf_map *map, return -EINVAL; } + /* ULPs are currently supported only for TCP sockets in ESTABLISHED + * state. + */ if (skops.sk->sk_type != SOCK_STREAM || - skops.sk->sk_protocol != IPPROTO_TCP) { + skops.sk->sk_protocol != IPPROTO_TCP || + skops.sk->sk_state != TCP_ESTABLISHED) { fput(socket->file); return -EOPNOTSUPP; } @@ -2453,6 +2479,16 @@ static int sock_hash_update_elem(struct bpf_map *map, return -EINVAL; } + /* ULPs are currently supported only for TCP sockets in ESTABLISHED + * state. 
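The sockmap hunks around this point gate map updates on the socket being a TCP stream socket in the ESTABLISHED state. The standalone sketch below mirrors that kind of gate; the numeric constants follow the usual uapi values, but sock_view and sock_ok_for_ulp() are invented for the example and are not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

#define SOCK_STREAM      1
#define IPPROTO_TCP      6
#define TCP_ESTABLISHED  1

struct sock_view {
	int type;
	int protocol;
	int state;
};

static bool sock_ok_for_ulp(const struct sock_view *sk)
{
	return sk->type == SOCK_STREAM &&
	       sk->protocol == IPPROTO_TCP &&
	       sk->state == TCP_ESTABLISHED;
}

int main(void)
{
	struct sock_view listener    = { SOCK_STREAM, IPPROTO_TCP, 10 /* LISTEN */ };
	struct sock_view established = { SOCK_STREAM, IPPROTO_TCP, TCP_ESTABLISHED };

	printf("listener ok: %d, established ok: %d\n",
	       sock_ok_for_ulp(&listener), sock_ok_for_ulp(&established));
	return 0;
}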
+ */ + if (skops.sk->sk_type != SOCK_STREAM || + skops.sk->sk_protocol != IPPROTO_TCP || + skops.sk->sk_state != TCP_ESTABLISHED) { + fput(socket->file); + return -EOPNOTSUPP; + } + lock_sock(skops.sk); preempt_disable(); rcu_read_lock(); @@ -2543,10 +2579,22 @@ const struct bpf_map_ops sock_hash_ops = { .map_check_btf = map_check_no_btf, }; +static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops) +{ + return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB || + ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB; +} BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, struct bpf_map *, map, void *, key, u64, flags) { WARN_ON_ONCE(!rcu_read_lock_held()); + + /* ULPs are currently supported only for TCP sockets in ESTABLISHED + * state. This checks that the sock ops triggering the update is + * one indicating we are (or will be soon) in an ESTABLISHED state. + */ + if (!bpf_is_valid_sock_op(bpf_sock)) + return -EOPNOTSUPP; return sock_map_ctx_update_elem(bpf_sock, map, key, flags); } @@ -2565,6 +2613,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock, struct bpf_map *, map, void *, key, u64, flags) { WARN_ON_ONCE(!rcu_read_lock_held()); + + if (!bpf_is_valid_sock_op(bpf_sock)) + return -EOPNOTSUPP; return sock_hash_ctx_update_elem(bpf_sock, map, key, flags); } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 92246117d2b0..bb07e74b34a2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3163,7 +3163,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, * an arbitrary scalar. Disallow all math except * pointer subtraction */ - if (opcode == BPF_SUB){ + if (opcode == BPF_SUB && env->allow_ptr_leaks) { mark_reg_unknown(env, regs, insn->dst_reg); return 0; } diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 9bd54304446f..1b1d63b3634b 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU bool select NEED_DMA_MAP_STATE +config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL + bool + config DMA_DIRECT_OPS bool depends on HAS_DMA diff --git a/kernel/events/core.c b/kernel/events/core.c index c80549bf82c6..dcb093e7b377 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3935,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value, goto out; } + /* If this is a pinned event it must be running on this CPU */ + if (event->attr.pinned && event->oncpu != smp_processor_id()) { + ret = -EBUSY; + goto out; + } + /* * If the event is currently on this CPU, its either a per-task event, * or local to this CPU. Furthermore it means its ACTIVE (otherwise diff --git a/kernel/pid.c b/kernel/pid.c index de1cfc4f75a2..cdf63e53a014 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns) idr_preload_end(); if (nr < 0) { - retval = nr; + retval = (nr == -ENOSPC) ? 
-EAGAIN : nr; goto out_free; } diff --git a/kernel/sys.c b/kernel/sys.c index cf5c67533ff1..123bd73046ec 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -71,9 +71,6 @@ #include <asm/io.h> #include <asm/unistd.h> -/* Hardening for Spectre-v1 */ -#include <linux/nospec.h> - #include "uid16.h" #ifndef SET_UNALIGN_CTL diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1d92d4a982fd..65bd4616220d 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) tmp_iter_page = first_page; do { + cond_resched(); + to_remove_page = tmp_iter_page; rb_inc_page(cpu_buffer, &tmp_iter_page); diff --git a/mm/Kconfig b/mm/Kconfig index a550635ea5c3..de64ea658716 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT depends on NO_BOOTMEM depends on SPARSEMEM depends on !NEED_PER_CPU_KM + depends on 64BIT help Ordinarily all struct pages are initialised during early boot in a single thread. On very large machines this can take a considerable diff --git a/mm/shmem.c b/mm/shmem.c index 0376c124b043..446942677cd4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode mpol_shared_policy_init(&info->policy, NULL); break; } + + lockdep_annotate_inode_mutex_key(inode); } else shmem_free_inode(sb); return inode; diff --git a/mm/vmscan.c b/mm/vmscan.c index 7e7d25504651..c7ce2c161225 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, delta = freeable >> priority; delta *= 4; do_div(delta, shrinker->seeks); + + /* + * Make sure we apply some minimal pressure on default priority + * even on small cgroups. Stale objects are not only consuming memory + * by themselves, but can also hold a reference to a dying cgroup, + * preventing it from being reclaimed. A dying cgroup with all + * corresponding structures like per-cpu stats and kmem caches + * can be really big, so it may lead to a significant waste of memory. + */ + delta = max_t(unsigned long long, delta, min(freeable, batch_size)); + total_scan += delta; if (total_scan < 0) { pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 71c20c1d4002..9f481cfdf77d 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh) * the packet to be exactly of that size to make the link * throughput estimation effective. */ - skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len); + skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Sending unicast (probe) ELP packet on interface %s to %pM\n", @@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) struct batadv_priv *bat_priv; struct sk_buff *skb; u32 elp_interval; + bool ret; bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work); hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v); @@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) * may sleep and that is not allowed in an rcu protected * context. Therefore schedule a task for that. 
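The batman-adv changes just below check the return value of queue_work() and give back the object reference taken for the work item when the item was already pending (queue_work() returns false in that case, so no extra callback will ever consume that reference). A minimal userspace model of that take-then-maybe-give-back pattern, with a plain counter standing in for kref and fake_queue_work() standing in for queue_work():

#include <stdbool.h>
#include <stdio.h>

struct object {
	int refcount;
	bool work_pending;
};

static void obj_get(struct object *o) { o->refcount++; }
static void obj_put(struct object *o) { o->refcount--; }

/* Stand-in for queue_work(): returns false if the item was already queued. */
static bool fake_queue_work(struct object *o)
{
	if (o->work_pending)
		return false;
	o->work_pending = true;
	return true;
}

static void schedule_metric_work(struct object *o)
{
	obj_get(o);                    /* reference owned by the work item */
	if (!fake_queue_work(o))
		obj_put(o);            /* nobody will consume it - drop it */
}

int main(void)
{
	struct object o = { .refcount = 1, .work_pending = false };

	schedule_metric_work(&o);      /* queued: ref now held by the work  */
	schedule_metric_work(&o);      /* already pending: ref dropped again */
	printf("refcount now %d (1 caller + 1 pending work)\n", o.refcount);
	return 0;
}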
*/ - queue_work(batadv_event_workqueue, - &hardif_neigh->bat_v.metric_work); + ret = queue_work(batadv_event_workqueue, + &hardif_neigh->bat_v.metric_work); + + if (!ret) + batadv_hardif_neigh_put(hardif_neigh); } rcu_read_unlock(); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ff9659af6b91..5f1aeeded0e3 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, { struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; + bool ret; ethhdr = eth_hdr(skb); @@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, if (unlikely(!backbone_gw)) return true; - queue_work(batadv_event_workqueue, &backbone_gw->report_work); - /* backbone_gw is unreferenced in the report work function function */ + ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work); + + /* backbone_gw is unreferenced in the report work function function + * if queue_work() call was successful + */ + if (!ret) + batadv_backbone_gw_put(backbone_gw); return true; } diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 8b198ee798c9..140c61a3f1ec 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/kref.h> #include <linux/list.h> +#include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/rculist.h> @@ -348,6 +349,9 @@ out: * @bat_priv: the bat priv with all the soft interface information * @orig_node: originator announcing gateway capabilities * @gateway: announced bandwidth information + * + * Has to be called with the appropriate locks being acquired + * (gw.list_lock). */ static void batadv_gw_node_add(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, @@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, { struct batadv_gw_node *gw_node; + lockdep_assert_held(&bat_priv->gw.list_lock); + if (gateway->bandwidth_down == 0) return; @@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); - spin_lock_bh(&bat_priv->gw.list_lock); kref_get(&gw_node->refcount); hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list); - spin_unlock_bh(&bat_priv->gw.list_lock); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n", @@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, { struct batadv_gw_node *gw_node, *curr_gw = NULL; + spin_lock_bh(&bat_priv->gw.list_lock); gw_node = batadv_gw_node_get(bat_priv, orig_node); if (!gw_node) { batadv_gw_node_add(bat_priv, orig_node, gateway); + spin_unlock_bh(&bat_priv->gw.list_lock); goto out; } + spin_unlock_bh(&bat_priv->gw.list_lock); if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) && gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 8da3c9336111..3ccc75ee719c 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -25,7 +25,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2018.2" +#define BATADV_SOURCE_VERSION "2018.3" #endif /* B.A.T.M.A.N. 
parameters */ diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index c3578444f3cb..34caf129a9bf 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, spinlock_t *lock; /* Used to lock list selected by "int in_coding" */ struct list_head *list; + /* Select ingoing or outgoing coding node */ + if (in_coding) { + lock = &orig_neigh_node->in_coding_list_lock; + list = &orig_neigh_node->in_coding_list; + } else { + lock = &orig_neigh_node->out_coding_list_lock; + list = &orig_neigh_node->out_coding_list; + } + + spin_lock_bh(lock); + /* Check if nc_node is already added */ nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding); /* Node found */ if (nc_node) - return nc_node; + goto unlock; nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC); if (!nc_node) - return NULL; + goto unlock; /* Initialize nc_node */ INIT_LIST_HEAD(&nc_node->list); @@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, kref_get(&orig_neigh_node->refcount); nc_node->orig_node = orig_neigh_node; - /* Select ingoing or outgoing coding node */ - if (in_coding) { - lock = &orig_neigh_node->in_coding_list_lock; - list = &orig_neigh_node->in_coding_list; - } else { - lock = &orig_neigh_node->out_coding_list_lock; - list = &orig_neigh_node->out_coding_list; - } - batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n", nc_node->addr, nc_node->orig_node->orig); /* Add nc_node to orig_node */ - spin_lock_bh(lock); kref_get(&nc_node->refcount); list_add_tail_rcu(&nc_node->list, list); + +unlock: spin_unlock_bh(lock); return nc_node; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 1485263a348b..626ddca332db 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) struct batadv_softif_vlan *vlan; int err; + spin_lock_bh(&bat_priv->softif_vlan_list_lock); + vlan = batadv_softif_vlan_get(bat_priv, vid); if (vlan) { batadv_softif_vlan_put(vlan); + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); return -EEXIST; } vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); - if (!vlan) + if (!vlan) { + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); return -ENOMEM; + } vlan->bat_priv = bat_priv; vlan->vid = vid; @@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) atomic_set(&vlan->ap_isolation, 0); + kref_get(&vlan->refcount); + hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + + /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the + * sleeping behavior of the sysfs functions and the fs_reclaim lock + */ err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); if (err) { - kfree(vlan); + /* ref for the function */ + batadv_softif_vlan_put(vlan); + + /* ref for the list */ + batadv_softif_vlan_put(vlan); return err; } - spin_lock_bh(&bat_priv->softif_vlan_list_lock); - kref_get(&vlan->refcount); - hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); - /* add a new TT local entry. 
This one will be marked with the NOPURGE * flag */ diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index f2eef43bd2ec..09427fc6494a 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \ \ return __batadv_store_uint_attr(buff, count, _min, _max, \ _post_func, attr, \ - &bat_priv->_var, net_dev); \ + &bat_priv->_var, net_dev, \ + NULL); \ } #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \ @@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \ \ length = __batadv_store_uint_attr(buff, count, _min, _max, \ _post_func, attr, \ - &hard_iface->_var, net_dev); \ + &hard_iface->_var, \ + hard_iface->soft_iface, \ + net_dev); \ \ batadv_hardif_put(hard_iface); \ return length; \ @@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count, static int batadv_store_uint_attr(const char *buff, size_t count, struct net_device *net_dev, + struct net_device *slave_dev, const char *attr_name, unsigned int min, unsigned int max, atomic_t *attr) { + char ifname[IFNAMSIZ + 3] = ""; unsigned long uint_val; int ret; @@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count, if (atomic_read(attr) == uint_val) return count; - batadv_info(net_dev, "%s: Changing from: %i to: %lu\n", - attr_name, atomic_read(attr), uint_val); + if (slave_dev) + snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name); + + batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n", + attr_name, ifname, atomic_read(attr), uint_val); atomic_set(attr, uint_val); return count; @@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count, void (*post_func)(struct net_device *), const struct attribute *attr, atomic_t *attr_store, - struct net_device *net_dev) + struct net_device *net_dev, + struct net_device *slave_dev) { int ret; - ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max, - attr_store); + ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev, + attr->name, min, max, attr_store); if (post_func && ret) post_func(net_dev); @@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj, return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE, batadv_post_gw_reselect, attr, &bat_priv->gw.sel_class, - bat_priv->soft_iface); + bat_priv->soft_iface, NULL); } static ssize_t batadv_show_gw_bwidth(struct kobject *kobj, @@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, if (old_tp_override == tp_override) goto out; - batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n", - "throughput_override", + batadv_info(hard_iface->soft_iface, + "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n", + "throughput_override", net_dev->name, old_tp_override / 10, old_tp_override % 10, tp_override / 10, tp_override % 10); diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 12a2b7d21376..d21624c44665 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, { struct batadv_tt_orig_list_entry *orig_entry; + spin_lock_bh(&tt_global->list_lock); + orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node); if (orig_entry) { /* refresh the ttvn: the current value could be a bogus one that @@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, 
orig_entry->flags = flags; kref_init(&orig_entry->refcount); - spin_lock_bh(&tt_global->list_lock); kref_get(&orig_entry->refcount); hlist_add_head_rcu(&orig_entry->list, &tt_global->orig_list); - spin_unlock_bh(&tt_global->list_lock); atomic_inc(&tt_global->orig_list_count); sync_flags: @@ -1647,6 +1647,8 @@ sync_flags: out: if (orig_entry) batadv_tt_orig_list_entry_put(orig_entry); + + spin_unlock_bh(&tt_global->list_lock); } /** diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index a637458205d1..40e69c9346d2 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv, { struct batadv_tvlv_handler *tvlv_handler; + spin_lock_bh(&bat_priv->tvlv.handler_list_lock); + tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version); if (tvlv_handler) { + spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); batadv_tvlv_handler_put(tvlv_handler); return; } tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC); - if (!tvlv_handler) + if (!tvlv_handler) { + spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); return; + } tvlv_handler->ogm_handler = optr; tvlv_handler->unicast_handler = uptr; @@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv, kref_init(&tvlv_handler->refcount); INIT_HLIST_NODE(&tvlv_handler->list); - spin_lock_bh(&bat_priv->tvlv.handler_list_lock); kref_get(&tvlv_handler->refcount); hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list); spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index ae91e2d40056..3a7b0773536b 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -83,6 +83,7 @@ enum { struct smp_dev { /* Secure Connections OOB data */ + bool local_oob; u8 local_pk[64]; u8 local_rand[16]; bool debug_key; @@ -599,6 +600,8 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) memcpy(rand, smp->local_rand, 16); + smp->local_oob = true; + return 0; } @@ -1785,7 +1788,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) * successfully received our local OOB data - therefore set the * flag to indicate that local OOB is in use. */ - if (req->oob_flag == SMP_OOB_PRESENT) + if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); /* SMP over BR/EDR requires special treatment */ @@ -1967,7 +1970,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) * successfully received our local OOB data - therefore set the * flag to indicate that local OOB is in use. */ - if (rsp->oob_flag == SMP_OOB_PRESENT) + if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); smp->prsp[0] = SMP_CMD_PAIRING_RSP; @@ -2697,7 +2700,13 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) * key was set/generated. 
*/ if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { - struct smp_dev *smp_dev = chan->data; + struct l2cap_chan *hchan = hdev->smp_data; + struct smp_dev *smp_dev; + + if (!hchan || !hchan->data) + return SMP_UNSPECIFIED; + + smp_dev = hchan->data; tfm_ecdh = smp_dev->tfm_ecdh; } else { @@ -3230,6 +3239,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) return ERR_CAST(tfm_ecdh); } + smp->local_oob = false; smp->tfm_aes = tfm_aes; smp->tfm_cmac = tfm_cmac; smp->tfm_ecdh = tfm_ecdh; diff --git a/net/core/devlink.c b/net/core/devlink.c index 65fc366a78a4..8c0ed225e280 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2592,7 +2592,7 @@ send_done: if (!nlh) { err = devlink_dpipe_send_and_alloc_skb(&skb, info); if (err) - goto err_skb_send_alloc; + return err; goto send_done; } return genlmsg_reply(skb, info); @@ -2600,7 +2600,6 @@ send_done: nla_put_failure: err = -EMSGSIZE; err_resource_put: -err_skb_send_alloc: nlmsg_free(skb); return err; } diff --git a/net/core/ethtool.c b/net/core/ethtool.c index c9993c6c2fd4..234a0ec2e932 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -2624,6 +2624,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GPHYSTATS: case ETHTOOL_GTSO: case ETHTOOL_GPERMADDR: + case ETHTOOL_GUFO: case ETHTOOL_GGSO: case ETHTOOL_GGRO: case ETHTOOL_GFLAGS: diff --git a/net/core/filter.c b/net/core/filter.c index aecdeba052d3..5e00f2b85a56 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2344,7 +2344,8 @@ BPF_CALL_4(bpf_msg_pull_data, if (unlikely(bytes_sg_total > copy)) return -EINVAL; - page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); + page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, + get_order(copy)); if (unlikely(!page)) return -ENOMEM; p = page_address(page); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index aa19d86937af..91592fceeaad 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1180,6 +1180,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, lladdr = neigh->ha; } + /* Update confirmed timestamp for neighbour entry after we + * received ARP packet even if it doesn't change IP to MAC binding. + */ + if (new & NUD_CONNECTED) + neigh->confirmed = jiffies; + /* If entry was valid and address is not changed, do not change entry state, if new one is STALE. */ @@ -1201,15 +1207,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, } } - /* Update timestamps only once we know we will make a change to the + /* Update timestamp only once we know we will make a change to the * neighbour entry. Otherwise we risk to move the locktime window with * noop updates and ignore relevant ARP updates. 
*/ - if (new != old || lladdr != neigh->ha) { - if (new & NUD_CONNECTED) - neigh->confirmed = jiffies; + if (new != old || lladdr != neigh->ha) neigh->updated = jiffies; - } if (new != old) { neigh_del_timer(neigh); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 57557a6a950c..3219a2932463 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -187,16 +187,16 @@ static void poll_napi(struct net_device *dev) } } -static void netpoll_poll_dev(struct net_device *dev) +void netpoll_poll_dev(struct net_device *dev) { - const struct net_device_ops *ops; struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); + const struct net_device_ops *ops; /* Don't do any rx activity if the dev_lock mutex is held * the dev_open/close paths use this to block netpoll activity * while changing device state */ - if (down_trylock(&ni->dev_lock)) + if (!ni || down_trylock(&ni->dev_lock)) return; if (!netif_running(dev)) { @@ -205,13 +205,8 @@ static void netpoll_poll_dev(struct net_device *dev) } ops = dev->netdev_ops; - if (!ops->ndo_poll_controller) { - up(&ni->dev_lock); - return; - } - - /* Process pending work on NIC */ - ops->ndo_poll_controller(dev); + if (ops->ndo_poll_controller) + ops->ndo_poll_controller(dev); poll_napi(dev); @@ -219,6 +214,7 @@ static void netpoll_poll_dev(struct net_device *dev) zap_completion_queue(); } +EXPORT_SYMBOL(netpoll_poll_dev); void netpoll_poll_disable(struct net_device *dev) { @@ -613,8 +609,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) strlcpy(np->dev_name, ndev->name, IFNAMSIZ); INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); - if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || - !ndev->netdev_ops->ndo_poll_controller) { + if (ndev->priv_flags & IFF_DISABLE_NETPOLL) { np_err(np, "%s doesn't support polling, aborting\n", np->dev_name); err = -ENOTSUPP; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 60c928894a78..63ce2283a456 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2810,7 +2810,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) } if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { - __dev_notify_flags(dev, old_flags, 0U); + __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags)); } else { dev->rtnl_link_state = RTNL_LINK_INITIALIZED; __dev_notify_flags(dev, old_flags, ~0U); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 20fda8fb8ffd..1fbe2f815474 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, if (encap) skb_reset_inner_headers(skb); skb->network_header = (u8 *)iph - skb->head; + skb_reset_mac_len(skb); } while ((skb = skb->next)); out: diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index c4f5602308ed..284a22154b4e 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, u8 protocol) { struct ip_tunnel *tunnel = netdev_priv(dev); + unsigned int inner_nhdr_len = 0; const struct iphdr *inner_iph; struct flowi4 fl4; u8 tos, ttl; @@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, __be32 dst; bool connected; + /* ensure we can access the inner net header, for several users below */ + if (skb->protocol == htons(ETH_P_IP)) + inner_nhdr_len = sizeof(struct iphdr); + else if (skb->protocol == htons(ETH_P_IPV6)) + inner_nhdr_len = sizeof(struct ipv6hdr); + if (unlikely(!pskb_may_pull(skb, 
inner_nhdr_len))) + goto tx_error; + inner_iph = (const struct iphdr *)skb_inner_network_header(skb); connected = (tunnel->parms.iph.daddr != 0); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f4e35b2ff8b8..7d69dd6fa7e8 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2124,6 +2124,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, inet_compute_pseudo); } +/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and + * return code conversion for ip layer consumption + */ +static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, + struct udphdr *uh) +{ + int ret; + + if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + inet_compute_pseudo); + + ret = udp_queue_rcv_skb(sk, skb); + + /* a return value > 0 means to resubmit the input, but + * it wants the return to be -protocol, or 0 + */ + if (ret > 0) + return -ret; + return 0; +} + /* * All we need to do is get the socket, and then do a checksum. */ @@ -2170,14 +2192,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (unlikely(sk->sk_rx_dst != dst)) udp_sk_rx_dst_set(sk, dst); - ret = udp_queue_rcv_skb(sk, skb); + ret = udp_unicast_rcv_skb(sk, skb, uh); sock_put(sk); - /* a return value > 0 means to resubmit the input, but - * it wants the return to be -protocol, or 0 - */ - if (ret > 0) - return -ret; - return 0; + return ret; } if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) @@ -2185,22 +2202,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, saddr, daddr, udptable, proto); sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); - if (sk) { - int ret; - - if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) - skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, - inet_compute_pseudo); - - ret = udp_queue_rcv_skb(sk, skb); - - /* a return value > 0 means to resubmit the input, but - * it wants the return to be -protocol, or 0 - */ - if (ret > 0) - return -ret; - return 0; - } + if (sk) + return udp_unicast_rcv_skb(sk, skb, uh); if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d51a8c0b3372..c63ccce6425f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4201,7 +4201,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) p++; continue; } - state->offset++; return ifa; } @@ -4225,13 +4224,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, return ifa; } + state->offset = 0; while (++state->bucket < IN6_ADDR_HSIZE) { - state->offset = 0; hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket], addr_lst) { if (!net_eq(dev_net(ifa->idev->dev), net)) continue; - state->offset++; return ifa; } } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 37ff4805b20c..c7e495f12011 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, payload_len = skb->len - nhoff - sizeof(*ipv6h); ipv6h->payload_len = htons(payload_len); skb->network_header = (u8 *)ipv6h - skb->head; + skb_reset_mac_len(skb); if (udpfrag) { int err = ip6_find_1stfragopt(skb, &prevhdr); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 16f200f06500..f9f8f554d141 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return 
-ENOBUFS; } + if (skb->sk) + skb_set_owner_w(skb2, skb->sk); consume_skb(skb); skb = skb2; - /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically, - * it is safe to call in our context (socket lock not held) - */ - skb_set_owner_w(skb, (struct sock *)sk); } if (opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 419960b0ba16..a0b6932c3afd 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1234,7 +1234,7 @@ static inline int ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); - const struct iphdr *iph = ip_hdr(skb); + const struct iphdr *iph; int encap_limit = -1; struct flowi6 fl6; __u8 dsfield; @@ -1242,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) u8 tproto; int err; + /* ensure we can access the full inner ip header */ + if (!pskb_may_pull(skb, sizeof(struct iphdr))) + return -1; + + iph = ip_hdr(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); tproto = READ_ONCE(t->parms.proto); @@ -1306,7 +1311,7 @@ static inline int ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); - struct ipv6hdr *ipv6h = ipv6_hdr(skb); + struct ipv6hdr *ipv6h; int encap_limit = -1; __u16 offset; struct flowi6 fl6; @@ -1315,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) u8 tproto; int err; + if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) + return -1; + + ipv6h = ipv6_hdr(skb); tproto = READ_ONCE(t->parms.proto); if ((tproto != IPPROTO_IPV6 && tproto != 0) || ip6_tnl_addr_conflict(t, ipv6h)) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 18e00ce1719a..826b14de7dbb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -364,11 +364,14 @@ EXPORT_SYMBOL(ip6_dst_alloc); static void ip6_dst_destroy(struct dst_entry *dst) { + struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); struct rt6_info *rt = (struct rt6_info *)dst; struct fib6_info *from; struct inet6_dev *idev; - dst_destroy_metrics_generic(dst); + if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) + kfree(p); + rt6_uncached_list_del(rt); idev = rt->rt6i_idev; @@ -946,8 +949,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort) static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) { - rt->dst.flags |= fib6_info_dst_flags(ort); - if (ort->fib6_flags & RTF_REJECT) { ip6_rt_init_dst_reject(rt, ort); return; @@ -978,6 +979,10 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) rt->rt6i_flags &= ~RTF_EXPIRES; rcu_assign_pointer(rt->from, from); dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); + if (from->fib6_metrics != &dst_default_metrics) { + rt->dst._metrics |= DST_METRICS_REFCOUNTED; + refcount_inc(&from->fib6_metrics->refcnt); + } } /* Caller must already hold reference to @ort */ @@ -4670,20 +4675,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, int iif, int type, u32 portid, u32 seq, unsigned int flags) { - struct rtmsg *rtm; + struct rt6_info *rt6 = (struct rt6_info *)dst; + struct rt6key *rt6_dst, *rt6_src; + u32 *pmetrics, table, rt6_flags; struct nlmsghdr *nlh; + struct rtmsg *rtm; long expires = 0; - u32 *pmetrics; - u32 table; nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); if (!nlh) return -EMSGSIZE; + if (rt6) { + rt6_dst = &rt6->rt6i_dst; + rt6_src = &rt6->rt6i_src; + rt6_flags = rt6->rt6i_flags; + } else { + rt6_dst = &rt->fib6_dst; + 
rt6_src = &rt->fib6_src; + rt6_flags = rt->fib6_flags; + } + rtm = nlmsg_data(nlh); rtm->rtm_family = AF_INET6; - rtm->rtm_dst_len = rt->fib6_dst.plen; - rtm->rtm_src_len = rt->fib6_src.plen; + rtm->rtm_dst_len = rt6_dst->plen; + rtm->rtm_src_len = rt6_src->plen; rtm->rtm_tos = 0; if (rt->fib6_table) table = rt->fib6_table->tb6_id; @@ -4698,7 +4714,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = rt->fib6_protocol; - if (rt->fib6_flags & RTF_CACHE) + if (rt6_flags & RTF_CACHE) rtm->rtm_flags |= RTM_F_CLONED; if (dest) { @@ -4706,7 +4722,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; rtm->rtm_dst_len = 128; } else if (rtm->rtm_dst_len) - if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr)) + if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr)) goto nla_put_failure; #ifdef CONFIG_IPV6_SUBTREES if (src) { @@ -4714,12 +4730,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; rtm->rtm_src_len = 128; } else if (rtm->rtm_src_len && - nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr)) + nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr)) goto nla_put_failure; #endif if (iif) { #ifdef CONFIG_IPV6_MROUTE - if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) { + if (ipv6_addr_is_multicast(&rt6_dst->addr)) { int err = ip6mr_get_route(net, skb, rtm, portid); if (err == 0) @@ -4754,7 +4770,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, /* For multipath routes, walk the siblings list and add * each as a nexthop within RTA_MULTIPATH. */ - if (rt->fib6_nsiblings) { + if (rt6) { + if (rt6_flags & RTF_GATEWAY && + nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway)) + goto nla_put_failure; + + if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex)) + goto nla_put_failure; + } else if (rt->fib6_nsiblings) { struct fib6_info *sibling, *next_sibling; struct nlattr *mp; @@ -4777,7 +4800,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; } - if (rt->fib6_flags & RTF_EXPIRES) { + if (rt6_flags & RTF_EXPIRES) { expires = dst ? dst->expires : rt->expires; expires -= jiffies; } @@ -4785,7 +4808,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? 
dst->error : 0) < 0) goto nla_put_failure; - if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags))) + if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags))) goto nla_put_failure; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 83f4c77c79d8..28c4aa5078fc 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -752,6 +752,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) } } +/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and + * return code conversion for ip layer consumption + */ +static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, + struct udphdr *uh) +{ + int ret; + + if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + ip6_compute_pseudo); + + ret = udpv6_queue_rcv_skb(sk, skb); + + /* a return value > 0 means to resubmit the input, but + * it wants the return to be -protocol, or 0 + */ + if (ret > 0) + return -ret; + return 0; +} + int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { @@ -803,13 +825,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (unlikely(sk->sk_rx_dst != dst)) udp6_sk_rx_dst_set(sk, dst); - ret = udpv6_queue_rcv_skb(sk, skb); - sock_put(sk); + if (!uh->check && !udp_sk(sk)->no_check6_rx) { + sock_put(sk); + goto report_csum_error; + } - /* a return value > 0 means to resubmit the input */ - if (ret > 0) - return ret; - return 0; + ret = udp6_unicast_rcv_skb(sk, skb, uh); + sock_put(sk); + return ret; } /* @@ -822,30 +845,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, /* Unicast */ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk) { - int ret; - - if (!uh->check && !udp_sk(sk)->no_check6_rx) { - udp6_csum_zero_error(skb); - goto csum_error; - } - - if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk)) - skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, - ip6_compute_pseudo); - - ret = udpv6_queue_rcv_skb(sk, skb); - - /* a return value > 0 means to resubmit the input */ - if (ret > 0) - return ret; - - return 0; + if (!uh->check && !udp_sk(sk)->no_check6_rx) + goto report_csum_error; + return udp6_unicast_rcv_skb(sk, skb, uh); } - if (!uh->check) { - udp6_csum_zero_error(skb); - goto csum_error; - } + if (!uh->check) + goto report_csum_error; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; @@ -866,6 +872,9 @@ short_packet: ulen, skb->len, daddr, ntohs(uh->dest)); goto discard; + +report_csum_error: + udp6_csum_zero_error(skb); csum_error: __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); discard: diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 7a4de6d618b1..8fbe6cdbe255 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1533,10 +1533,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, unsigned int flags; if (event == NETDEV_REGISTER) { - /* For now just support Ethernet, IPGRE, SIT and IPIP devices */ + + /* For now just support Ethernet, IPGRE, IP6GRE, SIT and + * IPIP devices + */ if (dev->type == ARPHRD_ETHER || dev->type == ARPHRD_LOOPBACK || dev->type == ARPHRD_IPGRE || + dev->type == ARPHRD_IP6GRE || dev->type == ARPHRD_SIT || dev->type == ARPHRD_TUNNEL) { mdev = mpls_add_dev(dev); diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index c070dfc0190a..c92894c3e40a 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -781,7 +781,8 @@ static int 
netlbl_unlabel_addrinfo_get(struct genl_info *info, { u32 addr_len; - if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) { + if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] && + info->attrs[NLBL_UNLABEL_A_IPV4MASK]) { addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); if (addr_len != sizeof(struct in_addr) && addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK])) diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index ac8030c4bcf8..19cb2e473ea6 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, } create_info = (struct hci_create_pipe_resp *)skb->data; + if (create_info->pipe >= NFC_HCI_MAX_PIPES) { + status = NFC_HCI_ANY_E_NOK; + goto exit; + } + /* Save the new created pipe and bind with local gate, * the description for skb->data[3] is destination gate id * but since we received this cmd from host controller, we @@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, } delete_info = (struct hci_delete_pipe_noti *)skb->data; + if (delete_info->pipe >= NFC_HCI_MAX_PIPES) { + status = NFC_HCI_ANY_E_NOK; + goto exit; + } + hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; break; diff --git a/net/rds/ib.h b/net/rds/ib.h index 73427ff439f9..71ff356ee702 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op); /* ib_stats.c */ -DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats); +DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats); #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member) #define rds_ib_stats_add(member, count) \ rds_stats_add_which(rds_ib_stats, member, count) diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 44e9c00657bc..6b67aa13d2dd 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, if (!exists) { ret = tcf_idr_create(tn, parm->index, est, a, - &act_sample_ops, bind, false); + &act_sample_ops, bind, true); if (ret) { tcf_idr_cleanup(tn, parm->index); return ret; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 1a67af8a6e8c..0a75cb2e5e7b 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1902,6 +1902,8 @@ replay: RTM_NEWCHAIN, false); break; case RTM_DELCHAIN: + tfilter_notify_chain(net, skb, block, q, parent, n, + chain, RTM_DELTFILTER); /* Flush the chain first as the user requested chain removal. 
*/ tcf_chain_flush(chain); /* In case the chain was successfully deleted, put a reference diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 12cac85da994..033696e6f74f 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) { struct dst_entry *dst = sctp_transport_dst_check(t); + struct sock *sk = t->asoc->base.sk; bool change = true; if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { @@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) pmtu = SCTP_TRUNC4(pmtu); if (dst) { - dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); + struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family); + union sctp_addr addr; + + pf->af->from_sk(&addr, sk); + pf->to_sk_daddr(&t->ipaddr, sk); + dst->ops->update_pmtu(dst, sk, NULL, pmtu); + pf->to_sk_daddr(&addr, sk); + dst = sctp_transport_dst_check(t); } if (!dst) { - t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); + t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); dst = t->dst; } diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 2d8a1e15e4f9..015231789ed2 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work) smc->sk.sk_err = -rc; out: - smc->sk.sk_state_change(&smc->sk); + if (smc->sk.sk_err) + smc->sk.sk_state_change(&smc->sk); + else + smc->sk.sk_write_space(&smc->sk); kfree(smc->connect_info); smc->connect_info = NULL; release_sock(&smc->sk); @@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact) } /* listen worker: finish RDMA setup */ -static void smc_listen_rdma_finish(struct smc_sock *new_smc, - struct smc_clc_msg_accept_confirm *cclc, - int local_contact) +static int smc_listen_rdma_finish(struct smc_sock *new_smc, + struct smc_clc_msg_accept_confirm *cclc, + int local_contact) { struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; int reason_code = 0; @@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc, if (reason_code) goto decline; } - return; + return 0; decline: mutex_unlock(&smc_create_lgr_pending); smc_listen_decline(new_smc, reason_code, local_contact); + return reason_code; } /* setup for RDMA connection of server */ @@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work) } /* finish worker */ - if (!ism_supported) - smc_listen_rdma_finish(new_smc, &cclc, local_contact); + if (!ism_supported) { + if (smc_listen_rdma_finish(new_smc, &cclc, local_contact)) + return; + } smc_conn_save_peer_info(new_smc, &cclc); mutex_unlock(&smc_create_lgr_pending); smc_listen_out_connected(new_smc); @@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, return EPOLLNVAL; smc = smc_sk(sock->sk); - if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { + if (smc->use_fallback) { /* delegate to CLC child sock */ mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); sk->sk_err = smc->clcsock->sk->sk_err; @@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; if (sk->sk_state == SMC_APPCLOSEWAIT1) mask |= EPOLLIN; + if (smc->conn.urg_state == SMC_URG_VALID) + mask |= EPOLLPRI; } - if (smc->conn.urg_state == SMC_URG_VALID) - mask |= EPOLLPRI; } return mask; diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 
83aba9ade060..52241d679cc9 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type, vec[i++].iov_len = sizeof(trl); /* due to the few bytes needed for clc-handshake this cannot block */ len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen); - if (len < sizeof(pclc)) { - if (len >= 0) { - reason_code = -ENETUNREACH; - smc->sk.sk_err = -reason_code; - } else { - smc->sk.sk_err = smc->clcsock->sk->sk_err; - reason_code = -smc->sk.sk_err; - } + if (len < 0) { + smc->sk.sk_err = smc->clcsock->sk->sk_err; + reason_code = -smc->sk.sk_err; + } else if (len < (int)sizeof(pclc)) { + reason_code = -ENETUNREACH; + smc->sk.sk_err = -reason_code; } return reason_code; diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index ac961dfb1ea1..ea2b87f29469 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c @@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc) struct smc_cdc_conn_state_flags *txflags = &smc->conn.local_tx_ctrl.conn_state_flags; - sk->sk_err = ECONNABORTED; - if (smc->clcsock && smc->clcsock->sk) { - smc->clcsock->sk->sk_err = ECONNABORTED; - smc->clcsock->sk->sk_state_change(smc->clcsock->sk); + if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) { + sk->sk_err = ECONNABORTED; + if (smc->clcsock && smc->clcsock->sk) { + smc->clcsock->sk->sk_err = ECONNABORTED; + smc->clcsock->sk->sk_state_change(smc->clcsock->sk); + } } switch (sk->sk_state) { - case SMC_INIT: - sk->sk_state = SMC_PEERABORTWAIT; - break; case SMC_ACTIVE: sk->sk_state = SMC_PEERABORTWAIT; release_sock(sk); @@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc) case SMC_PEERFINCLOSEWAIT: sock_put(sk); /* passive closing */ break; + case SMC_INIT: case SMC_PEERABORTWAIT: case SMC_CLOSED: break; diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 01c6ce042a1c..7cb3e4f07c10 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = { }; /* SMC_PNETID family definition */ -static struct genl_family smc_pnet_nl_family = { +static struct genl_family smc_pnet_nl_family __ro_after_init = { .hdrsize = 0, .name = SMCR_GENL_FAMILY_NAME, .version = SMCR_GENL_FAMILY_VERSION, diff --git a/net/socket.c b/net/socket.c index e6945e318f02..01f3f8f32d6f 100644 --- a/net/socket.c +++ b/net/socket.c @@ -941,7 +941,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) EXPORT_SYMBOL(dlci_ioctl_set); static long sock_do_ioctl(struct net *net, struct socket *sock, - unsigned int cmd, unsigned long arg) + unsigned int cmd, unsigned long arg, + unsigned int ifreq_size) { int err; void __user *argp = (void __user *)arg; @@ -967,11 +968,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock, } else { struct ifreq ifr; bool need_copyout; - if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) + if (copy_from_user(&ifr, argp, ifreq_size)) return -EFAULT; err = dev_ioctl(net, cmd, &ifr, &need_copyout); if (!err && need_copyout) - if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) + if (copy_to_user(argp, &ifr, ifreq_size)) return -EFAULT; } return err; @@ -1070,7 +1071,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) err = open_related_ns(&net->ns, get_net_ns); break; default: - err = sock_do_ioctl(net, sock, cmd, arg); + err = sock_do_ioctl(net, sock, cmd, arg, + sizeof(struct ifreq)); break; } return err; @@ -2750,7 +2752,8 @@ static int do_siocgstamp(struct net *net, struct 
socket *sock, int err; set_fs(KERNEL_DS); - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, + sizeof(struct compat_ifreq)); set_fs(old_fs); if (!err) err = compat_put_timeval(&ktv, up); @@ -2766,7 +2769,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock, int err; set_fs(KERNEL_DS); - err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); + err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, + sizeof(struct compat_ifreq)); set_fs(old_fs); if (!err) err = compat_put_timespec(&kts, up); @@ -3072,7 +3076,8 @@ static int routing_ioctl(struct net *net, struct socket *sock, } set_fs(KERNEL_DS); - ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); + ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, + sizeof(struct compat_ifreq)); set_fs(old_fs); out: @@ -3185,7 +3190,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: case SIOCGIFNAME: - return sock_do_ioctl(net, sock, cmd, arg); + return sock_do_ioctl(net, sock, cmd, arg, + sizeof(struct compat_ifreq)); } return -ENOIOCTLCMD; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 292742e50bfa..961b07d4d41c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -686,7 +686,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) goto free_marker_record; } - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; switch (crypto_info->cipher_type) { case TLS_CIPHER_AES_GCM_128: nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; @@ -780,7 +780,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) ctx->priv_ctx_tx = offload_ctx; rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, - &ctx->crypto_send, + &ctx->crypto_send.info, tcp_sk(sk)->write_seq); if (rc) goto release_netdev; @@ -862,7 +862,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) goto release_ctx; rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, - &ctx->crypto_recv, + &ctx->crypto_recv.info, tcp_sk(sk)->copied_seq); if (rc) { pr_err_ratelimited("%s: The netdev has refused to offload this socket\n", diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 6102169239d1..450a6dbc5a88 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, goto free_req; iv = buf; - memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt, + memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE; diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 180b6640e531..523622dc74f8 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -241,6 +241,16 @@ static void tls_write_space(struct sock *sk) ctx->sk_write_space(sk); } +static void tls_ctx_free(struct tls_context *ctx) +{ + if (!ctx) + return; + + memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send)); + memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv)); + kfree(ctx); +} + static void tls_sk_proto_close(struct sock *sk, long timeout) { struct tls_context *ctx = tls_get_ctx(sk); @@ -294,7 +304,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) #else { #endif - kfree(ctx); + tls_ctx_free(ctx); ctx = NULL; } @@ -305,7 +315,7 @@ skip_tx_cleanup: * for sk->sk_prot->unhash 
[tls_hw_unhash] */ if (free_ctx) - kfree(ctx); + tls_ctx_free(ctx); } static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, @@ -330,7 +340,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, } /* get user crypto info */ - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; if (!TLS_CRYPTO_INFO_READY(crypto_info)) { rc = -EBUSY; @@ -417,9 +427,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, } if (tx) - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; else - crypto_info = &ctx->crypto_recv; + crypto_info = &ctx->crypto_recv.info; /* Currently we don't support set crypto info more than one time */ if (TLS_CRYPTO_INFO_READY(crypto_info)) { @@ -499,7 +509,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, goto out; err_crypto_info: - memset(crypto_info, 0, sizeof(*crypto_info)); + memzero_explicit(crypto_info, sizeof(union tls_crypto_context)); out: return rc; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index e28a6ff25d96..b9c6ecfbcfea 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -931,7 +931,15 @@ int tls_sw_recvmsg(struct sock *sk, if (control != TLS_RECORD_TYPE_DATA) goto recv_end; } + } else { + /* MSG_PEEK right now cannot look beyond current skb + * from strparser, meaning we cannot advance skb here + * and thus unpause strparser since we'd loose original + * one. + */ + break; } + /* If we have a new message from strparser, continue now. */ if (copied >= target && !ctx->recv_pkt) break; @@ -1055,8 +1063,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) goto read_failure; } - if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) || - header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) { + if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) || + header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) { ret = -EINVAL; goto read_failure; } @@ -1136,7 +1144,6 @@ void tls_sw_free_resources_rx(struct sock *sk) int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) { - char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE]; struct tls_crypto_info *crypto_info; struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; struct tls_sw_context_tx *sw_ctx_tx = NULL; @@ -1181,12 +1188,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) if (tx) { crypto_init_wait(&sw_ctx_tx->async_wait); - crypto_info = &ctx->crypto_send; + crypto_info = &ctx->crypto_send.info; cctx = &ctx->tx; aead = &sw_ctx_tx->aead_send; } else { crypto_init_wait(&sw_ctx_rx->async_wait); - crypto_info = &ctx->crypto_recv; + crypto_info = &ctx->crypto_recv.info; cctx = &ctx->rx; aead = &sw_ctx_rx->aead_recv; } @@ -1265,9 +1272,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) ctx->push_pending_record = tls_sw_push_pending_record; - memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); - - rc = crypto_aead_setkey(*aead, keyval, + rc = crypto_aead_setkey(*aead, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); if (rc) goto free_aead; diff --git a/scripts/subarch.include b/scripts/subarch.include new file mode 100644 index 000000000000..650682821126 --- /dev/null +++ b/scripts/subarch.include @@ -0,0 +1,13 @@ +# SUBARCH tells the usermode build what the underlying arch is. That is set +# first, and if a usermode build is happening, the "ARCH=um" on the command +# line overrides the setting of ARCH below. 
If a native build is happening, +# then ARCH is assigned, getting whatever value it gets normally, and +# SUBARCH is subsequently ignored. + +SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ + -e s/sun4u/sparc64/ \ + -e s/arm.*/arm/ -e s/sa110/arm/ \ + -e s/s390x/s390/ -e s/parisc64/parisc/ \ + -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ + -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ + -e s/riscv.*/riscv/) diff --git a/security/keys/dh.c b/security/keys/dh.c index 3b602a1e27fa..711e89d8c415 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c @@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params, } dh_inputs.g_size = dlen; - dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key); + dlen = dh_data_from_key(pcopy.private, &dh_inputs.key); if (dlen < 0) { ret = dlen; goto out2; diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index 730ea91d9be8..93676354f87f 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c @@ -263,6 +263,8 @@ do_registration(struct work_struct *work) error: mutex_unlock(&devices_mutex); snd_bebob_stream_destroy_duplex(bebob); + kfree(bebob->maudio_special_quirk); + bebob->maudio_special_quirk = NULL; snd_card_free(bebob->card); dev_info(&bebob->unit->device, "Sound card registration failed: %d\n", err); diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c index bd55620c6a47..c266997ad299 100644 --- a/sound/firewire/bebob/bebob_maudio.c +++ b/sound/firewire/bebob/bebob_maudio.c @@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) struct fw_device *device = fw_parent_device(unit); int err, rcode; u64 date; - __le32 cues[3] = { - cpu_to_le32(MAUDIO_BOOTLOADER_CUE1), - cpu_to_le32(MAUDIO_BOOTLOADER_CUE2), - cpu_to_le32(MAUDIO_BOOTLOADER_CUE3) - }; + __le32 *cues; /* check date of software used to build */ err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE, &date, sizeof(u64)); if (err < 0) - goto end; + return err; /* * firmware version 5058 or later has date later than "20070401", but * 'date' is not null-terminated. 
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit) if (date < 0x3230303730343031LL) { dev_err(&unit->device, "Use firmware version 5058 or later\n"); - err = -ENOSYS; - goto end; + return -ENXIO; } + cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL); + if (!cues) + return -ENOMEM; + + cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1); + cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2); + cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3); + rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST, device->node_id, device->generation, device->max_speed, BEBOB_ADDR_REG_REQ, - cues, sizeof(cues)); + cues, 3 * sizeof(*cues)); + kfree(cues); if (rcode != RCODE_COMPLETE) { dev_err(&unit->device, "Failed to send a cue to load firmware\n"); err = -EIO; } -end: + return err; } @@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814) bebob->midi_output_ports = 2; } end: - if (err < 0) { - kfree(params); - bebob->maudio_special_quirk = NULL; - } mutex_unlock(&bebob->mutex); return err; } diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index 1f5e1d23f31a..ef689997d6a5 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x) fw_unit_put(dg00x->unit); mutex_destroy(&dg00x->mutex); + kfree(dg00x); } static void dg00x_card_free(struct snd_card *card) diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c index ad7a0a32557d..64c3cb0fb926 100644 --- a/sound/firewire/fireface/ff-protocol-ff400.c +++ b/sound/firewire/fireface/ff-protocol-ff400.c @@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable) { __le32 *reg; int i; + int err; reg = kcalloc(18, sizeof(__le32), GFP_KERNEL); if (reg == NULL) @@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable) reg[i] = cpu_to_le32(0x00000001); } - return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST, - FF400_FETCH_PCM_FRAMES, reg, - sizeof(__le32) * 18, 0); + err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST, + FF400_FETCH_PCM_FRAMES, reg, + sizeof(__le32) * 18, 0); + kfree(reg); + return err; } static void ff400_dump_sync_status(struct snd_ff *ff, diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 71a0613d3da0..f2d073365cf6 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c @@ -301,6 +301,8 @@ error: snd_efw_transaction_remove_instance(efw); snd_efw_stream_destroy_duplex(efw); snd_card_free(efw->card); + kfree(efw->resp_buf); + efw->resp_buf = NULL; dev_info(&efw->unit->device, "Sound card registration failed: %d\n", err); } diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 1e5b2c802635..2ea8be6c8584 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw) kfree(oxfw->spec); mutex_destroy(&oxfw->mutex); + kfree(oxfw); } /* @@ -207,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw) static void do_registration(struct work_struct *work) { struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work); + int i; int err; if (oxfw->registered) @@ -269,7 +271,15 @@ error: snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); if (oxfw->has_output) snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); + for (i = 0; i < 
SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) { + kfree(oxfw->tx_stream_formats[i]); + oxfw->tx_stream_formats[i] = NULL; + kfree(oxfw->rx_stream_formats[i]); + oxfw->rx_stream_formats[i] = NULL; + } snd_card_free(oxfw->card); + kfree(oxfw->spec); + oxfw->spec = NULL; dev_info(&oxfw->unit->device, "Sound card registration failed: %d\n", err); } diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index 44ad41fb7374..d3fdc463a884 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm) fw_unit_put(tscm->unit); mutex_destroy(&tscm->mutex); + kfree(tscm); } static void tscm_card_free(struct snd_card *card) diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 560ec0986e1a..74244d8e2909 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus) */ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus) { + WARN_ON_ONCE(!bus->rb.area); + spin_lock_irq(&bus->reg_lock); /* CORB set up */ bus->corb.addr = bus->rb.addr; @@ -383,7 +385,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus) EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset); /* reset codec link */ -static int azx_reset(struct hdac_bus *bus, bool full_reset) +int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset) { if (!full_reset) goto skip_reset; @@ -408,7 +410,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset) skip_reset: /* check to see if controller is ready */ if (!snd_hdac_chip_readb(bus, GCTL)) { - dev_dbg(bus->dev, "azx_reset: controller not ready!\n"); + dev_dbg(bus->dev, "controller not ready!\n"); return -EBUSY; } @@ -423,6 +425,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset) return 0; } +EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link); /* enable interrupts */ static void azx_int_enable(struct hdac_bus *bus) @@ -477,15 +480,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset) return false; /* reset controller */ - azx_reset(bus, full_reset); + snd_hdac_bus_reset_link(bus, full_reset); - /* initialize interrupts */ + /* clear interrupts */ azx_int_clear(bus); - azx_int_enable(bus); /* initialize the codec command I/O */ snd_hdac_bus_init_cmd_io(bus); + /* enable interrupts after CORB/RIRB buffers are initialized above */ + azx_int_enable(bus); + /* program the position buffer */ if (bus->use_posbuf && bus->posbuf.addr) { snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr); diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c index 90713741c2dc..6ebe817801ea 100644 --- a/sound/pci/emu10k1/emufx.c +++ b/sound/pci/emu10k1/emufx.c @@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un emu->support_tlv = 1; return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp); case SNDRV_EMU10K1_IOCTL_INFO: - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; snd_emu10k1_fx8010_info(emu, info); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 1b2ce304152a..aa4c672dbaf7 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -365,8 +365,10 @@ enum { */ #ifdef SUPPORT_VGA_SWITCHEROO #define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo) +#define needs_eld_notify_link(chip) ((chip)->need_eld_notify_link) #else #define use_vga_switcheroo(chip) 0 +#define needs_eld_notify_link(chip) false #endif #define 
CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ @@ -453,6 +455,7 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, #endif static int azx_acquire_irq(struct azx *chip, int do_disconnect); +static void set_default_power_save(struct azx *chip); /* * initialize the PCI registers @@ -1201,6 +1204,10 @@ static int azx_runtime_idle(struct device *dev) azx_bus(chip)->codec_powered || !chip->running) return -EBUSY; + /* ELD notification gets broken when HD-audio bus is off */ + if (needs_eld_notify_link(hda)) + return -EBUSY; + return 0; } @@ -1298,6 +1305,36 @@ static bool azx_vs_can_switch(struct pci_dev *pci) return true; } +/* + * The discrete GPU cannot power down unless the HDA controller runtime + * suspends, so activate runtime PM on codecs even if power_save == 0. + */ +static void setup_vga_switcheroo_runtime_pm(struct azx *chip) +{ + struct hda_intel *hda = container_of(chip, struct hda_intel, chip); + struct hda_codec *codec; + + if (hda->use_vga_switcheroo && !hda->need_eld_notify_link) { + list_for_each_codec(codec, &chip->bus) + codec->auto_runtime_pm = 1; + /* reset the power save setup */ + if (chip->running) + set_default_power_save(chip); + } +} + +static void azx_vs_gpu_bound(struct pci_dev *pci, + enum vga_switcheroo_client_id client_id) +{ + struct snd_card *card = pci_get_drvdata(pci); + struct azx *chip = card->private_data; + struct hda_intel *hda = container_of(chip, struct hda_intel, chip); + + if (client_id == VGA_SWITCHEROO_DIS) + hda->need_eld_notify_link = 0; + setup_vga_switcheroo_runtime_pm(chip); +} + static void init_vga_switcheroo(struct azx *chip) { struct hda_intel *hda = container_of(chip, struct hda_intel, chip); @@ -1306,6 +1343,7 @@ static void init_vga_switcheroo(struct azx *chip) dev_info(chip->card->dev, "Handle vga_switcheroo audio client\n"); hda->use_vga_switcheroo = 1; + hda->need_eld_notify_link = 1; /* cleared in gpu_bound op */ chip->driver_caps |= AZX_DCAPS_PM_RUNTIME; pci_dev_put(p); } @@ -1314,6 +1352,7 @@ static void init_vga_switcheroo(struct azx *chip) static const struct vga_switcheroo_client_ops azx_vs_ops = { .set_gpu_state = azx_vs_set_state, .can_switch = azx_vs_can_switch, + .gpu_bound = azx_vs_gpu_bound, }; static int register_vga_switcheroo(struct azx *chip) @@ -1339,6 +1378,7 @@ static int register_vga_switcheroo(struct azx *chip) #define init_vga_switcheroo(chip) /* NOP */ #define register_vga_switcheroo(chip) 0 #define check_hdmi_disabled(pci) false +#define setup_vga_switcheroo_runtime_pm(chip) /* NOP */ #endif /* SUPPORT_VGA_SWITCHER */ /* @@ -1352,6 +1392,7 @@ static int azx_free(struct azx *chip) if (azx_has_pm_runtime(chip) && chip->running) pm_runtime_get_noresume(&pci->dev); + chip->running = 0; azx_del_card_list(chip); @@ -2230,6 +2271,25 @@ static struct snd_pci_quirk power_save_blacklist[] = { }; #endif /* CONFIG_PM */ +static void set_default_power_save(struct azx *chip) +{ + int val = power_save; + +#ifdef CONFIG_PM + if (pm_blacklist) { + const struct snd_pci_quirk *q; + + q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); + if (q && val) { + dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", + q->subvendor, q->subdevice); + val = 0; + } + } +#endif /* CONFIG_PM */ + snd_hda_set_power_save(&chip->bus, val * 1000); +} + /* number of codec slots for each chipset: 0 = default slots (i.e. 
4) */ static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { [AZX_DRIVER_NVIDIA] = 8, @@ -2241,9 +2301,7 @@ static int azx_probe_continue(struct azx *chip) struct hda_intel *hda = container_of(chip, struct hda_intel, chip); struct hdac_bus *bus = azx_bus(chip); struct pci_dev *pci = chip->pci; - struct hda_codec *codec; int dev = chip->dev_index; - int val; int err; hda->probe_continued = 1; @@ -2322,31 +2380,13 @@ static int azx_probe_continue(struct azx *chip) if (err < 0) goto out_free; + setup_vga_switcheroo_runtime_pm(chip); + chip->running = 1; azx_add_card_list(chip); - val = power_save; -#ifdef CONFIG_PM - if (pm_blacklist) { - const struct snd_pci_quirk *q; - - q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); - if (q && val) { - dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", - q->subvendor, q->subdevice); - val = 0; - } - } -#endif /* CONFIG_PM */ - /* - * The discrete GPU cannot power down unless the HDA controller runtime - * suspends, so activate runtime PM on codecs even if power_save == 0. - */ - if (use_vga_switcheroo(hda)) - list_for_each_codec(codec, &chip->bus) - codec->auto_runtime_pm = 1; + set_default_power_save(chip); - snd_hda_set_power_save(&chip->bus, val * 1000); if (azx_has_pm_runtime(chip)) pm_runtime_put_autosuspend(&pci->dev); diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h index e3a3d318d2e5..f59719e06b91 100644 --- a/sound/pci/hda/hda_intel.h +++ b/sound/pci/hda/hda_intel.h @@ -37,6 +37,7 @@ struct hda_intel { /* vga_switcheroo setup */ unsigned int use_vga_switcheroo:1; + unsigned int need_eld_notify_link:1; unsigned int vga_switcheroo_registered:1; unsigned int init_failed:1; /* delayed init failed */ diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index e359938e3d7e..77b265bd0505 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/sizes.h> #include <linux/pm_runtime.h> @@ -184,6 +185,24 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio, acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data); } +static void pre_config_reset(void __iomem *acp_mmio, u16 ch_num) +{ + u32 dma_ctrl; + int ret; + + /* clear the reset bit */ + dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num); + dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK; + acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num); + /* check the reset bit before programming configuration registers */ + ret = readl_poll_timeout(acp_mmio + ((mmACP_DMA_CNTL_0 + ch_num) * 4), + dma_ctrl, + !(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK), + 100, ACP_DMA_RESET_TIME); + if (ret < 0) + pr_err("Failed to clear reset of channel : %d\n", ch_num); +} + /* * Initialize the DMA descriptor information for transfer between * system memory <-> ACP SRAM @@ -236,6 +255,7 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio, config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, &dmadscr[i]); } + pre_config_reset(acp_mmio, ch); config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1, NUM_DSCRS_PER_CHANNEL, @@ -275,6 +295,7 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size, config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, &dmadscr[i]); } + pre_config_reset(acp_mmio, ch); /* Configure the DMA channel with the above descriptore */ config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 
1, NUM_DSCRS_PER_CHANNEL, diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c index 275677de669f..407554175282 100644 --- a/sound/soc/codecs/cs4265.c +++ b/sound/soc/codecs/cs4265.c @@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = { SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2, 3, 1, 0), SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum), - SOC_SINGLE("MMTLR Data Switch", 0, - 1, 1, 0), + SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2, + 0, 1, 0), SOC_ENUM("Mono Channel Select", spdif_mono_select_enum), SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24), }; diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c index 92b7125ea169..1093f766d0d2 100644 --- a/sound/soc/codecs/max98373.c +++ b/sound/soc/codecs/max98373.c @@ -520,6 +520,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3: + case MAX98373_R203E_AMP_PATH_GAIN: case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK: case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK: case MAX98373_R20B6_BDE_CUR_STATE_READBACK: @@ -729,6 +730,7 @@ static int max98373_probe(struct snd_soc_component *component) /* Software Reset */ regmap_write(max98373->regmap, MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); + usleep_range(10000, 11000); /* IV default slot configuration */ regmap_write(max98373->regmap, @@ -817,6 +819,7 @@ static int max98373_resume(struct device *dev) regmap_write(max98373->regmap, MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); + usleep_range(10000, 11000); regcache_cache_only(max98373->regmap, false); regcache_sync(max98373->regmap); return 0; diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c index dca82dd6e3bf..32fe76c3134a 100644 --- a/sound/soc/codecs/rt5514.c +++ b/sound/soc/codecs/rt5514.c @@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = { {RT5514_ANA_CTRL_LDO10, 0x00028604}, {RT5514_ANA_CTRL_ADCFED, 0x00000800}, {RT5514_ASRC_IN_CTRL1, 0x00000003}, - {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, - {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, + {RT5514_DOWNFILTER0_CTRL3, 0x10000342}, + {RT5514_DOWNFILTER1_CTRL3, 0x10000342}, }; static const struct reg_default rt5514_reg[] = { @@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = { {RT5514_ASRC_IN_CTRL1, 0x00000003}, {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, - {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, + {RT5514_DOWNFILTER0_CTRL3, 0x10000342}, {RT5514_DOWNFILTER1_CTRL1, 0x00020c2f}, {RT5514_DOWNFILTER1_CTRL2, 0x00020c2f}, - {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, + {RT5514_DOWNFILTER1_CTRL3, 0x10000342}, {RT5514_ANA_CTRL_LDO10, 0x00028604}, {RT5514_ANA_CTRL_LDO18_16, 0x02000345}, {RT5514_ANA_CTRL_ADC12, 0x0000a2a8}, diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 640d400ca013..afe7d5b19313 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -750,8 +750,8 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg) } static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0); -static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); -static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); +static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0); +static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0); static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ @@ -1114,7 +1114,7 @@ static const 
struct snd_kcontrol_new rt5682_snd_controls[] = { /* DAC Digital Volume */ SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL, - RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv), + RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv), /* IN Boost Volume */ SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL, @@ -1124,7 +1124,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = { SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL, RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1), SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL, - RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv), + RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), /* ADC Boost Volume Control */ SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST, diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c index d53680ac78e4..6df158669420 100644 --- a/sound/soc/codecs/sigmadsp.c +++ b/sound/soc/codecs/sigmadsp.c @@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp, struct sigmadsp_control *ctrl, void *data) { /* safeload loads up to 20 bytes in a atomic operation */ - if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops && - sigmadsp->ops->safeload) + if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload) return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data, ctrl->num_bytes); else diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c index 14999b999fd3..0d6145549a98 100644 --- a/sound/soc/codecs/tas6424.c +++ b/sound/soc/codecs/tas6424.c @@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work) TAS6424_FAULT_PVDD_UV | TAS6424_FAULT_VBAT_UV; - if (reg) + if (!reg) { + tas6424->last_fault1 = reg; goto check_global_fault2_reg; + } /* * Only flag errors once for a given occurrence. 
This is needed as @@ -461,8 +463,10 @@ check_global_fault2_reg: TAS6424_FAULT_OTSD_CH3 | TAS6424_FAULT_OTSD_CH4; - if (!reg) + if (!reg) { + tas6424->last_fault2 = reg; goto check_warn_reg; + } if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD)) dev_crit(dev, "experienced a global overtemp shutdown\n"); @@ -497,8 +501,10 @@ check_warn_reg: TAS6424_WARN_VDD_OTW_CH3 | TAS6424_WARN_VDD_OTW_CH4; - if (!reg) + if (!reg) { + tas6424->last_warn = reg; goto out; + } if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV)) dev_warn(dev, "experienced a VDD under voltage condition\n"); diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c index f27464c2c5ba..79541960f45d 100644 --- a/sound/soc/codecs/wm8804-i2c.c +++ b/sound/soc/codecs/wm8804-i2c.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/i2c.h> +#include <linux/acpi.h> #include "wm8804.h" @@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id); +#if defined(CONFIG_OF) static const struct of_device_id wm8804_of_match[] = { { .compatible = "wlf,wm8804", }, { } }; MODULE_DEVICE_TABLE(of, wm8804_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id wm8804_acpi_match[] = { + { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */ + { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */ + { }, +}; +MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match); +#endif static struct i2c_driver wm8804_i2c_driver = { .driver = { .name = "wm8804", .pm = &wm8804_pm, - .of_match_table = wm8804_of_match, + .of_match_table = of_match_ptr(wm8804_of_match), + .acpi_match_table = ACPI_PTR(wm8804_acpi_match), }, .probe = wm8804_i2c_probe, .remove = wm8804_i2c_remove, diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c index 953d94d50586..ade34c26ad2f 100644 --- a/sound/soc/codecs/wm9712.c +++ b/sound/soc/codecs/wm9712.c @@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev) static struct platform_driver wm9712_component_driver = { .driver = { - .name = "wm9712-component", + .name = "wm9712-codec", }, .probe = wm9712_probe, diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index d32844f94d74..b6dc524830b2 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -575,6 +575,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_MONO_SPEAKER | BYT_RT5640_MCLK_EN), }, + { /* Linx Linx7 tablet */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LINX7"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_JD_NOT_INV | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* MSI S100 tablet */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), @@ -602,6 +613,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* Onda v975w */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* The above are too generic, also match BIOS info */ + DMI_EXACT_MATCH(DMI_BIOS_VERSION, "5.6.5"), + DMI_EXACT_MATCH(DMI_BIOS_DATE, "07/25/2014"), + }, + .driver_data = (void *)(BYT_RT5640_IN1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_MCLK_EN), + 
}, { /* Pipo W4 */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index dce649485649..1d17be0f78a0 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -834,7 +834,7 @@ static int skl_first_init(struct hdac_bus *bus) return -ENXIO; } - skl_init_chip(bus, true); + snd_hdac_bus_reset_link(bus, true); snd_hdac_bus_parse_capabilities(bus); diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index dc94c5c53788..c6b51571be94 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -960,8 +960,10 @@ static int msm_routing_probe(struct snd_soc_component *c) { int i; - for (i = 0; i < MAX_SESSIONS; i++) + for (i = 0; i < MAX_SESSIONS; i++) { routing_data->sessions[i].port_id = -1; + routing_data->sessions[i].fedai_id = -1; + } return 0; } diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c index 3a3064dda57f..051f96405346 100644 --- a/sound/soc/sh/rcar/adg.c +++ b/sound/soc/sh/rcar/adg.c @@ -462,6 +462,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv, goto rsnd_adg_get_clkout_end; req_size = prop->length / sizeof(u32); + if (req_size > REQ_SIZE) { + dev_err(dev, + "too many clock-frequency, use top %d\n", REQ_SIZE); + req_size = REQ_SIZE; + } of_property_read_u32_array(np, "clock-frequency", req_rate, req_size); req_48kHz_rate = 0; diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index f8425d8b44d2..d23c2bbff0cf 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -478,7 +478,7 @@ static int rsnd_status_update(u32 *status, (func_call && (mod)->ops->fn) ? #fn : ""); \ if (func_call && (mod)->ops->fn) \ tmp = (mod)->ops->fn(mod, io, param); \ - if (tmp) \ + if (tmp && (tmp != -EPROBE_DEFER)) \ dev_err(dev, "%s[%d] : %s error %d\n", \ rsnd_mod_name(mod), rsnd_mod_id(mod), \ #fn, tmp); \ @@ -958,12 +958,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream, rsnd_dai_stream_quit(io); } +static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct rsnd_priv *priv = rsnd_dai_to_priv(dai); + struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai); + struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream); + + return rsnd_dai_call(prepare, io, priv); +} + static const struct snd_soc_dai_ops rsnd_soc_dai_ops = { .startup = rsnd_soc_dai_startup, .shutdown = rsnd_soc_dai_shutdown, .trigger = rsnd_soc_dai_trigger, .set_fmt = rsnd_soc_dai_set_fmt, .set_tdm_slot = rsnd_soc_set_dai_tdm_slot, + .prepare = rsnd_soc_dai_prepare, }; void rsnd_parse_connect_common(struct rsnd_dai *rdai, @@ -1550,6 +1561,14 @@ exit_snd_probe: rsnd_dai_call(remove, &rdai->capture, priv); } + /* + * adg is very special mod which can't use rsnd_dai_call(remove), + * and it registers ADG clock on probe. + * It should be unregister if probe failed. + * Mainly it is assuming -EPROBE_DEFER case + */ + rsnd_adg_remove(priv); + return ret; } diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c index fe63ef8600d0..d65ea7bc4dac 100644 --- a/sound/soc/sh/rcar/dma.c +++ b/sound/soc/sh/rcar/dma.c @@ -241,6 +241,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io, /* try to get DMAEngine channel */ chan = rsnd_dmaen_request_channel(io, mod_from, mod_to); if (IS_ERR_OR_NULL(chan)) { + /* Let's follow when -EPROBE_DEFER case */ + if (PTR_ERR(chan) == -EPROBE_DEFER) + return PTR_ERR(chan); + /* * DMA failed. 
try to PIO mode * see diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index 96d93330b1e1..8f7a0abfa751 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h @@ -280,6 +280,9 @@ struct rsnd_mod_ops { int (*nolock_stop)(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct rsnd_priv *priv); + int (*prepare)(struct rsnd_mod *mod, + struct rsnd_dai_stream *io, + struct rsnd_priv *priv); }; struct rsnd_dai_stream; @@ -309,6 +312,7 @@ struct rsnd_mod { * H 0: fallback * H 0: hw_params * H 0: pointer + * H 0: prepare */ #define __rsnd_mod_shift_nolock_start 0 #define __rsnd_mod_shift_nolock_stop 0 @@ -323,6 +327,7 @@ struct rsnd_mod { #define __rsnd_mod_shift_fallback 28 /* always called */ #define __rsnd_mod_shift_hw_params 28 /* always called */ #define __rsnd_mod_shift_pointer 28 /* always called */ +#define __rsnd_mod_shift_prepare 28 /* always called */ #define __rsnd_mod_add_probe 0 #define __rsnd_mod_add_remove 0 @@ -337,6 +342,7 @@ struct rsnd_mod { #define __rsnd_mod_add_fallback 0 #define __rsnd_mod_add_hw_params 0 #define __rsnd_mod_add_pointer 0 +#define __rsnd_mod_add_prepare 0 #define __rsnd_mod_call_probe 0 #define __rsnd_mod_call_remove 0 @@ -351,6 +357,7 @@ struct rsnd_mod { #define __rsnd_mod_call_pointer 0 #define __rsnd_mod_call_nolock_start 0 #define __rsnd_mod_call_nolock_stop 1 +#define __rsnd_mod_call_prepare 0 #define rsnd_mod_to_priv(mod) ((mod)->priv) #define rsnd_mod_name(mod) ((mod)->ops->name) diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 8304e4ec9242..3f880ec66459 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod, if (rsnd_ssi_is_multi_slave(mod, io)) return 0; - if (ssi->usrcnt > 1) { + if (ssi->rate) { if (ssi->rate != rate) { dev_err(dev, "SSI parent/child should use same rate\n"); return -EINVAL; @@ -434,7 +434,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, struct rsnd_priv *priv) { struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); - int ret; if (!rsnd_ssi_is_run_mods(mod, io)) return 0; @@ -443,10 +442,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, rsnd_mod_power_on(mod); - ret = rsnd_ssi_master_clk_start(mod, io); - if (ret < 0) - return ret; - rsnd_ssi_config_init(mod, io); rsnd_ssi_register_setup(mod); @@ -852,6 +847,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod, return 0; } +static int rsnd_ssi_prepare(struct rsnd_mod *mod, + struct rsnd_dai_stream *io, + struct rsnd_priv *priv) +{ + return rsnd_ssi_master_clk_start(mod, io); +} + static struct rsnd_mod_ops rsnd_ssi_pio_ops = { .name = SSI_NAME, .probe = rsnd_ssi_common_probe, @@ -864,6 +866,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = { .pointer = rsnd_ssi_pio_pointer, .pcm_new = rsnd_ssi_pcm_new, .hw_params = rsnd_ssi_hw_params, + .prepare = rsnd_ssi_prepare, }; static int rsnd_ssi_dma_probe(struct rsnd_mod *mod, @@ -940,6 +943,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = { .pcm_new = rsnd_ssi_pcm_new, .fallback = rsnd_ssi_fallback, .hw_params = rsnd_ssi_hw_params, + .prepare = rsnd_ssi_prepare, }; int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod) diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 9cfe10d8040c..473eefe8658e 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -1447,7 +1447,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card, sink = codec_dai->playback_widget; source = cpu_dai->capture_widget; if (sink && source) { - ret = snd_soc_dapm_new_pcm(card, dai_link->params, + ret = 
snd_soc_dapm_new_pcm(card, rtd, dai_link->params, dai_link->num_params, source, sink); if (ret != 0) { @@ -1460,7 +1460,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card, sink = cpu_dai->playback_widget; source = codec_dai->capture_widget; if (sink && source) { - ret = snd_soc_dapm_new_pcm(card, dai_link->params, + ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params, dai_link->num_params, source, sink); if (ret != 0) { diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 7e96793050c9..461d951917c0 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -3652,6 +3652,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, { struct snd_soc_dapm_path *source_p, *sink_p; struct snd_soc_dai *source, *sink; + struct snd_soc_pcm_runtime *rtd = w->priv; const struct snd_soc_pcm_stream *config = w->params + w->params_select; struct snd_pcm_substream substream; struct snd_pcm_hw_params *params = NULL; @@ -3711,6 +3712,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, goto out; } substream.runtime = runtime; + substream.private_data = rtd; switch (event) { case SND_SOC_DAPM_PRE_PMU: @@ -3895,6 +3897,7 @@ outfree_w_param: } int snd_soc_dapm_new_pcm(struct snd_soc_card *card, + struct snd_soc_pcm_runtime *rtd, const struct snd_soc_pcm_stream *params, unsigned int num_params, struct snd_soc_dapm_widget *source, @@ -3963,6 +3966,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card, w->params = params; w->num_params = num_params; + w->priv = rtd; ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL); if (ret) diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h index 664ced8cb1b0..e907ba6f15e5 100644 --- a/tools/include/tools/libc_compat.h +++ b/tools/include/tools/libc_compat.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0+ +// SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause) /* Copyright (C) 2018 Netronome Systems, Inc. 
*/ #ifndef __TOOLS_LIBC_COMPAT_H diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index 13a861135127..6eb9bacd1948 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1 +1 @@ -libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o +libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2abd0f112627..bdb94939fd60 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -50,6 +50,7 @@ #include "libbpf.h" #include "bpf.h" #include "btf.h" +#include "str_error.h" #ifndef EM_BPF #define EM_BPF 247 @@ -469,7 +470,7 @@ static int bpf_object__elf_init(struct bpf_object *obj) obj->efile.fd = open(obj->path, O_RDONLY); if (obj->efile.fd < 0) { char errmsg[STRERR_BUFSIZE]; - char *cp = strerror_r(errno, errmsg, sizeof(errmsg)); + char *cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to open %s: %s\n", obj->path, cp); return -errno; @@ -810,8 +811,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj) data->d_size, name, idx); if (err) { char errmsg[STRERR_BUFSIZE]; - char *cp = strerror_r(-err, errmsg, - sizeof(errmsg)); + char *cp = str_error(-err, errmsg, sizeof(errmsg)); pr_warning("failed to alloc program %s (%s): %s", name, obj->path, cp); @@ -1140,7 +1140,7 @@ bpf_object__create_maps(struct bpf_object *obj) *pfd = bpf_create_map_xattr(&create_attr); if (*pfd < 0 && create_attr.btf_key_type_id) { - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", map->name, cp, errno); create_attr.btf_fd = 0; @@ -1155,7 +1155,7 @@ bpf_object__create_maps(struct bpf_object *obj) size_t j; err = *pfd; - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to create map (name: '%s'): %s\n", map->name, cp); for (j = 0; j < i; j++) @@ -1339,7 +1339,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type, } ret = -LIBBPF_ERRNO__LOAD; - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("load bpf program failed: %s\n", cp); if (log_buf && log_buf[0] != '\0') { @@ -1654,7 +1654,7 @@ static int check_path(const char *path) dir = dirname(dname); if (statfs(dir, &st_fs)) { - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to statfs %s: %s\n", dir, cp); err = -errno; } @@ -1690,7 +1690,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, } if (bpf_obj_pin(prog->instances.fds[instance], path)) { - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to pin program: %s\n", cp); return -errno; } @@ -1708,7 +1708,7 @@ static int make_dir(const char *path) err = -errno; if (err) { - cp = strerror_r(-err, errmsg, sizeof(errmsg)); + cp = str_error(-err, errmsg, sizeof(errmsg)); pr_warning("failed to mkdir %s: %s\n", path, cp); } return err; @@ -1770,7 +1770,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path) } if (bpf_obj_pin(map->fd, path)) { - cp = strerror_r(errno, errmsg, sizeof(errmsg)); + cp = str_error(errno, errmsg, sizeof(errmsg)); pr_warning("failed to pin map: %s\n", cp); return -errno; } diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c new file mode 100644 index 000000000000..b8798114a357 --- /dev/null +++ 
b/tools/lib/bpf/str_error.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: LGPL-2.1 +#undef _GNU_SOURCE +#include <string.h> +#include <stdio.h> +#include "str_error.h" + +/* + * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl + * libc, while checking strerror_r() return to avoid having to check this in + * all places calling it. + */ +char *str_error(int err, char *dst, int len) +{ + int ret = strerror_r(err, dst, len); + if (ret) + snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret); + return dst; +} diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h new file mode 100644 index 000000000000..355b1db571d1 --- /dev/null +++ b/tools/lib/bpf/str_error.h @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: LGPL-2.1 +#ifndef BPF_STR_ERROR +#define BPF_STR_ERROR + +char *str_error(int err, char *dst, int len); +#endif // BPF_STR_ERROR diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index 42261a9b280e..ac841bc5c35b 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -280,7 +280,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt mv $@+ $@ ifdef USE_ASCIIDOCTOR -$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt +$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : %.txt $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ $(ASCIIDOC) -b manpage -d manpage \ $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile index 72c25a3cb658..d9a725478375 100644 --- a/tools/testing/selftests/android/Makefile +++ b/tools/testing/selftests/android/Makefile @@ -6,7 +6,7 @@ TEST_PROGS := run.sh include ../lib.mk -all: +all: khdr @for DIR in $(SUBDIRS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ mkdir $$BUILD_TARGET -p; \ diff --git a/tools/testing/selftests/android/ion/config b/tools/testing/selftests/android/config index b4ad748a9dd9..b4ad748a9dd9 100644 --- a/tools/testing/selftests/android/ion/config +++ b/tools/testing/selftests/android/config diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile index e03695287f76..88cfe88e466f 100644 --- a/tools/testing/selftests/android/ion/Makefile +++ b/tools/testing/selftests/android/ion/Makefile @@ -10,6 +10,8 @@ $(TEST_GEN_FILES): ipcsocket.c ionutils.c TEST_PROGS := ion_test.sh +KSFT_KHDR_INSTALL := 1 +top_srcdir = ../../../../.. 
include ../../lib.mk $(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 6f54f84144a0..9b552c0fc47d 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -580,7 +580,11 @@ static void test_sockmap(int tasks, void *data) /* Test update without programs */ for (i = 0; i < 6; i++) { err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); - if (err) { + if (i < 2 && !err) { + printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n", + i, sfd[i]); + goto out_sockmap; + } else if (i >= 2 && err) { printf("Failed noprog update sockmap '%i:%i'\n", i, sfd[i]); goto out_sockmap; @@ -741,7 +745,7 @@ static void test_sockmap(int tasks, void *data) } /* Test map update elem afterwards fd lives in fd and map_fd */ - for (i = 0; i < 6; i++) { + for (i = 2; i < 6; i++) { err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY); if (err) { printf("Failed map_fd_rx update sockmap %i '%i:%i'\n", @@ -845,7 +849,7 @@ static void test_sockmap(int tasks, void *data) } /* Delete the elems without programs */ - for (i = 0; i < 6; i++) { + for (i = 2; i < 6; i++) { err = bpf_map_delete_elem(fd, &i); if (err) { printf("Failed delete sockmap %i '%i:%i'\n", diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore index 95eb3a53c381..adacda50a4b2 100644 --- a/tools/testing/selftests/cgroup/.gitignore +++ b/tools/testing/selftests/cgroup/.gitignore @@ -1 +1,2 @@ test_memcontrol +test_core diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 1c5d2b2a583b..14c9fe284806 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -89,17 +89,28 @@ int cg_read(const char *cgroup, const char *control, char *buf, size_t len) int cg_read_strcmp(const char *cgroup, const char *control, const char *expected) { - size_t size = strlen(expected) + 1; + size_t size; char *buf; + int ret; + + /* Handle the case of comparing against empty string */ + if (!expected) + size = 32; + else + size = strlen(expected) + 1; buf = malloc(size); if (!buf) return -1; - if (cg_read(cgroup, control, buf, size)) + if (cg_read(cgroup, control, buf, size)) { + free(buf); return -1; + } - return strcmp(expected, buf); + ret = strcmp(expected, buf); + free(buf); + return ret; } int cg_read_strstr(const char *cgroup, const char *control, const char *needle) @@ -337,3 +348,24 @@ int is_swap_enabled(void) return cnt > 1; } + +int set_oom_adj_score(int pid, int score) +{ + char path[PATH_MAX]; + int fd, len; + + sprintf(path, "/proc/%d/oom_score_adj", pid); + + fd = open(path, O_WRONLY | O_APPEND); + if (fd < 0) + return fd; + + len = dprintf(fd, "%d", score); + if (len < 0) { + close(fd); + return len; + } + + close(fd); + return 0; +} diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h index 1ff6f9f1abdc..9ac8b7958f83 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.h +++ b/tools/testing/selftests/cgroup/cgroup_util.h @@ -40,3 +40,4 @@ extern int get_temp_fd(void); extern int alloc_pagecache(int fd, size_t size); extern int alloc_anon(const char *cgroup, void *arg); extern int is_swap_enabled(void); +extern int set_oom_adj_score(int pid, int score); diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c index 
cf0bddc9d271..28d321ba311b 100644 --- a/tools/testing/selftests/cgroup/test_memcontrol.c +++ b/tools/testing/selftests/cgroup/test_memcontrol.c @@ -2,6 +2,7 @@ #define _GNU_SOURCE #include <linux/limits.h> +#include <linux/oom.h> #include <fcntl.h> #include <stdio.h> #include <stdlib.h> @@ -202,6 +203,36 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg) return 0; } +static int alloc_anon_noexit(const char *cgroup, void *arg) +{ + int ppid = getppid(); + + if (alloc_anon(cgroup, arg)) + return -1; + + while (getppid() == ppid) + sleep(1); + + return 0; +} + +/* + * Wait until processes are killed asynchronously by the OOM killer + * If we exceed a timeout, fail. + */ +static int cg_test_proc_killed(const char *cgroup) +{ + int limit; + + for (limit = 10; limit > 0; limit--) { + if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0) + return 0; + + usleep(100000); + } + return -1; +} + /* * First, this test creates the following hierarchy: * A memory.min = 50M, memory.max = 200M @@ -964,6 +995,177 @@ cleanup: return ret; } +/* + * This test disables swapping and tries to allocate anonymous memory + * up to OOM with memory.group.oom set. Then it checks that all + * processes in the leaf (but not the parent) were killed. + */ +static int test_memcg_oom_group_leaf_events(const char *root) +{ + int ret = KSFT_FAIL; + char *parent, *child; + + parent = cg_name(root, "memcg_test_0"); + child = cg_name(root, "memcg_test_0/memcg_test_1"); + + if (!parent || !child) + goto cleanup; + + if (cg_create(parent)) + goto cleanup; + + if (cg_create(child)) + goto cleanup; + + if (cg_write(parent, "cgroup.subtree_control", "+memory")) + goto cleanup; + + if (cg_write(child, "memory.max", "50M")) + goto cleanup; + + if (cg_write(child, "memory.swap.max", "0")) + goto cleanup; + + if (cg_write(child, "memory.oom.group", "1")) + goto cleanup; + + cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60)); + cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); + cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); + if (!cg_run(child, alloc_anon, (void *)MB(100))) + goto cleanup; + + if (cg_test_proc_killed(child)) + goto cleanup; + + if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0) + goto cleanup; + + if (cg_read_key_long(parent, "memory.events", "oom_kill ") != 0) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + if (child) + cg_destroy(child); + if (parent) + cg_destroy(parent); + free(child); + free(parent); + + return ret; +} + +/* + * This test disables swapping and tries to allocate anonymous memory + * up to OOM with memory.group.oom set. Then it checks that all + * processes in the parent and leaf were killed. 
+ */ +static int test_memcg_oom_group_parent_events(const char *root) +{ + int ret = KSFT_FAIL; + char *parent, *child; + + parent = cg_name(root, "memcg_test_0"); + child = cg_name(root, "memcg_test_0/memcg_test_1"); + + if (!parent || !child) + goto cleanup; + + if (cg_create(parent)) + goto cleanup; + + if (cg_create(child)) + goto cleanup; + + if (cg_write(parent, "memory.max", "80M")) + goto cleanup; + + if (cg_write(parent, "memory.swap.max", "0")) + goto cleanup; + + if (cg_write(parent, "memory.oom.group", "1")) + goto cleanup; + + cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60)); + cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); + cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1)); + + if (!cg_run(child, alloc_anon, (void *)MB(100))) + goto cleanup; + + if (cg_test_proc_killed(child)) + goto cleanup; + if (cg_test_proc_killed(parent)) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + if (child) + cg_destroy(child); + if (parent) + cg_destroy(parent); + free(child); + free(parent); + + return ret; +} + +/* + * This test disables swapping and tries to allocate anonymous memory + * up to OOM with memory.group.oom set. Then it checks that all + * processes were killed except those set with OOM_SCORE_ADJ_MIN + */ +static int test_memcg_oom_group_score_events(const char *root) +{ + int ret = KSFT_FAIL; + char *memcg; + int safe_pid; + + memcg = cg_name(root, "memcg_test_0"); + + if (!memcg) + goto cleanup; + + if (cg_create(memcg)) + goto cleanup; + + if (cg_write(memcg, "memory.max", "50M")) + goto cleanup; + + if (cg_write(memcg, "memory.swap.max", "0")) + goto cleanup; + + if (cg_write(memcg, "memory.oom.group", "1")) + goto cleanup; + + safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1)); + if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN)) + goto cleanup; + + cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1)); + if (!cg_run(memcg, alloc_anon, (void *)MB(100))) + goto cleanup; + + if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3) + goto cleanup; + + if (kill(safe_pid, SIGKILL)) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + if (memcg) + cg_destroy(memcg); + free(memcg); + + return ret; +} + + #define T(x) { x, #x } struct memcg_test { int (*fn)(const char *root); @@ -978,6 +1180,9 @@ struct memcg_test { T(test_memcg_oom_events), T(test_memcg_swap_max), T(test_memcg_sock), + T(test_memcg_oom_group_leaf_events), + T(test_memcg_oom_group_parent_events), + T(test_memcg_oom_group_score_events), }; #undef T diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config new file mode 100644 index 000000000000..4e151f1005b2 --- /dev/null +++ b/tools/testing/selftests/efivarfs/config @@ -0,0 +1 @@ +CONFIG_EFIVAR_FS=y diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index ff8feca49746..ad1eeb14fda7 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile @@ -18,6 +18,7 @@ TEST_GEN_FILES := \ TEST_PROGS := run.sh +top_srcdir = ../../../../.. 
include ../../lib.mk $(TEST_GEN_FILES): $(HEADERS) diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile index 1bbb47565c55..4665cdbf1a8d 100644 --- a/tools/testing/selftests/gpio/Makefile +++ b/tools/testing/selftests/gpio/Makefile @@ -21,11 +21,8 @@ endef CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ LDLIBS += -lmount -I/usr/include/libmount -$(BINARIES): ../../../gpio/gpio-utils.o ../../../../usr/include/linux/gpio.h +$(BINARIES):| khdr +$(BINARIES): ../../../gpio/gpio-utils.o ../../../gpio/gpio-utils.o: make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio - -../../../../usr/include/linux/gpio.h: - make -C ../../../.. headers_install INSTALL_HDR_PATH=$(shell pwd)/../../../../usr/ - diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h index 15e6b75fc3a5..a3edb2c8e43d 100644 --- a/tools/testing/selftests/kselftest.h +++ b/tools/testing/selftests/kselftest.h @@ -19,7 +19,6 @@ #define KSFT_FAIL 1 #define KSFT_XFAIL 2 #define KSFT_XPASS 3 -/* Treat skip as pass */ #define KSFT_SKIP 4 /* counters */ diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 4202139d81d9..5c34752e1cff 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -1,4 +1,5 @@ cr4_cpuid_sync_test +platform_info_test set_sregs_test sync_regs_test vmx_tsc_adjust_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 03b0f551bedf..ec32dad3c3f0 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -6,7 +6,8 @@ UNAME_M := $(shell uname -m) LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c LIBKVM_x86_64 = lib/x86.c lib/vmx.c -TEST_GEN_PROGS_x86_64 = set_sregs_test +TEST_GEN_PROGS_x86_64 = platform_info_test +TEST_GEN_PROGS_x86_64 += set_sregs_test TEST_GEN_PROGS_x86_64 += sync_regs_test TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test @@ -20,7 +21,7 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I.. 
-LDFLAGS += -lpthread +LDFLAGS += -pthread # After inclusion, $(OUTPUT) is defined and # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ @@ -37,9 +38,6 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ) $(AR) crs $@ $^ -$(LINUX_HDR_PATH): - make -C $(top_srcdir) headers_install - -all: $(STATIC_LIBS) $(LINUX_HDR_PATH) +all: $(STATIC_LIBS) $(TEST_GEN_PROGS): $(STATIC_LIBS) -$(TEST_GEN_PROGS) $(LIBKVM_OBJ): | $(LINUX_HDR_PATH) +$(STATIC_LIBS):| khdr diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index bb5a25fb82c6..3acf9a91704c 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -50,6 +50,7 @@ enum vm_mem_backing_src_type { }; int kvm_check_cap(long cap); +int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); void kvm_vm_free(struct kvm_vm *vmp); @@ -108,6 +109,9 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_events *events); void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_events *events); +uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index); +void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, + uint64_t msr_value); const char *exit_reason_str(unsigned int exit_reason); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index e9ba389c48db..6fd8c089cafc 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -63,6 +63,29 @@ int kvm_check_cap(long cap) return ret; } +/* VM Enable Capability + * + * Input Args: + * vm - Virtual Machine + * cap - Capability + * + * Output Args: None + * + * Return: On success, 0. On failure a TEST_ASSERT failure is produced. + * + * Enables a capability (KVM_CAP_*) on the VM. + */ +int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) +{ + int ret; + + ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); + TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n" + " rc: %i errno: %i", ret, errno); + + return ret; +} + static void vm_open(struct kvm_vm *vm, int perm) { vm->kvm_fd = open(KVM_DEV_PATH, perm); @@ -1220,6 +1243,72 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, ret, errno); } +/* VCPU Get MSR + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * msr_index - Index of MSR + * + * Output Args: None + * + * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced. + * + * Get value of MSR for VCPU. + */ +uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index) +{ + struct vcpu *vcpu = vcpu_find(vm, vcpuid); + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + int r; + + TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header); + TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n" + " rc: %i errno: %i", r, errno); + + return buffer.entry.data; +} + +/* VCPU Set MSR + * + * Input Args: + * vm - Virtual Machine + * vcpuid - VCPU ID + * msr_index - Index of MSR + * msr_value - New value of MSR + * + * Output Args: None + * + * Return: On success, nothing. On failure a TEST_ASSERT is produced. + * + * Set value of MSR for VCPU. 
+ */ +void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index, + uint64_t msr_value) +{ + struct vcpu *vcpu = vcpu_find(vm, vcpuid); + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + int r; + + TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); + memset(&buffer, 0, sizeof(buffer)); + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + buffer.entry.data = msr_value; + r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header); + TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n" + " rc: %i errno: %i", r, errno); +} + /* VM VCPU Args Set * * Input Args: diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c new file mode 100644 index 000000000000..3764e7121265 --- /dev/null +++ b/tools/testing/selftests/kvm/platform_info_test.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test for x86 KVM_CAP_MSR_PLATFORM_INFO + * + * Copyright (C) 2018, Google LLC. + * + * This work is licensed under the terms of the GNU GPL, version 2. + * + * Verifies expected behavior of controlling guest access to + * MSR_PLATFORM_INFO. + */ + +#define _GNU_SOURCE /* for program_invocation_short_name */ +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> + +#include "test_util.h" +#include "kvm_util.h" +#include "x86.h" + +#define VCPU_ID 0 +#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00 + +static void guest_code(void) +{ + uint64_t msr_platform_info; + + for (;;) { + msr_platform_info = rdmsr(MSR_PLATFORM_INFO); + GUEST_SYNC(msr_platform_info); + asm volatile ("inc %r11"); + } +} + +static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable) +{ + struct kvm_enable_cap cap = {}; + + cap.cap = KVM_CAP_MSR_PLATFORM_INFO; + cap.flags = 0; + cap.args[0] = (int)enable; + vm_enable_cap(vm, &cap); +} + +static void test_msr_platform_info_enabled(struct kvm_vm *vm) +{ + struct kvm_run *run = vcpu_state(vm, VCPU_ID); + struct guest_args args; + + set_msr_platform_info_enabled(vm, true); + vcpu_run(vm, VCPU_ID); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Exit_reason other than KVM_EXIT_IO: %u (%s),\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + guest_args_read(vm, VCPU_ID, &args); + TEST_ASSERT(args.port == GUEST_PORT_SYNC, + "Received IO from port other than PORT_HOST_SYNC: %u\n", + run->io.port); + TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) == + MSR_PLATFORM_INFO_MAX_TURBO_RATIO, + "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.", + MSR_PLATFORM_INFO_MAX_TURBO_RATIO); +} + +static void test_msr_platform_info_disabled(struct kvm_vm *vm) +{ + struct kvm_run *run = vcpu_state(vm, VCPU_ID); + + set_msr_platform_info_enabled(vm, false); + vcpu_run(vm, VCPU_ID); + TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, + "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); +} + +int main(int argc, char *argv[]) +{ + struct kvm_vm *vm; + struct kvm_run *state; + int rv; + uint64_t msr_platform_info; + + /* Tell stdout not to buffer its content */ + setbuf(stdout, NULL); + + rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO); + if (!rv) { + fprintf(stderr, + "KVM_CAP_MSR_PLATFORM_INFO not supported, skip test\n"); + exit(KSFT_SKIP); + } + + vm = vm_create_default(VCPU_ID, 0, guest_code); + + msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); + vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, + msr_platform_info | 
MSR_PLATFORM_INFO_MAX_TURBO_RATIO); + test_msr_platform_info_disabled(vm); + test_msr_platform_info_enabled(vm); + vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info); + + kvm_vm_free(vm); + + return 0; +} diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 17ab36605a8e..0a8e75886224 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -16,8 +16,20 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS)) TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED)) TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) +top_srcdir ?= ../../../.. +include $(top_srcdir)/scripts/subarch.include +ARCH ?= $(SUBARCH) + all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) +.PHONY: khdr +khdr: + make ARCH=$(ARCH) -C $(top_srcdir) headers_install + +ifdef KSFT_KHDR_INSTALL +$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr +endif + .ONESHELL: define RUN_TEST_PRINT_RESULT TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \ diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config index 2fde30191a47..a7e8cd5bb265 100644 --- a/tools/testing/selftests/memory-hotplug/config +++ b/tools/testing/selftests/memory-hotplug/config @@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_SPARSE=y CONFIG_NOTIFIER_ERROR_INJECTION=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m +CONFIG_MEMORY_HOTREMOVE=y diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 9cca68e440a0..919aa2ac00af 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -15,6 +15,7 @@ TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls +KSFT_KHDR_INSTALL := 1 include ../lib.mk $(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 32a194e3e07a..0ab9423d009f 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -178,8 +178,8 @@ setup() { cleanup() { [ ${cleanup_done} -eq 1 ] && return - ip netns del ${NS_A} 2 > /dev/null - ip netns del ${NS_B} 2 > /dev/null + ip netns del ${NS_A} 2> /dev/null + ip netns del ${NS_B} 2> /dev/null cleanup_done=1 } diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index b3ebf2646e52..8fdfeafaf8c0 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -502,6 +502,55 @@ TEST_F(tls, recv_peek_multiple) EXPECT_EQ(memcmp(test_str, buf, send_len), 0); } +TEST_F(tls, recv_peek_multiple_records) +{ + char const *test_str = "test_read_peek_mult_recs"; + char const *test_str_first = "test_read_peek"; + char const *test_str_second = "_mult_recs"; + int len; + char buf[64]; + + len = strlen(test_str_first); + EXPECT_EQ(send(self->fd, test_str_first, len, 0), len); + + len = strlen(test_str_second) + 1; + EXPECT_EQ(send(self->fd, test_str_second, len, 0), len); + + len = sizeof(buf); + memset(buf, 0, len); + EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1); + + /* MSG_PEEK can only peek into the current record. 
*/ + len = strlen(test_str_first) + 1; + EXPECT_EQ(memcmp(test_str_first, buf, len), 0); + + len = sizeof(buf); + memset(buf, 0, len); + EXPECT_NE(recv(self->cfd, buf, len, 0), -1); + + /* Non-MSG_PEEK will advance strparser (and therefore record) + * however. + */ + len = strlen(test_str) + 1; + EXPECT_EQ(memcmp(test_str, buf, len), 0); + + /* MSG_MORE will hold current record open, so later MSG_PEEK + * will see everything. + */ + len = strlen(test_str_first); + EXPECT_EQ(send(self->fd, test_str_first, len, MSG_MORE), len); + + len = strlen(test_str_second) + 1; + EXPECT_EQ(send(self->fd, test_str_second, len, 0), len); + + len = sizeof(buf); + memset(buf, 0, len); + EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1); + + len = strlen(test_str) + 1; + EXPECT_EQ(memcmp(test_str, buf, len), 0); +} + TEST_F(tls, pollin) { char const *test_str = "test_poll"; diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile index a728040edbe1..14cfcf006936 100644 --- a/tools/testing/selftests/networking/timestamping/Makefile +++ b/tools/testing/selftests/networking/timestamping/Makefile @@ -5,6 +5,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp all: $(TEST_PROGS) +top_srcdir = ../../../../.. include ../../lib.mk clean: diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile index 93baacab7693..d056486f49de 100644 --- a/tools/testing/selftests/powerpc/alignment/Makefile +++ b/tools/testing/selftests/powerpc/alignment/Makefile @@ -1,5 +1,6 @@ TEST_GEN_PROGS := copy_first_unaligned alignment_handler +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c ../utils.c diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile index b4d7432a0ecd..d40300a65b42 100644 --- a/tools/testing/selftests/powerpc/benchmarks/Makefile +++ b/tools/testing/selftests/powerpc/benchmarks/Makefile @@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target CFLAGS += -O2 +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile index 1be547434a49..ede4d3dae750 100644 --- a/tools/testing/selftests/powerpc/cache_shape/Makefile +++ b/tools/testing/selftests/powerpc/cache_shape/Makefile @@ -5,6 +5,7 @@ all: $(TEST_PROGS) $(TEST_PROGS): ../harness.c ../utils.c +top_srcdir = ../../../../.. include ../../lib.mk clean: diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile index 1cf89a34d97c..44574f3818b3 100644 --- a/tools/testing/selftests/powerpc/copyloops/Makefile +++ b/tools/testing/selftests/powerpc/copyloops/Makefile @@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \ EXTRA_SOURCES := validate.c ../harness.c stubs.S +top_srcdir = ../../../../.. include ../../lib.mk $(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES) diff --git a/tools/testing/selftests/powerpc/dscr/Makefile b/tools/testing/selftests/powerpc/dscr/Makefile index 55d7db7a616b..5df476364b4d 100644 --- a/tools/testing/selftests/powerpc/dscr/Makefile +++ b/tools/testing/selftests/powerpc/dscr/Makefile @@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \ dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \ dscr_sysfs_thread_test +top_srcdir = ../../../../.. 
include ../../lib.mk $(OUTPUT)/dscr_default_test: LDLIBS += -lpthread diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile index 0dd3a01fdab9..11a10d7a2bbd 100644 --- a/tools/testing/selftests/powerpc/math/Makefile +++ b/tools/testing/selftests/powerpc/math/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 8ebbe96d80a8..33ced6e0ad25 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -5,6 +5,7 @@ noarg: TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors TEST_GEN_FILES := tempfile +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile index 6e1629bf5b09..19046db995fe 100644 --- a/tools/testing/selftests/powerpc/pmu/Makefile +++ b/tools/testing/selftests/powerpc/pmu/Makefile @@ -5,6 +5,7 @@ noarg: TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c +top_srcdir = ../../../../.. include ../../lib.mk all: $(TEST_GEN_PROGS) ebb diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile index c4e64bc2e265..bd5dfa509272 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile +++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile @@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \ lost_exception_test no_handler_test \ cycles_with_mmcr2_test +top_srcdir = ../../../../../.. include ../../../lib.mk $(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \ diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile index 175366db7be8..ea2b7bd09e36 100644 --- a/tools/testing/selftests/powerpc/primitives/Makefile +++ b/tools/testing/selftests/powerpc/primitives/Makefile @@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR) TEST_GEN_PROGS := load_unaligned_zeropad +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index 28f5b781a553..923d531265f8 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile @@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \ perf-hwbreak +top_srcdir = ../../../../.. include ../../lib.mk all: $(TEST_PROGS) diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile index a7cbd5082e27..1fca25c6ace0 100644 --- a/tools/testing/selftests/powerpc/signal/Makefile +++ b/tools/testing/selftests/powerpc/signal/Makefile @@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S CFLAGS += -maltivec signal_tm: CFLAGS += -mhtm +top_srcdir = ../../../../.. 
include ../../lib.mk clean: diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile index 10b35c87a4f4..7fc0623d85c3 100644 --- a/tools/testing/selftests/powerpc/stringloops/Makefile +++ b/tools/testing/selftests/powerpc/stringloops/Makefile @@ -29,6 +29,7 @@ endif ASFLAGS = $(CFLAGS) +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_PROGS): $(EXTRA_SOURCES) diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile index 30b8ff8fb82e..fcd2dcb8972b 100644 --- a/tools/testing/selftests/powerpc/switch_endian/Makefile +++ b/tools/testing/selftests/powerpc/switch_endian/Makefile @@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64 EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S +top_srcdir = ../../../../.. include ../../lib.mk $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile index da22ca7c38c1..161b8846336f 100644 --- a/tools/testing/selftests/powerpc/syscalls/Makefile +++ b/tools/testing/selftests/powerpc/syscalls/Makefile @@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed CFLAGS += -I../../../../../usr/include +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index c0e45d2dde25..9fc2cf6fbc92 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile @@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \ $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c ../utils.c diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile index f8ced26748f8..fb82068c9fda 100644 --- a/tools/testing/selftests/powerpc/vphn/Makefile +++ b/tools/testing/selftests/powerpc/vphn/Makefile @@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn CFLAGS += -m64 +top_srcdir = ../../../../.. include ../../lib.mk $(TEST_GEN_PROGS): ../harness.c diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 9881876d2aa0..e94b7b14bcb2 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -26,10 +26,6 @@ TEST_PROGS := run_vmtests include ../lib.mk -$(OUTPUT)/userfaultfd: ../../../../usr/include/linux/kernel.h $(OUTPUT)/userfaultfd: LDLIBS += -lpthread $(OUTPUT)/mlock-random-test: LDLIBS += -lcap - -../../../../usr/include/linux/kernel.h: - make -C ../../../.. headers_install |
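The tas6424 hunks above fix an edge-detection bug in the fault-check worker: the cached last_fault1/last_fault2/last_warn values were only refreshed on the reporting paths, so a fault that cleared and later reasserted was never logged again (the first hunk additionally flips an inverted check). Reduced to a hypothetical status register, the pattern and the fix look roughly like the sketch below; the register layout and messages are illustrative, not the driver's.

    #include <stdio.h>

    /* Report each newly raised bit exactly once, and re-arm the report
     * when the condition clears - i.e. also cache an all-clear reading. */
    static unsigned int last_fault;

    static void check_fault(unsigned int reg)
    {
        unsigned int new_bits = reg & ~last_fault;

        if (new_bits & 0x1)
            printf("overtemp fault asserted\n");
        if (new_bits & 0x2)
            printf("undervoltage fault asserted\n");

        /* Updating the cache even for a clear reading is the fix: without
         * it, a cleared fault stays latched in last_fault and its next
         * assertion is silently swallowed. */
        last_fault = reg;
    }

    int main(void)
    {
        check_fault(0x1);   /* reported */
        check_fault(0x1);   /* same occurrence, stays quiet */
        check_fault(0x0);   /* clears, re-arms the report */
        check_fault(0x1);   /* reported again */
        return 0;
    }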
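The libbpf changes replace direct strerror_r() calls with a small str_error() wrapper so the library also builds against non-GNU libcs such as musl, which only provide the int-returning XSI strerror_r(), and so a failed strerror_r() still yields a printable string. A standalone sketch of the same pattern follows; the wrapper body mirrors the one added in str_error.c, while the failing open() around it is just a hypothetical caller.

    #undef _GNU_SOURCE          /* force the XSI strerror_r() prototype */
    #include <string.h>
    #include <stdio.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Same idea as libbpf's str_error(): always hand back a printable
     * string, even if strerror_r() itself fails. */
    static char *str_error(int err, char *dst, int len)
    {
        int ret = strerror_r(err, dst, len);

        if (ret)
            snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
        return dst;
    }

    int main(void)
    {
        char errmsg[128];
        int fd = open("/nonexistent", O_RDONLY);  /* hypothetical failing call */

        if (fd < 0)
            fprintf(stderr, "open failed: %s\n",
                    str_error(errno, errmsg, sizeof(errmsg)));
        else
            close(fd);
        return 0;
    }

The #undef matters: with _GNU_SOURCE in effect, glibc would instead declare the GNU char *-returning variant and the integer return check would be meaningless.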
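The new memcontrol tests drive the cgroup v2 memory.oom.group knob together with memory.max and memory.swap.max, then count "oom_kill " entries in memory.events. Outside the cg_* selftest helpers these are ordinary control files under the cgroup2 mount; a minimal sketch of the same configuration writes, assuming a pre-created cgroup at the hypothetical path /sys/fs/cgroup/memcg_demo and a kernel that already has the group-OOM feature.

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Write a value to one cgroup v2 control file, e.g. "memory.oom.group". */
    static int cg_write(const char *cgroup, const char *control, const char *buf)
    {
        char path[512];
        ssize_t len = strlen(buf);
        int fd, ret;

        snprintf(path, sizeof(path), "%s/%s", cgroup, control);
        fd = open(path, O_WRONLY);
        if (fd < 0)
            return -1;
        ret = (write(fd, buf, len) == len) ? 0 : -1;
        close(fd);
        return ret;
    }

    int main(void)
    {
        const char *cg = "/sys/fs/cgroup/memcg_demo";  /* hypothetical, pre-created */

        /* Same configuration the group-OOM tests apply to their leaf cgroup. */
        if (cg_write(cg, "memory.max", "50M") ||
            cg_write(cg, "memory.swap.max", "0") ||
            cg_write(cg, "memory.oom.group", "1")) {
            perror("cgroup write");
            return 1;
        }
        return 0;
    }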
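platform_info_test toggles the new KVM_CAP_MSR_PLATFORM_INFO capability through the vm_enable_cap() helper added to kvm_util.c, which is simply KVM_ENABLE_CAP on the VM file descriptor. A minimal sketch of that ioctl sequence outside the harness, assuming kernel headers recent enough to define KVM_CAP_MSR_PLATFORM_INFO (no vCPU is created and error handling is trimmed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        struct kvm_enable_cap cap;
        int kvm, vm;

        kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0)
            return 1;

        /* Skip quietly if the running kernel does not know the capability. */
        if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MSR_PLATFORM_INFO) <= 0)
            return 0;

        vm = ioctl(kvm, KVM_CREATE_VM, 0);
        if (vm < 0)
            return 1;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
        cap.args[0] = 1;        /* allow guest reads of MSR_PLATFORM_INFO */

        if (ioctl(vm, KVM_ENABLE_CAP, &cap))
            perror("KVM_ENABLE_CAP");

        return 0;
    }

With args[0] left at 0 the selftest's disabled path expects the guest to stop with KVM_EXIT_SHUTDOWN instead of reading the MSR, which is what test_msr_platform_info_disabled() asserts.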