author     David S. Miller <davem@davemloft.net>    2017-04-15 21:16:30 -0400
committer  David S. Miller <davem@davemloft.net>    2017-04-15 21:16:30 -0400
commit     6b6cbc1471676402565e958674523d06213b82d7 (patch)
tree       a66d41d276e5eb400e27641f24f34032fe1b9275
parent     ce07183282975026716107d36fd3f5f93de76668 (diff)
parent     1bf4b1268e66d9364fc6fd41f906bc01458530ac (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts were simply overlapping changes. In the net/ipv4/route.c
case the code had moved around a little bit and the same fix was
made in both 'net' and 'net-next'.
In the net/sched/sch_generic.c case a fix in 'net' happened at
the same time that a new argument was added to qdisc_hash_add().
Signed-off-by: David S. Miller <davem@davemloft.net>
298 files changed, 3243 insertions, 1816 deletions
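The sch_generic.c conflict described above has the classic shape of a bug
fix racing an API change: 'net' patched a call site of qdisc_hash_add()
while 'net-next' added a parameter to that function. A minimal sketch of
that shape, assuming the new argument is a boolean visibility flag (the
helper names and the argument value below are illustrative, not taken
from this diff):

/* 'net' side: the fix still uses the old single-argument prototype */
static void attach_one_qdisc_net(struct Qdisc *sch)
{
        qdisc_hash_add(sch);
}

/* 'net-next' side: the same call site after the API grew an argument */
static void attach_one_qdisc_net_next(struct Qdisc *sch)
{
        qdisc_hash_add(sch, true);
}

Resolving such a conflict means carrying the 'net' fix over onto the new
prototype rather than taking either side verbatim.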
@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com> Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de> Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> Mark Brown <broonie@sirena.org.uk> +Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com> +Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com> Matthieu CASTET <castet.matthieu@free.fr> Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br> Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> @@ -171,6 +173,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com> Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com> Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com> Takashi YOSHII <takashi.yoshii.zj@renesas.com> +Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com> Yusuke Goda <goda.yusuke@renesas.com> Gustavo Padovan <gustavo@las.ic.unicamp.br> Gustavo Padovan <padovan@profusion.mobi> diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index fdcfdd79682a..fe25787ff6d4 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -58,8 +58,7 @@ prototypes: int (*permission) (struct inode *, int, unsigned int); int (*get_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (const struct path *, struct dentry *, struct kstat *, - u32, unsigned int); + int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); void (*update_time)(struct inode *, struct timespec *, int); diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 95280079c0b3..5fb17f49f7a2 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -600,3 +600,9 @@ in your dentry operations instead. [recommended] ->readlink is optional for symlinks. Don't set, unless filesystem needs to fake something for readlink(2). +-- +[mandatory] + ->getattr() is now passed a struct path rather than a vfsmount and + dentry separately, and it now has request_mask and query_flags arguments + to specify the fields and sync type requested by statx. Filesystems not + supporting any statx-specific features may ignore the new arguments. 
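To make the new ->getattr() contract concrete, here is a minimal sketch of
an implementation against the 4.11 prototype; the filesystem name is
hypothetical, and a filesystem with no statx-specific behaviour can ignore
request_mask and query_flags entirely, as the porting note above says:

#include <linux/fs.h>
#include <linux/stat.h>

static int examplefs_getattr(const struct path *path, struct kstat *stat,
                             u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);

        /* No statx-specific fields or sync behaviour handled here. */
        generic_fillattr(inode, stat);
        return 0;
}

The dentry that used to be passed separately is now reachable as
path->dentry, and the vfsmount as path->mnt.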
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 569211703721..94dd27ef4a76 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -382,8 +382,7 @@ struct inode_operations { int (*permission) (struct inode *, int); int (*get_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); - int (*getattr) (const struct path *, struct dentry *, struct kstat *, - u32, unsigned int); + int (*getattr) (const struct path *, struct kstat *, u32, unsigned int); ssize_t (*listxattr) (struct dentry *, char *, size_t); void (*update_time)(struct inode *, struct timespec *, int); int (*atomic_open)(struct inode *, struct dentry *, struct file *, diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt index 54bd5faa8782..f2af35f6d6b2 100644 --- a/Documentation/pinctrl.txt +++ b/Documentation/pinctrl.txt @@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = { int __init foo_probe(void) { + int error; + struct pinctrl_dev *pctl; - return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl); + error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl); + if (error) + return error; + + return pinctrl_enable(pctl); } To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst index 11ec2d93a5e0..61e9c78bd6d1 100644 --- a/Documentation/process/stable-kernel-rules.rst +++ b/Documentation/process/stable-kernel-rules.rst @@ -124,7 +124,7 @@ specified in the following format in the sign-off area: .. code-block:: none - Cc: <stable@vger.kernel.org> # 3.3.x- + Cc: <stable@vger.kernel.org> # 3.3.x The tag has the meaning of: diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt index 76e61c883347..b2f60ca8b60c 100644 --- a/Documentation/virtual/kvm/devices/arm-vgic.txt +++ b/Documentation/virtual/kvm/devices/arm-vgic.txt @@ -83,6 +83,12 @@ Groups: Bits for undefined preemption levels are RAZ/WI. + For historical reasons and to provide ABI compatibility with userspace we + export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask + field in the lower 5 bits of a word, meaning that userspace must always + use the lower 5 bits to communicate with the KVM device and must shift the + value left by 3 places to obtain the actual priority mask level. + Limitations: - Priorities are not implemented, and registers are RAZ/WI - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2. 
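The GICC_PMR note above is easy to misread from userspace, so here is a
small sketch of the conversion it describes (the helper names are
illustrative): the KVM device ABI carries the mask in the lower 5 bits of
the word, in GICH_VMCR.VMPriMask format, while the architectural priority
mask level is an 8-bit value:

/* KVM device ABI value (lower 5 bits) -> architectural priority mask */
static inline unsigned int vgic_abi_to_pmr(unsigned int v)
{
        return (v & 0x1f) << 3;
}

/* architectural priority mask -> KVM device ABI value */
static inline unsigned int pmr_to_vgic_abi(unsigned int pmr)
{
        return (pmr >> 3) & 0x1f;
}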
diff --git a/MAINTAINERS b/MAINTAINERS index 5397f54af5fc..8bc85dc8a71a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4124,14 +4124,13 @@ F: drivers/block/drbd/ F: lib/lru_cache.c F: Documentation/blockdev/drbd/ -DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS +DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git S: Supported F: Documentation/kobject.txt F: drivers/base/ F: fs/debugfs/ -F: fs/kernfs/ F: fs/sysfs/ F: include/linux/debugfs.h F: include/linux/kobj* @@ -7216,6 +7215,14 @@ F: arch/mips/include/uapi/asm/kvm* F: arch/mips/include/asm/kvm* F: arch/mips/kvm/ +KERNFS +M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +M: Tejun Heo <tj@kernel.org> +T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git +S: Supported +F: include/linux/kernfs.h +F: fs/kernfs/ + KEXEC M: Eric Biederman <ebiederm@xmission.com> W: http://kernel.org/pub/linux/utils/kernel/kexec/ @@ -13311,7 +13318,7 @@ F: drivers/virtio/ F: tools/virtio/ F: drivers/net/virtio_net.c F: drivers/block/virtio_blk.c -F: include/linux/virtio_*.h +F: include/linux/virtio*.h F: include/uapi/linux/virtio_*.h F: drivers/crypto/virtio/ @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 11 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = Fearless Coyote # *DOCUMENTATION* diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 0b961093ca5c..6d76e528ab8f 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) /* copy relevant bits of struct timex. */ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - - offsetof(struct timex32, time))) + offsetof(struct timex32, tick))) return -EFAULT; ret = do_adjtimex(&txc); diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 96dba7cd8be7..314eb6abe1ff 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void) if (__hyp_get_vectors() == hyp_default_vectors) cpu_init_hyp_mode(NULL); } + + if (vgic_present) + kvm_vgic_init_cpu_hardware(); } static void cpu_hyp_reset(void) diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 962616fd4ddd..582a972371cf 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) phys_addr_t addr = start, end = start + size; phys_addr_t next; + assert_spin_locked(&kvm->mmu_lock); pgd = kvm->arch.pgd + stage2_pgd_index(addr); do { next = stage2_pgd_addr_end(addr, end); if (!stage2_pgd_none(*pgd)) unmap_stage2_puds(kvm, pgd, addr, next); + /* + * If the range is too large, release the kvm->mmu_lock + * to prevent starvation and lockup detector warnings. 
+ */ + if (next != end) + cond_resched_lock(&kvm->mmu_lock); } while (pgd++, addr = next, addr != end); } @@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm) int idx; idx = srcu_read_lock(&kvm->srcu); + down_read(¤t->mm->mmap_sem); spin_lock(&kvm->mmu_lock); slots = kvm_memslots(kvm); @@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm) stage2_unmap_memslot(kvm, memslot); spin_unlock(&kvm->mmu_lock); + up_read(¤t->mm->mmap_sem); srcu_read_unlock(&kvm->srcu, idx); } @@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm) if (kvm->arch.pgd == NULL) return; + spin_lock(&kvm->mmu_lock); unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); + spin_unlock(&kvm->mmu_lock); + /* Free the HW pgd, one page at a time */ free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE); kvm->arch.pgd = NULL; @@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, (KVM_PHYS_SIZE >> PAGE_SHIFT)) return -EFAULT; + down_read(¤t->mm->mmap_sem); /* * A memory region could potentially cover multiple VMAs, and any holes * between them, so iterate over all of them to find out if we can map @@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, pa += vm_start - vma->vm_start; /* IO region dirty page logging not allowed */ - if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) - return -EINVAL; + if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { + ret = -EINVAL; + goto out; + } ret = kvm_phys_addr_ioremap(kvm, gpa, pa, vm_end - vm_start, @@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, } while (hva < reg_end); if (change == KVM_MR_FLAGS_ONLY) - return ret; + goto out; spin_lock(&kvm->mmu_lock); if (ret) @@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, else stage2_flush_memslot(kvm, memslot); spin_unlock(&kvm->mmu_lock); +out: + up_read(¤t->mm->mmap_sem); return ret; } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 63eabb06f9f1..475811f5383a 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); } +/* + * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems + * that the intention is to allow exporting memory allocated via the + * coherent DMA APIs through the dma_buf API, which only accepts a + * scattertable. This presents a couple of problems: + * 1. Not all memory allocated via the coherent DMA APIs is backed by + * a struct page + * 2. Passing coherent DMA memory into the streaming APIs is not allowed + * as we will try to flush the memory through a different alias to that + * actually being used (and the flushes are redundant.) 
+ */ int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t handle, size_t size, unsigned long attrs) { - struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); + unsigned long pfn = dma_to_pfn(dev, handle); + struct page *page; int ret; + /* If the PFN is not valid, we do not have a struct page */ + if (!pfn_valid(pfn)) + return -ENXIO; + + page = pfn_to_page(pfn); + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); if (unlikely(ret)) return ret; diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 3b5c7aaf9c76..33a45bd96860 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val) */ static inline bool security_extensions_enabled(void) { - return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4); + /* Check CPUID Identification Scheme before ID_PFR1 read */ + if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) + return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4); + return 0; } static unsigned long __init setup_vectors_base(void) diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index b6dc9d838a9a..ad1f4e6a9e33 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs) #endif if (p) { - if (cur) { + if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) { + /* + * Probe hit but conditional execution check failed, + * so just skip the instruction and continue as if + * nothing had happened. + * In this case, we can skip recursing check too. + */ + singlestep_skip(p, regs); + } else if (cur) { /* Kprobe is pending, so we're recursing. */ switch (kcb->kprobe_status) { case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: + case KPROBE_HIT_SS: /* A pre- or post-handler probe got us here. */ kprobes_inc_nmissed_count(p); save_previous_kprobe(kcb); @@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs) singlestep(p, regs, kcb); restore_previous_kprobe(kcb); break; + case KPROBE_REENTER: + /* A nested probe was hit in FIQ, it is a BUG */ + pr_warn("Unrecoverable kprobe detected at %p.\n", + p->addr); + /* fall through */ default: /* impossible cases */ BUG(); } - } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) { + } else { /* Probe hit and conditional execution check ok. */ set_current_kprobe(p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; @@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs) } reset_current_kprobe(); } - } else { - /* - * Probe hit but conditional execution check failed, - * so just skip the instruction and continue as if - * nothing had happened. - */ - singlestep_skip(p, regs); } } else if (cur) { /* We probably hit a jprobe. Call its break handler. */ @@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) struct hlist_node *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; + kprobe_opcode_t *correct_ret_addr = NULL; INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); @@ -456,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) /* another task is sharing our hash bucket */ continue; + orig_ret_address = (unsigned long)ri->ret_addr; + + if (orig_ret_address != trampoline_address) + /* + * This is the real return address. 
Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_assert(ri, orig_ret_address, trampoline_address); + + correct_ret_addr = ri->ret_addr; + hlist_for_each_entry_safe(ri, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + orig_ret_address = (unsigned long)ri->ret_addr; if (ri->rp && ri->rp->handler) { __this_cpu_write(current_kprobe, &ri->rp->kp); get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; + ri->ret_addr = correct_ret_addr; ri->rp->handler(ri, regs); __this_cpu_write(current_kprobe, NULL); } - orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri, &empty_rp); if (orig_ret_address != trampoline_address) @@ -475,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) break; } - kretprobe_assert(ri, orig_ret_address, trampoline_address); kretprobe_hash_unlock(current, &flags); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c index c893726aa52d..1c98a87786ca 100644 --- a/arch/arm/probes/kprobes/test-core.c +++ b/arch/arm/probes/kprobes/test-core.c @@ -977,7 +977,10 @@ static void coverage_end(void) void __naked __kprobes_test_case_start(void) { __asm__ __volatile__ ( - "stmdb sp!, {r4-r11} \n\t" + "mov r2, sp \n\t" + "bic r3, r2, #7 \n\t" + "mov sp, r3 \n\t" + "stmdb sp!, {r2-r11} \n\t" "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" "bic r0, lr, #1 @ r0 = inline data \n\t" "mov r1, sp \n\t" @@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void) "movne pc, r0 \n\t" "mov r0, r4 \n\t" "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" - "ldmia sp!, {r4-r11} \n\t" + "ldmia sp!, {r2-r11} \n\t" + "mov sp, r2 \n\t" "mov pc, r0 \n\t" ); } @@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void) "bxne r0 \n\t" "mov r0, r4 \n\t" "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" - "ldmia sp!, {r4-r11} \n\t" + "ldmia sp!, {r2-r11} \n\t" + "mov sp, r2 \n\t" "bx r0 \n\t" ); } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 4bf899fb451b..1b35b8bddbfb 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -42,7 +42,20 @@ #include <asm/pgtable.h> #include <asm/tlbflush.h> -static const char *fault_name(unsigned int esr); +struct fault_info { + int (*fn)(unsigned long addr, unsigned int esr, + struct pt_regs *regs); + int sig; + int code; + const char *name; +}; + +static const struct fault_info fault_info[]; + +static inline const struct fault_info *esr_to_fault_info(unsigned int esr) +{ + return fault_info + (esr & 63); +} #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) @@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, struct pt_regs *regs) { struct siginfo si; + const struct fault_info *inf; if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) { + inf = esr_to_fault_info(esr); pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", - tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, + tsk->comm, task_pid_nr(tsk), inf->name, sig, addr, esr); show_pte(tsk->mm, addr); show_regs(regs); @@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re { struct task_struct *tsk = current; struct mm_struct *mm = tsk->active_mm; + const struct fault_info *inf; /* * If we are in kernel mode at this point, we have no context to 
* handle this fault with. */ - if (user_mode(regs)) - __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs); - else + if (user_mode(regs)) { + inf = esr_to_fault_info(esr); + __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs); + } else __do_kernel_fault(mm, addr, esr, regs); } @@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) return 1; } -static const struct fault_info { - int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); - int sig; - int code; - const char *name; -} fault_info[] = { +static const struct fault_info fault_info[] = { { do_bad, SIGBUS, 0, "ttbr address size fault" }, { do_bad, SIGBUS, 0, "level 1 address size fault" }, { do_bad, SIGBUS, 0, "level 2 address size fault" }, @@ -560,19 +572,13 @@ static const struct fault_info { { do_bad, SIGBUS, 0, "unknown 63" }, }; -static const char *fault_name(unsigned int esr) -{ - const struct fault_info *inf = fault_info + (esr & 63); - return inf->name; -} - /* * Dispatch a data abort to the relevant handler. */ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - const struct fault_info *inf = fault_info + (esr & 63); + const struct fault_info *inf = esr_to_fault_info(esr); struct siginfo info; if (!inf->fn(addr, esr, regs)) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index e25584d72396..7514a000e361 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt) hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); } else if (ps == PUD_SIZE) { hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); - } else if (ps == (PAGE_SIZE * CONT_PTES)) { - hugetlb_add_hstate(CONT_PTE_SHIFT); - } else if (ps == (PMD_SIZE * CONT_PMDS)) { - hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT); } else { hugetlb_bad_size(); pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); @@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt) return 1; } __setup("hugepagesz=", setup_hugepagesz); - -#ifdef CONFIG_ARM64_64K_PAGES -static __init int add_default_hugepagesz(void) -{ - if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) - hugetlb_add_hstate(CONT_PTE_SHIFT); - return 0; -} -arch_initcall(add_default_hugepagesz); -#endif diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..a2c139808cfe --- /dev/null +++ b/arch/ia64/include/asm/asm-prototypes.h @@ -0,0 +1,29 @@ +#ifndef _ASM_IA64_ASM_PROTOTYPES_H +#define _ASM_IA64_ASM_PROTOTYPES_H + +#include <asm/cacheflush.h> +#include <asm/checksum.h> +#include <asm/esi.h> +#include <asm/ftrace.h> +#include <asm/page.h> +#include <asm/pal.h> +#include <asm/string.h> +#include <asm/uaccess.h> +#include <asm/unwind.h> +#include <asm/xor.h> + +extern const char ia64_ivt[]; + +signed int __divsi3(signed int, unsigned int); +signed int __modsi3(signed int, unsigned int); + +signed long long __divdi3(signed long long, unsigned long long); +signed long long __moddi3(signed long long, unsigned long long); + +unsigned int __udivsi3(unsigned int, unsigned int); +unsigned int __umodsi3(unsigned int, unsigned int); + +unsigned long long __udivdi3(unsigned long long, unsigned long long); +unsigned long long __umoddi3(unsigned long long, unsigned long long); + +#endif /* _ASM_IA64_ASM_PROTOTYPES_H */ diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile index 1f3d3877618f..0a40b14407b1 100644 --- 
a/arch/ia64/lib/Makefile +++ b/arch/ia64/lib/Makefile @@ -24,25 +24,25 @@ AFLAGS___modsi3.o = -DMODULO AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO $(obj)/__divdi3.o: $(src)/idiv64.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__udivdi3.o: $(src)/idiv64.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__moddi3.o: $(src)/idiv64.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__umoddi3.o: $(src)/idiv64.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__divsi3.o: $(src)/idiv32.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__udivsi3.o: $(src)/idiv32.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__modsi3.o: $(src)/idiv32.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__umodsi3.o: $(src)/idiv32.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 273e61225c27..07238b39638c 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count); #define strlen_user(str) strnlen_user(str, 32767) -extern unsigned long __must_check __copy_user_zeroing(void *to, - const void __user *from, - unsigned long n); +extern unsigned long raw_copy_from_user(void *to, const void __user *from, + unsigned long n); static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { + unsigned long res = n; if (likely(access_ok(VERIFY_READ, from, n))) - return __copy_user_zeroing(to, from, n); - memset(to, 0, n); - return n; + res = raw_copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } -#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) +#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n) #define __copy_from_user_inatomic __copy_from_user extern unsigned long __must_check __copy_user(void __user *to, diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index b3ebfe9c8e88..2792fc621088 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -29,7 +29,6 @@ COPY \ "1:\n" \ " .section .fixup,\"ax\"\n" \ - " MOV D1Ar1,#0\n" \ FIXUP \ " MOVT D1Ar1,#HI(1b)\n" \ " JUMP D1Ar1,#LO(1b)\n" \ @@ -260,27 +259,31 @@ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "22:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ - "SUB %3, %3, #32\n" \ "23:\n" \ - "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "SUB %3, %3, #32\n" \ "24:\n" \ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "25:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "26:\n" \ "SUB %3, %3, #32\n" \ "DCACHE [%1+#-64], D0Ar6\n" \ "BR $Lloop"id"\n" \ \ "MOV RAPF, %1\n" \ - "25:\n" \ + "27:\n" \ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "26:\n" \ + "28:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "29:\n" \ "SUB %3, %3, #32\n" \ - "27:\n" \ + "30:\n" \ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "28:\n" \ + "31:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "32:\n" \ "SUB %0, %0, #8\n" \ - "29:\n" \ + "33:\n" \ "SETL [%0++], D0.7, D1.7\n" \ "SUB %3, %3, #32\n" \ "1:" \ @@ -312,11 +315,15 @@ " .long 26b,3b\n" \ " .long 27b,3b\n" \ " .long 28b,3b\n" \ - " .long 29b,4b\n" \ + " .long 29b,3b\n" \ + " .long 30b,3b\n" \ + " .long 31b,3b\n" \ + " .long 32b,3b\n" \ + " .long 33b,4b\n" \ 
" .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ - : "D1Ar1", "D0Ar2", "memory") + : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * @@ -342,7 +349,7 @@ #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ - "AND D0Ar2, D0Ar2, #0x7\n" \ + "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ @@ -403,47 +410,55 @@ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "22:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ - "SUB %3, %3, #16\n" \ "23:\n" \ - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "24:\n" \ - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "SUB %3, %3, #16\n" \ - "25:\n" \ + "24:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "26:\n" \ + "25:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "26:\n" \ "SUB %3, %3, #16\n" \ "27:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "28:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "29:\n" \ + "SUB %3, %3, #16\n" \ + "30:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "31:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "32:\n" \ "SUB %3, %3, #16\n" \ "DCACHE [%1+#-64], D0Ar6\n" \ "BR $Lloop"id"\n" \ \ "MOV RAPF, %1\n" \ - "29:\n" \ + "33:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "30:\n" \ + "34:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "35:\n" \ "SUB %3, %3, #16\n" \ - "31:\n" \ + "36:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "32:\n" \ + "37:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "38:\n" \ "SUB %3, %3, #16\n" \ - "33:\n" \ + "39:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "34:\n" \ + "40:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "41:\n" \ "SUB %3, %3, #16\n" \ - "35:\n" \ + "42:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "36:\n" \ + "43:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "44:\n" \ "SUB %0, %0, #4\n" \ - "37:\n" \ + "45:\n" \ "SETD [%0++], D0.7\n" \ "SUB %3, %3, #16\n" \ "1:" \ @@ -483,11 +498,19 @@ " .long 34b,3b\n" \ " .long 35b,3b\n" \ " .long 36b,3b\n" \ - " .long 37b,4b\n" \ + " .long 37b,3b\n" \ + " .long 38b,3b\n" \ + " .long 39b,3b\n" \ + " .long 40b,3b\n" \ + " .long 41b,3b\n" \ + " .long 42b,3b\n" \ + " .long 43b,3b\n" \ + " .long 44b,3b\n" \ + " .long 45b,4b\n" \ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ - : "D1Ar1", "D0Ar2", "memory") + : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * @@ -513,7 +536,7 @@ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ - "AND D0Ar2, D0Ar2, #0x7\n" \ + "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ @@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, if ((unsigned long) src & 1) { __asm_copy_to_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } if ((unsigned long) dst & 1) { /* Worst case - byte copy */ while (n > 0) { __asm_copy_to_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_to_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } if ((unsigned long) dst & 2) { /* Second worst case - word copy */ while (n >= 2) { __asm_copy_to_user_2(dst, src, retn); n 
-= 2; + if (retn) + return retn + n; } } @@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 8) { __asm_copy_to_user_8x64(dst, src, retn); n -= 8; + if (retn) + return retn + n; } } if (n >= RAPF_MIN_BUF_SIZE) { @@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 8) { __asm_copy_to_user_8x64(dst, src, retn); n -= 8; + if (retn) + return retn + n; } } #endif @@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 16) { __asm_copy_to_user_16(dst, src, retn); n -= 16; + if (retn) + return retn + n; } while (n >= 4) { __asm_copy_to_user_4(dst, src, retn); n -= 4; + if (retn) + return retn + n; } switch (n) { @@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, break; } + /* + * If we get here, retn correctly reflects the number of failing + * bytes. + */ return retn; } EXPORT_SYMBOL(__copy_user); @@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user); __asm_copy_user_cont(to, from, ret, \ " GETB D1Ar1,[%1++]\n" \ "2: SETB [%0++],D1Ar1\n", \ - "3: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ + "3: ADD %2,%2,#1\n", \ " .long 2b,3b\n") #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " GETW D1Ar1,[%1++]\n" \ "2: SETW [%0++],D1Ar1\n" COPY, \ - "3: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ + "3: ADD %2,%2,#2\n" FIXUP, \ " .long 2b,3b\n" TENTRY) #define __asm_copy_from_user_2(to, from, ret) \ @@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user); __asm_copy_from_user_2x_cont(to, from, ret, \ " GETB D1Ar1,[%1++]\n" \ "4: SETB [%0++],D1Ar1\n", \ - "5: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ + "5: ADD %2,%2,#1\n", \ " .long 4b,5b\n") #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " GETD D1Ar1,[%1++]\n" \ "2: SETD [%0++],D1Ar1\n" COPY, \ - "3: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ + "3: ADD %2,%2,#4\n" FIXUP, \ " .long 2b,3b\n" TENTRY) #define __asm_copy_from_user_4(to, from, ret) \ __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") -#define __asm_copy_from_user_5(to, from, ret) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "4: SETB [%0++],D1Ar1\n", \ - "5: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 4b,5b\n") - -#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "4: SETW [%0++],D1Ar1\n" COPY, \ - "5: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 4b,5b\n" TENTRY) - -#define __asm_copy_from_user_6(to, from, ret) \ - __asm_copy_from_user_6x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_7(to, from, ret) \ - __asm_copy_from_user_6x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "6: SETB [%0++],D1Ar1\n", \ - "7: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 6b,7b\n") - -#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "4: SETD [%0++],D1Ar1\n" COPY, \ - "5: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 4b,5b\n" TENTRY) - -#define __asm_copy_from_user_8(to, from, ret) \ - __asm_copy_from_user_8x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_9(to, from, ret) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "6: SETB [%0++],D1Ar1\n", \ - "7: ADD 
%2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 6b,7b\n") - -#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "6: SETW [%0++],D1Ar1\n" COPY, \ - "7: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 6b,7b\n" TENTRY) - -#define __asm_copy_from_user_10(to, from, ret) \ - __asm_copy_from_user_10x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_11(to, from, ret) \ - __asm_copy_from_user_10x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "8: SETB [%0++],D1Ar1\n", \ - "9: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 8b,9b\n") - -#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "6: SETD [%0++],D1Ar1\n" COPY, \ - "7: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 6b,7b\n" TENTRY) - -#define __asm_copy_from_user_12(to, from, ret) \ - __asm_copy_from_user_12x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_13(to, from, ret) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "8: SETB [%0++],D1Ar1\n", \ - "9: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 8b,9b\n") - -#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "8: SETW [%0++],D1Ar1\n" COPY, \ - "9: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 8b,9b\n" TENTRY) - -#define __asm_copy_from_user_14(to, from, ret) \ - __asm_copy_from_user_14x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_15(to, from, ret) \ - __asm_copy_from_user_14x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "10: SETB [%0++],D1Ar1\n", \ - "11: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 10b,11b\n") - -#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "8: SETD [%0++],D1Ar1\n" COPY, \ - "9: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 8b,9b\n" TENTRY) - -#define __asm_copy_from_user_16(to, from, ret) \ - __asm_copy_from_user_16x_cont(to, from, ret, "", "", "") - #define __asm_copy_from_user_8x64(to, from, ret) \ asm volatile ( \ " GETL D0Ar2,D1Ar1,[%1++]\n" \ "2: SETL [%0++],D0Ar2,D1Ar1\n" \ "1:\n" \ " .section .fixup,\"ax\"\n" \ - " MOV D1Ar1,#0\n" \ - " MOV D0Ar2,#0\n" \ "3: ADD %2,%2,#8\n" \ - " SETL [%0++],D0Ar2,D1Ar1\n" \ " MOVT D0Ar2,#HI(1b)\n" \ " JUMP D0Ar2,#LO(1b)\n" \ " .previous\n" \ @@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user); * * Rationale: * A fault occurs while reading from user buffer, which is the - * source. Since the fault is at a single address, we only - * need to rewind by 8 bytes. + * source. * Since we don't write to kernel buffer until we read first, * the kernel buffer is at the right state and needn't be - * corrected. + * corrected, but the source must be rewound to the beginning of + * the block, which is LSM_STEP*8 bytes. + * LSM_STEP is bits 10:8 in TXSTATUS which is already read + * and stored in D0Ar2 + * + * NOTE: If a fault occurs at the last operation in M{G,S}ETL + * LSM_STEP will be 0. ie: we do 4 writes in our case, if + * a fault happens at the 4th write, LSM_STEP will be 0 + * instead of 4. The code copes with that. 
*/ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ - "SUB %1, %1, #8\n") + "LSR D0Ar2, D0Ar2, #5\n" \ + "ANDS D0Ar2, D0Ar2, #0x38\n" \ + "ADDZ D0Ar2, D0Ar2, #32\n" \ + "SUB %1, %1, D0Ar2\n") /* rewind 'from' pointer when a fault occurs * * Rationale: * A fault occurs while reading from user buffer, which is the - * source. Since the fault is at a single address, we only - * need to rewind by 4 bytes. + * source. * Since we don't write to kernel buffer until we read first, * the kernel buffer is at the right state and needn't be - * corrected. + * corrected, but the source must be rewound to the beginning of + * the block, which is LSM_STEP*4 bytes. + * LSM_STEP is bits 10:8 in TXSTATUS which is already read + * and stored in D0Ar2 + * + * NOTE: If a fault occurs at the last operation in M{G,S}ETL + * LSM_STEP will be 0. ie: we do 4 writes in our case, if + * a fault happens at the 4th write, LSM_STEP will be 0 + * instead of 4. The code copes with that. */ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ - "SUB %1, %1, #4\n") + "LSR D0Ar2, D0Ar2, #6\n" \ + "ANDS D0Ar2, D0Ar2, #0x1c\n" \ + "ADDZ D0Ar2, D0Ar2, #16\n" \ + "SUB %1, %1, D0Ar2\n") -/* Copy from user to kernel, zeroing the bytes that were inaccessible in - userland. The return-value is the number of bytes that were - inaccessible. */ -unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, - unsigned long n) +/* + * Copy from user to kernel. The return-value is the number of bytes that were + * inaccessible. + */ +unsigned long raw_copy_from_user(void *pdst, const void __user *psrc, + unsigned long n) { register char *dst asm ("A0.2") = pdst; register const char __user *src asm ("A1.2") = psrc; @@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, if ((unsigned long) src & 1) { __asm_copy_from_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } if ((unsigned long) dst & 1) { /* Worst case - byte copy */ @@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_1(dst, src, retn); n--; if (retn) - goto copy_exception_bytes; + return retn + n; } } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_from_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } if ((unsigned long) dst & 2) { /* Second worst case - word copy */ @@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_2(dst, src, retn); n -= 2; if (retn) - goto copy_exception_bytes; + return retn + n; } } - /* We only need one check after the unalignment-adjustments, - because if both adjustments were done, either both or - neither reference had an exception. 
*/ - if (retn != 0) - goto copy_exception_bytes; - #ifdef USE_RAPF /* 64 bit copy loop */ if (!(((unsigned long) src | (unsigned long) dst) & 7)) { @@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_8x64(dst, src, retn); n -= 8; if (retn) - goto copy_exception_bytes; + return retn + n; } } @@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_8x64(dst, src, retn); n -= 8; if (retn) - goto copy_exception_bytes; + return retn + n; } } #endif @@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, n -= 4; if (retn) - goto copy_exception_bytes; + return retn + n; } /* If we get here, there were no memory read faults. */ @@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, /* If we get here, retn correctly reflects the number of failing bytes. */ return retn; - - copy_exception_bytes: - /* We already have "retn" bytes cleared, and need to clear the - remaining "n" bytes. A non-optimized simple byte-for-byte in-line - memset is preferred here, since this isn't speed-critical code and - we'd rather have this a leaf-function than calling memset. */ - { - char *endp; - for (endp = dst + n; dst < endp; dst++) - *dst = 0; - } - - return retn + n; } -EXPORT_SYMBOL(__copy_user_zeroing); +EXPORT_SYMBOL(raw_copy_from_user); #define __asm_clear_8x64(to, ret) \ asm volatile ( \ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a008a9f03072..e0bb576410bb 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6 select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA select GENERIC_CSUM - select MIPS_O32_FP64_SUPPORT if MIPS32_O32 + select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32 select HAVE_KVM help Choose this option to build a kernel for release 6 or later of the diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index f94455f964ec..a2813fe381cf 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -21,6 +21,7 @@ #include <asm/cpu-features.h> #include <asm/fpu_emulator.h> #include <asm/hazards.h> +#include <asm/ptrace.h> #include <asm/processor.h> #include <asm/current.h> #include <asm/msa.h> diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index 956db6e201d1..ddd1c918103b 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -18,9 +18,24 @@ #include <irq.h> #define IRQ_STACK_SIZE THREAD_SIZE +#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long)) extern void *irq_stack[NR_CPUS]; +/* + * The highest address on the IRQ stack contains a dummy frame put down in + * genex.S (handle_int & except_vec_vi_handler) which is structured as follows: + * + * top ------------ + * | task sp | <- irq_stack[cpu] + IRQ_STACK_START + * ------------ + * | | <- First frame of IRQ context + * ------------ + * + * task sp holds a copy of the task stack pointer where the struct pt_regs + * from exception entry can be found. 
+ */ + static inline bool on_irq_stack(int cpu, unsigned long sp) { unsigned long low = (unsigned long)irq_stack[cpu]; diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index f485afe51514..a8df44d60607 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " andi %[ticket], %[ticket], 0xffff \n" " bne %[ticket], %[my_ticket], 4f \n" " subu %[ticket], %[my_ticket], %[ticket] \n" - "2: \n" + "2: .insn \n" " .subsection 2 \n" "4: andi %[ticket], %[ticket], 0xffff \n" " sll %[ticket], 5 \n" @@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " sc %[ticket], %[ticket_ptr] \n" " beqz %[ticket], 1b \n" " li %[ticket], 1 \n" - "2: \n" + "2: .insn \n" " .subsection 2 \n" "3: b 2b \n" " li %[ticket], 0 \n" @@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) " .set reorder \n" __WEAK_LLSC_MB " li %2, 1 \n" - "2: \n" + "2: .insn \n" : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); @@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " li %2, 1 \n" - "2: \n" + "2: .insn \n" : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) : GCC_OFF_SMALL_ASM() (rw->lock) diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 3e940dbe0262..78faf4292e90 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -386,17 +386,18 @@ #define __NR_pkey_mprotect (__NR_Linux + 363) #define __NR_pkey_alloc (__NR_Linux + 364) #define __NR_pkey_free (__NR_Linux + 365) +#define __NR_statx (__NR_Linux + 366) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 365 +#define __NR_Linux_syscalls 366 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 365 +#define __NR_O32_Linux_syscalls 366 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -730,16 +731,17 @@ #define __NR_pkey_mprotect (__NR_Linux + 323) #define __NR_pkey_alloc (__NR_Linux + 324) #define __NR_pkey_free (__NR_Linux + 325) +#define __NR_statx (__NR_Linux + 326) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 325 +#define __NR_Linux_syscalls 326 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 325 +#define __NR_64_Linux_syscalls 326 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1077,15 +1079,16 @@ #define __NR_pkey_mprotect (__NR_Linux + 327) #define __NR_pkey_alloc (__NR_Linux + 328) #define __NR_pkey_free (__NR_Linux + 329) +#define __NR_statx (__NR_Linux + 330) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 329 +#define __NR_Linux_syscalls 330 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 329 +#define __NR_N32_Linux_syscalls 330 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index bb5c5d34ba81..a670c0c11875 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -102,6 +102,7 @@ void output_thread_info_defines(void) DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); + DEFINE(_IRQ_STACK_START, IRQ_STACK_START); BLANK(); } diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S 
index 59476a607add..a00e87b0256d 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg) END(mips_cps_get_bootcfg) LEAF(mips_cps_boot_vpes) - PTR_L ta2, COREBOOTCFG_VPEMASK(a0) + lw ta2, COREBOOTCFG_VPEMASK(a0) PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) #if defined(CONFIG_CPU_MIPSR6) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 07718bb5fc9d..12422fd4af23 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) } decode_configs(c); - c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; + c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; default: diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 7ec9612cb007..ae810da4d499 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp) beq t0, t1, 2f /* Switch to IRQ stack */ - li t1, _IRQ_STACK_SIZE + li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 + /* Save task's sp on IRQ stack so that unwinding can follow it */ + LONG_S s1, 0(sp) 2: jal plat_irq_dispatch @@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp) beq t0, t1, 2f /* Switch to IRQ stack */ - li t1, _IRQ_STACK_SIZE + li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 + /* Save task's sp on IRQ stack so that unwinding can follow it */ + LONG_S s1, 0(sp) 2: jalr v0 @@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER reserved reserved sti verbose /* others */ .align 5 - LEAF(handle_ri_rdhwr_vivt) + LEAF(handle_ri_rdhwr_tlbp) .set push .set noat .set noreorder @@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set pop bltz k1, handle_ri /* slow path */ /* fall thru */ - END(handle_ri_rdhwr_vivt) + END(handle_ri_rdhwr_tlbp) LEAF(handle_ri_rdhwr) .set push diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index fb6b6b650719..b68e10fc453d 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, unsigned long pc, unsigned long *ra) { + unsigned long low, high, irq_stack_high; struct mips_frame_info info; unsigned long size, ofs; + struct pt_regs *regs; int leaf; - extern void ret_from_irq(void); - extern void ret_from_exception(void); if (!stack_page) return 0; /* - * If we reached the bottom of interrupt context, - * return saved pc in pt_regs. + * IRQ stacks start at IRQ_STACK_START + * task stacks at THREAD_SIZE - 32 */ - if (pc == (unsigned long)ret_from_irq || - pc == (unsigned long)ret_from_exception) { - struct pt_regs *regs; - if (*sp >= stack_page && - *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { - regs = (struct pt_regs *)*sp; - pc = regs->cp0_epc; - if (!user_mode(regs) && __kernel_text_address(pc)) { - *sp = regs->regs[29]; - *ra = regs->regs[31]; - return pc; - } + low = stack_page; + if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) { + high = stack_page + IRQ_STACK_START; + irq_stack_high = high; + } else { + high = stack_page + THREAD_SIZE - 32; + irq_stack_high = 0; + } + + /* + * If we reached the top of the interrupt stack, start unwinding + * the interrupted task stack. 
+ */ + if (unlikely(*sp == irq_stack_high)) { + unsigned long task_sp = *(unsigned long *)*sp; + + /* + * Check that the pointer saved in the IRQ stack head points to + * something within the stack of the current task + */ + if (!object_is_on_stack((void *)task_sp)) + return 0; + + /* + * Follow pointer to tasks kernel stack frame where interrupted + * state was saved. + */ + regs = (struct pt_regs *)task_sp; + pc = regs->cp0_epc; + if (!user_mode(regs) && __kernel_text_address(pc)) { + *sp = regs->regs[29]; + *ra = regs->regs[31]; + return pc; } return 0; } @@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, if (leaf < 0) return 0; - if (*sp < stack_page || - *sp + info.frame_size > stack_page + THREAD_SIZE - 32) + if (*sp < low || *sp + info.frame_size > high) return 0; if (leaf) diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index c29d397eee86..80ed68b2c95e 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -600,3 +600,4 @@ EXPORT(sys_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 4365 */ + PTR sys_statx diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 0687f96ee912..49765b44aa9b 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -438,4 +438,5 @@ EXPORT(sys_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 5325 */ + PTR sys_statx .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 0331ba39a065..90bad2d1b2d3 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -433,4 +433,5 @@ EXPORT(sysn32_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free + PTR sys_statx /* 6330 */ .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 5a47042dd25f..2dd70bd104e1 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -588,4 +588,5 @@ EXPORT(sys32_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 4365 */ + PTR sys_statx .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index c7d17cfb32f6..b49e7bf9f950 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void); extern asmlinkage void handle_sys(void); extern asmlinkage void handle_bp(void); extern asmlinkage void handle_ri(void); -extern asmlinkage void handle_ri_rdhwr_vivt(void); +extern asmlinkage void handle_ri_rdhwr_tlbp(void); extern asmlinkage void handle_ri_rdhwr(void); extern asmlinkage void handle_cpu(void); extern asmlinkage void handle_ov(void); @@ -2408,9 +2408,18 @@ void __init trap_init(void) set_except_vector(EXCCODE_SYS, handle_sys); set_except_vector(EXCCODE_BP, handle_bp); - set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri : - (cpu_has_vtag_icache ? 
- handle_ri_rdhwr_vivt : handle_ri_rdhwr)); + + if (rdhwr_noopt) + set_except_vector(EXCCODE_RI, handle_ri); + else { + if (cpu_has_vtag_icache) + set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); + else if (current_cpu_type() == CPU_LOONGSON3) + set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); + else + set_except_vector(EXCCODE_RI, handle_ri_rdhwr); + } + set_except_vector(EXCCODE_CPU, handle_cpu); set_except_vector(EXCCODE_OV, handle_ov); set_except_vector(EXCCODE_TR, handle_tr); diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 3c3aa05891dd..95bec460b651 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -467,7 +467,7 @@ void __init ltq_soc_init(void) if (!np_xbar) panic("Failed to load xbar nodes from devicetree"); - if (of_address_to_resource(np_pmu, 0, &res_xbar)) + if (of_address_to_resource(np_xbar, 0, &res_xbar)) panic("Failed to get xbar resources"); if (!request_mem_region(res_xbar.start, resource_size(&res_xbar), res_xbar.name)) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index e7f798d55fbc..3fe99cb271a9 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1562,6 +1562,7 @@ static void probe_vcache(void) vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz; c->vcache.waybit = 0; + c->vcache.waysize = vcache_size / c->vcache.ways; pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); @@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void) /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */ scache_size *= 4; c->scache.waybit = 0; + c->scache.waysize = scache_size / c->scache.ways; pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); if (scache_size) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 9bfee8988eaf..4f642e07c2b1 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte, static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, struct uasm_label **l, unsigned int pte, - unsigned int ptr) + unsigned int ptr, + unsigned int flush) { #ifdef CONFIG_SMP UASM_i_SC(p, pte, 0, ptr); @@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, #else UASM_i_SW(p, pte, 0, ptr); #endif + if (cpu_has_ftlb && flush) { + BUG_ON(!cpu_has_tlbinv); + + UASM_i_MFC0(p, ptr, C0_ENTRYHI); + uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); + UASM_i_MTC0(p, ptr, C0_ENTRYHI); + build_tlb_write_entry(p, l, r, tlb_indexed); + + uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); + UASM_i_MTC0(p, ptr, C0_ENTRYHI); + build_huge_update_entries(p, pte, ptr); + build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0); + + return; + } + build_huge_update_entries(p, pte, ptr); build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); } @@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void) uasm_l_tlbl_goaround2(&l, p); } uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); - build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); #endif uasm_l_nopage_tlbl(&l, p); @@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void) build_tlb_probe_entry(&p); uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); + 
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); #endif uasm_l_nopage_tlbs(&l, p); @@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void) build_tlb_probe_entry(&p); uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0); #endif uasm_l_nopage_tlbm(&l, p); diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c index c4ffd43d3996..48ce701557a4 100644 --- a/arch/mips/ralink/rt3883.c +++ b/arch/mips/ralink/rt3883.c @@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; -static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; +static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) }; static struct rt2880_pmx_func pci_func[] = { FUNC("pci-dev", 0, 40, 32), FUNC("pci-host2", 1, 40, 32), @@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = { FUNC("pci-fnc", 3, 40, 32) }; static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; -static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; +static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) }; static struct rt2880_pmx_group rt3883_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index 411994551afc..f058e0c3e4d4 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len) } if (len & ~VMX_ALIGN_MASK) { + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); + disable_kernel_altivec(); pagefault_enable(); + preempt_enable(); } tail = len & VMX_ALIGN_MASK; diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index cbc7c42cdb74..ec7a8b099dd9 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs) nb = aligninfo[instr].len; flags = aligninfo[instr].flags; - /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ - if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { - nb = 8; - flags = LD+SW; - } else if (IS_XFORM(instruction) && - ((instruction >> 1) & 0x3ff) == 660) { - nb = 8; - flags = ST+SW; + /* + * Handle some cases which give overlaps in the DSISR values. 
+ */ + if (IS_XFORM(instruction)) { + switch (get_xop(instruction)) { + case 532: /* ldbrx */ + nb = 8; + flags = LD+SW; + break; + case 660: /* stdbrx */ + nb = 8; + flags = ST+SW; + break; + case 20: /* lwarx */ + case 84: /* ldarx */ + case 116: /* lharx */ + case 276: /* lqarx */ + return 0; /* not emulated ever */ + } } /* Byteswap little endian loads and stores */ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index ae179cb1bb3c..c119044cad0d 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -67,7 +67,7 @@ PPC64_CACHES: * flush all bytes from start through stop-1 inclusive */ -_GLOBAL(flush_icache_range) +_GLOBAL_TOC(flush_icache_range) BEGIN_FTR_SECTION PURGE_PREFETCHED_INS blr @@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range) * * flush all bytes from start to stop-1 inclusive */ -_GLOBAL(flush_dcache_range) +_GLOBAL_TOC(flush_dcache_range) /* * Flush the data cache to memory diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 9cfaa8b69b5f..f997154dfc41 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void) mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); } + /* + * Fixup HFSCR:TM based on CPU features. The bit is set by our + * early asm init because at that point we haven't updated our + * CPU features from firmware and device-tree. Here we have, + * so let's do it. + */ + if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP)) + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + /* Set IR and DR in PACA MSR */ get_paca()->kernel_msr = MSR_KERNEL; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 8c68145ba1bd..710e491206ed 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, /* start new resize */ resize = kzalloc(sizeof(*resize), GFP_KERNEL); + if (!resize) { + ret = -ENOMEM; + goto out; + } resize->order = shift; resize->kvm = kvm; INIT_WORK(&resize->work, resize_hpt_prepare_work); diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index cc332608e656..65bb8f33b399 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local) unsigned long psize = batch->psize; int ssize = batch->ssize; int i; + unsigned int use_local; + + use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && + mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use(); local_irq_save(flags); @@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local) } pte_iterate_hashed_end(); } - if (mmu_has_feature(MMU_FTR_TLBIEL) && - mmu_psize_defs[psize].tlbiel && local) { + if (use_local) { asm volatile("ptesync":::"memory"); for (i = 0; i < number; i++) { vpn = batch->vpn[i]; diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index d55c829a5944..ddbffb715b40 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -168,8 +168,7 @@ union page_table_entry { unsigned long z : 1; /* Zero Bit */ unsigned long i : 1; /* Page-Invalid Bit */ unsigned long p : 1; /* DAT-Protection Bit */ - unsigned long co : 1; /* Change-Recording Override */ - unsigned long : 8; + unsigned long : 9; }; }; @@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, return 
PGM_PAGE_TRANSLATION; if (pte.z) return PGM_TRANSLATION_SPEC; - if (pte.co && !edat1) - return PGM_TRANSLATION_SPEC; dat_protection |= pte.p; raddr.pfra = pte.pfra; real_address: @@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val); if (!rc && pte.i) rc = PGM_PAGE_TRANSLATION; - if (!rc && (pte.z || (pte.co && sg->edat_level < 1))) + if (!rc && pte.z) rc = PGM_TRANSLATION_SPEC; shadow_page: pte.p |= dat_protection; diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index f294dd42fc7d..5961b2d8398a 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -17,6 +17,7 @@ #define HPAGE_SHIFT 23 #define REAL_HPAGE_SHIFT 22 +#define HPAGE_2GB_SHIFT 31 #define HPAGE_256MB_SHIFT 28 #define HPAGE_64K_SHIFT 16 #define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT) @@ -27,7 +28,7 @@ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) -#define HUGE_MAX_HSTATE 3 +#define HUGE_MAX_HSTATE 4 #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 8a598528ec1f..6fbd931f0570 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -679,26 +679,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd) return pte_pfn(pte); } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline unsigned long pmd_dirty(pmd_t pmd) +#define __HAVE_ARCH_PMD_WRITE +static inline unsigned long pmd_write(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); - return pte_dirty(pte); + return pte_write(pte); } -static inline unsigned long pmd_young(pmd_t pmd) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline unsigned long pmd_dirty(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); - return pte_young(pte); + return pte_dirty(pte); } -static inline unsigned long pmd_write(pmd_t pmd) +static inline unsigned long pmd_young(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); - return pte_write(pte); + return pte_young(pte); } static inline unsigned long pmd_trans_huge(pmd_t pmd) diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h index 365d4cb267b4..dd27159819eb 100644 --- a/arch/sparc/include/asm/processor_32.h +++ b/arch/sparc/include/asm/processor_32.h @@ -18,12 +18,6 @@ #include <asm/signal.h> #include <asm/page.h> -/* - * The sparc has no problems with write protection - */ -#define wp_works_ok 1 -#define wp_works_ok__is_a_macro /* for versions in ksyms.c */ - /* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too... * That one page is used to protect kernel from intruders, so that * we can make our access_ok test faster diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h index 6448cfc8292f..b58ee9018433 100644 --- a/arch/sparc/include/asm/processor_64.h +++ b/arch/sparc/include/asm/processor_64.h @@ -18,10 +18,6 @@ #include <asm/ptrace.h> #include <asm/page.h> -/* The sparc has no problems with write protection */ -#define wp_works_ok 1 -#define wp_works_ok__is_a_macro /* for versions in ksyms.c */ - /* * User lives in his very own context, and cannot reference us. 
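The sparc64 pgtable_64.h hunk above moves pmd_write() out of the CONFIG_TRANSPARENT_HUGEPAGE block and defines __HAVE_ARCH_PMD_WRITE, so the helper stays available when THP is compiled out. All of these pmd predicates share one shape: a huge-page PMD carries PTE-format bits, so each helper reinterprets the PMD value as a PTE and delegates. A sketch of that shape with stand-in types; the bit layout here is made up, not sparc64's real encoding:

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;

#define FAKE_WRITE_BIT (1UL << 1)       /* illustrative, not sparc64's layout */

static pte_t __pte(unsigned long val) { pte_t p = { val }; return p; }
static bool pte_write(pte_t pte) { return pte.pte & FAKE_WRITE_BIT; }

/* PMD predicate implemented once, by reinterpretation and delegation. */
static bool pmd_write(pmd_t pmd)
{
        pte_t pte = __pte(pmd.pmd);
        return pte_write(pte);
}

int main(void)
{
        pmd_t pmd = { FAKE_WRITE_BIT };
        printf("writable: %d\n", pmd_write(pmd));       /* 1 */
        return 0;
}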
Note * that TASK_SIZE is a misnomer, it really gives maximum user virtual diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 6aa3da152c20..44101196d02b 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -96,6 +96,7 @@ sparc64_boot: andn %g1, PSTATE_AM, %g1 wrpr %g1, 0x0, %pstate ba,a,pt %xcc, 1f + nop .globl prom_finddev_name, prom_chosen_path, prom_root_node .globl prom_getprop_name, prom_mmu_name, prom_peer_name @@ -613,6 +614,7 @@ niagara_tlb_fixup: nop ba,a,pt %xcc, 80f + nop niagara4_patch: call niagara4_patch_copyops nop @@ -622,6 +624,7 @@ niagara4_patch: nop ba,a,pt %xcc, 80f + nop niagara2_patch: call niagara2_patch_copyops @@ -632,6 +635,7 @@ niagara2_patch: nop ba,a,pt %xcc, 80f + nop niagara_patch: call niagara_patch_copyops diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S index 34b4933900bf..9276d2f0dd86 100644 --- a/arch/sparc/kernel/misctrap.S +++ b/arch/sparc/kernel/misctrap.S @@ -82,6 +82,7 @@ do_stdfmna: call handle_stdfmna add %sp, PTREGS_OFF, %o0 ba,a,pt %xcc, rtrap + nop .size do_stdfmna,.-do_stdfmna .type breakpoint_trap,#function diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 216948ca4382..709a82ebd294 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -237,6 +237,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 bne,pt %xcc, user_rtt_fill_32bit wrpr %g1, %cwp ba,a,pt %xcc, user_rtt_fill_64bit + nop user_rtt_fill_fixup_dax: ba,pt %xcc, user_rtt_fill_fixup_common diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S index 4a73009f66a5..d7e540842809 100644 --- a/arch/sparc/kernel/spiterrs.S +++ b/arch/sparc/kernel/spiterrs.S @@ -86,6 +86,7 @@ __spitfire_cee_trap_continue: rd %pc, %g7 ba,a,pt %xcc, 2f + nop 1: ba,pt %xcc, etrap_irq rd %pc, %g7 diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S index 6179e19bc9b9..c19f352f46c7 100644 --- a/arch/sparc/kernel/sun4v_tlb_miss.S +++ b/arch/sparc/kernel/sun4v_tlb_miss.S @@ -352,6 +352,7 @@ sun4v_mna: call sun4v_do_mna add %sp, PTREGS_OFF, %o0 ba,a,pt %xcc, rtrap + nop /* Privileged Action. 
*/ sun4v_privact: diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S index 5604a2b051d4..364af3250646 100644 --- a/arch/sparc/kernel/urtt_fill.S +++ b/arch/sparc/kernel/urtt_fill.S @@ -92,6 +92,7 @@ user_rtt_fill_fixup_common: call sun4v_data_access_exception nop ba,a,pt %xcc, rtrap + nop 1: call spitfire_data_access_exception nop diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S index 855019a8590e..1ee173cc3c39 100644 --- a/arch/sparc/kernel/winfixup.S +++ b/arch/sparc/kernel/winfixup.S @@ -152,6 +152,8 @@ fill_fixup_dax: call sun4v_data_access_exception nop ba,a,pt %xcc, rtrap + nop 1: call spitfire_data_access_exception nop ba,a,pt %xcc, rtrap + nop diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S index c629dbd121b6..64dcd6cdb606 100644 --- a/arch/sparc/lib/NG2memcpy.S +++ b/arch/sparc/lib/NG2memcpy.S @@ -326,11 +326,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ blu 170f nop ba,a,pt %xcc, 180f + nop 4: /* 32 <= low bits < 48 */ blu 150f nop ba,a,pt %xcc, 160f + nop 5: /* 0 < low bits < 32 */ blu,a 6f cmp %g2, 8 @@ -338,6 +340,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ blu 130f nop ba,a,pt %xcc, 140f + nop 6: /* 0 < low bits < 16 */ bgeu 120f nop @@ -475,6 +478,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %o2, 85f sub %o0, %o1, GLOBAL_SPARE ba,a,pt %XCC, 90f + nop .align 64 75: /* 16 < len <= 64 */ diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 75bb93b1437f..78ea962edcbe 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -530,4 +530,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ bne,pt %icc, 1b EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) ba,a,pt %icc, .Lexit + nop .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/NG4memset.S b/arch/sparc/lib/NG4memset.S index 41da4bdd95cb..7c0c81f18837 100644 --- a/arch/sparc/lib/NG4memset.S +++ b/arch/sparc/lib/NG4memset.S @@ -102,4 +102,5 @@ NG4bzero: bne,pt %icc, 1b add %o0, 0x30, %o0 ba,a,pt %icc, .Lpostloop + nop .size NG4bzero,.-NG4bzero diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S index d88c4ed50a00..cd654a719b27 100644 --- a/arch/sparc/lib/NGmemcpy.S +++ b/arch/sparc/lib/NGmemcpy.S @@ -394,6 +394,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ brz,pt %i2, 85f sub %o0, %i1, %i3 ba,a,pt %XCC, 90f + nop .align 64 70: /* 16 < len <= 64 */ diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 323bc6b6e3ad..ee5273ad918d 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; switch (shift) { + case HPAGE_2GB_SHIFT: + hugepage_size = _PAGE_SZ2GB_4V; + pte_val(entry) |= _PAGE_PMD_HUGE; + break; case HPAGE_256MB_SHIFT: hugepage_size = _PAGE_SZ256MB_4V; pte_val(entry) |= _PAGE_PMD_HUGE; @@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry) unsigned int shift; switch (tte_szbits) { + case _PAGE_SZ2GB_4V: + shift = HPAGE_2GB_SHIFT; + break; case _PAGE_SZ256MB_4V: shift = HPAGE_256MB_SHIFT; break; @@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, if (!pmd) return NULL; - if (sz == PMD_SHIFT) + if (sz >= PMD_SIZE) pte = (pte_t *)pmd; else pte = pte_alloc_map(mm, pmd, addr); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index ccd455328989..0cda653ae007 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -337,6 +337,10 @@ static 
int __init setup_hugepagesz(char *string) hugepage_shift = ilog2(hugepage_size); switch (hugepage_shift) { + case HPAGE_2GB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_2GB; + hv_pgsz_idx = HV_PGSZ_IDX_2GB; + break; case HPAGE_256MB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_256MB; hv_pgsz_idx = HV_PGSZ_IDX_256MB; @@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr) if ((long)addr < 0L) { unsigned long pa = __pa(addr); - if ((addr >> max_phys_bits) != 0UL) + if ((pa >> max_phys_bits) != 0UL) return false; return pfn_valid(pa >> PAGE_SHIFT); diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index def82f6d626f..8e76ebba2986 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -54,6 +54,7 @@ enum mbus_module srmmu_modtype; static unsigned int hwbug_bitmask; int vac_cache_size; +EXPORT_SYMBOL(vac_cache_size); int vac_line_size; extern struct resource sparc_iomap; diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index afda3bbf7854..ee8066c3d96c 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, if (pte_val(*pte) & _PAGE_VALID) { bool exec = pte_exec(*pte); - tlb_batch_add_one(mm, vaddr, exec, false); + tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT); } pte++; vaddr += PAGE_SIZE; @@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, pte_t orig_pte = __pte(pmd_val(orig)); bool exec = pte_exec(orig_pte); - tlb_batch_add_one(mm, addr, exec, true); + tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT); tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec, - true); + REAL_HPAGE_SHIFT); } else { tlb_batch_pmd_scan(mm, addr, orig); } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 0a04811f06b7..bedf08b22a47 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb) spin_lock_irqsave(&mm->context.lock, flags); - if (tb->hugepage_shift < HPAGE_SHIFT) { + if (tb->hugepage_shift < REAL_HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) @@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, spin_lock_irqsave(&mm->context.lock, flags); - if (hugepage_shift < HPAGE_SHIFT) { + if (hugepage_shift < REAL_HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c index 7853b53959cd..3f9d1a83891a 100644 --- a/arch/x86/entry/vdso/vdso32-setup.c +++ b/arch/x86/entry/vdso/vdso32-setup.c @@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s) { vdso32_enabled = simple_strtoul(s, NULL, 0); - if (vdso32_enabled > 1) + if (vdso32_enabled > 1) { pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n"); + vdso32_enabled = 0; + } return 1; } @@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup); /* Register vsyscall32 into the ABI table */ #include <linux/sysctl.h> +static const int zero; +static const int one = 1; + static struct ctl_table abi_table2[] = { { .procname = "vsyscall32", .data = &vdso32_enabled, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_dointvec_minmax, + .extra1 = (int *)&zero, + .extra2 = (int *)&one, }, {} }; diff --git 
a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 81b321ace8e0..f924629836a8 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) cpuc->lbr_entries[i].to = msr_lastbranch.to; cpuc->lbr_entries[i].mispred = 0; cpuc->lbr_entries[i].predicted = 0; + cpuc->lbr_entries[i].in_tx = 0; + cpuc->lbr_entries[i].abort = 0; + cpuc->lbr_entries[i].cycles = 0; cpuc->lbr_entries[i].reserved = 0; } cpuc->lbr_stack.nr = i; diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 9d49c18b5ea9..3762536619f8 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -287,7 +287,7 @@ struct task_struct; #define ARCH_DLINFO_IA32 \ do { \ - if (vdso32_enabled) { \ + if (VDSO_CURRENT_BASE) { \ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ } \ diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c index f369cb8db0d5..badd2b31a560 100644 --- a/arch/x86/kernel/cpu/intel_rdt_schemata.c +++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c @@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, } out: - rdtgroup_kn_unlock(of->kn); for_each_enabled_rdt_resource(r) { kfree(r->tmp_cbms); r->tmp_cbms = NULL; } + rdtgroup_kn_unlock(of->kn); return ret ?: nbytes; } diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 396c042e9d0e..cc30a74e4adb 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where) task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, me->comm, me->pid, where, frame, regs->ip, regs->sp, regs->orig_ax); - print_vma_addr(" in ", regs->ip); + print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index ec1f756f9dc9..71beb28600d4 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from, if (from->si_signo == SIGSEGV) { if (from->si_code == SEGV_BNDERR) { - compat_uptr_t lower = (unsigned long)&to->si_lower; - compat_uptr_t upper = (unsigned long)&to->si_upper; + compat_uptr_t lower = (unsigned long)from->si_lower; + compat_uptr_t upper = (unsigned long)from->si_upper; put_user_ex(lower, &to->si_lower); put_user_ex(upper, &to->si_upper); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 948443e115c1..4e496379a871 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", tsk->comm, tsk->pid, str, regs->ip, regs->sp, error_code); - print_vma_addr(" in ", regs->ip); + print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } @@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code) pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx", tsk->comm, task_pid_nr(tsk), regs->ip, regs->sp, error_code); - print_vma_addr(" in ", regs->ip); + print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2ee00dbbbd51..259e9b28ccf8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8198,6 +8198,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) return 
nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); case EXIT_REASON_PREEMPTION_TIMER: return false; + case EXIT_REASON_PML_FULL: + /* We don't expose PML support to L1. */ + return false; default: return true; } @@ -10267,6 +10270,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } + if (enable_pml) { + /* + * Conceptually we want to copy the PML address and index from + * vmcs01 here, and then back to vmcs01 on nested vmexit. But, + * since we always flush the log on each vmexit, this happens + * to be equivalent to simply resetting the fields in vmcs02. + */ + ASSERT(vmx->pml_pg); + vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); + vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); + } + if (nested_cpu_has_ept(vmcs12)) { kvm_mmu_unload(vcpu); nested_ept_init_mmu_context(vcpu); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 22af912d66d2..889e7619a091 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -643,21 +643,40 @@ void __init init_mem_mapping(void) * devmem_is_allowed() checks to see if /dev/mem access to a certain address * is valid. The argument is a physical page number. * - * - * On x86, access has to be given to the first megabyte of ram because that area - * contains BIOS code and data regions used by X and dosemu and similar apps. - * Access has to be given to non-kernel-ram areas as well, these contain the PCI - * mmio resources as well as potential bios/acpi data regions. + * On x86, access has to be given to the first megabyte of RAM because that + * area traditionally contains BIOS code and data regions used by X, dosemu, + * and similar apps. Since they map the entire memory range, the whole range + * must be allowed (for mapping), but any areas that would otherwise be + * disallowed are flagged as being "zero filled" instead of rejected. + * Access has to be given to non-kernel-ram areas as well, these contain the + * PCI mmio resources as well as potential bios/acpi data regions. */ int devmem_is_allowed(unsigned long pagenr) { - if (pagenr < 256) - return 1; - if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) + if (page_is_ram(pagenr)) { + /* + * For disallowed memory regions in the low 1MB range, + * request that the page be shown as all zeros. + */ + if (pagenr < 256) + return 2; + + return 0; + } + + /* + * This must follow RAM test, since System RAM is considered a + * restricted resource under CONFIG_STRICT_IOMEM. + */ + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) { + /* Low 1MB bypasses iomem restrictions. */ + if (pagenr < 256) + return 1; + return 0; - if (!page_is_ram(pagenr)) - return 1; - return 0; + } + + return 1; } void free_init_pages(char *what, unsigned long begin, unsigned long end) diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 30031d5293c4..cdfe8c628959 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) return; } + /* No need to reserve regions that will never be freed. 
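The reworked devmem_is_allowed() above turns the old boolean into a tri-state: 0 rejects the page outright, 1 allows real access, and 2 allows the mapping but asks callers to present the page as zeros (disallowed System RAM below 1MB). A compressed sketch of the resulting decision table; page_is_ram() and iomem_is_exclusive() are stubbed here, and 256 is the 1MB boundary in 4KB pages:

#include <stdio.h>
#include <stdbool.h>

/* Stubs standing in for the kernel's RAM and exclusive-iomem tests. */
static bool page_is_ram(unsigned long pagenr)        { return pagenr < 1024; }
static bool iomem_is_exclusive(unsigned long pagenr) { return false; }

/* 0 = reject, 1 = allow, 2 = allow but show zeros. */
static int devmem_is_allowed(unsigned long pagenr)
{
        if (page_is_ram(pagenr))
                return pagenr < 256 ? 2 : 0;  /* low-1MB RAM reads as zeros */
        if (iomem_is_exclusive(pagenr))
                return pagenr < 256 ? 1 : 0;  /* low 1MB bypasses iomem limits */
        return 1;                             /* ordinary non-RAM mmio space */
}

int main(void)
{
        printf("%d %d %d\n", devmem_is_allowed(0),
               devmem_is_allowed(512), devmem_is_allowed(4096));  /* 2 0 1 */
        return 0;
}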
*/ + if (md.attribute & EFI_MEMORY_RUNTIME) + return; + size += addr % EFI_PAGE_SIZE; size = round_up(size, EFI_PAGE_SIZE); addr = round_down(addr, EFI_PAGE_SIZE); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 09af8ff18719..c974a1bbf4cb 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -171,7 +171,8 @@ void blk_mq_sched_put_request(struct request *rq) void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) { - struct elevator_queue *e = hctx->queue->elevator; + struct request_queue *q = hctx->queue; + struct elevator_queue *e = q->elevator; const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; bool did_work = false; LIST_HEAD(rq_list); @@ -203,10 +204,10 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) */ if (!list_empty(&rq_list)) { blk_mq_sched_mark_restart_hctx(hctx); - did_work = blk_mq_dispatch_rq_list(hctx, &rq_list); + did_work = blk_mq_dispatch_rq_list(q, &rq_list); } else if (!has_sched_dispatch) { blk_mq_flush_busy_ctxs(hctx, &rq_list); - blk_mq_dispatch_rq_list(hctx, &rq_list); + blk_mq_dispatch_rq_list(q, &rq_list); } /* @@ -222,7 +223,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) if (!rq) break; list_add(&rq->queuelist, &rq_list); - } while (blk_mq_dispatch_rq_list(hctx, &rq_list)); + } while (blk_mq_dispatch_rq_list(q, &rq_list)); } } @@ -317,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, return true; } -static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) +static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) { if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (blk_mq_hctx_has_pending(hctx)) + if (blk_mq_hctx_has_pending(hctx)) { blk_mq_run_hw_queue(hctx, true); + return true; + } } + return false; } -void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx) -{ - struct request_queue *q = hctx->queue; - unsigned int i; +/** + * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list + * @pos: loop cursor. + * @skip: the list element that will not be examined. Iteration starts at + * @skip->next. + * @head: head of the list to examine. This list must have at least one + * element, namely @skip. + * @member: name of the list_head structure within typeof(*pos). + */ +#define list_for_each_entry_rcu_rr(pos, skip, head, member) \ + for ((pos) = (skip); \ + (pos = (pos)->member.next != (head) ? list_entry_rcu( \ + (pos)->member.next, typeof(*pos), member) : \ + list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \ + (pos) != (skip); ) - if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { - if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_sched_restart_hctx(hctx); +/* + * Called after a driver tag has been freed to check whether a hctx needs to + * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware + * queues in a round-robin fashion if the tag set of @hctx is shared with other + * hardware queues. 
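The list_for_each_entry_rcu_rr() macro above starts at skip->next and walks the circular list until it comes back around to skip, which is what lets the restart logic below probe sibling queues in round-robin order instead of always favouring the first list entry. A simplified, self-contained version of that start-after-skip traversal (no RCU and no head sentinel; the node type is a stand-in):

#include <stdio.h>

struct node { int id; struct node *next; };

/* Visit every other node exactly once, starting at skip->next and
   wrapping, so successive calls with a moving skip spread work evenly. */
static void visit_round_robin(const struct node *skip)
{
        for (const struct node *pos = skip->next; pos != skip; pos = pos->next)
                printf("visiting %d\n", pos->id);
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        c.next = &a;                    /* a -> b -> c -> a, circular */

        visit_round_robin(&b);          /* visits 3, then 1 */
        return 0;
}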
+ */ +void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx) +{ + struct blk_mq_tags *const tags = hctx->tags; + struct blk_mq_tag_set *const set = hctx->queue->tag_set; + struct request_queue *const queue = hctx->queue, *q; + struct blk_mq_hw_ctx *hctx2; + unsigned int i, j; + + if (set->flags & BLK_MQ_F_TAG_SHARED) { + rcu_read_lock(); + list_for_each_entry_rcu_rr(q, queue, &set->tag_list, + tag_set_list) { + queue_for_each_hw_ctx(q, hctx2, i) + if (hctx2->tags == tags && + blk_mq_sched_restart_hctx(hctx2)) + goto done; + } + j = hctx->queue_num + 1; + for (i = 0; i < queue->nr_hw_queues; i++, j++) { + if (j == queue->nr_hw_queues) + j = 0; + hctx2 = queue->queue_hw_ctx[j]; + if (hctx2->tags == tags && + blk_mq_sched_restart_hctx(hctx2)) + break; } +done: + rcu_read_unlock(); } else { blk_mq_sched_restart_hctx(hctx); } @@ -431,11 +475,67 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set, } } -int blk_mq_sched_setup(struct request_queue *q) +static int blk_mq_sched_alloc_tags(struct request_queue *q, + struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx) +{ + struct blk_mq_tag_set *set = q->tag_set; + int ret; + + hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, + set->reserved_tags); + if (!hctx->sched_tags) + return -ENOMEM; + + ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); + if (ret) + blk_mq_sched_free_tags(set, hctx, hctx_idx); + + return ret; +} + +static void blk_mq_sched_tags_teardown(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_hw_ctx *hctx; - int ret, i; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_sched_free_tags(set, hctx, i); +} + +int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx) +{ + struct elevator_queue *e = q->elevator; + + if (!e) + return 0; + + return blk_mq_sched_alloc_tags(q, hctx, hctx_idx); +} + +void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx) +{ + struct elevator_queue *e = q->elevator; + + if (!e) + return; + + blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx); +} + +int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + int ret; + + if (!e) { + q->elevator = NULL; + return 0; + } /* * Default to 256, since we don't split into sync/async like the @@ -443,49 +543,30 @@ int blk_mq_sched_setup(struct request_queue *q) */ q->nr_requests = 2 * BLKDEV_MAX_RQ; - /* - * We're switching to using an IO scheduler, so setup the hctx - * scheduler tags and switch the request map from the regular - * tags to scheduler tags. First allocate what we need, so we - * can safely fail and fallback, if needed. 
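blk_mq_init_sched() above follows the kernel's usual allocate-then-unwind shape: the per-hctx tag allocations come first, and any later failure, including one from the scheduler's own init hook, funnels into a single error label that tears down whatever was already set up and clears q->elevator. The same shape in miniature, with hypothetical resources:

#include <stdlib.h>

struct ctx { void *tags; void *sched; };

/* Two-stage init: every failure path unwinds through one label,
   so a partial setup can never leak. */
static int ctx_init(struct ctx *c)
{
        c->tags = malloc(64);
        if (!c->tags)
                return -1;

        c->sched = malloc(64);
        if (!c->sched)
                goto err_free_tags;

        return 0;

err_free_tags:
        free(c->tags);
        c->tags = NULL;
        return -1;
}

int main(void)
{
        struct ctx c;
        int ret = ctx_init(&c);

        if (!ret) {
                free(c.sched);
                free(c.tags);
        }
        return ret ? 1 : 0;
}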
- */ - ret = 0; queue_for_each_hw_ctx(q, hctx, i) { - hctx->sched_tags = blk_mq_alloc_rq_map(set, i, - q->nr_requests, set->reserved_tags); - if (!hctx->sched_tags) { - ret = -ENOMEM; - break; - } - ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests); + ret = blk_mq_sched_alloc_tags(q, hctx, i); if (ret) - break; + goto err; } - /* - * If we failed, free what we did allocate - */ - if (ret) { - queue_for_each_hw_ctx(q, hctx, i) { - if (!hctx->sched_tags) - continue; - blk_mq_sched_free_tags(set, hctx, i); - } - - return ret; - } + ret = e->ops.mq.init_sched(q, e); + if (ret) + goto err; return 0; + +err: + blk_mq_sched_tags_teardown(q); + q->elevator = NULL; + return ret; } -void blk_mq_sched_teardown(struct request_queue *q) +void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) { - struct blk_mq_tag_set *set = q->tag_set; - struct blk_mq_hw_ctx *hctx; - int i; - - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_sched_free_tags(set, hctx, i); + if (e->type->ops.mq.exit_sched) + e->type->ops.mq.exit_sched(e); + blk_mq_sched_tags_teardown(q); + q->elevator = NULL; } int blk_mq_sched_init(struct request_queue *q) diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index a75b16b123f7..3a9e6e40558b 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, struct request **merged_request); bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); -void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx); +void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue, bool async, bool can_block); @@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx, struct list_head *rq_list, struct request *(*get_rq)(struct blk_mq_hw_ctx *)); -int blk_mq_sched_setup(struct request_queue *q); -void blk_mq_sched_teardown(struct request_queue *q); +int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); +void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); + +int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx); +void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx); int blk_mq_sched_init(struct request_queue *q); @@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); } -/* - * Mark a hardware queue and the request queue it belongs to as needing a - * restart. 
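The restart machinery these hunks touch is a small flag handshake: the dispatch side sets the BLK_MQ_S_SCHED_RESTART bit when it cannot make progress, and the completion side atomically tests and clears it before rerunning the queue, so each stall produces at most one rerun. A sketch of that handshake using C11 atomics in place of the kernel's set_bit()/test_and_clear_bit():

#include <stdatomic.h>
#include <stdio.h>

#define SCHED_RESTART 0x1u

static atomic_uint state;

static void mark_restart(void)          /* dispatch side: stalled */
{
        atomic_fetch_or(&state, SCHED_RESTART);
}

static int restart_if_needed(void)      /* completion side */
{
        /* Clear atomically; only the thread that saw the bit set reruns. */
        if (atomic_fetch_and(&state, ~SCHED_RESTART) & SCHED_RESTART) {
                printf("rerun queue\n");
                return 1;
        }
        return 0;
}

int main(void)
{
        mark_restart();
        restart_if_needed();                    /* reruns once */
        return restart_if_needed() ? 1 : 0;     /* now a no-op */
}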
- */ -static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx) -{ - struct request_queue *q = hctx->queue; - - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) - set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) - set_bit(QUEUE_FLAG_RESTART, &q->queue_flags); -} - static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) { return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); diff --git a/block/blk-mq.c b/block/blk-mq.c index 6b6e7bc041db..572966f49596 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -321,7 +321,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw, rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data); - blk_mq_put_ctx(alloc_data.ctx); blk_queue_exit(q); if (!rq) @@ -349,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); if (sched_tag != -1) blk_mq_sched_completed_request(hctx, rq); - blk_mq_sched_restart_queues(hctx); + blk_mq_sched_restart(hctx); blk_queue_exit(q); } @@ -846,12 +845,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT, }; - if (rq->tag != -1) { -done: - if (hctx) - *hctx = data.hctx; - return true; - } + if (rq->tag != -1) + goto done; if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) data.flags |= BLK_MQ_REQ_RESERVED; @@ -863,10 +858,12 @@ done: atomic_inc(&data.hctx->nr_active); } data.hctx->tags->rqs[rq->tag] = rq; - goto done; } - return false; +done: + if (hctx) + *hctx = data.hctx; + return rq->tag != -1; } static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, @@ -963,14 +960,17 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx) return true; } -bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) +bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list) { - struct request_queue *q = hctx->queue; + struct blk_mq_hw_ctx *hctx; struct request *rq; LIST_HEAD(driver_list); struct list_head *dptr; int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK; + if (list_empty(list)) + return false; + /* * Start off with dptr being NULL, so we start the first request * immediately, even if we have more pending. @@ -981,7 +981,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) * Now process all the entries, sending them to the driver. 
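blk_mq_dispatch_rq_list() above now takes the request_queue instead of a hctx and bails out early when the list is empty; in the kernel code that guarantees the loop body runs at least once, so the hctx resolved from the first request is initialized before the post-loop hctx->dispatched[] accounting touches it. The control-flow skeleton, reduced to its essentials:

#include <stdbool.h>
#include <stdio.h>

static int pending = 3;         /* stand-in for the request list */

static bool list_is_empty(void) { return pending == 0; }
static void dispatch_one(void)  { pending--; printf("dispatched\n"); }

static bool dispatch_list(void)
{
        if (list_is_empty())
                return false;   /* nothing resolved, nothing touched */

        do {
                dispatch_one(); /* first pass also resolves the context */
        } while (!list_is_empty());

        return true;            /* post-loop accounting is safe here */
}

int main(void)
{
        return dispatch_list() ? 0 : 1;
}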
*/ errors = queued = 0; - while (!list_empty(list)) { + do { struct blk_mq_queue_data bd; rq = list_first_entry(list, struct request, queuelist); @@ -1052,7 +1052,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) */ if (!dptr && list->next != list->prev) dptr = &driver_list; - } + } while (!list_empty(list)); hctx->dispatched[queued_to_index(queued)]++; @@ -1135,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) return hctx->next_cpu; } -void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) +static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, + unsigned long msecs) { if (unlikely(blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))) @@ -1152,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) put_cpu(); } - kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work); + if (msecs == 0) + kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), + &hctx->run_work); + else + kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), + &hctx->delayed_run_work, + msecs_to_jiffies(msecs)); +} + +void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) +{ + __blk_mq_delay_run_hw_queue(hctx, true, msecs); +} +EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); + +void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) +{ + __blk_mq_delay_run_hw_queue(hctx, async, 0); } void blk_mq_run_hw_queues(struct request_queue *q, bool async) @@ -1255,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work) __blk_mq_run_hw_queue(hctx); } +static void blk_mq_delayed_run_work_fn(struct work_struct *work) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work); + + __blk_mq_run_hw_queue(hctx); +} + static void blk_mq_delay_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx; @@ -1924,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q, hctx->fq->flush_rq, hctx_idx, flush_start_tag + hctx_idx); + blk_mq_sched_exit_hctx(q, hctx, hctx_idx); + if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); @@ -1960,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q, node = hctx->numa_node = set->numa_node; INIT_WORK(&hctx->run_work, blk_mq_run_work_fn); + INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn); INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); @@ -1990,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q, set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) goto free_bitmap; + if (blk_mq_sched_init_hctx(q, hctx, hctx_idx)) + goto exit_hctx; + hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); if (!hctx->fq) - goto exit_hctx; + goto sched_exit_hctx; if (set->ops->init_request && set->ops->init_request(set->driver_data, @@ -2007,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q, free_fq: kfree(hctx->fq); + sched_exit_hctx: + blk_mq_sched_exit_hctx(q, hctx, hctx_idx); exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); @@ -2233,8 +2268,6 @@ void blk_mq_release(struct request_queue *q) struct blk_mq_hw_ctx *hctx; unsigned int i; - blk_mq_sched_teardown(q); - /* hctx kobj stays in hctx */ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx) @@ -2565,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) return 0; } +static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) +{ 
+ if (set->ops->map_queues) + return set->ops->map_queues(set); + else + return blk_mq_map_queues(set); +} + /* * Alloc a tag set to be associated with one or more request queues. * May fail with EINVAL for various error conditions. May adjust the @@ -2619,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (!set->mq_map) goto out_free_tags; - if (set->ops->map_queues) - ret = set->ops->map_queues(set); - else - ret = blk_mq_map_queues(set); + ret = blk_mq_update_queue_map(set); if (ret) goto out_free_mq_map; @@ -2714,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) blk_mq_freeze_queue(q); set->nr_hw_queues = nr_hw_queues; + blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_realloc_hw_ctxs(set, q); diff --git a/block/blk-mq.h b/block/blk-mq.h index b79f9a7d8cf6..660a17e1d033 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -31,7 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); -bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *); +bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *); void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx); bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index c44b321335f3..37f0b3ad635e 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj) if (q->elevator) { ioc_clear_queue(q); - elevator_exit(q->elevator); + elevator_exit(q, q->elevator); } blk_exit_rl(&q->root_rl); diff --git a/block/elevator.c b/block/elevator.c index 01139f549b5b..dbeecf7be719 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -242,26 +242,21 @@ int elevator_init(struct request_queue *q, char *name) } } - if (e->uses_mq) { - err = blk_mq_sched_setup(q); - if (!err) - err = e->ops.mq.init_sched(q, e); - } else + if (e->uses_mq) + err = blk_mq_init_sched(q, e); + else err = e->ops.sq.elevator_init_fn(q, e); - if (err) { - if (e->uses_mq) - blk_mq_sched_teardown(q); + if (err) elevator_put(e); - } return err; } EXPORT_SYMBOL(elevator_init); -void elevator_exit(struct elevator_queue *e) +void elevator_exit(struct request_queue *q, struct elevator_queue *e) { mutex_lock(&e->sysfs_lock); if (e->uses_mq && e->type->ops.mq.exit_sched) - e->type->ops.mq.exit_sched(e); + blk_mq_exit_sched(q, e); else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn) e->type->ops.sq.elevator_exit_fn(e); mutex_unlock(&e->sysfs_lock); @@ -946,6 +941,45 @@ void elv_unregister(struct elevator_type *e) } EXPORT_SYMBOL_GPL(elv_unregister); +static int elevator_switch_mq(struct request_queue *q, + struct elevator_type *new_e) +{ + int ret; + + blk_mq_freeze_queue(q); + blk_mq_quiesce_queue(q); + + if (q->elevator) { + if (q->elevator->registered) + elv_unregister_queue(q); + ioc_clear_queue(q); + elevator_exit(q, q->elevator); + } + + ret = blk_mq_init_sched(q, new_e); + if (ret) + goto out; + + if (new_e) { + ret = elv_register_queue(q); + if (ret) { + elevator_exit(q, q->elevator); + goto out; + } + } + + if (new_e) + blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); + else + blk_add_trace_msg(q, "elv switch: none"); + +out: + 
blk_mq_unfreeze_queue(q); + blk_mq_start_stopped_hw_queues(q, true); + return ret; + +} + /* * switch to new_e io scheduler. be careful not to introduce deadlocks - * we don't free the old io scheduler, before we have allocated what we @@ -958,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) bool old_registered = false; int err; - if (q->mq_ops) { - blk_mq_freeze_queue(q); - blk_mq_quiesce_queue(q); - } + if (q->mq_ops) + return elevator_switch_mq(q, new_e); /* * Turn on BYPASS and drain all requests w/ elevator private data. @@ -973,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) if (old) { old_registered = old->registered; - if (old->uses_mq) - blk_mq_sched_teardown(q); - - if (!q->mq_ops) - blk_queue_bypass_start(q); + blk_queue_bypass_start(q); /* unregister and clear all auxiliary data of the old elevator */ if (old_registered) @@ -987,56 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) } /* allocate, init and register new elevator */ - if (new_e) { - if (new_e->uses_mq) { - err = blk_mq_sched_setup(q); - if (!err) - err = new_e->ops.mq.init_sched(q, new_e); - } else - err = new_e->ops.sq.elevator_init_fn(q, new_e); - if (err) - goto fail_init; + err = new_e->ops.sq.elevator_init_fn(q, new_e); + if (err) + goto fail_init; - err = elv_register_queue(q); - if (err) - goto fail_register; - } else - q->elevator = NULL; + err = elv_register_queue(q); + if (err) + goto fail_register; /* done, kill the old one and finish */ if (old) { - elevator_exit(old); - if (!q->mq_ops) - blk_queue_bypass_end(q); + elevator_exit(q, old); + blk_queue_bypass_end(q); } - if (q->mq_ops) { - blk_mq_unfreeze_queue(q); - blk_mq_start_stopped_hw_queues(q, true); - } - - if (new_e) - blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); - else - blk_add_trace_msg(q, "elv switch: none"); + blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); return 0; fail_register: - if (q->mq_ops) - blk_mq_sched_teardown(q); - elevator_exit(q->elevator); + elevator_exit(q, q->elevator); fail_init: /* switch failed, restore and re-register old elevator */ if (old) { q->elevator = old; elv_register_queue(q); - if (!q->mq_ops) - blk_queue_bypass_end(q); - } - if (q->mq_ops) { - blk_mq_unfreeze_queue(q); - blk_mq_start_stopped_hw_queues(q, true); + blk_queue_bypass_end(q); } return err; diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index c86bae7b1d0f..ff096d9755b9 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, ACPI_FUNCTION_TRACE(ut_walk_aml_resources); - /* - * The absolute minimum resource template is one end_tag descriptor. - * However, we will treat a lone end_tag as just a simple buffer. 
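elevator_switch_mq() above wraps the whole exchange in blk_mq_freeze_queue()/blk_mq_unfreeze_queue(), so the old scheduler is torn down and the new one brought up with no requests in flight, and the error path reaches the unfreeze through the same out: label as the success path. The bracket in schematic form, with hypothetical stand-in functions:

#include <stdio.h>

static void freeze(void)      { printf("freeze\n"); }
static void unfreeze(void)    { printf("unfreeze\n"); }
static void exit_old(void)    { printf("exit old sched\n"); }
static int  init_new(int ok)  { printf("init new sched\n"); return ok ? 0 : -1; }

/* Both outcomes leave through the same unfreeze, mirroring the
   out: label in the hunk above. */
static int switch_sched(int ok)
{
        int ret;

        freeze();
        exit_old();
        ret = init_new(ok);     /* on failure the queue has no scheduler */
        unfreeze();
        return ret;
}

int main(void)
{
        return switch_sched(1) ? 1 : 0;
}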
- */ + /* The absolute minimum resource template is one end_tag descriptor */ + if (aml_length < sizeof(struct aml_resource_end_tag)) { return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); } @@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, /* Invoke the user function */ if (user_function) { - status = user_function(aml, length, offset, - resource_index, context); + status = + user_function(aml, length, offset, resource_index, + context); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, *context = aml; } - /* Check if buffer is defined to be longer than the resource length */ - - if (aml_length > (offset + length)) { - return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); - } - /* Normal exit */ return_ACPI_STATUS(AE_OK); diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index fb19e1cdb641..edc8663b5db3 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children) return -ENODEV; /* - * If the device has a _HID (or _CID) returning a valid ACPI/PNP - * device ID, it is better to make it look less attractive here, so that - * the other device with the same _ADR value (that may not have a valid - * device ID) can be matched going forward. [This means a second spec - * violation in a row, so whatever we do here is best effort anyway.] + * If the device has a _HID returning a valid ACPI/PNP device ID, it is + * better to make it look less attractive here, so that the other device + * with the same _ADR value (that may not have a valid device ID) can be + * matched going forward. [This means a second spec violation in a row, + * so whatever we do here is best effort anyway.] */ - return sta_present && list_empty(&adev->pnp.ids) ? + return sta_present && !adev->pnp.type.platform_id ? 
FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 192691880d55..2433569b02ef 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device) return; device->flags.match_driver = true; - if (!ret) { - ret = device_attach(&device->dev); - if (ret < 0) - return; - - if (!ret && device->pnp.type.platform_id) - acpi_default_enumeration(device); + if (ret > 0) { + acpi_device_set_enumerated(device); + goto ok; } + ret = device_attach(&device->dev); + if (ret < 0) + return; + + if (ret > 0 || !device->pnp.type.platform_id) + acpi_device_set_enumerated(device); + else + acpi_default_enumeration(device); + ok: list_for_each_entry(child, &device->children, node) acpi_bus_attach(child); diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 6c9aa95a9a05..49d705c9f0f7 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; const struct ata_port_info *ppi[] = { &info, &info }; - /* SB600/700 don't have secondary port wired */ - if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) || - (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE)) - ppi[1] = &ata_dummy_port_info; - return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL, ATA_HOST_PARALLEL_SCAN); } diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 0636d84fbefe..f3f538eec7b3 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id, pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); } - /* enable IRQ on hotplug */ - pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8); - if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) { - dev_dbg(&pdev->dev, - "enabling SATA hotplug (0x%x)\n", - (int) tmp8); - tmp8 |= SATA_HOTPLUG; - pci_write_config_byte(pdev, SVIA_MISC_3, tmp8); + if (board_id == vt6421) { + /* enable IRQ on hotplug */ + pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8); + if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) { + dev_dbg(&pdev->dev, + "enabling SATA hotplug (0x%x)\n", + (int) tmp8); + tmp8 |= SATA_HOTPLUG; + pci_write_config_byte(pdev, SVIA_MISC_3, tmp8); + } } /* diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index dceb5edd1e54..0c09d4256108 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); if (size == PAGE_SIZE) { - copy_page(mem, cmem); + memcpy(mem, cmem, PAGE_SIZE); } else { struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); @@ -717,7 +717,7 @@ compress_again: if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { src = kmap_atomic(page); - copy_page(cmem, src); + memcpy(cmem, src, PAGE_SIZE); kunmap_atomic(src); } else { memcpy(cmem, src, clen); @@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector, } index = sector >> SECTORS_PER_PAGE_SHIFT; - offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT; + offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; bv.bv_page = page; bv.bv_len = PAGE_SIZE; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 6d9cc2d39d22..7e4a9d1296bb 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) 
#endif #ifdef CONFIG_STRICT_DEVMEM +static inline int page_is_allowed(unsigned long pfn) +{ + return devmem_is_allowed(pfn); +} static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; @@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) return 1; } #else +static inline int page_is_allowed(unsigned long pfn) +{ + return 1; +} static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; @@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf, while (count > 0) { unsigned long remaining; + int allowed; sz = size_inside_page(p, count); - if (!range_is_allowed(p >> PAGE_SHIFT, count)) + allowed = page_is_allowed(p >> PAGE_SHIFT); + if (!allowed) return -EPERM; + if (allowed == 2) { + /* Show zeros for restricted memory. */ + remaining = clear_user(buf, sz); + } else { + /* + * On ia64 if a page has been mapped somewhere as + * uncached, then it must also be accessed uncached + * by the kernel or data corruption may occur. + */ + ptr = xlate_dev_mem_ptr(p); + if (!ptr) + return -EFAULT; - /* - * On ia64 if a page has been mapped somewhere as uncached, then - * it must also be accessed uncached by the kernel or data - * corruption may occur. - */ - ptr = xlate_dev_mem_ptr(p); - if (!ptr) - return -EFAULT; + remaining = copy_to_user(buf, ptr, sz); + + unxlate_dev_mem_ptr(p, ptr); + } - remaining = copy_to_user(buf, ptr, sz); - unxlate_dev_mem_ptr(p, ptr); if (remaining) return -EFAULT; @@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf, #endif while (count > 0) { + int allowed; + sz = size_inside_page(p, count); - if (!range_is_allowed(p >> PAGE_SHIFT, sz)) + allowed = page_is_allowed(p >> PAGE_SHIFT); + if (!allowed) return -EPERM; - /* - * On ia64 if a page has been mapped somewhere as uncached, then - * it must also be accessed uncached by the kernel or data - * corruption may occur. - */ - ptr = xlate_dev_mem_ptr(p); - if (!ptr) { - if (written) - break; - return -EFAULT; - } + /* Skip actual writing when a page is marked as restricted. */ + if (allowed == 1) { + /* + * On ia64 if a page has been mapped somewhere as + * uncached, then it must also be accessed uncached + * by the kernel or data corruption may occur. + */ + ptr = xlate_dev_mem_ptr(p); + if (!ptr) { + if (written) + break; + return -EFAULT; + } - copied = copy_from_user(ptr, buf, sz); - unxlate_dev_mem_ptr(p, ptr); - if (copied) { - written += sz - copied; - if (written) - break; - return -EFAULT; + copied = copy_from_user(ptr, buf, sz); + unxlate_dev_mem_ptr(p, ptr); + if (copied) { + written += sz - copied; + if (written) + break; + return -EFAULT; + } } buf += sz; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index e9b7e0b3cabe..87fe111d0be6 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev) vdev->config->reset(vdev); - virtqueue_disable_cb(portdev->c_ivq); + if (use_multiport(portdev)) + virtqueue_disable_cb(portdev->c_ivq); cancel_work_sync(&portdev->control_work); cancel_work_sync(&portdev->config_work); /* * Once more: if control_work_handler() was running, it would * enable the cb as the last step. 
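In the read_mem() rework above, a page_is_allowed() verdict of 2 never maps the page at all: the user buffer is filled with zeros via clear_user(), while a verdict of 1 keeps the old xlate_dev_mem_ptr()/copy_to_user() path. The branch in miniature; the tri-state follows the devmem_is_allowed() contract shown earlier, and everything else here is stubbed:

#include <stdio.h>
#include <string.h>

static char fake_page[4096];    /* stands in for the mapped physical page */

/* 0 = reject, 1 = copy real bytes, 2 = present the page as zeros. */
static int page_is_allowed(unsigned long pfn) { return pfn == 0 ? 2 : 1; }

static int read_page(unsigned long pfn, char *buf, size_t sz)
{
        int allowed = page_is_allowed(pfn);

        if (!allowed)
                return -1;              /* -EPERM in the kernel */

        if (allowed == 2)
                memset(buf, 0, sz);     /* clear_user(): restricted low RAM */
        else
                memcpy(buf, fake_page, sz);  /* xlate + copy_to_user path */

        return 0;
}

int main(void)
{
        char buf[16];
        return read_page(0, buf, sizeof(buf));
}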
*/ - virtqueue_disable_cb(portdev->c_ivq); + if (use_multiport(portdev)) + virtqueue_disable_cb(portdev->c_ivq); remove_controlq_data(portdev); list_for_each_entry(port, &portdev->ports, list) { diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index bc96d423781a..0e3f6496524d 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2398,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); *********************************************************************/ static enum cpuhp_state hp_online; +static int cpuhp_cpufreq_online(unsigned int cpu) +{ + cpufreq_online(cpu); + + return 0; +} + +static int cpuhp_cpufreq_offline(unsigned int cpu) +{ + cpufreq_offline(cpu); + + return 0; +} + /** * cpufreq_register_driver - register a CPU Frequency driver * @driver_data: A struct cpufreq_driver containing the values# @@ -2460,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) } ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online", - cpufreq_online, - cpufreq_offline); + cpuhp_cpufreq_online, + cpuhp_cpufreq_offline); if (ret < 0) goto err_if_unreg; hp_online = ret; diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 32100c4851dd..49cbdcba7883 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm) ctx->dev = caam_jr_alloc(); if (IS_ERR(ctx->dev)) { - dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n"); + pr_err("Job Ring Device allocation for transform failed\n"); return PTR_ERR(ctx->dev); } diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index fef39f9f41ee..5d7f73d60515 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) /* Try to run it through DECO0 */ ret = run_descriptor_deco0(ctrldev, desc, &status); - if (ret || status) { + if (ret || + (status && status != JRSTA_SSRC_JUMP_HALT_CC)) { dev_err(ctrldev, "Failed to deinstantiate RNG4 SH%d\n", sh_idx); @@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev) struct device *ctrldev; struct caam_drv_private *ctrlpriv; struct caam_ctrl __iomem *ctrl; - int ring; ctrldev = &pdev->dev; ctrlpriv = dev_get_drvdata(ctrldev); ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; - /* Remove platform devices for JobRs */ - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) - of_device_unregister(ctrlpriv->jrpdev[ring]); + /* Remove platform devices under the crypto node */ + of_platform_depopulate(ctrldev); /* De-initialize RNG state handles initialized by this driver. 
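The cpufreq hunk above interposes two trivial adapters between the CPU-hotplug state machine and cpufreq_online()/cpufreq_offline(): as written, the adapters make the hotplug callbacks succeed unconditionally, so whatever the cpufreq internals return never feeds back into the hotplug transition itself. The adapter shape, with stand-in internals:

#include <stdio.h>

/* Stand-ins: the real functions can report errors that CPU hotplug
   should not propagate. */
static int cpufreq_online_stub(unsigned int cpu)  { (void)cpu; return -19; }
static int cpufreq_offline_stub(unsigned int cpu) { (void)cpu; return 0; }

/* Thin adapters with the callback signature the hotplug core invokes;
   they deliberately swallow the internal return value. */
static int cpuhp_online(unsigned int cpu)
{
        cpufreq_online_stub(cpu);
        return 0;
}

static int cpuhp_offline(unsigned int cpu)
{
        cpufreq_offline_stub(cpu);
        return 0;
}

int main(void)
{
        return cpuhp_online(0) | cpuhp_offline(0);
}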
*/ if (ctrlpriv->rng4_sh_init) @@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); #endif +static const struct of_device_id caam_match[] = { + { + .compatible = "fsl,sec-v4.0", + }, + { + .compatible = "fsl,sec4.0", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, caam_match); + /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) { - int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; + int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; u64 caam_id; struct device *dev; struct device_node *nprop, *np; @@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev) goto iounmap_ctrl; } - /* - * Detect and enable JobRs - * First, find out how many ring spec'ed, allocate references - * for all, then go probe each one. - */ - rspec = 0; - for_each_available_child_of_node(nprop, np) - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || - of_device_is_compatible(np, "fsl,sec4.0-job-ring")) - rspec++; - - ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec, - sizeof(*ctrlpriv->jrpdev), GFP_KERNEL); - if (ctrlpriv->jrpdev == NULL) { - ret = -ENOMEM; + ret = of_platform_populate(nprop, caam_match, NULL, dev); + if (ret) { + dev_err(dev, "JR platform devices creation error\n"); goto iounmap_ctrl; } ring = 0; - ridx = 0; - ctrlpriv->total_jobrs = 0; for_each_available_child_of_node(nprop, np) if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { - ctrlpriv->jrpdev[ring] = - of_platform_device_create(np, NULL, dev); - if (!ctrlpriv->jrpdev[ring]) { - pr_warn("JR physical index %d: Platform device creation error\n", - ridx); - ridx++; - continue; - } ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) ((__force uint8_t *)ctrl + - (ridx + JR_BLOCK_NUMBER) * + (ring + JR_BLOCK_NUMBER) * BLOCK_OFFSET ); ctrlpriv->total_jobrs++; ring++; - ridx++; - } + } /* Check to see if QI present. 
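The caam ctrl.c conversion above drops the hand-rolled loop of of_platform_device_create() calls, plus the jrpdev[] bookkeeping array, in favour of one of_platform_populate() over the controller node restricted by the caam_match table; teardown in caam_remove() then collapses to a single of_platform_depopulate(). The populate/depopulate pairing in a stubbed, standalone form (the device and node names are hypothetical):

#include <stdio.h>

struct device { const char *name; };
struct of_device_id { const char *compatible; };

/* Stubs for the OF helpers used in the hunk; the real ones create and
   destroy child platform devices matching the table. */
static int populate_stub(const struct of_device_id *matches,
                         struct device *parent)
{
        printf("populate children of %s matching %s\n",
               parent->name, matches[0].compatible);
        return 0;
}

static void depopulate_stub(struct device *parent)
{
        printf("depopulate children of %s\n", parent->name);
}

int main(void)
{
        static const struct of_device_id match[] = {
                { .compatible = "fsl,sec-v4.0" },
                { .compatible = "fsl,sec4.0" },
                { 0 },
        };
        struct device ctrl = { "caam-controller" };

        if (populate_stub(match, &ctrl))        /* probe path */
                return 1;
        depopulate_stub(&ctrl);                 /* remove path */
        return 0;
}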
If so, enable */ ctrlpriv->qi_present = @@ -847,17 +834,6 @@ disable_caam_ipg: return ret; } -static struct of_device_id caam_match[] = { - { - .compatible = "fsl,sec-v4.0", - }, - { - .compatible = "fsl,sec4.0", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, caam_match); - static struct platform_driver caam_driver = { .driver = { .name = "caam", diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index e2bcacc1a921..dbed8baeebe5 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -66,7 +66,6 @@ struct caam_drv_private_jr { struct caam_drv_private { struct device *dev; - struct platform_device **jrpdev; /* Alloc'ed array per sub-device */ struct platform_device *pdev; /* Physical-presence section */ diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index 932742e4cf23..24c461dea7af 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, status = __gop_query32(sys_table_arg, gop32, &info, &size, ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found)) { + if (status == EFI_SUCCESS && (!first_gop || conout_found) && + info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may * provide multiple GOP devices, not all of which are @@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, status = __gop_query64(sys_table_arg, gop64, &info, &size, ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found)) { + if (status == EFI_SUCCESS && (!first_gop || conout_found) && + info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may * provide multiple GOP devices, not all of which are diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index da48819ff2e6..b78d9239e48f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1317,7 +1317,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, if (!fence) { event_free(gpu, event); ret = -ENOMEM; - goto out_pm_put; + goto out_unlock; } gpu->event[event].fence = fence; @@ -1357,6 +1357,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, hangcheck_timer_reset(gpu); ret = 0; +out_unlock: mutex_unlock(&gpu->lock); out_pm_put: diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index b7d7721e72fa..40af17ec6312 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, { int ret; - if (vgpu->failsafe) - return 0; - if (WARN_ON(bytes > 4)) return -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f1f426a97aa9..d186c157f65f 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -775,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) _EL_OFFSET_STATUS_PTR); ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); - ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7; + ctx_status_ptr.read_ptr = 0; + ctx_status_ptr.write_ptr = 0x7; vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; } diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 933a7c211a1c..dce8d15f706f 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ 
b/drivers/gpu/drm/i915/gvt/firmware.c @@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) struct gvt_firmware_header *h; void *firmware; void *p; - unsigned long size; + unsigned long size, crc32_start; int i; int ret; - size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; + size = sizeof(*h) + info->mmio_size + info->cfg_space_size; firmware = vzalloc(size); if (!firmware) return -ENOMEM; @@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) memcpy(gvt->firmware.mmio, p, info->mmio_size); + crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; + h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start); + firmware_attr.size = size; firmware_attr.private = firmware; @@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) firmware->mmio = mem; - sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", + sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state", GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, pdev->revision); diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 3b9d59e457ba..ef3baa0c4754 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = { .vgpu_create = intel_gvt_create_vgpu, .vgpu_destroy = intel_gvt_destroy_vgpu, .vgpu_reset = intel_gvt_reset_vgpu, + .vgpu_activate = intel_gvt_activate_vgpu, + .vgpu_deactivate = intel_gvt_deactivate_vgpu, }; /** diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 6dfc48b63b71..becae2fa3b29 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, unsigned int engine_mask); void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); - +void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); /* validating GM functions */ #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ @@ -449,6 +450,8 @@ struct intel_gvt_ops { struct intel_vgpu_type *); void (*vgpu_destroy)(struct intel_vgpu *); void (*vgpu_reset)(struct intel_vgpu *); + void (*vgpu_activate)(struct intel_vgpu *); + void (*vgpu_deactivate)(struct intel_vgpu *); }; diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index d641214578a7..e466259034e2 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -544,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) if (ret) goto undo_group; + intel_gvt_ops->vgpu_activate(vgpu); + atomic_set(&vgpu->vdev.released, 0); return ret; @@ -569,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) return; + intel_gvt_ops->vgpu_deactivate(vgpu); + ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, &vgpu->vdev.iommu_notifier); WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); @@ -1340,13 +1344,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev) static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) { - struct intel_vgpu *vgpu = info->vgpu; - - if (!info) { - gvt_vgpu_err("kvmgt_guest_info invalid\n"); - return false; - } - kvm_page_track_unregister_notifier(info->kvm, &info->track_node); kvm_put_kvm(info->kvm); kvmgt_protect_table_destroy(info); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c 
b/drivers/gpu/drm/i915/gvt/vgpu.c index 41cfa5ccae84..649ef280cc9a 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -72,7 +72,7 @@ static struct { char *name; } vgpu_types[] = { /* Fixed vGPU type table */ - { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, + { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" }, { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, @@ -179,20 +179,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) } /** - * intel_gvt_destroy_vgpu - destroy a virtual GPU + * intel_gvt_activate_vgpu - activate a virtual GPU * @vgpu: virtual GPU * - * This function is called when user wants to destroy a virtual GPU. + * This function is called when user wants to activate a virtual GPU. * */ -void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) +void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) +{ + mutex_lock(&vgpu->gvt->lock); + vgpu->active = true; + mutex_unlock(&vgpu->gvt->lock); +} + +/** + * intel_gvt_deactivate_vgpu - deactivate a virtual GPU + * @vgpu: virtual GPU + * + * This function is called when user wants to deactivate a virtual GPU. + * All virtual GPU runtime information will be destroyed. + * + */ +void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; mutex_lock(&gvt->lock); vgpu->active = false; - idr_remove(&gvt->vgpu_idr, vgpu->id); if (atomic_read(&vgpu->running_workload_num)) { mutex_unlock(&gvt->lock); @@ -201,6 +215,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) } intel_vgpu_stop_schedule(vgpu); + + mutex_unlock(&gvt->lock); +} + +/** + * intel_gvt_destroy_vgpu - destroy a virtual GPU + * @vgpu: virtual GPU + * + * This function is called when user wants to destroy a virtual GPU. 
+ * + */ +void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + + mutex_lock(&gvt->lock); + + WARN(vgpu->active, "vGPU is still active!\n"); + + idr_remove(&gvt->vgpu_idr, vgpu->id); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_gvt_context(vgpu); intel_vgpu_clean_execlist(vgpu); @@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_shadow_ctx; - vgpu->active = true; mutex_unlock(&gvt->lock); return vgpu; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1c75402a59c1..5c089b3c2a7e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1434,8 +1434,6 @@ static int i915_drm_suspend(struct drm_device *dev) goto out; } - intel_guc_suspend(dev_priv); - intel_display_suspend(dev); intel_dp_mst_suspend(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1e53c31b6826..46fcd8b7080a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -806,6 +806,7 @@ struct intel_csr { func(has_resource_streamer); \ func(has_runtime_pm); \ func(has_snoop); \ + func(unfenced_needs_alignment); \ func(cursor_needs_physical); \ func(hws_needs_physical); \ func(overlay_needs_physical); \ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 67b1fc5a0331..fe531f904062 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4348,6 +4348,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) i915_gem_context_lost(dev_priv); mutex_unlock(&dev->struct_mutex); + intel_guc_suspend(dev_priv); + cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); cancel_delayed_work_sync(&dev_priv->gt.retire_work); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 30e0675fd7da..15a15d00a6bf 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -888,6 +888,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct list_head ordered_vmas; struct list_head pinned_vmas; bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; + bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment; int retry; vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; @@ -908,7 +909,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, if (!has_fenced_gpu_access) entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; need_fence = - entry->flags & EXEC_OBJECT_NEEDS_FENCE && + (entry->flags & EXEC_OBJECT_NEEDS_FENCE || + needs_unfenced_map) && i915_gem_object_is_tiled(obj); need_mappable = need_fence || need_reloc_mappable(vma); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 2801a4d56324..96e45a4d5441 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2704,7 +2704,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, struct i915_ggtt *ggtt = &dev_priv->ggtt; if (unlikely(ggtt->do_idle_maps)) { - if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) { + if (i915_gem_wait_for_idle(dev_priv, 0)) { DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); /* Wait a bit, in hopes it avoids the hang */ udelay(10); diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index e7c3c0318ff6..da70bfe97ec5 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ 
b/drivers/gpu/drm/i915/i915_gem_request.c @@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence) static const char *i915_fence_get_timeline_name(struct dma_fence *fence) { + /* The timeline struct (as part of the ppgtt underneath a context) + * may be freed when the request is no longer in use by the GPU. + * We could extend the life of a context to beyond that of all + * fences, possibly keeping the hw resource around indefinitely, + * or we just give them a false name. Since + * dma_fence_ops.get_timeline_name is a debug feature, the occasional + * lie seems justifiable. + */ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return "signaled"; + return to_request(fence)->timeline->common->name; } diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index d5d2b4c6ed38..70b3832a79dd 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) BUG(); } +static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock) +{ + if (!unlock) + return; + + mutex_unlock(&dev->struct_mutex); + + /* expedite the RCU grace period to free some request slabs */ + synchronize_rcu_expedited(); +} + static bool any_vma_pinned(struct drm_i915_gem_object *obj) { struct i915_vma *vma; @@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, intel_runtime_pm_put(dev_priv); i915_gem_retire_requests(dev_priv); - if (unlock) - mutex_unlock(&dev_priv->drm.struct_mutex); - /* expedite the RCU grace period to free some request slabs */ - synchronize_rcu_expedited(); + i915_gem_shrinker_unlock(&dev_priv->drm, unlock); return count; } @@ -293,8 +301,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) count += obj->base.size >> PAGE_SHIFT; } - if (unlock) - mutex_unlock(&dev->struct_mutex); + i915_gem_shrinker_unlock(dev, unlock); return count; } @@ -321,8 +328,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) sc->nr_to_scan - freed, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); - if (unlock) - mutex_unlock(&dev->struct_mutex); + + i915_gem_shrinker_unlock(dev, unlock); return freed; } @@ -364,8 +371,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv, struct shrinker_lock_uninterruptible *slu) { dev_priv->mm.interruptible = slu->was_interruptible; - if (slu->unlock) - mutex_unlock(&dev_priv->drm.struct_mutex); + i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock); } static int diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index ecb487b5356f..9bbbd4e83e3c 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -60,6 +60,7 @@ .has_overlay = 1, .overlay_needs_physical = 1, \ .has_gmch_display = 1, \ .hws_needs_physical = 1, \ + .unfenced_needs_alignment = 1, \ .ring_mask = RENDER_RING, \ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS @@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = { .platform = INTEL_I915G, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, + .unfenced_needs_alignment = 1, }; static const struct intel_device_info intel_i915gm_info = { @@ -112,6 +114,7 @@ static const struct intel_device_info intel_i915gm_info = { .supports_tv = 1, .has_fbc = 1, .hws_needs_physical = 1, + .unfenced_needs_alignment = 1, }; static const struct intel_device_info 
intel_i945g_info = { @@ -120,6 +123,7 @@ static const struct intel_device_info intel_i945g_info = { .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, + .unfenced_needs_alignment = 1, }; static const struct intel_device_info intel_i945gm_info = { @@ -130,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = { .supports_tv = 1, .has_fbc = 1, .hws_needs_physical = 1, + .unfenced_needs_alignment = 1, }; static const struct intel_device_info intel_g33_info = { diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index a1b7eec58be2..70964ca9251e 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, */ if (WARN_ON(stream->sample_flags != props->sample_flags)) { ret = -ENODEV; - goto err_alloc; + goto err_flags; } list_add(&stream->link, &dev_priv->perf.streams); @@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, err_open: list_del(&stream->link); +err_flags: if (stream->ops->destroy) stream->ops->destroy(stream); err_alloc: @@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, if (ret) return ret; + if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { + DRM_DEBUG("Unknown i915 perf property ID\n"); + return -EINVAL; + } + switch ((enum drm_i915_perf_property_id)id) { case DRM_I915_PERF_PROP_CTX_HANDLE: props->single_context = 1; @@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, props->oa_periodic = true; props->oa_period_exponent = value; break; - default: + case DRM_I915_PERF_PROP_MAX: MISSING_CASE(id); - DRM_DEBUG("Unknown i915 perf property ID\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 471af3b480ad..47517a02f0a4 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -670,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request) static struct intel_engine_cs * pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) { - struct intel_engine_cs *engine; + struct intel_engine_cs *engine = + container_of(pt, struct drm_i915_gem_request, priotree)->engine; + + GEM_BUG_ON(!locked); - engine = container_of(pt, - struct drm_i915_gem_request, - priotree)->engine; if (engine != locked) { - if (locked) - spin_unlock_irq(&locked->timeline->lock); - spin_lock_irq(&engine->timeline->lock); + spin_unlock(&locked->timeline->lock); + spin_lock(&engine->timeline->lock); } return engine; @@ -686,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) static void execlists_schedule(struct drm_i915_gem_request *request, int prio) { - struct intel_engine_cs *engine = NULL; + struct intel_engine_cs *engine; struct i915_dependency *dep, *p; struct i915_dependency stack; LIST_HEAD(dfs); @@ -720,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) list_for_each_entry_safe(dep, p, &dfs, dfs_link) { struct i915_priotree *pt = dep->signaler; - list_for_each_entry(p, &pt->signalers_list, signal_link) + /* Within an engine, there can be no cycle, but we may + * refer to the same dependency chain multiple times + * (redundant dependencies are not eliminated) and across + * engines. 
+ */ + list_for_each_entry(p, &pt->signalers_list, signal_link) { + GEM_BUG_ON(p->signaler->priority < pt->priority); if (prio > READ_ONCE(p->signaler->priority)) list_move_tail(&p->dfs_link, &dfs); + } list_safe_reset_next(dep, p, dfs_link); - if (!RB_EMPTY_NODE(&pt->node)) - continue; - - engine = pt_lock_engine(pt, engine); - - /* If it is not already in the rbtree, we can update the - * priority inplace and skip over it (and its dependencies) - * if it is referenced *again* as we descend the dfs. - */ - if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) { - pt->priority = prio; - list_del_init(&dep->dfs_link); - } } + engine = request->engine; + spin_lock_irq(&engine->timeline->lock); + /* Fifo and depth-first replacement ensure our deps execute before us */ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { struct i915_priotree *pt = dep->signaler; @@ -751,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) if (prio <= pt->priority) continue; - GEM_BUG_ON(RB_EMPTY_NODE(&pt->node)); - pt->priority = prio; - rb_erase(&pt->node, &engine->execlist_queue); - if (insert_request(pt, &engine->execlist_queue)) - engine->execlist_first = &pt->node; + if (!RB_EMPTY_NODE(&pt->node)) { + rb_erase(&pt->node, &engine->execlist_queue); + if (insert_request(pt, &engine->execlist_queue)) + engine->execlist_first = &pt->node; + } } - if (engine) - spin_unlock_irq(&engine->timeline->lock); + spin_unlock_irq(&engine->timeline->lock); /* XXX Do we need to preempt to make room for us and our deps? */ } @@ -1440,7 +1435,9 @@ static void reset_common_ring(struct intel_engine_cs *engine, GEM_BUG_ON(request->ctx != port[0].request->ctx); /* Reset WaIdleLiteRestore:bdw,skl as well */ - request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32); + request->tail = + intel_ring_wrap(request->ring, + request->wa_tail - WA_TAIL_DWORDS*sizeof(u32)); } static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 13dccb18cd43..8cb2078c5bfc 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -521,11 +521,17 @@ static inline void intel_ring_advance(struct intel_ring *ring) */ } +static inline u32 +intel_ring_wrap(const struct intel_ring *ring, u32 pos) +{ + return pos & (ring->size - 1); +} + static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr) { /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. 
*/ u32 offset = addr - ring->vaddr; - return offset & (ring->size - 1); + return intel_ring_wrap(ring, offset); } int __intel_ring_space(int head, int tail, int size); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 0b4440ffbeae..a9182d5e6011 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -995,7 +995,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane, { struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); __drm_atomic_helper_plane_destroy_state(&asyw->state); - dma_fence_put(asyw->state.fence); kfree(asyw); } @@ -1007,7 +1006,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane) if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL))) return NULL; __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state); - asyw->state.fence = NULL; asyw->interval = 1; asyw->sema = armw->sema; asyw->ntfy = armw->ntfy; @@ -2036,6 +2034,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; u32 hfrontp = mode->hsync_start - mode->hdisplay; u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; + u32 blankus; struct nv50_head_mode *m = &asyh->mode; m->h.active = mode->htotal; @@ -2049,9 +2048,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) m->v.blanks = m->v.active - vfrontp - 1; /*XXX: Safe underestimate, even "0" works */ - m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active; - m->v.blankus *= 1000; - m->v.blankus /= mode->clock; + blankus = (m->v.active - mode->vdisplay - 2) * m->h.active; + blankus *= 1000; + blankus /= mode->clock; + m->v.blankus = blankus; if (mode->flags & DRM_MODE_FLAG_INTERLACE) { m->v.blank2e = m->v.active + m->v.synce + vbackp; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 273562dd6bbd..3b86a7399567 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -714,7 +714,7 @@ nv4a_chipset = { .i2c = nv04_i2c_new, .imem = nv40_instmem_new, .mc = nv44_mc_new, - .mmu = nv44_mmu_new, + .mmu = nv04_mmu_new, .pci = nv40_pci_new, .therm = nv40_therm_new, .timer = nv41_timer_new, @@ -2271,6 +2271,35 @@ nv136_chipset = { .fifo = gp100_fifo_new, }; +static const struct nvkm_device_chip +nv137_chipset = { + .name = "GP107", + .bar = gf100_bar_new, + .bios = nvkm_bios_new, + .bus = gf100_bus_new, + .devinit = gm200_devinit_new, + .fb = gp102_fb_new, + .fuse = gm107_fuse_new, + .gpio = gk104_gpio_new, + .i2c = gm200_i2c_new, + .ibus = gm200_ibus_new, + .imem = nv50_instmem_new, + .ltc = gp100_ltc_new, + .mc = gp100_mc_new, + .mmu = gf100_mmu_new, + .pci = gp100_pci_new, + .pmu = gp102_pmu_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[0] = gp102_ce_new, + .ce[1] = gp102_ce_new, + .ce[2] = gp102_ce_new, + .ce[3] = gp102_ce_new, + .disp = gp102_disp_new, + .dma = gf119_dma_new, + .fifo = gp100_fifo_new, +}; + static int nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, struct nvkm_notify *notify) @@ -2708,6 +2737,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x132: device->chip = &nv132_chipset; break; case 0x134: device->chip = &nv134_chipset; break; case 0x136: device->chip = &nv136_chipset; break; + case 0x137: device->chip = &nv137_chipset; break; default: nvdev_error(device, "unknown chipset (%08x)\n", boot0); goto done; diff 
--git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c index 003ac915eaad..8a8895246d26 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c @@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine) } if (type == 0x00000010) { - if (!nv31_mpeg_mthd(mpeg, mthd, data)) + if (nv31_mpeg_mthd(mpeg, mthd, data)) show &= ~0x01000000; } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c index e536f37e24b0..c3cf02ed468e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c @@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine) } if (type == 0x00000010) { - if (!nv44_mpeg_mthd(subdev->device, mthd, data)) + if (nv44_mpeg_mthd(subdev->device, mthd, data)) show &= ~0x01000000; } } diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 917dcb978c2c..0c87b1ac6b68 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/fb.h> #include <linux/prefetch.h> +#include <asm/unaligned.h> #include <drm/drmP.h> #include "udl_drv.h" @@ -163,7 +164,7 @@ static void udl_compress_hline16( const u8 *const start = pixel; const uint16_t repeating_pixel_val16 = pixel_val16; - *(uint16_t *)cmd = cpu_to_be16(pixel_val16); + put_unaligned_be16(pixel_val16, cmd); cmd += 2; pixel += bpp; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 63ec1993eaaa..d162f0dc76e3 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid) hid->group = HID_GROUP_WACOM; break; case USB_VENDOR_ID_SYNAPTICS: - if (hid->group == HID_GROUP_GENERIC || - hid->group == HID_GROUP_MULTITOUCH_WIN_8) + if (hid->group == HID_GROUP_GENERIC) if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) /* @@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 4e2648c86c8c..b26c030926c1 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1028,6 +1028,9 @@ #define USB_DEVICE_ID_UGEE_TABLET_45 0x0045 #define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d +#define USB_VENDOR_ID_UGEE 0x28bd +#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071 + #define USB_VENDOR_ID_UNITEC 0x227d #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19 diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c index 1509d7287ff3..e3e6e5c893cc 100644 --- a/drivers/hid/hid-uclogic.c +++ b/drivers/hid/hid-uclogic.c @@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev, } break; case USB_DEVICE_ID_UGTIZER_TABLET_GP0610: + case USB_DEVICE_ID_UGEE_TABLET_EX07S: /* If this is the pen interface */ if 
(intf->cur_altsetting->desc.bInterfaceNumber == 1) { rc = uclogic_tablet_enable(hdev); @@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, { } }; MODULE_DEVICE_TABLE(hid, uclogic_devices); diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index ca5759c0c318..43a6cb078193 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -370,10 +370,12 @@ static int hid_accel_3d_probe(struct platform_device *pdev) name = "accel_3d"; channel_spec = accel_3d_channels; channel_size = sizeof(accel_3d_channels); + indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels); } else { name = "gravity"; channel_spec = gravity_channels; channel_size = sizeof(gravity_channels); + indio_dev->num_channels = ARRAY_SIZE(gravity_channels); } ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage, &accel_state->common_attributes); @@ -395,7 +397,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev) goto error_free_dev_mem; } - indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels); indio_dev->dev.parent = &pdev->dev; indio_dev->info = &accel_3d_info; indio_dev->name = name; diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index d6c372bb433b..c17596f7ed2c 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -61,7 +61,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev, ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data); if (ret < 0) break; - + ret = IIO_VAL_INT; *val = data; break; case IIO_CHAN_INFO_CALIBBIAS: @@ -76,7 +76,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev, for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++) st->core.calib[i] = st->core.resp->sensor_offset.offset[i]; - + ret = IIO_VAL_INT; *val = st->core.calib[idx]; break; case IIO_CHAN_INFO_SCALE: diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index 7afdac42ed42..01e02b9926d4 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c @@ -379,6 +379,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, { struct hid_sensor_hub_attribute_info timestamp; + s32 value; + int ret; hid_sensor_get_reporting_interval(hsdev, usage_id, st); @@ -417,6 +419,14 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, st->sensitivity.index, st->sensitivity.report_id, timestamp.index, timestamp.report_id); + ret = sensor_hub_get_feature(hsdev, + st->power_state.report_id, + st->power_state.index, sizeof(value), &value); + if (ret < 0) + return ret; + if (value < 0) + return -EINVAL; + return 0; } EXPORT_SYMBOL(hid_sensor_parse_common_attributes); diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index f7fcfa886f72..821919dd245b 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -27,6 +27,7 @@ #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #include 
<linux/regmap.h> +#include <linux/delay.h> #include "bmg160.h" #define BMG160_IRQ_NAME "bmg160_event" @@ -52,6 +53,9 @@ #define BMG160_DEF_BW 100 #define BMG160_REG_PMU_BW_RES BIT(7) +#define BMG160_GYRO_REG_RESET 0x14 +#define BMG160_GYRO_RESET_VAL 0xb6 + #define BMG160_REG_INT_MAP_0 0x17 #define BMG160_INT_MAP_0_BIT_ANY BIT(1) @@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg160_data *data) int ret; unsigned int val; + /* + * Reset chip to get it in a known good state. A delay of 30ms after + * reset is required according to the datasheet. + */ + regmap_write(data->regmap, BMG160_GYRO_REG_RESET, + BMG160_GYRO_RESET_VAL); + usleep_range(30000, 30700); + ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val); if (ret < 0) { dev_err(dev, "Error reading reg_chip_id\n"); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index d18ded45bedd..3ff91e02fee3 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -610,10 +610,9 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type, tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1); return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); case IIO_VAL_FRACTIONAL_LOG2: - tmp = (s64)vals[0] * 1000000000LL >> vals[1]; - tmp1 = do_div(tmp, 1000000000LL); - tmp0 = tmp; - return snprintf(buf, len, "%d.%09u", tmp0, tmp1); + tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]); + tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1); + return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); case IIO_VAL_INT_MULTIPLE: { int i; diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 5f2680855552..fd0edca0e656 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -457,6 +457,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, .multi_read_bit = true, + .bootime = 2, }, }; diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 91cbe86b25c8..fcbed35e95a8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count) rx_wr->sg_list = &rx_desc->rx_sg; rx_wr->num_sge = 1; rx_wr->next = rx_wr + 1; + rx_desc->in_use = false; } rx_wr--; rx_wr->next = NULL; /* mark end of work requests list */ @@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) struct ib_recv_wr *rx_wr_failed, rx_wr; int ret; + if (!rx_desc->in_use) { + /* + * if the descriptor is not in-use we already reposted it + * for recv, so just silently return + */ + return 0; + } + + rx_desc->in_use = false; rx_wr.wr_cqe = &rx_desc->rx_cqe; rx_wr.sg_list = &rx_desc->rx_sg; rx_wr.num_sge = 1; @@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) return; } + rx_desc->in_use = true; + ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); @@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr); isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); - if (ret) - transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); - else - isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); + if (ret) { + /* + * transport_generic_request_failure() expects to have + * plus two references to handle queue-full, so re-add + 
* one here as target-core will have already dropped + * it after the first isert_put_datain() callback. + */ + kref_get(&cmd->cmd_kref); + transport_generic_request_failure(cmd, cmd->pi_err); + } else { + /* + * XXX: isert_put_response() failure is not retried. + */ + ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); + if (ret) + pr_warn_ratelimited("isert_put_response() ret: %d\n", ret); + } } static void @@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; spin_unlock_bh(&cmd->istate_lock); - if (ret) { - target_put_sess_cmd(se_cmd); - transport_send_check_condition_and_sense(se_cmd, - se_cmd->pi_err, 0); - } else { + /* + * transport_generic_request_failure() will drop the extra + * se_cmd->cmd_kref reference after T10-PI error, and handle + * any non-zero ->queue_status() callback error retries. + */ + if (ret) + transport_generic_request_failure(se_cmd, se_cmd->pi_err); + else target_execute_cmd(se_cmd); - } } static void @@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) chain_wr = &isert_cmd->tx_desc.send_wr; } - isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); - isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd); - return 1; + rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); + isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n", + isert_cmd, rc); + return rc; } static int isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) { struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + int ret; isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; - isert_rdma_rw_ctx_post(isert_cmd, conn->context, - &isert_cmd->tx_desc.tx_cqe, NULL); + ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context, + &isert_cmd->tx_desc.tx_cqe, NULL); - isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", - isert_cmd); - return 0; + isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n", + isert_cmd, ret); + return ret; } static int diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index c02ada57d7f5..87d994de8c91 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -60,7 +60,7 @@ #define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ - sizeof(struct ib_cqe))) + sizeof(struct ib_cqe) + sizeof(bool))) #define ISCSI_ISER_SG_TABLESIZE 256 @@ -85,6 +85,7 @@ struct iser_rx_desc { u64 dma_addr; struct ib_sge rx_sg; struct ib_cqe rx_cqe; + bool in_use; char pad[ISER_RX_PAD_SIZE]; } __packed; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 155fcb3b6230..153b1ee13e03 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -202,6 +202,7 @@ static const struct xpad_device { { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, + { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, { 0x15e4, 0x3f10, "Batarang Xbox 360 
controller", 0, XTYPE_XBOX360 }, @@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ + XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */ XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 15af9a9753e5..2d203b422129 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, return -ENOMEM; } + raw_spin_lock_init(&cd->rlock); + cd->gpc_base = of_iomap(node, 0); if (!cd->gpc_base) { pr_err("fsl-gpcv2: unable to map gpc registers\n"); diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index e4c2c1a1e993..6735c8d6a445 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -932,7 +932,7 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd, *result = true; r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root, - from_cblock(begin), &cmd->dirty_cursor); + from_cblock(cmd->cache_blocks), &cmd->dirty_cursor); if (r) { DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__); return r; @@ -959,14 +959,16 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd, return 0; } + begin = to_cblock(from_cblock(begin) + 1); + if (begin == end) + break; + r = dm_bitset_cursor_next(&cmd->dirty_cursor); if (r) { DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__); dm_bitset_cursor_end(&cmd->dirty_cursor); return r; } - - begin = to_cblock(from_cblock(begin) + 1); } dm_bitset_cursor_end(&cmd->dirty_cursor); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index f8564d63982f..1e217ba84d09 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3726,7 +3726,7 @@ static int raid_preresume(struct dm_target *ti) return r; /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */ - if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && + if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) { r = bitmap_resize(mddev->bitmap, mddev->dev_sectors, to_bytes(rs->requested_bitmap_chunk_sectors), 0); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 28955b94d2b2..0b081d170087 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -755,6 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, /* Undo dm_start_request() before requeuing */ rq_end_stats(md, rq); rq_completed(md, rq_data_dir(rq), false); + blk_mq_delay_run_hw_queue(hctx, 100/*ms*/); return BLK_MQ_RQ_QUEUE_BUSY; } diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 0f0eb8a3d922..78f36012eaca 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -146,8 +146,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, block = fec_buffer_rs_block(v, fio, n, i); res = fec_decode_rs8(v, fio, block, &par[offset], neras); if (res < 0) { - dm_bufio_release(buf); - r = res; goto error; } @@ -172,6 +170,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, done: r = corrected; error: + 
dm_bufio_release(buf); + if (r < 0 && neras) DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", v->data_dev->name, (unsigned long long)rsb, r); @@ -269,7 +269,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io, &is_zero) == 0) { /* skip known zero blocks entirely */ if (is_zero) - continue; + goto done; /* * skip if we have already found the theoretical @@ -439,6 +439,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, if (!verity_fec_is_enabled(v)) return -EOPNOTSUPP; + if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) { + DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name); + return -EIO; + } + + fio->level++; + if (type == DM_VERITY_BLOCK_TYPE_METADATA) block += v->data_blocks; @@ -470,7 +477,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, if (r < 0) { r = fec_decode_rsb(v, io, fio, rsb, offset, true); if (r < 0) - return r; + goto done; } if (dest) @@ -480,6 +487,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, r = verity_for_bv_block(v, io, iter, fec_bv_copy); } +done: + fio->level--; return r; } @@ -520,6 +529,7 @@ void verity_fec_init_io(struct dm_verity_io *io) memset(fio->bufs, 0, sizeof(fio->bufs)); fio->nbufs = 0; fio->output = NULL; + fio->level = 0; } /* diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index 7fa0298b995e..bb31ce87a933 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -27,6 +27,9 @@ #define DM_VERITY_FEC_BUF_MAX \ (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS)) +/* maximum recursion level for verity_fec_decode */ +#define DM_VERITY_FEC_MAX_RECURSION 4 + #define DM_VERITY_OPT_FEC_DEV "use_fec_from_device" #define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks" #define DM_VERITY_OPT_FEC_START "fec_start" @@ -58,6 +61,7 @@ struct dm_verity_fec_io { unsigned nbufs; /* number of buffers allocated */ u8 *output; /* buffer for corrected output */ size_t output_pos; + unsigned level; /* recursion level */ }; #ifdef CONFIG_DM_VERITY_FEC diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 138f5ae75c0b..4d1fe8d95042 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) int work_done = 0; u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); - u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD); + u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); /* Handle bus state changes */ diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index caed4e6960f8..11662f479e76 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev) devm_can_led_init(ndev); - dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n", - priv->regs, ndev->irq); + dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq); return 0; fail_candev: diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 65c056e2f705..450b6a9fdec7 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -990,7 +990,7 @@ static void team_port_disable(struct team *team, #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) -static void ___team_compute_features(struct team *team) +static void __team_compute_features(struct team *team) { struct team_port *port; u32 vlan_features = 
TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; @@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team) team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; } -static void __team_compute_features(struct team *team) -{ - ___team_compute_features(team); - netdev_change_features(team->dev); -} - static void team_compute_features(struct team *team) { mutex_lock(&team->lock); - ___team_compute_features(team); + __team_compute_features(team); mutex_unlock(&team->lock); netdev_change_features(team->dev); } @@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev) team_notify_peers_fini(team); team_queue_override_fini(team); mutex_unlock(&team->lock); + netdev_change_features(dev); } static void team_destructor(struct net_device *dev) @@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev) mutex_lock(&team->lock); err = team_port_add(team, port_dev); mutex_unlock(&team->lock); + + if (!err) + netdev_change_features(dev); + return err; } @@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev) mutex_lock(&team->lock); err = team_port_del(team, port_dev); mutex_unlock(&team->lock); + + if (!err) + netdev_change_features(dev); + return err; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index adbed261cc8a..a3ed8115747c 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1202,7 +1202,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ - {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 1cc945cbeaa3..79048e72c1bd 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1975,7 +1975,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, " value=0x%04x index=0x%04x size=%d\n", cmd, reqtype, value, index, size); - if (data) { + if (size) { buf = kmalloc(size, GFP_KERNEL); if (!buf) goto out; @@ -1984,8 +1984,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), cmd, reqtype, value, index, buf, size, USB_CTRL_GET_TIMEOUT); - if (err > 0 && err <= size) - memcpy(data, buf, err); + if (err > 0 && err <= size) { + if (data) + memcpy(data, buf, err); + else + netdev_dbg(dev->net, + "Huh? 
Data requested but thrown away.\n"); + } kfree(buf); out: return err; @@ -2006,7 +2011,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, buf = kmemdup(data, size, GFP_KERNEL); if (!buf) goto out; - } + } else { + if (size) { + WARN_ON_ONCE(1); + err = -EINVAL; + goto out; + } + } err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), cmd, reqtype, value, index, buf, size, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b0d241d110ec..666ada6130ab 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2240,14 +2240,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev) #define MIN_MTU ETH_MIN_MTU #define MAX_MTU ETH_MAX_MTU -static int virtnet_probe(struct virtio_device *vdev) +static int virtnet_validate(struct virtio_device *vdev) { - int i, err; - struct net_device *dev; - struct virtnet_info *vi; - u16 max_queue_pairs; - int mtu; - if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); @@ -2257,6 +2251,25 @@ static int virtnet_probe(struct virtio_device *vdev) if (!virtnet_validate_features(vdev)) return -EINVAL; + if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { + int mtu = virtio_cread16(vdev, + offsetof(struct virtio_net_config, + mtu)); + if (mtu < MIN_MTU) + __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); + } + + return 0; +} + +static int virtnet_probe(struct virtio_device *vdev) +{ + int i, err; + struct net_device *dev; + struct virtnet_info *vi; + u16 max_queue_pairs; + int mtu; + /* Find if host supports multiqueue virtio_net device */ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, struct virtio_net_config, @@ -2372,11 +2385,20 @@ static int virtnet_probe(struct virtio_device *vdev) offsetof(struct virtio_net_config, mtu)); if (mtu < dev->min_mtu) { - __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); - } else { - dev->mtu = mtu; - dev->max_mtu = mtu; + /* Should never trigger: MTU was previously validated + * in virtnet_validate. + */ + dev_err(&vdev->dev, "device MTU appears to have changed " + "it is now %d < %d", mtu, dev->min_mtu); + goto free_stats; } + + dev->mtu = mtu; + dev->max_mtu = mtu; + + /* TODO: size buffers correctly in this case. 
*/ + if (dev->mtu > ETH_DATA_LEN) + vi->big_packets = true; } if (vi->any_header_sg) @@ -2554,6 +2576,7 @@ static struct virtio_driver virtio_net_driver = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, + .validate = virtnet_validate, .probe = virtnet_probe, .remove = virtnet_remove, .config_changed = virtnet_config_changed, diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9b3b57fef446..9583a5f58a1d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -270,7 +270,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req, memset(cmnd, 0, sizeof(*cmnd)); cmnd->dsm.opcode = nvme_cmd_dsm; cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); - cmnd->dsm.nr = segments - 1; + cmnd->dsm.nr = cpu_to_le32(segments - 1); cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); req->special_vec.bv_page = virt_to_page(range); diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index a7bcff45f437..76450b0c55f1 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req, u16 status; WARN_ON(req == NULL || slog == NULL); - if (req->cmd->get_log_page.nsid == 0xFFFFFFFF) + if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF)) status = nvmet_get_smart_log_all(req, slog); else status = nvmet_get_smart_log_nsid(req, slog); diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index 4195115c7e54..6b0baa9caab9 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c @@ -180,7 +180,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req) sector = le64_to_cpu(write_zeroes->slba) << (req->ns->blksize_shift - 9); - nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) << + nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) << (req->ns->blksize_shift - 9)) + 1; if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, @@ -230,7 +230,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req) return 0; case nvme_cmd_dsm: req->execute = nvmet_execute_dsm; - req->data_len = le32_to_cpu(cmd->dsm.nr + 1) * + req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) * sizeof(struct nvme_dsm_range); return 0; case nvme_cmd_write_zeroes: diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig index dfb8a69afc28..d2d2ba5b8a68 100644 --- a/drivers/pci/dwc/Kconfig +++ b/drivers/pci/dwc/Kconfig @@ -89,6 +89,7 @@ config PCI_HISI depends on PCI_MSI_IRQ_DOMAIN select PCIEPORTBUS select PCIE_DW_HOST + select PCI_HOST_COMMON help Say Y here if you want PCIe controller support on HiSilicon Hip05 and Hip06 SoCs diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c index fcd3ef845883..6d23683c0892 100644 --- a/drivers/pci/dwc/pcie-artpec6.c +++ b/drivers/pci/dwc/pcie-artpec6.c @@ -234,6 +234,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, return 0; } +static const struct dw_pcie_ops dw_pcie_ops = { +}; + static int artpec6_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -252,6 +255,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev) return -ENOMEM; pci->dev = dev; + pci->ops = &dw_pcie_ops; artpec6_pcie->pci = pci; diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c index b6c832ba39dd..f20d494922ab 100644 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ b/drivers/pci/dwc/pcie-designware-plat.c @@ -86,6 +86,9 @@ static int 
dw_plat_add_pcie_port(struct pcie_port *pp, return 0; } +static const struct dw_pcie_ops dw_pcie_ops = { +}; + static int dw_plat_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -103,6 +106,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev) return -ENOMEM; pci->dev = dev; + pci->ops = &dw_pcie_ops; dw_plat_pcie->pci = pci; diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c index b89c373555c5..6e031b522529 100644 --- a/drivers/pci/host/pci-thunder-pem.c +++ b/drivers/pci/host/pci-thunder-pem.c @@ -375,7 +375,6 @@ static void thunder_pem_legacy_fw(struct acpi_pci_root *root, index -= node * PEM_MAX_DOM_IN_NODE; res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | FIELD_PREP(PEM_INDX_MASK, index); - res_pem->end = res_pem->start + SZ_16M - 1; res_pem->flags = IORESOURCE_MEM; } @@ -399,8 +398,15 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg) */ if (ret) { thunder_pem_legacy_fw(root, res_pem); - /* Reserve PEM-specific resources and PCI configuration space */ + /* + * Reserve 64K size PEM specific resources. The full 16M range + * size is required for thunder_pem_init() call. + */ + res_pem->end = res_pem->start + SZ_64K - 1; thunder_pem_reserve_range(dev, root->segment, res_pem); + res_pem->end = res_pem->start + SZ_16M - 1; + + /* Reserve PCI configuration space as well. */ thunder_pem_reserve_range(dev, root->segment, &cfg->res); } diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index d69046537b75..32822b0d9cd0 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -2010,29 +2010,57 @@ out_err: return ERR_PTR(ret); } -static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) +static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev) { pctldev->p = create_pinctrl(pctldev->dev, pctldev); - if (!IS_ERR(pctldev->p)) { - kref_get(&pctldev->p->users); - pctldev->hog_default = - pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT); - if (IS_ERR(pctldev->hog_default)) { - dev_dbg(pctldev->dev, - "failed to lookup the default state\n"); - } else { - if (pinctrl_select_state(pctldev->p, - pctldev->hog_default)) - dev_err(pctldev->dev, - "failed to select default state\n"); - } + if (PTR_ERR(pctldev->p) == -ENODEV) { + dev_dbg(pctldev->dev, "no hogs found\n"); - pctldev->hog_sleep = - pinctrl_lookup_state(pctldev->p, - PINCTRL_STATE_SLEEP); - if (IS_ERR(pctldev->hog_sleep)) - dev_dbg(pctldev->dev, - "failed to lookup the sleep state\n"); + return 0; + } + + if (IS_ERR(pctldev->p)) { + dev_err(pctldev->dev, "error claiming hogs: %li\n", + PTR_ERR(pctldev->p)); + + return PTR_ERR(pctldev->p); + } + + kref_get(&pctldev->p->users); + pctldev->hog_default = + pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT); + if (IS_ERR(pctldev->hog_default)) { + dev_dbg(pctldev->dev, + "failed to lookup the default state\n"); + } else { + if (pinctrl_select_state(pctldev->p, + pctldev->hog_default)) + dev_err(pctldev->dev, + "failed to select default state\n"); + } + + pctldev->hog_sleep = + pinctrl_lookup_state(pctldev->p, + PINCTRL_STATE_SLEEP); + if (IS_ERR(pctldev->hog_sleep)) + dev_dbg(pctldev->dev, + "failed to lookup the sleep state\n"); + + return 0; +} + +int pinctrl_enable(struct pinctrl_dev *pctldev) +{ + int error; + + error = pinctrl_claim_hogs(pctldev); + if (error) { + dev_err(pctldev->dev, "could not claim hogs: %i\n", + error); + mutex_destroy(&pctldev->mutex); + kfree(pctldev); + + return error; } mutex_lock(&pinctrldev_list_mutex); @@ -2043,6 +2071,7 
@@ static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) return 0; } +EXPORT_SYMBOL_GPL(pinctrl_enable); /** * pinctrl_register() - register a pin controller device @@ -2065,25 +2094,30 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, if (IS_ERR(pctldev)) return pctldev; - error = pinctrl_create_and_start(pctldev); - if (error) { - mutex_destroy(&pctldev->mutex); - kfree(pctldev); - + error = pinctrl_enable(pctldev); + if (error) return ERR_PTR(error); - } return pctldev; } EXPORT_SYMBOL_GPL(pinctrl_register); +/** + * pinctrl_register_and_init() - register and init pin controller device + * @pctldesc: descriptor for this pin controller + * @dev: parent device for this pin controller + * @driver_data: private pin controller data for this pin controller + * @pctldev: pin controller device + * + * Note that pinctrl_enable() still needs to be manually called after + * this once the driver is ready. + */ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data, struct pinctrl_dev **pctldev) { struct pinctrl_dev *p; - int error; p = pinctrl_init_controller(pctldesc, dev, driver_data); if (IS_ERR(p)) @@ -2097,15 +2131,6 @@ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, */ *pctldev = p; - error = pinctrl_create_and_start(p); - if (error) { - mutex_destroy(&p->mutex); - kfree(p); - *pctldev = NULL; - - return error; - } - return 0; } EXPORT_SYMBOL_GPL(pinctrl_register_and_init); diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index a7ace9e1ad81..74bd90dfd7b1 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -790,7 +790,7 @@ int imx_pinctrl_probe(struct platform_device *pdev, dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); - return 0; + return pinctrl_enable(ipctl->pctl); free: imx_free_resources(ipctl); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index f80134e3e0b6..9ff790174906 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -13,6 +13,7 @@ * published by the Free Software Foundation. */ +#include <linux/dmi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } +/* + * Certain machines seem to hardcode Linux IRQ numbers in their ACPI + * tables. Since we leave GPIOs that are not capable of generating + * interrupts out of the irqdomain the numbering will be different and + * cause devices using the hardcoded IRQ numbers to fail. In order not to + * break such machines we will only mask pins from the irqdomain if the machine + * is not listed below.
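
The quirk table that follows feeds dmi_check_system(), which walks a zero-terminated table and returns how many entries match the running machine's DMI data, so a driver can gate a workaround on the result. A minimal sketch of that gating pattern, with hypothetical board strings rather than the entry below:

    #include <linux/dmi.h>

    /* Illustrative quirk table; the vendor/product strings are made up. */
    static const struct dmi_system_id example_quirk_table[] = {
    	{
    		.ident = "Example Board",
    		.matches = {
    			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
    			DMI_MATCH(DMI_PRODUCT_NAME, "Board-1"),
    		},
    	},
    	{ }	/* terminating entry, required by dmi_check_system() */
    };

    static bool example_needs_quirk(void)
    {
    	return dmi_check_system(example_quirk_table) > 0;
    }
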
+ */ +static const struct dmi_system_id chv_no_valid_mask[] = { + { + /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ + .ident = "Acer Chromebook (CYAN)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), + DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"), + DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"), + }, + }, + { } +}; + static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) { const struct chv_gpio_pinrange *range; struct gpio_chip *chip = &pctrl->chip; + bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); int ret, i, offset; *chip = chv_gpio_chip; @@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) chip->label = dev_name(pctrl->dev); chip->parent = pctrl->dev; chip->base = -1; - chip->irq_need_valid_mask = true; + chip->irq_need_valid_mask = need_valid_mask; ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); if (ret) { @@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) intsel &= CHV_PADCTRL0_INTSEL_MASK; intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; - if (intsel >= pctrl->community->nirqs) + if (need_valid_mask && intsel >= pctrl->community->nirqs) clear_bit(i, chip->irq_valid_mask); } diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 8b2d45e85bae..9c267dcda094 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1781,7 +1781,7 @@ static int pcs_probe(struct platform_device *pdev) dev_info(pcs->dev, "%i pins at pa %p size %u\n", pcs->desc.npins, pcs->base, pcs->size); - return 0; + return pinctrl_enable(pcs->pctl); free: pcs_free_resources(pcs); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index f9b49967f512..63e51b56a22a 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { /* pin banks of exynos5433 pin-controller - ALIVE */ static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = { - EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), - EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), - EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), - EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), - EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), - EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), - EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), - EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), - EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), + EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), + EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), + EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), + EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), + EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), + EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), + EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), + EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), + EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), }; /* pin banks of exynos5433 pin-controller - AUD */ static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = { - EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), - EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), + EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), + EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), }; /* pin banks of exynos5433 pin-controller - CPIF */ static const struct samsung_pin_bank_data exynos5433_pin_banks2[] 
__initconst = { - EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), + EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), }; /* pin banks of exynos5433 pin-controller - eSE */ static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = { - EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), + EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), }; /* pin banks of exynos5433 pin-controller - FINGER */ static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = { - EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), + EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), }; /* pin banks of exynos5433 pin-controller - FSYS */ static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = { - EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), - EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), - EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), - EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), - EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), - EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), + EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), + EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), + EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), + EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), + EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), + EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), }; /* pin banks of exynos5433 pin-controller - IMEM */ static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = { - EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), + EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), }; /* pin banks of exynos5433 pin-controller - NFC */ static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = { - EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), + EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), }; /* pin banks of exynos5433 pin-controller - PERIC */ static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = { - EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), - EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), - EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), - EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), - EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), - EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), - EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), - EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), - EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), - EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), - EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), - EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), - EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), - EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), - EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), - EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), - EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), + EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), + EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), + EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), + EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), + EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), + EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), + EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), + EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), + EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), + EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), + EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), + EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), + EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), + EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), + 
EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), + EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), + EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), }; /* pin banks of exynos5433 pin-controller - TOUCH */ static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = { - EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), + EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), }; /* diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h index a473092fb8d2..cd046eb7d705 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.h +++ b/drivers/pinctrl/samsung/pinctrl-exynos.h @@ -79,17 +79,6 @@ .name = id \ } -#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \ - { \ - .type = &bank_type_alive, \ - .pctl_offset = reg, \ - .nr_pins = pins, \ - .eint_type = EINT_TYPE_WKUP, \ - .eint_offset = offs, \ - .name = id, \ - .pctl_res_idx = pctl_idx, \ - } \ - #define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \ { \ .type = &exynos5433_bank_type_off, \ diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c index 08150a321be6..a70157f0acf4 100644 --- a/drivers/pinctrl/sh-pfc/pinctrl.c +++ b/drivers/pinctrl/sh-pfc/pinctrl.c @@ -816,6 +816,13 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc) pmx->pctl_desc.pins = pmx->pins; pmx->pctl_desc.npins = pfc->info->nr_pins; - return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, - &pmx->pctl); + ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, + &pmx->pctl); + if (ret) { + dev_err(pfc->dev, "could not register: %i\n", ret); + + return ret; + } + + return pinctrl_enable(pmx->pctl); } diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index 717e3404900c..362c50918c13 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -893,6 +893,8 @@ static int ti_iodelay_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iod); + return pinctrl_enable(iod->pctl); + exit_out: of_node_put(np); return ret; diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c index 053088b9b66e..c1527cb645be 100644 --- a/drivers/pwm/pwm-lpss-pci.c +++ b/drivers/pwm/pwm-lpss-pci.c @@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = { .clk_rate = 19200000, .npwm = 4, .base_unit_bits = 22, + .bypass = true, +}; + +/* Tangier */ +static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = { + .clk_rate = 19200000, + .npwm = 4, + .base_unit_bits = 22, }; static int pwm_lpss_probe_pci(struct pci_dev *pdev, @@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info}, { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info}, - { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info}, + { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info}, { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info}, { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index b22b6fdadb9a..5d6ed1507d29 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = { .clk_rate = 19200000, .npwm = 4, .base_unit_bits = 22, + 
.bypass = true, }; static int pwm_lpss_probe_platform(struct platform_device *pdev) diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 689d2c1cbead..8db0d40ccacd 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value) writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM); } -static int pwm_lpss_update(struct pwm_device *pwm) +static int pwm_lpss_wait_for_update(struct pwm_device *pwm) { struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip); const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM; @@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm) u32 val; int err; - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); - /* * PWM Configuration register has SW_UPDATE bit that is set when a new * configuration is written to the register. The bit is automatically @@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, pwm_lpss_write(pwm, ctrl); } +static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond) +{ + if (cond) + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); +} + static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { @@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; } pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - ret = pwm_lpss_update(pwm); + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); + pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false); + ret = pwm_lpss_wait_for_update(pwm); if (ret) { pm_runtime_put(chip->dev); return ret; } - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); + pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true); } else { ret = pwm_lpss_is_updating(pwm); if (ret) return ret; pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - return pwm_lpss_update(pwm); + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); + return pwm_lpss_wait_for_update(pwm); } } else if (pwm_is_enabled(pwm)) { pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h index c94cd7c2695d..98306bb02cfe 100644 --- a/drivers/pwm/pwm-lpss.h +++ b/drivers/pwm/pwm-lpss.h @@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo { unsigned long clk_rate; unsigned int npwm; unsigned long base_unit_bits; + bool bypass; }; struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index ef89df1f7336..744d56197286 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } +static int rockchip_pwm_enable(struct pwm_chip *chip, + struct pwm_device *pwm, + bool enable, + enum pwm_polarity polarity) +{ + struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + int ret; + + if (enable) { + ret = clk_enable(pc->clk); + if (ret) + return ret; + } + + pc->data->set_enable(chip, pwm, enable, polarity); + + if (!enable) + clk_disable(pc->clk); + + return 0; +} + static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { @@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; if (state->polarity != curstate.polarity && enabled) { - pc->data->set_enable(chip, pwm, false, state->polarity); + 
ret = rockchip_pwm_enable(chip, pwm, false, state->polarity); + if (ret) + goto out; enabled = false; } ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period); if (ret) { if (enabled != curstate.enabled) - pc->data->set_enable(chip, pwm, !enabled, - state->polarity); - + rockchip_pwm_enable(chip, pwm, !enabled, + state->polarity); goto out; } - if (state->enabled != enabled) - pc->data->set_enable(chip, pwm, state->enabled, - state->polarity); + if (state->enabled != enabled) { + ret = rockchip_pwm_enable(chip, pwm, state->enabled, + state->polarity); + if (ret) + goto out; + } /* * Update the state with the real hardware, which can differ a bit diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 19125d72f322..e5a2d590a104 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -496,7 +496,7 @@ static void scsi_run_queue(struct request_queue *q) scsi_starved_list_run(sdev->host); if (q->mq_ops) - blk_mq_start_stopped_hw_queues(q, false); + blk_mq_run_hw_queues(q, false); else blk_run_queue(q); } @@ -667,7 +667,7 @@ static bool scsi_end_request(struct request *req, int error, !list_empty(&sdev->host->starved_list)) kblockd_schedule_work(&sdev->requeue_work); else - blk_mq_start_stopped_hw_queues(q, true); + blk_mq_run_hw_queues(q, true); } else { unsigned long flags; @@ -1974,7 +1974,7 @@ out: case BLK_MQ_RQ_QUEUE_BUSY: if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev)) - blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); + blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY); break; case BLK_MQ_RQ_QUEUE_ERROR: /* diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 7cbad0d45b9c..6ba270e0494d 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -409,6 +409,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) ret = PTR_ERR(vmfile); goto out; } + vmfile->f_mode |= FMODE_LSEEK; asma->file = vmfile; } get_file(asma->file); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index a91802432f2f..e3f9ed3690b7 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *); int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) { - iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); - return 0; + return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); } EXPORT_SYMBOL(iscsit_queue_rsp); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index bf40f03755dd..344e8448869c 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid( static int lio_queue_data_in(struct se_cmd *se_cmd) { struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); + struct iscsi_conn *conn = cmd->conn; cmd->i_state = ISTATE_SEND_DATAIN; - cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd); - - return 0; + return conn->conn_transport->iscsit_queue_data_in(conn, cmd); } static int lio_write_pending(struct se_cmd *se_cmd) @@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd) static int lio_queue_status(struct se_cmd *se_cmd) { struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); + struct iscsi_conn *conn = cmd->conn; cmd->i_state = ISTATE_SEND_STATUS; if 
(cmd->se_cmd.scsi_status || cmd->sense_reason) { - iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); - return 0; + return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); } - cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); - - return 0; + return conn->conn_transport->iscsit_queue_status(conn, cmd); } static void lio_queue_tm_rsp(struct se_cmd *se_cmd) diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index e65bf78ceef3..fce627628200 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -782,22 +782,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) SET_PSTATE_REPLY_OPTIONAL(param); /* - * The GlobalSAN iSCSI Initiator for MacOSX does - * not respond to MaxBurstLength, FirstBurstLength, - * DefaultTime2Wait or DefaultTime2Retain parameter keys. - * So, we set them to 'reply optional' here, and assume the - * the defaults from iscsi_parameters.h if the initiator - * is not RFC compliant and the keys are not negotiated. - */ - if (!strcmp(param->name, MAXBURSTLENGTH)) - SET_PSTATE_REPLY_OPTIONAL(param); - if (!strcmp(param->name, FIRSTBURSTLENGTH)) - SET_PSTATE_REPLY_OPTIONAL(param); - if (!strcmp(param->name, DEFAULTTIME2WAIT)) - SET_PSTATE_REPLY_OPTIONAL(param); - if (!strcmp(param->name, DEFAULTTIME2RETAIN)) - SET_PSTATE_REPLY_OPTIONAL(param); - /* * Required for gPXE iSCSI boot client */ if (!strcmp(param->name, MAXCONNECTIONS)) diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 5041a9c8bdcb..7d3e2fcc26a0 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue( } } -void iscsit_add_cmd_to_response_queue( +int iscsit_add_cmd_to_response_queue( struct iscsi_cmd *cmd, struct iscsi_conn *conn, u8 state) @@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue( if (!qr) { pr_err("Unable to allocate memory for" " struct iscsi_queue_req\n"); - return; + return -ENOMEM; } INIT_LIST_HEAD(&qr->qr_list); qr->cmd = cmd; @@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue( spin_unlock_bh(&conn->response_queue_lock); wake_up(&conn->queues_wq); + return 0; } struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) @@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) { struct se_cmd *se_cmd = NULL; int rc; + bool op_scsi = false; /* * Determine if a struct se_cmd is associated with * this struct iscsi_cmd. 
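
Earlier in this hunk, iscsit_add_cmd_to_response_queue() goes from void to int so that an allocation failure surfaces as -ENOMEM instead of being silently dropped. The shape of that conversion, as a sketch with placeholder types rather than the iSCSI target's own:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct example_cmd;

    struct example_qr {
    	struct example_cmd	*cmd;
    	struct list_head	qr_list;
    };

    struct example_conn {
    	spinlock_t		response_queue_lock;
    	struct list_head	response_queue_list;
    	wait_queue_head_t	queues_wq;
    };

    /* Queueing helper that reports failure; callers propagate the error
     * so queue-full handling can take over instead of losing the response. */
    static int example_queue_response(struct example_conn *conn,
    				  struct example_cmd *cmd)
    {
    	struct example_qr *qr = kzalloc(sizeof(*qr), GFP_ATOMIC);

    	if (!qr)
    		return -ENOMEM;

    	qr->cmd = cmd;
    	spin_lock_bh(&conn->response_queue_lock);
    	list_add_tail(&qr->qr_list, &conn->response_queue_list);
    	spin_unlock_bh(&conn->response_queue_lock);

    	wake_up(&conn->queues_wq);
    	return 0;
    }
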
*/ switch (cmd->iscsi_opcode) { case ISCSI_OP_SCSI_CMD: - se_cmd = &cmd->se_cmd; - __iscsit_free_cmd(cmd, true, shutdown); + op_scsi = true; /* * Fallthrough */ case ISCSI_OP_SCSI_TMFUNC: - rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); - if (!rc && shutdown && se_cmd && se_cmd->se_sess) { - __iscsit_free_cmd(cmd, true, shutdown); + se_cmd = &cmd->se_cmd; + __iscsit_free_cmd(cmd, op_scsi, shutdown); + rc = transport_generic_free_cmd(se_cmd, shutdown); + if (!rc && shutdown && se_cmd->se_sess) { + __iscsit_free_cmd(cmd, op_scsi, shutdown); target_put_sess_cmd(se_cmd); } break; diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 8ff08856516a..9e4197af8708 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd struct iscsi_conn_recovery **, itt_t); extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *); -extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); +extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *); extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fd7c16a7ca6e..fc4a9c303d55 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) /* * Set the ASYMMETRIC ACCESS State */ - buf[off++] |= (atomic_read( - &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff); + buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff; /* * Set supported ASYMMETRIC ACCESS State bits */ @@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd) spin_lock(&lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; - out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state; nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; // XXX: keeps using tg_pt_gp without reference after unlock @@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata( } /* - * Called with tg_pt_gp->tg_pt_gp_md_mutex held + * Called with tg_pt_gp->tg_pt_gp_transition_mutex held */ static int core_alua_update_tpg_primary_metadata( struct t10_alua_tg_pt_gp *tg_pt_gp) @@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata( "alua_access_state=0x%02x\n" "alua_access_status=0x%02x\n", tg_pt_gp->tg_pt_gp_id, - tg_pt_gp->tg_pt_gp_alua_pending_state, + tg_pt_gp->tg_pt_gp_alua_access_state, tg_pt_gp->tg_pt_gp_alua_access_status); snprintf(path, ALUA_METADATA_PATH_LEN, @@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) spin_unlock(&tg_pt_gp->tg_pt_gp_lock); } -static void core_alua_do_transition_tg_pt_work(struct work_struct *work) -{ - struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, - struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work); - struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; - bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == - ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); - - /* - * Update the ALUA metadata buf 
that has been allocated in - * core_alua_do_port_transition(), this metadata will be written - * to struct file. - * - * Note that there is the case where we do not want to update the - * metadata when the saved metadata is being parsed in userspace - * when setting the existing port access state and access status. - * - * Also note that the failure to write out the ALUA metadata to - * struct file does NOT affect the actual ALUA transition. - */ - if (tg_pt_gp->tg_pt_gp_write_metadata) { - mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); - core_alua_update_tpg_primary_metadata(tg_pt_gp); - mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); - } - /* - * Set the current primary ALUA access state to the requested new state - */ - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - tg_pt_gp->tg_pt_gp_alua_pending_state); - - pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" - " from primary access state %s to %s\n", (explicit) ? "explicit" : - "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), - tg_pt_gp->tg_pt_gp_id, - core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), - core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); - - core_alua_queue_state_change_ua(tg_pt_gp); - - spin_lock(&dev->t10_alua.tg_pt_gps_lock); - atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - - if (tg_pt_gp->tg_pt_gp_transition_complete) - complete(tg_pt_gp->tg_pt_gp_transition_complete); -} - static int core_alua_do_transition_tg_pt( struct t10_alua_tg_pt_gp *tg_pt_gp, int new_state, int explicit) { - struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; - DECLARE_COMPLETION_ONSTACK(wait); + int prev_state; + mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex); /* Nothing to be done here */ - if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) + if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) { + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return 0; + } - if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) + if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) { + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return -EAGAIN; - - /* - * Flush any pending transitions - */ - if (!explicit) - flush_work(&tg_pt_gp->tg_pt_gp_transition_work); + } /* * Save the old primary ALUA access state, and set the current state * to ALUA_ACCESS_STATE_TRANSITION. */ - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_TRANSITION); + prev_state = tg_pt_gp->tg_pt_gp_alua_access_state; + tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION; tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; core_alua_queue_state_change_ua(tg_pt_gp); - if (new_state == ALUA_ACCESS_STATE_TRANSITION) + if (new_state == ALUA_ACCESS_STATE_TRANSITION) { + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return 0; - - tg_pt_gp->tg_pt_gp_alua_previous_state = - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + } /* * Check for the optional ALUA primary state transition delay @@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt( msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); /* - * Take a reference for workqueue item + * Set the current primary ALUA access state to the requested new state */ - spin_lock(&dev->t10_alua.tg_pt_gps_lock); - atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + tg_pt_gp->tg_pt_gp_alua_access_state = new_state; - schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); - if (explicit) { - tg_pt_gp->tg_pt_gp_transition_complete = &wait; - wait_for_completion(&wait); - tg_pt_gp->tg_pt_gp_transition_complete = NULL; + /* + * Update the ALUA metadata buf that has been allocated in + * core_alua_do_port_transition(), this metadata will be written + * to struct file. + * + * Note that there is the case where we do not want to update the + * metadata when the saved metadata is being parsed in userspace + * when setting the existing port access state and access status. + * + * Also note that the failure to write out the ALUA metadata to + * struct file does NOT affect the actual ALUA transition. + */ + if (tg_pt_gp->tg_pt_gp_write_metadata) { + core_alua_update_tpg_primary_metadata(tg_pt_gp); } + pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" + " from primary access state %s to %s\n", (explicit) ? 
"explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id, + core_alua_dump_state(prev_state), + core_alua_dump_state(new_state)); + + core_alua_queue_state_change_ua(tg_pt_gp); + + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return 0; } @@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, } INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); - mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); + mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); - INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work, - core_alua_do_transition_tg_pt_work); tg_pt_gp->tg_pt_gp_dev = dev; - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); + tg_pt_gp->tg_pt_gp_alua_access_state = + ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; /* * Enable both explicit and implicit ALUA support by default */ @@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp( dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - flush_work(&tg_pt_gp->tg_pt_gp_transition_work); - /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by * core_alua_get_tg_pt_gp_by_name() in @@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) "Primary Access Status: %s\nTG Port Secondary Access" " State: %s\nTG Port Secondary Access Status: %s\n", config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, - core_alua_dump_state(atomic_read( - &tg_pt_gp->tg_pt_gp_alua_access_state)), + core_alua_dump_state( + tg_pt_gp->tg_pt_gp_alua_access_state), core_alua_dump_status( tg_pt_gp->tg_pt_gp_alua_access_status), atomic_read(&lun->lun_tg_pt_secondary_offline) ? 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 38b5025e4c7a..70657fd56440 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", - atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state)); + to_tg_pt_gp(item)->tg_pt_gp_alua_access_state); } static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index d8a16ca6baa5..d1e6cab8e3d3 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link( pr_err("Source se_lun->lun_se_dev does not exist\n"); return -EINVAL; } + if (lun->lun_shutdown) { + pr_err("Unable to create mappedlun symlink because" + " lun->lun_shutdown=true\n"); + return -EINVAL; + } se_tpg = lun->lun_tpg; nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 6fb191914f45..dfaef4d3b2d2 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -642,6 +642,8 @@ void core_tpg_remove_lun( */ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + lun->lun_shutdown = true; + core_clear_lun_from_tpg(lun, tpg); /* * Wait for any active I/O references to percpu se_lun->lun_ref to @@ -663,6 +665,8 @@ void core_tpg_remove_lun( } if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) hlist_del_rcu(&lun->link); + + lun->lun_shutdown = false; mutex_unlock(&tpg->tpg_lun_mutex); percpu_ref_exit(&lun->lun_ref); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index b1a3cdb29468..a0cd56ee5fe9 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache; struct kmem_cache *t10_alua_lba_map_mem_cache; static void transport_complete_task_attr(struct se_cmd *cmd); +static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); static void transport_handle_queue_full(struct se_cmd *cmd, - struct se_device *dev); + struct se_device *dev, int err, bool write_pending); static int transport_put_cmd(struct se_cmd *cmd); static void target_complete_ok_work(struct work_struct *work); @@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work) if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) transport_write_pending_qf(cmd); - else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) + else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || + cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) transport_complete_qf(cmd); } } @@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, } trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; goto check_stop; default: @@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, } ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; check_stop: @@ -1739,8 +1741,7 @@ check_stop: return; queue_full: - cmd->t_state = TRANSPORT_COMPLETE_QF_OK; - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, 
cmd->se_dev, ret, false); } EXPORT_SYMBOL(transport_generic_request_failure); @@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd) int ret = 0; transport_complete_task_attr(cmd); + /* + * If a fabric driver ->write_pending() or ->queue_data_in() callback + * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and + * the same callbacks should not be retried. Return CHECK_CONDITION + * if a scsi_status is not already set. + * + * If a fabric driver ->queue_status() has returned nonzero, always + * keep retrying no matter what. + */ + if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { + if (cmd->scsi_status) + goto queue_status; - if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { - trace_target_cmd_complete(cmd); - ret = cmd->se_tfo->queue_status(cmd); - goto out; + cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; + cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; + translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); + goto queue_status; } + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + goto queue_status; + switch (cmd->data_direction) { case DMA_FROM_DEVICE: if (cmd->scsi_status) @@ -2007,19 +2024,33 @@ queue_status: break; } -out: if (ret < 0) { - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); return; } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); } -static void transport_handle_queue_full( - struct se_cmd *cmd, - struct se_device *dev) +static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, + int err, bool write_pending) { + /* + * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or + * ->queue_data_in() callbacks from new process context. + * + * Otherwise, for other errors, transport_complete_qf() will send + * CHECK_CONDITION via ->queue_status() instead of attempting to + * retry associated fabric driver data-transfer callbacks. + */ + if (err == -EAGAIN || err == -ENOMEM) { + cmd->t_state = (write_pending) ? 
TRANSPORT_COMPLETE_QF_WP : + TRANSPORT_COMPLETE_QF_OK; + } else { + pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); + cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; + } + spin_lock_irq(&dev->qf_cmd_lock); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc_mb(&dev->dev_qf_count); @@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work) WARN_ON(!cmd->scsi_status); ret = transport_send_check_condition_and_sense( cmd, 0, 1); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work) } else if (rc) { ret = transport_send_check_condition_and_sense(cmd, rc, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -2134,7 +2165,7 @@ queue_rsp: if (target_read_prot_action(cmd)) { ret = transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -2144,7 +2175,7 @@ queue_rsp: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; case DMA_TO_DEVICE: @@ -2157,7 +2188,7 @@ queue_rsp: atomic_long_add(cmd->data_length, &cmd->se_lun->lun_stats.tx_data_octets); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; } @@ -2166,7 +2197,7 @@ queue_rsp: queue_status: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; default: @@ -2180,8 +2211,8 @@ queue_status: queue_full: pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," " data_direction: %d\n", cmd, cmd->data_direction); - cmd->t_state = TRANSPORT_COMPLETE_QF_OK; - transport_handle_queue_full(cmd, cmd->se_dev); + + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } void target_free_sgl(struct scatterlist *sgl, int nents) @@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; - /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ - WARN_ON(ret); - - return (!ret) ? 
0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return 0; queue_full: pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); - cmd->t_state = TRANSPORT_COMPLETE_QF_WP; - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); return 0; } EXPORT_SYMBOL(transport_generic_new_cmd); @@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd) int ret; ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) { + if (ret) { pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); } } @@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) __releases(&cmd->t_state_lock) __acquires(&cmd->t_state_lock) { + int ret; + assert_spin_locked(&cmd->t_state_lock); WARN_ON_ONCE(!irqs_disabled()); @@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) trace_target_cmd_complete(cmd); spin_unlock_irq(&cmd->t_state_lock); - cmd->se_tfo->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret) + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); spin_lock_irq(&cmd->t_state_lock); return 1; @@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status); void transport_send_task_abort(struct se_cmd *cmd) { unsigned long flags; + int ret; spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { @@ -3090,7 +3122,9 @@ send_abort: cmd->t_task_cdb[0], cmd->tag); trace_target_cmd_complete(cmd); - cmd->se_tfo->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret) + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } static void target_tmr_work(struct work_struct *work) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index c6874c38a10b..f615c3bbb73e 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd) DATA_BLOCK_BITS); } -static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap, - struct scatterlist *data_sg, unsigned int data_nents) +static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, + bool bidi) { + struct se_cmd *se_cmd = cmd->se_cmd; int i, block; int block_remaining = 0; void *from, *to; size_t copy_bytes, from_offset; - struct scatterlist *sg; + struct scatterlist *sg, *data_sg; + unsigned int data_nents; + DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS); + + bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS); + + if (!bidi) { + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + } else { + uint32_t count; + + /* + * For bidi case, the first count blocks are for Data-Out + * buffer blocks, and before gathering the Data-In buffer + * the Data-Out buffer blocks should be discarded. 
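
A sketch of that discard step on its own: clear the first count set bits in a scratch copy of the command's block bitmap so that gathering then starts at the Data-In blocks. EXAMPLE_BLOCK_BITS stands in for the driver's DATA_BLOCK_BITS:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    #define EXAMPLE_BLOCK_BITS	256	/* stand-in for DATA_BLOCK_BITS */

    /* Drop the leading Data-Out blocks from a scratch bitmap copy; the
     * caller is expected to have at least 'count' bits set. */
    static void example_skip_data_out_blocks(unsigned long *bitmap,
    					 unsigned int count)
    {
    	while (count--) {
    		unsigned int block = find_first_bit(bitmap,
    						    EXAMPLE_BLOCK_BITS);

    		if (block >= EXAMPLE_BLOCK_BITS)	/* defensive */
    			break;
    		clear_bit(block, bitmap);
    	}
    }
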
+ */ + count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE); + while (count--) { + block = find_first_bit(bitmap, DATA_BLOCK_BITS); + clear_bit(block, bitmap); + } + + data_sg = se_cmd->t_bidi_data_sg; + data_nents = se_cmd->t_bidi_data_nents; + } for_each_sg(data_sg, sg, data_nents, i) { int sg_remaining = sg->length; to = kmap_atomic(sg_page(sg)) + sg->offset; while (sg_remaining > 0) { if (block_remaining == 0) { - block = find_first_bit(cmd_bitmap, + block = find_first_bit(bitmap, DATA_BLOCK_BITS); block_remaining = DATA_BLOCK_SIZE; - clear_bit(block, cmd_bitmap); + clear_bit(block, bitmap); } copy_bytes = min_t(size_t, sg_remaining, block_remaining); @@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d return true; } +static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd) +{ + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE); + + if (se_cmd->se_cmd_flags & SCF_BIDI) { + BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); + data_length += round_up(se_cmd->t_bidi_data_sg->length, + DATA_BLOCK_SIZE); + } + + return data_length; +} + +static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd) +{ + size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); + + return data_length / DATA_BLOCK_SIZE; +} + static sense_reason_t tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) { @@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) uint32_t cmd_head; uint64_t cdb_off; bool copy_to_data_area; - size_t data_length; + size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS); if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) @@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) * expensive to tell how many regions are freed in the bitmap */ base_command_size = max(offsetof(struct tcmu_cmd_entry, - req.iov[se_cmd->t_bidi_data_nents + - se_cmd->t_data_nents]), + req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]), sizeof(struct tcmu_cmd_entry)); command_size = base_command_size + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); @@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) mb = udev->mb_addr; cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ - data_length = se_cmd->data_length; - if (se_cmd->se_cmd_flags & SCF_BIDI) { - BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); - data_length += se_cmd->t_bidi_data_sg->length; - } if ((command_size > (udev->cmdr_size / 2)) || data_length > udev->data_size) { pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " @@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) entry->req.iov_dif_cnt = 0; /* Handle BIDI commands */ - iov_cnt = 0; - alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, - se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false); - entry->req.iov_bidi_cnt = iov_cnt; - + if (se_cmd->se_cmd_flags & SCF_BIDI) { + iov_cnt = 0; + iov++; + alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, + se_cmd->t_bidi_data_nents, &iov, &iov_cnt, + false); + entry->req.iov_bidi_cnt = iov_cnt; + } /* cmd's data_bitmap is what changed in process */ bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS); @@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * se_cmd->scsi_sense_length); free_data_area(udev, cmd); } else if (se_cmd->se_cmd_flags & SCF_BIDI) { - DECLARE_BITMAP(bitmap, 
DATA_BLOCK_BITS); - /* Get Data-In buffer before clean up */ - bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS); - gather_data_area(udev, bitmap, - se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents); + gather_data_area(udev, cmd, true); free_data_area(udev, cmd); } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { - DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS); - - bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS); - gather_data_area(udev, bitmap, - se_cmd->t_data_sg, se_cmd->t_data_nents); + gather_data_area(udev, cmd, false); free_data_area(udev, cmd); } else if (se_cmd->data_direction == DMA_TO_DEVICE) { free_data_area(udev, cmd); @@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag if (ret < 0) return ret; - if (!val) { - pr_err("Illegal value for cmd_time_out\n"); - return -EINVAL; - } - udev->cmd_time_out = val * MSEC_PER_SEC; return count; } diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index d2351139342f..a82e2bd5ea34 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu) usb_ep_free_request(fu->ep_in, fu->bot_req_in); usb_ep_free_request(fu->ep_out, fu->bot_req_out); usb_ep_free_request(fu->ep_out, fu->cmd.req); - usb_ep_free_request(fu->ep_out, fu->bot_status.req); + usb_ep_free_request(fu->ep_in, fu->bot_status.req); kfree(fu->cmd.buf); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 8c4dc1e1f94f..b827a8113e26 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -10,6 +10,7 @@ #include <linux/efi.h> #include <linux/errno.h> #include <linux/fb.h> +#include <linux/pci.h> #include <linux/platform_device.h> #include <linux/screen_info.h> #include <video/vga.h> @@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = { }; ATTRIBUTE_GROUPS(efifb); +static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */ + static int efifb_probe(struct platform_device *dev) { struct fb_info *info; @@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev) unsigned int size_total; char *option = NULL; - if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled) return -ENODEV; if (fb_get_options("efifb", &option)) @@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = { }; builtin_platform_driver(efifb_driver); + +#if defined(CONFIG_PCI) && !defined(CONFIG_X86) + +static bool pci_bar_found; /* did we find a BAR matching the efifb base? 
*/ + +static void claim_efifb_bar(struct pci_dev *dev, int idx) +{ + u16 word; + + pci_bar_found = true; + + pci_read_config_word(dev, PCI_COMMAND, &word); + if (!(word & PCI_COMMAND_MEMORY)) { + pci_dev_disabled = true; + dev_err(&dev->dev, + "BAR %d: assigned to efifb but device is disabled!\n", + idx); + return; + } + + if (pci_claim_resource(dev, idx)) { + pci_dev_disabled = true; + dev_err(&dev->dev, + "BAR %d: failed to claim resource for efifb!\n", idx); + return; + } + + dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx); +} + +static void efifb_fixup_resources(struct pci_dev *dev) +{ + u64 base = screen_info.lfb_base; + u64 size = screen_info.lfb_size; + int i; + + if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) + return; + + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) + base |= (u64)screen_info.ext_lfb_base << 32; + + if (!base) + return; + + for (i = 0; i < PCI_STD_RESOURCE_END; i++) { + struct resource *res = &dev->resource[i]; + + if (!(res->flags & IORESOURCE_MEM)) + continue; + + if (res->start <= base && res->end >= base + size - 1) { + claim_efifb_bar(dev, i); + break; + } + } +} +DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, + 16, efifb_fixup_resources); + +#endif diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 1abba07b84b3..f4cbfb3b8a09 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev) return 0; } -static void check_required_callbacks(struct omapfb_device *fbdev) -{ -#define _C(x) (fbdev->ctrl->x != NULL) -#define _P(x) (fbdev->panel->x != NULL) - BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL); - BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) && - _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) && - _P(init) && _P(cleanup) && _P(enable) && _P(disable) && - _P(get_caps))); -#undef _P -#undef _C -} - /* * Called by LDM binding to probe and attach a new device. 
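
The efifb fixup registered above hooks every display-class device before resources are claimed; the PCI class code is a 24-bit value (base class, subclass, prog-if), so passing a class_shift of 16 matches on the base class alone. A minimal sketch of wiring up such a header fixup (hook name and message are illustrative, not from the patch):

    #include <linux/pci.h>

    /* Runs once per display-class device during PCI header fixups. */
    static void example_display_fixup(struct pci_dev *dev)
    {
    	dev_info(&dev->dev, "display device %04x:%04x at header fixup\n",
    		 dev->vendor, dev->device);
    }
    DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
    			       PCI_BASE_CLASS_DISPLAY, 16,
    			       example_display_fixup);
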
* Initialization sequence: @@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev, omapfb_ops.fb_mmap = omapfb_mmap; init_state++; - check_required_callbacks(fbdev); - r = planes_init(fbdev); if (r) goto cleanup; diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index bd017b57c47f..f599520374dd 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client, par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat"); if (IS_ERR(par->vbat_reg)) { - dev_err(&client->dev, "failed to get VBAT regulator: %ld\n", - PTR_ERR(par->vbat_reg)); ret = PTR_ERR(par->vbat_reg); - goto fb_alloc_error; + if (ret == -ENODEV) { + par->vbat_reg = NULL; + } else { + dev_err(&client->dev, "failed to get VBAT regulator: %d\n", + ret); + goto fb_alloc_error; + } } if (of_property_read_u32(node, "solomon,width", &par->width)) @@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client, udelay(4); } - ret = regulator_enable(par->vbat_reg); - if (ret) { - dev_err(&client->dev, "failed to enable VBAT: %d\n", ret); - goto reset_oled_error; + if (par->vbat_reg) { + ret = regulator_enable(par->vbat_reg); + if (ret) { + dev_err(&client->dev, "failed to enable VBAT: %d\n", + ret); + goto reset_oled_error; + } } ret = ssd1307fb_init(par); @@ -710,7 +717,8 @@ panel_init_error: pwm_put(par->pwm); }; regulator_enable_error: - regulator_disable(par->vbat_reg); + if (par->vbat_reg) + regulator_disable(par->vbat_reg); reset_oled_error: fb_deferred_io_cleanup(info); fb_alloc_error: diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c index d0115a7af0a9..3ee309c50b2d 100644 --- a/drivers/video/fbdev/xen-fbfront.c +++ b/drivers/video/fbdev/xen-fbfront.c @@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev, break; case XenbusStateInitWait: -InitWait: xenbus_switch_state(dev, XenbusStateConnected); break; @@ -654,7 +653,8 @@ InitWait: * get Connected twice here. 
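
The ssd1307fb change earlier in this hunk makes the "vbat" supply genuinely optional: with devm_regulator_get_optional(), -ENODEV means the supply simply is not described, while any other error (including -EPROBE_DEFER) must still fail the probe. The pattern in isolation, with a hypothetical helper name:

    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    /* Fetch an optional supply; NULL in *out means "not present". */
    static int example_get_optional_vbat(struct device *dev,
    				     struct regulator **out)
    {
    	struct regulator *reg = devm_regulator_get_optional(dev, "vbat");

    	if (IS_ERR(reg)) {
    		if (PTR_ERR(reg) != -ENODEV)
    			return PTR_ERR(reg);	/* real error, propagate */
    		reg = NULL;			/* supply genuinely absent */
    	}

    	*out = reg;
    	return 0;
    }

Callers then guard regulator_enable() and regulator_disable() on the pointer being non-NULL, exactly as the probe and error paths above do.
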
*/ if (dev->state != XenbusStateConnected) - goto InitWait; /* no InitWait seen yet, fudge it */ + /* no InitWait seen yet, fudge it */ + xenbus_switch_state(dev, XenbusStateConnected); if (xenbus_read_unsigned(info->xbdev->otherend, "request-update", 0)) diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 400d70b69379..48230a5e12f2 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1ULL << i)) __virtio_set_bit(dev, i); + if (drv->validate) { + err = drv->validate(dev); + if (err) + goto err; + } + err = virtio_finalize_features(dev); if (err) goto err; diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 590534910dc6..698d5d06fa03 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev) struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; - synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); - for (i = 1; i < vp_dev->msix_vectors; i++) + if (vp_dev->intx_enabled) + synchronize_irq(vp_dev->pci_dev->irq); + + for (i = 0; i < vp_dev->msix_vectors; ++i) synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); } @@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque) static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; + struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; - struct virtqueue *vq; + unsigned long flags; - list_for_each_entry(vq, &vp_dev->vdev.vqs, list) { - if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED) + spin_lock_irqsave(&vp_dev->lock, flags); + list_for_each_entry(info, &vp_dev->virtqueues, node) { + if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } + spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } @@ -97,186 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) return vp_vring_interrupt(irq, opaque); } -static void vp_remove_vqs(struct virtio_device *vdev) +static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, + bool per_vq_vectors, struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtqueue *vq, *n; + const char *name = dev_name(&vp_dev->vdev.dev); + unsigned i, v; + int err = -ENOMEM; - list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - if (vp_dev->msix_vector_map) { - int v = vp_dev->msix_vector_map[vq->index]; + vp_dev->msix_vectors = nvectors; - if (v != VIRTIO_MSI_NO_VECTOR) - free_irq(pci_irq_vector(vp_dev->pci_dev, v), - vq); - } - vp_dev->del_vq(vq); + vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, + GFP_KERNEL); + if (!vp_dev->msix_names) + goto error; + vp_dev->msix_affinity_masks + = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, + GFP_KERNEL); + if (!vp_dev->msix_affinity_masks) + goto error; + for (i = 0; i < nvectors; ++i) + if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], + GFP_KERNEL)) + goto error; + + err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, + nvectors, PCI_IRQ_MSIX | + (desc ? 
PCI_IRQ_AFFINITY : 0), + desc); + if (err < 0) + goto error; + vp_dev->msix_enabled = 1; + + /* Set the vector used for configuration */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-config", name); + err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), + vp_config_changed, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + + v = vp_dev->config_vector(vp_dev, v); + /* Verify we had enough resources to assign the vector */ + if (v == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto error; } + + if (!per_vq_vectors) { + /* Shared vector for all VQs */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-virtqueues", name); + err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), + vp_vring_interrupt, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + } + return 0; +error: + return err; +} + +static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); + struct virtqueue *vq; + unsigned long flags; + + /* fill out our structure that represents an active queue */ + if (!info) + return ERR_PTR(-ENOMEM); + + vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, + msix_vec); + if (IS_ERR(vq)) + goto out_info; + + info->vq = vq; + if (callback) { + spin_lock_irqsave(&vp_dev->lock, flags); + list_add(&info->node, &vp_dev->virtqueues); + spin_unlock_irqrestore(&vp_dev->lock, flags); + } else { + INIT_LIST_HEAD(&info->node); + } + + vp_dev->vqs[index] = info; + return vq; + +out_info: + kfree(info); + return vq; +} + +static void vp_del_vq(struct virtqueue *vq) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; + unsigned long flags; + + spin_lock_irqsave(&vp_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vp_dev->lock, flags); + + vp_dev->del_vq(info); + kfree(info); } /* the config->del_vqs() implementation */ void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtqueue *vq, *n; int i; - if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs))) - return; + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + if (vp_dev->per_vq_vectors) { + int v = vp_dev->vqs[vq->index]->msix_vector; - vp_remove_vqs(vdev); + if (v != VIRTIO_MSI_NO_VECTOR) { + int irq = pci_irq_vector(vp_dev->pci_dev, v); + + irq_set_affinity_hint(irq, NULL); + free_irq(irq, vq); + } + } + vp_del_vq(vq); + } + vp_dev->per_vq_vectors = false; + + if (vp_dev->intx_enabled) { + free_irq(vp_dev->pci_dev->irq, vp_dev); + vp_dev->intx_enabled = 0; + } - if (vp_dev->pci_dev->msix_enabled) { - for (i = 0; i < vp_dev->msix_vectors; i++) + for (i = 0; i < vp_dev->msix_used_vectors; ++i) + free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev); + + for (i = 0; i < vp_dev->msix_vectors; i++) + if (vp_dev->msix_affinity_masks[i]) free_cpumask_var(vp_dev->msix_affinity_masks[i]); + if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); - kfree(vp_dev->msix_affinity_masks); - kfree(vp_dev->msix_names); - kfree(vp_dev->msix_vector_map); + pci_free_irq_vectors(vp_dev->pci_dev); + vp_dev->msix_enabled = 0; } - 
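/*
 * vp_del_vqs() now also resets all of the MSI-X bookkeeping fields
 * (vector counts, the name and affinity-mask arrays, the vqs array),
 * so the fallback logic in vp_find_vqs() can safely call it between
 * successive MSI-X and INTx attempts.
 */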
free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); - pci_free_irq_vectors(vp_dev->pci_dev); + vp_dev->msix_vectors = 0; + vp_dev->msix_used_vectors = 0; + kfree(vp_dev->msix_names); + vp_dev->msix_names = NULL; + kfree(vp_dev->msix_affinity_masks); + vp_dev->msix_affinity_masks = NULL; + kfree(vp_dev->vqs); + vp_dev->vqs = NULL; } static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc) + const char * const names[], bool per_vq_vectors, + struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - const char *name = dev_name(&vp_dev->vdev.dev); - int i, j, err = -ENOMEM, allocated_vectors, nvectors; - unsigned flags = PCI_IRQ_MSIX; - bool shared = false; u16 msix_vec; + int i, err, nvectors, allocated_vectors; - if (desc) { - flags |= PCI_IRQ_AFFINITY; - desc->pre_vectors++; /* virtio config vector */ - } - - nvectors = 1; - for (i = 0; i < nvqs; i++) - if (callbacks[i]) - nvectors++; - - /* Try one vector per queue first. */ - err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, - nvectors, flags, desc); - if (err < 0) { - /* Fallback to one vector for config, one shared for queues. */ - shared = true; - err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2, - PCI_IRQ_MSIX); - if (err < 0) - return err; - } - if (err < 0) - return err; - - vp_dev->msix_vectors = nvectors; - vp_dev->msix_names = kmalloc_array(nvectors, - sizeof(*vp_dev->msix_names), GFP_KERNEL); - if (!vp_dev->msix_names) - goto out_free_irq_vectors; - - vp_dev->msix_affinity_masks = kcalloc(nvectors, - sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL); - if (!vp_dev->msix_affinity_masks) - goto out_free_msix_names; + vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); + if (!vp_dev->vqs) + return -ENOMEM; - for (i = 0; i < nvectors; ++i) { - if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], - GFP_KERNEL)) - goto out_free_msix_affinity_masks; + if (per_vq_vectors) { + /* Best option: one for change interrupt, one per vq. */ + nvectors = 1; + for (i = 0; i < nvqs; ++i) + if (callbacks[i]) + ++nvectors; + } else { + /* Second best: one for change, shared for all vqs. */ + nvectors = 2; } - /* Set the vector used for configuration */ - snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names), - "%s-config", name); - err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed, - 0, vp_dev->msix_names[0], vp_dev); + err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, + per_vq_vectors ? 
desc : NULL); if (err) - goto out_free_msix_affinity_masks; + goto error_find; - /* Verify we had enough resources to assign the vector */ - if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) { - err = -EBUSY; - goto out_free_config_irq; - } - - vp_dev->msix_vector_map = kmalloc_array(nvqs, - sizeof(*vp_dev->msix_vector_map), GFP_KERNEL); - if (!vp_dev->msix_vector_map) - goto out_disable_config_irq; - - allocated_vectors = j = 1; /* vector 0 is the config interrupt */ + vp_dev->per_vq_vectors = per_vq_vectors; + allocated_vectors = vp_dev->msix_used_vectors; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - if (callbacks[i]) - msix_vec = allocated_vectors; - else + if (!callbacks[i]) msix_vec = VIRTIO_MSI_NO_VECTOR; - - vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], - msix_vec); + else if (vp_dev->per_vq_vectors) + msix_vec = allocated_vectors++; + else + msix_vec = VP_MSIX_VQ_VECTOR; + vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto out_remove_vqs; + goto error_find; } - if (msix_vec == VIRTIO_MSI_NO_VECTOR) { - vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; + if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) continue; - } - snprintf(vp_dev->msix_names[j], - sizeof(*vp_dev->msix_names), "%s-%s", + /* allocate per-vq irq if available and necessary */ + snprintf(vp_dev->msix_names[msix_vec], + sizeof *vp_dev->msix_names, + "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), - vring_interrupt, IRQF_SHARED, - vp_dev->msix_names[j], vqs[i]); - if (err) { - /* don't free this irq on error */ - vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; - goto out_remove_vqs; - } - vp_dev->msix_vector_map[i] = msix_vec; - j++; - - /* - * Use a different vector for each queue if they are available, - * else share the same vector for all VQs. 
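* (This in-loop decision is gone: whether the queues share a vector is
* now fixed up front by the per_vq_vectors argument, and vp_find_vqs()
* simply retries with a shared vector when per-VQ allocation fails.)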
- */ - if (!shared) - allocated_vectors++; + vring_interrupt, 0, + vp_dev->msix_names[msix_vec], + vqs[i]); + if (err) + goto error_find; } - return 0; -out_remove_vqs: - vp_remove_vqs(vdev); - kfree(vp_dev->msix_vector_map); -out_disable_config_irq: - vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); -out_free_config_irq: - free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); -out_free_msix_affinity_masks: - for (i = 0; i < nvectors; i++) { - if (vp_dev->msix_affinity_masks[i]) - free_cpumask_var(vp_dev->msix_affinity_masks[i]); - } - kfree(vp_dev->msix_affinity_masks); -out_free_msix_names: - kfree(vp_dev->msix_names); -out_free_irq_vectors: - pci_free_irq_vectors(vp_dev->pci_dev); +error_find: + vp_del_vqs(vdev); return err; } @@ -287,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i, err; + vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); + if (!vp_dev->vqs) + return -ENOMEM; + err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (err) - return err; + goto out_del_vqs; + vp_dev->intx_enabled = 1; + vp_dev->per_vq_vectors = false; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], - VIRTIO_MSI_NO_VECTOR); + vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + VIRTIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto out_remove_vqs; + goto out_del_vqs; } } return 0; - -out_remove_vqs: - vp_remove_vqs(vdev); - free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); +out_del_vqs: + vp_del_vqs(vdev); return err; } @@ -320,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, { int err; - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc); + /* Try MSI-X with one vector per queue. */ + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc); if (!err) return 0; + /* Fallback: MSI-X with one vector for config, one shared for queues. */ + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc); + if (!err) + return 0; + /* Finally fall back to regular interrupts. 
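* The resulting probe order is: MSI-X with one vector per virtqueue,
* then MSI-X with a single vector shared by all virtqueues, and only
* then a legacy shared INTx interrupt.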
*/ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names); } @@ -342,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; + struct cpumask *mask; + unsigned int irq; if (!vq->callback) return -EINVAL; - if (vp_dev->pci_dev->msix_enabled) { - int vec = vp_dev->msix_vector_map[vq->index]; - struct cpumask *mask = vp_dev->msix_affinity_masks[vec]; - unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec); - + if (vp_dev->msix_enabled) { + mask = vp_dev->msix_affinity_masks[info->msix_vector]; + irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector); if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { @@ -365,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - unsigned int *map = vp_dev->msix_vector_map; - if (!map || map[index] == VIRTIO_MSI_NO_VECTOR) + if (!vp_dev->per_vq_vectors || + vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR) return NULL; - return pci_irq_get_affinity(vp_dev->pci_dev, map[index]); + return pci_irq_get_affinity(vp_dev->pci_dev, + vp_dev->vqs[index]->msix_vector); } #ifdef CONFIG_PM_SLEEP @@ -441,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->pci_dev = pci_dev; + INIT_LIST_HEAD(&vp_dev->virtqueues); + spin_lock_init(&vp_dev->lock); /* enable the device */ rc = pci_enable_device(pci_dev); diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index ac8c9d788964..e96334aec1e0 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -31,6 +31,17 @@ #include <linux/highmem.h> #include <linux/spinlock.h> +struct virtio_pci_vq_info { + /* the actual virtqueue */ + struct virtqueue *vq; + + /* the list node for the virtqueues list */ + struct list_head node; + + /* MSI-X vector (or none) */ + unsigned msix_vector; +}; + /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; @@ -64,25 +75,47 @@ struct virtio_pci_device { /* the IO mapping for the PCI config space */ void __iomem *ioaddr; + /* a list of queues so we can dispatch IRQs */ + spinlock_t lock; + struct list_head virtqueues; + + /* array of all queues for house-keeping */ + struct virtio_pci_vq_info **vqs; + + /* MSI-X support */ + int msix_enabled; + int intx_enabled; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; - /* Total Number of MSI-X vectors (including per-VQ ones). 
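* (Split below into msix_vectors, the number of vectors allocated, and
* msix_used_vectors, those consumed by the config and shared-VQ
* handlers; per-VQ vectors now live in struct virtio_pci_vq_info.)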
*/ - int msix_vectors; - /* Map of per-VQ MSI-X vectors, may be NULL */ - unsigned *msix_vector_map; + /* Number of available vectors */ + unsigned msix_vectors; + /* Vectors allocated, excluding per-vq vectors if any */ + unsigned msix_used_vectors; + + /* Whether we have vector per vq */ + bool per_vq_vectors; struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, unsigned idx, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec); - void (*del_vq)(struct virtqueue *vq); + void (*del_vq)(struct virtio_pci_vq_info *info); u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); }; +/* Constants for MSI-X */ +/* Use first vector for configuration changes, second and the rest for + * virtqueues Thus, we need at least 2 vectors for MSI. */ +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index f7362c5fe18a..4bfa48fb1324 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); + info->msix_vector = msix_vec; + /* create the vring */ vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, @@ -159,13 +162,14 @@ out_deactivate: return ERR_PTR(err); } -static void del_vq(struct virtqueue *vq) +static void del_vq(struct virtio_pci_vq_info *info) { + struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - if (vp_dev->pci_dev->msix_enabled) { + if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 7bc3004b840e..8978f109d2d7 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* get offset of notification word for this vq */ off = vp_ioread16(&cfg->queue_notify_off); + info->msix_vector = msix_vec; + /* create the vring */ vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, &vp_dev->vdev, @@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, return 0; } -static void del_vq(struct virtqueue *vq) +static void del_vq(struct virtio_pci_vq_info *info) { + struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); vp_iowrite16(vq->index, &vp_dev->common->queue_select); - if (vp_dev->pci_dev->msix_enabled) { + if (vp_dev->msix_enabled) { vp_iowrite16(VIRTIO_MSI_NO_VECTOR, 
&vp_dev->common->queue_msix_vector); /* Flush the write out to device */ diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 1f4733b80c87..f3b089b7c0b6 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -442,8 +442,10 @@ static int xenbus_write_transaction(unsigned msg_type, return xenbus_command_reply(u, XS_ERROR, "ENOENT"); rc = xenbus_dev_request_and_reply(&u->u.msg, u); - if (rc) + if (rc && trans) { + list_del(&trans->list); kfree(trans); + } out: return rc; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a18510be76c1..5e71f1ea3391 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7910,7 +7910,6 @@ struct btrfs_retry_complete { static void btrfs_retry_endio_nocsum(struct bio *bio) { struct btrfs_retry_complete *done = bio->bi_private; - struct inode *inode; struct bio_vec *bvec; int i; @@ -7918,12 +7917,12 @@ static void btrfs_retry_endio_nocsum(struct bio *bio) goto end; ASSERT(bio->bi_vcnt == 1); - inode = bio->bi_io_vec->bv_page->mapping->host; - ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode)); + ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode)); done->uptodate = 1; bio_for_each_segment_all(bvec, bio, i) - clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0); + clean_io_failure(BTRFS_I(done->inode), done->start, + bvec->bv_page, 0); end: complete(&done->done); bio_put(bio); @@ -7973,8 +7972,10 @@ next_block_or_try_again: start += sectorsize; - if (nr_sectors--) { + nr_sectors--; + if (nr_sectors) { pgoff += sectorsize; + ASSERT(pgoff < PAGE_SIZE); goto next_block_or_try_again; } } @@ -7986,9 +7987,7 @@ static void btrfs_retry_endio(struct bio *bio) { struct btrfs_retry_complete *done = bio->bi_private; struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); - struct inode *inode; struct bio_vec *bvec; - u64 start; int uptodate; int ret; int i; @@ -7998,11 +7997,8 @@ static void btrfs_retry_endio(struct bio *bio) uptodate = 1; - start = done->start; - ASSERT(bio->bi_vcnt == 1); - inode = bio->bi_io_vec->bv_page->mapping->host; - ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode)); + ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode)); bio_for_each_segment_all(bvec, bio, i) { ret = __readpage_endio_check(done->inode, io_bio, i, @@ -8080,8 +8076,10 @@ next: ASSERT(nr_sectors); - if (--nr_sectors) { + nr_sectors--; + if (nr_sectors) { pgoff += sectorsize; + ASSERT(pgoff < PAGE_SIZE); goto next_block; } } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index da687dc79cce..9530a333d302 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -549,16 +549,19 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, case Opt_ssd: btrfs_set_and_info(info, SSD, "use ssd allocation scheme"); + btrfs_clear_opt(info->mount_opt, NOSSD); break; case Opt_ssd_spread: btrfs_set_and_info(info, SSD_SPREAD, "use spread ssd allocation scheme"); btrfs_set_opt(info->mount_opt, SSD); + btrfs_clear_opt(info->mount_opt, NOSSD); break; case Opt_nossd: btrfs_set_and_info(info, NOSSD, "not using ssd allocation scheme"); btrfs_clear_opt(info->mount_opt, SSD); + btrfs_clear_opt(info->mount_opt, SSD_SPREAD); break; case Opt_barrier: btrfs_clear_and_info(info, NOBARRIER, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 73d56eef5e60..ab8a66d852f9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6213,7 +6213,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, for (dev_nr = 
0; dev_nr < total_devs; dev_nr++) { dev = bbio->stripes[dev_nr].dev; if (!dev || !dev->bdev || - (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) { + (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) { bbio_error(bbio, first_bio, logical); continue; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8c91f37ac0eb..5e9b306bc162 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -973,6 +973,86 @@ out: return rc; } +ssize_t cifs_file_copychunk_range(unsigned int xid, + struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, + size_t len, unsigned int flags) +{ + struct inode *src_inode = file_inode(src_file); + struct inode *target_inode = file_inode(dst_file); + struct cifsFileInfo *smb_file_src; + struct cifsFileInfo *smb_file_target; + struct cifs_tcon *src_tcon; + struct cifs_tcon *target_tcon; + ssize_t rc; + + cifs_dbg(FYI, "copychunk range\n"); + + if (src_inode == target_inode) { + rc = -EINVAL; + goto out; + } + + if (!src_file->private_data || !dst_file->private_data) { + rc = -EBADF; + cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); + goto out; + } + + rc = -EXDEV; + smb_file_target = dst_file->private_data; + smb_file_src = src_file->private_data; + src_tcon = tlink_tcon(smb_file_src->tlink); + target_tcon = tlink_tcon(smb_file_target->tlink); + + if (src_tcon->ses != target_tcon->ses) { + cifs_dbg(VFS, "source and target of copy not on same server\n"); + goto out; + } + + /* + * Note: cifs case is easier than btrfs since server responsible for + * checks for proper open modes and file type and if it wants + * server could even support copy of range where source = target + */ + lock_two_nondirectories(target_inode, src_inode); + + cifs_dbg(FYI, "about to flush pages\n"); + /* should we flush first and last page first */ + truncate_inode_pages(&target_inode->i_data, 0); + + if (target_tcon->ses->server->ops->copychunk_range) + rc = target_tcon->ses->server->ops->copychunk_range(xid, + smb_file_src, smb_file_target, off, len, destoff); + else + rc = -EOPNOTSUPP; + + /* force revalidate of size and timestamps of target file now + * that target is updated on the server + */ + CIFS_I(target_inode)->time = 0; + /* although unlocking in the reverse order from locking is not + * strictly necessary here it is a little cleaner to be consistent + */ + unlock_two_nondirectories(src_inode, target_inode); + +out: + return rc; +} + +static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, + size_t len, unsigned int flags) +{ + unsigned int xid = get_xid(); + ssize_t rc; + + rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff, + len, flags); + free_xid(xid); + return rc; +} + const struct file_operations cifs_file_ops = { .read_iter = cifs_loose_read_iter, .write_iter = cifs_file_write_iter, @@ -985,6 +1065,7 @@ const struct file_operations cifs_file_ops = { .splice_read = generic_file_splice_read, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1002,6 +1083,7 @@ const struct file_operations cifs_file_strict_ops = { .splice_read = generic_file_splice_read, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1019,6 +1101,7 @@ const struct file_operations cifs_file_direct_ops = { 
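/*
 * As with the other cifs file_operations tables in this file, a
 * .copy_file_range hook is added, pointing at cifs_copy_file_range(),
 * which wraps cifs_file_copychunk_range() in a get_xid()/free_xid()
 * pair.
 */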
.mmap = cifs_file_mmap, .splice_read = generic_file_splice_read, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .llseek = cifs_llseek, .setlease = cifs_setlease, @@ -1036,6 +1119,7 @@ const struct file_operations cifs_file_nobrl_ops = { .splice_read = generic_file_splice_read, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1052,6 +1136,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = { .splice_read = generic_file_splice_read, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1068,6 +1153,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { .mmap = cifs_file_mmap, .splice_read = generic_file_splice_read, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .llseek = cifs_llseek, .setlease = cifs_setlease, @@ -1079,6 +1165,7 @@ const struct file_operations cifs_dir_ops = { .release = cifs_closedir, .read = generic_read_dir, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .llseek = generic_file_llseek, }; diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index da717fee3026..30bf89b1fd9a 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -139,6 +139,11 @@ extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); # define cifs_listxattr NULL #endif +extern ssize_t cifs_file_copychunk_range(unsigned int xid, + struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, + size_t len, unsigned int flags); + extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); #ifdef CONFIG_CIFS_NFSD_EXPORT extern const struct export_operations cifs_export_ops; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index d42dd3288647..37f5a41cc50c 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -243,6 +243,7 @@ struct smb_version_operations { /* verify the message */ int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); bool (*is_oplock_break)(char *, struct TCP_Server_Info *); + int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); void (*downgrade_oplock)(struct TCP_Server_Info *, struct cifsInodeInfo *, bool); /* process transaction2 response */ @@ -407,9 +408,10 @@ struct smb_version_operations { char * (*create_lease_buf)(u8 *, u8); /* parse lease context buffer and return oplock/epoch info */ __u8 (*parse_lease_buf)(void *, unsigned int *); - int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file, - struct cifsFileInfo *target_file, u64 src_off, u64 len, - u64 dest_off); + ssize_t (*copychunk_range)(const unsigned int, + struct cifsFileInfo *src_file, + struct cifsFileInfo *target_file, + u64 src_off, u64 len, u64 dest_off); int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src, struct cifsFileInfo *target_file, u64 src_off, u64 len, u64 dest_off); @@ -946,7 +948,6 @@ struct cifs_tcon { bool use_persistent:1; /* use persistent instead of durable handles */ #ifdef CONFIG_CIFS_SMB2 bool print:1; /* set if connection to printer share */ - bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */ __le32 capabilities; __u32 share_flags; __u32 maximal_access; @@ 
-1343,6 +1344,7 @@ struct mid_q_entry { void *callback_data; /* general purpose pointer for callback */ void *resp_buf; /* pointer to received SMB header */ int mid_state; /* wish this were enum but can not pass to wait_event */ + unsigned int mid_flags; __le16 command; /* smb command code */ bool large_buf:1; /* if valid response, is pointer to large buf */ bool multiRsp:1; /* multiple trans2 responses for one request */ @@ -1350,6 +1352,12 @@ struct mid_q_entry { bool decrypted:1; /* decrypted entry */ }; +struct close_cancelled_open { + struct cifs_fid fid; + struct cifs_tcon *tcon; + struct work_struct work; +}; + /* Make code in transport.c a little cleaner by moving update of optional stats into function below */ #ifdef CONFIG_CIFS_STATS2 @@ -1481,6 +1489,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, #define MID_RESPONSE_MALFORMED 0x10 #define MID_SHUTDOWN 0x20 +/* Flags */ +#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */ + /* Types of response buffer returned from SendReceive2 */ #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ #define CIFS_SMALL_BUFFER 1 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 066950671929..5d21f00ae341 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1428,6 +1428,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) length = cifs_discard_remaining_data(server); dequeue_mid(mid, rdata->result); + mid->resp_buf = server->smallbuf; + server->smallbuf = NULL; return length; } @@ -1541,6 +1543,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) return cifs_readv_discard(server, mid); dequeue_mid(mid, false); + mid->resp_buf = server->smallbuf; + server->smallbuf = NULL; return length; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 858698dcde3c..190a855bc844 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -905,10 +905,19 @@ cifs_demultiplex_thread(void *p) server->lstrp = jiffies; if (mid_entry != NULL) { + if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) && + mid_entry->mid_state == MID_RESPONSE_RECEIVED && + server->ops->handle_cancelled_mid) + server->ops->handle_cancelled_mid( + mid_entry->resp_buf, + server); + if (!mid_entry->multiRsp || mid_entry->multiEnd) mid_entry->callback(mid_entry); - } else if (!server->ops->is_oplock_break || - !server->ops->is_oplock_break(buf, server)) { + } else if (server->ops->is_oplock_break && + server->ops->is_oplock_break(buf, server)) { + cifs_dbg(FYI, "Received oplock break\n"); + } else { cifs_dbg(VFS, "No task to wake, unknown frame received! 
NumMids %d\n", atomic_read(&midCount)); cifs_dump_mem("Received Data is: ", buf, @@ -3745,6 +3754,9 @@ try_mount_again: if (IS_ERR(tcon)) { rc = PTR_ERR(tcon); tcon = NULL; + if (rc == -EACCES) + goto mount_fail_check; + goto remote_path_check; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index aa3debbba826..21d404535739 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, wdata->credits = credits; if (!wdata->cfile->invalidHandle || - !cifs_reopen_file(wdata->cfile, false)) + !(rc = cifs_reopen_file(wdata->cfile, false))) rc = server->ops->async_writev(wdata, cifs_uncached_writedata_release); if (rc) { @@ -3022,7 +3022,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, rdata->credits = credits; if (!rdata->cfile->invalidHandle || - !cifs_reopen_file(rdata->cfile, true)) + !(rc = cifs_reopen_file(rdata->cfile, true))) rc = server->ops->async_readv(rdata); error: if (rc) { @@ -3617,7 +3617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, } if (!rdata->cfile->invalidHandle || - !cifs_reopen_file(rdata->cfile, true)) + !(rc = cifs_reopen_file(rdata->cfile, true))) rc = server->ops->async_readv(rdata); if (rc) { add_credits_and_wake_if(server, rdata->credits, 0); diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 001528781b6b..265c45fe4ea5 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -34,71 +34,14 @@ #include "cifs_ioctl.h" #include <linux/btrfs.h> -static int cifs_file_clone_range(unsigned int xid, struct file *src_file, - struct file *dst_file) -{ - struct inode *src_inode = file_inode(src_file); - struct inode *target_inode = file_inode(dst_file); - struct cifsFileInfo *smb_file_src; - struct cifsFileInfo *smb_file_target; - struct cifs_tcon *src_tcon; - struct cifs_tcon *target_tcon; - int rc; - - cifs_dbg(FYI, "ioctl clone range\n"); - - if (!src_file->private_data || !dst_file->private_data) { - rc = -EBADF; - cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); - goto out; - } - - rc = -EXDEV; - smb_file_target = dst_file->private_data; - smb_file_src = src_file->private_data; - src_tcon = tlink_tcon(smb_file_src->tlink); - target_tcon = tlink_tcon(smb_file_target->tlink); - - if (src_tcon->ses != target_tcon->ses) { - cifs_dbg(VFS, "source and target of copy not on same server\n"); - goto out; - } - - /* - * Note: cifs case is easier than btrfs since server responsible for - * checks for proper open modes and file type and if it wants - * server could even support copy of range where source = target - */ - lock_two_nondirectories(target_inode, src_inode); - - cifs_dbg(FYI, "about to flush pages\n"); - /* should we flush first and last page first */ - truncate_inode_pages(&target_inode->i_data, 0); - - if (target_tcon->ses->server->ops->clone_range) - rc = target_tcon->ses->server->ops->clone_range(xid, - smb_file_src, smb_file_target, 0, src_inode->i_size, 0); - else - rc = -EOPNOTSUPP; - - /* force revalidate of size and timestamps of target file now - that target is updated on the server */ - CIFS_I(target_inode)->time = 0; - /* although unlocking in the reverse order from locking is not - strictly necessary here it is a little cleaner to be consistent */ - unlock_two_nondirectories(src_inode, target_inode); -out: - return rc; -} - -static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, +static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file, unsigned long srcfd) { int rc; 
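/*
 * The CIFS_IOC_COPYCHUNK_FILE ioctl now shares the copy_file_range()
 * plumbing: it calls cifs_file_copychunk_range() with offsets of zero
 * and a length of src_inode->i_size, i.e. a whole-file server-side
 * copy, replacing the clone-range helper deleted above.
 */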
struct fd src_file; struct inode *src_inode; - cifs_dbg(FYI, "ioctl clone range\n"); + cifs_dbg(FYI, "ioctl copychunk range\n"); /* the destination must be opened for writing */ if (!(dst_file->f_mode & FMODE_WRITE)) { cifs_dbg(FYI, "file target not open for write\n"); @@ -129,7 +72,8 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, if (S_ISDIR(src_inode->i_mode)) goto out_fput; - rc = cifs_file_clone_range(xid, src_file.file, dst_file); + rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0, + src_inode->i_size, 0); out_fput: fdput(src_file); @@ -251,7 +195,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) } break; case CIFS_IOC_COPYCHUNK_FILE: - rc = cifs_ioctl_clone(xid, filep, arg); + rc = cifs_ioctl_copychunk(xid, filep, arg); break; case CIFS_IOC_SET_INTEGRITY: if (pSMBFile == NULL) diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index fd516ea8b8f8..1a04b3a5beb1 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -659,3 +659,49 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); return false; } + +void +smb2_cancelled_close_fid(struct work_struct *work) +{ + struct close_cancelled_open *cancelled = container_of(work, + struct close_cancelled_open, work); + + cifs_dbg(VFS, "Close unmatched open\n"); + + SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid, + cancelled->fid.volatile_fid); + cifs_put_tcon(cancelled->tcon); + kfree(cancelled); +} + +int +smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server) +{ + struct smb2_sync_hdr *sync_hdr = get_sync_hdr(buffer); + struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer; + struct cifs_tcon *tcon; + struct close_cancelled_open *cancelled; + + if (sync_hdr->Command != SMB2_CREATE || + sync_hdr->Status != STATUS_SUCCESS) + return 0; + + cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL); + if (!cancelled) + return -ENOMEM; + + tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId, + sync_hdr->TreeId); + if (!tcon) { + kfree(cancelled); + return -ENOENT; + } + + cancelled->fid.persistent_fid = rsp->PersistentFileId; + cancelled->fid.volatile_fid = rsp->VolatileFileId; + cancelled->tcon = tcon; + INIT_WORK(&cancelled->work, smb2_cancelled_close_fid); + queue_work(cifsiod_wq, &cancelled->work); + + return 0; +} diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 0231108d9387..152e37f2ad92 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -21,6 +21,7 @@ #include <linux/vfs.h> #include <linux/falloc.h> #include <linux/scatterlist.h> +#include <linux/uuid.h> #include <crypto/aead.h> #include "cifsglob.h" #include "smb2pdu.h" @@ -592,8 +593,8 @@ req_res_key_exit: return rc; } -static int -smb2_clone_range(const unsigned int xid, +static ssize_t +smb2_copychunk_range(const unsigned int xid, struct cifsFileInfo *srcfile, struct cifsFileInfo *trgtfile, u64 src_off, u64 len, u64 dest_off) @@ -605,13 +606,14 @@ smb2_clone_range(const unsigned int xid, struct cifs_tcon *tcon; int chunks_copied = 0; bool chunk_sizes_updated = false; + ssize_t bytes_written, total_bytes_written = 0; pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); if (pcchunk == NULL) return -ENOMEM; - cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n"); + cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n"); /* Request a key from the server to identify the source of the copy */ rc = 
SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink), srcfile->fid.persistent_fid, @@ -669,14 +671,16 @@ smb2_clone_range(const unsigned int xid, } chunks_copied++; - src_off += le32_to_cpu(retbuf->TotalBytesWritten); - dest_off += le32_to_cpu(retbuf->TotalBytesWritten); - len -= le32_to_cpu(retbuf->TotalBytesWritten); + bytes_written = le32_to_cpu(retbuf->TotalBytesWritten); + src_off += bytes_written; + dest_off += bytes_written; + len -= bytes_written; + total_bytes_written += bytes_written; - cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n", + cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n", le32_to_cpu(retbuf->ChunksWritten), le32_to_cpu(retbuf->ChunkBytesWritten), - le32_to_cpu(retbuf->TotalBytesWritten)); + bytes_written); } else if (rc == -EINVAL) { if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) goto cchunk_out; @@ -713,7 +717,10 @@ smb2_clone_range(const unsigned int xid, cchunk_out: kfree(pcchunk); kfree(retbuf); - return rc; + if (rc) + return rc; + else + return total_bytes_written; } static int @@ -2322,6 +2329,7 @@ struct smb_version_operations smb20_operations = { .clear_stats = smb2_clear_stats, .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, @@ -2377,7 +2385,7 @@ struct smb_version_operations smb20_operations = { .set_oplock_level = smb2_set_oplock_level, .create_lease_buf = smb2_create_lease_buf, .parse_lease_buf = smb2_parse_lease_buf, - .clone_range = smb2_clone_range, + .copychunk_range = smb2_copychunk_range, .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .get_dfs_refer = smb2_get_dfs_refer, @@ -2404,6 +2412,7 @@ struct smb_version_operations smb21_operations = { .clear_stats = smb2_clear_stats, .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, @@ -2459,7 +2468,7 @@ struct smb_version_operations smb21_operations = { .set_oplock_level = smb21_set_oplock_level, .create_lease_buf = smb2_create_lease_buf, .parse_lease_buf = smb2_parse_lease_buf, - .clone_range = smb2_clone_range, + .copychunk_range = smb2_copychunk_range, .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .enum_snapshots = smb3_enum_snapshots, @@ -2488,6 +2497,7 @@ struct smb_version_operations smb30_operations = { .print_stats = smb2_print_stats, .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, @@ -2545,7 +2555,7 @@ struct smb_version_operations smb30_operations = { .set_oplock_level = smb3_set_oplock_level, .create_lease_buf = smb3_create_lease_buf, .parse_lease_buf = smb3_parse_lease_buf, - .clone_range = smb2_clone_range, + .copychunk_range = smb2_copychunk_range, .duplicate_extents = smb2_duplicate_extents, .validate_negotiate = smb3_validate_negotiate, .wp_retry_size = smb2_wp_retry_size, @@ -2582,6 +2592,7 @@ struct smb_version_operations smb311_operations = { .print_stats = smb2_print_stats, .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, 
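/*
 * Every SMB2+ dialect table (2.0, 2.1, 3.0 and 3.1.1) gains the
 * .handle_cancelled_mid hook and has .clone_range renamed to the
 * ssize_t-returning .copychunk_range operation.
 */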
.need_neg = smb2_need_neg, .negotiate = smb2_negotiate, @@ -2639,7 +2650,7 @@ struct smb_version_operations smb311_operations = { .set_oplock_level = smb3_set_oplock_level, .create_lease_buf = smb3_create_lease_buf, .parse_lease_buf = smb3_parse_lease_buf, - .clone_range = smb2_clone_range, + .copychunk_range = smb2_copychunk_range, .duplicate_extents = smb2_duplicate_extents, /* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */ .wp_retry_size = smb2_wp_retry_size, diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index fb75fe908225..fb0da096c2ce 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -563,8 +563,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) * but for time being this is our only auth choice so doesn't matter. * We just found a server which sets blob length to zero expecting raw. */ - if (blob_length == 0) + if (blob_length == 0) { cifs_dbg(FYI, "missing security blob on negprot\n"); + server->sec_ntlmssp = true; + } rc = cifs_enable_signing(server, ses->sign); if (rc) @@ -1172,9 +1174,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, else return -EIO; - if (tcon && tcon->bad_network_name) - return -ENOENT; - unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); if (unc_path == NULL) return -ENOMEM; @@ -1186,6 +1185,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, return -EINVAL; } + /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ + if (tcon) + tcon->tid = 0; + rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); if (rc) { kfree(unc_path); @@ -1274,8 +1277,6 @@ tcon_exit: tcon_error_exit: if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); - if (tcon) - tcon->bad_network_name = true; } goto tcon_exit; } @@ -2178,6 +2179,9 @@ void smb2_reconnect_server(struct work_struct *work) struct cifs_tcon *tcon, *tcon2; struct list_head tmp_list; int tcon_exist = false; + int rc; + int resched = false; + /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ mutex_lock(&server->reconnect_mutex); @@ -2205,13 +2209,18 @@ void smb2_reconnect_server(struct work_struct *work) spin_unlock(&cifs_tcp_ses_lock); list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { - if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon)) + rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon); + if (!rc) cifs_reopen_persistent_handles(tcon); + else + resched = true; list_del_init(&tcon->rlist); cifs_put_tcon(tcon); } cifs_dbg(FYI, "Reconnecting tcons finished\n"); + if (resched) + queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ); mutex_unlock(&server->reconnect_mutex); /* now we can safely release srv struct */ diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 69e35873b1de..6853454fc871 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst); extern struct mid_q_entry *smb2_setup_async_request( struct TCP_Server_Info *server, struct smb_rqst *rqst); +extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server, + __u64 ses_id); +extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server, + __u64 ses_id, __u32 tid); extern int smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server); extern int smb3_calc_signature(struct smb_rqst *rqst, @@ -164,6 +168,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, extern int 
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, const u64 persistent_fid, const u64 volatile_fid, const __u8 oplock_level); +extern int smb2_handle_cancelled_mid(char *buffer, + struct TCP_Server_Info *server); +void smb2_cancelled_close_fid(struct work_struct *work); extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_file_id, u64 volatile_file_id, struct kstatfs *FSData); diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 7c3bb1bd7eed..506b67fc93d9 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -115,23 +115,70 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server) return 0; } -struct cifs_ses * -smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id) +static struct cifs_ses * +smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id) { struct cifs_ses *ses; - spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { if (ses->Suid != ses_id) continue; - spin_unlock(&cifs_tcp_ses_lock); return ses; } + + return NULL; +} + +struct cifs_ses * +smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id) +{ + struct cifs_ses *ses; + + spin_lock(&cifs_tcp_ses_lock); + ses = smb2_find_smb_ses_unlocked(server, ses_id); spin_unlock(&cifs_tcp_ses_lock); + return ses; +} + +static struct cifs_tcon * +smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid) +{ + struct cifs_tcon *tcon; + + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { + if (tcon->tid != tid) + continue; + ++tcon->tc_count; + return tcon; + } + return NULL; } +/* + * Obtain tcon corresponding to the tid in the given + * cifs_ses + */ + +struct cifs_tcon * +smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid) +{ + struct cifs_ses *ses; + struct cifs_tcon *tcon; + + spin_lock(&cifs_tcp_ses_lock); + ses = smb2_find_smb_ses_unlocked(server, ses_id); + if (!ses) { + spin_unlock(&cifs_tcp_ses_lock); + return NULL; + } + tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid); + spin_unlock(&cifs_tcp_ses_lock); + + return tcon; +} + int smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) { diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 526f0533cb4e..f6e13a977fc8 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -752,9 +752,11 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses, rc = wait_for_response(ses->server, midQ); if (rc != 0) { + cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid); send_cancel(ses->server, rqst, midQ); spin_lock(&GlobalMid_Lock); if (midQ->mid_state == MID_REQUEST_SUBMITTED) { + midQ->mid_flags |= MID_WAIT_CANCELLED; midQ->callback = DeleteMidQEntry; spin_unlock(&GlobalMid_Lock); add_credits(ses->server, 1, optype); @@ -373,6 +373,22 @@ restart: } spin_lock_irq(&mapping->tree_lock); + if (!entry) { + /* + * We needed to drop the page_tree lock while calling + * radix_tree_preload() and we didn't have an entry to + * lock. See if another thread inserted an entry at + * our index during this time. + */ + entry = __radix_tree_lookup(&mapping->page_tree, index, + NULL, &slot); + if (entry) { + radix_tree_preload_end(); + spin_unlock_irq(&mapping->tree_lock); + goto restart; + } + } + if (pmd_downgrade) { radix_tree_delete(&mapping->page_tree, index); mapping->nrexceptional--; @@ -388,19 +404,12 @@ restart: if (err) { spin_unlock_irq(&mapping->tree_lock); /* - * Someone already created the entry? 
This is a - * normal failure when inserting PMDs in a range - * that already contains PTEs. In that case we want - * to return -EEXIST immediately. - */ - if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD)) - goto restart; - /* - * Our insertion of a DAX PMD entry failed, most - * likely because it collided with a PTE sized entry - * at a different index in the PMD range. We haven't - * inserted anything into the radix tree and have no - * waiters to wake. + * Our insertion of a DAX entry failed, most likely + * because we were inserting a PMD entry and it + * collided with a PTE sized entry at a different + * index in the PMD range. We haven't inserted + * anything into the radix tree and have no waiters to + * wake. */ return ERR_PTR(err); } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index f493af666591..fb69ee2388db 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2466,6 +2466,7 @@ extern int ext4_setattr(struct dentry *, struct iattr *); extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int); extern void ext4_evict_inode(struct inode *); extern void ext4_clear_inode(struct inode *); +extern int ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int ext4_sync_inode(handle_t *, struct inode *); extern void ext4_dirty_inode(struct inode *, int); extern int ext4_change_inode_journal_flag(struct inode *, int); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 8210c1f43556..cefa9835f275 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -744,7 +744,7 @@ const struct file_operations ext4_file_operations = { const struct inode_operations ext4_file_inode_operations = { .setattr = ext4_setattr, - .getattr = ext4_getattr, + .getattr = ext4_file_getattr, .listxattr = ext4_listxattr, .get_acl = ext4_get_acl, .set_acl = ext4_set_acl, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 4247d8d25687..b9ffa9f4191f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5390,11 +5390,46 @@ err_out: int ext4_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { - struct inode *inode; - unsigned long long delalloc_blocks; + struct inode *inode = d_inode(path->dentry); + struct ext4_inode *raw_inode; + struct ext4_inode_info *ei = EXT4_I(inode); + unsigned int flags; + + if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) { + stat->result_mask |= STATX_BTIME; + stat->btime.tv_sec = ei->i_crtime.tv_sec; + stat->btime.tv_nsec = ei->i_crtime.tv_nsec; + } + + flags = ei->i_flags & EXT4_FL_USER_VISIBLE; + if (flags & EXT4_APPEND_FL) + stat->attributes |= STATX_ATTR_APPEND; + if (flags & EXT4_COMPR_FL) + stat->attributes |= STATX_ATTR_COMPRESSED; + if (flags & EXT4_ENCRYPT_FL) + stat->attributes |= STATX_ATTR_ENCRYPTED; + if (flags & EXT4_IMMUTABLE_FL) + stat->attributes |= STATX_ATTR_IMMUTABLE; + if (flags & EXT4_NODUMP_FL) + stat->attributes |= STATX_ATTR_NODUMP; + + stat->attributes_mask |= (STATX_ATTR_APPEND | + STATX_ATTR_COMPRESSED | + STATX_ATTR_ENCRYPTED | + STATX_ATTR_IMMUTABLE | + STATX_ATTR_NODUMP); - inode = d_inode(path->dentry); generic_fillattr(inode, stat); + return 0; +} + +int ext4_file_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) +{ + struct inode *inode = d_inode(path->dentry); + u64 delalloc_blocks; + + ext4_getattr(path, stat, request_mask, query_flags); /* * If there is inline data in the inode, the inode will normally not diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 6ad612c576fc..07e5e1405771 100644 --- a/fs/ext4/namei.c +++ 
b/fs/ext4/namei.c @@ -3912,6 +3912,7 @@ const struct inode_operations ext4_dir_inode_operations = { .tmpfile = ext4_tmpfile, .rename = ext4_rename2, .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, .get_acl = ext4_get_acl, .set_acl = ext4_set_acl, @@ -3920,6 +3921,7 @@ const struct inode_operations ext4_dir_inode_operations = { const struct inode_operations ext4_special_inode_operations = { .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, .get_acl = ext4_get_acl, .set_acl = ext4_set_acl, diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index 73b184d161fc..5c8fc53cb0e5 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c @@ -85,17 +85,20 @@ errout: const struct inode_operations ext4_encrypted_symlink_inode_operations = { .get_link = ext4_encrypted_get_link, .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, }; const struct inode_operations ext4_symlink_inode_operations = { .get_link = page_get_link, .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, }; const struct inode_operations ext4_fast_symlink_inode_operations = { .get_link = simple_get_link, .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, }; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 7163fe014b57..dde861387a40 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; vma->vm_ops = &hugetlb_vm_ops; + /* + * Offset passed to mmap (before page shift) could have been + * negative when represented as a (l)off_t. + */ + if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0) + return -EINVAL; + if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) return -EINVAL; vma_len = (loff_t)(vma->vm_end - vma->vm_start); + len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + /* check for overflow */ + if (len < vma_len) + return -EINVAL; inode_lock(inode); file_accessed(file); ret = -ENOMEM; - len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); - if (hugetlb_reserve_pages(inode, vma->vm_pgoff >> huge_page_order(h), len >> huge_page_shift(h), vma, @@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) ret = 0; if (vma->vm_flags & VM_WRITE && inode->i_size < len) - inode->i_size = len; + i_size_write(inode, len); out: inode_unlock(inode); diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 67c24351a67f..cd261c8de53a 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -263,8 +263,13 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb) if (!new_op) return -ENOMEM; new_op->upcall.req.features.features = 0; - ret = service_operation(new_op, "orangefs_features", 0); - orangefs_features = new_op->downcall.resp.features.features; + ret = service_operation(new_op, "orangefs_features", + ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX); + if (!ret) + orangefs_features = + new_op->downcall.resp.features.features; + else + orangefs_features = 0; op_release(new_op); } else { orangefs_features = 0; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 8f91ec66baa3..d04ea4349909 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1074,6 +1074,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table) if ((table->proc_handler == proc_dostring) || (table->proc_handler == proc_dointvec) || + (table->proc_handler == proc_douintvec) || 
(table->proc_handler == proc_dointvec_minmax) || (table->proc_handler == proc_dointvec_jiffies) || (table->proc_handler == proc_dointvec_userhz_jiffies) || diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f08bd31c1081..312578089544 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -900,7 +900,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { - pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); + pmd_t pmd = *pmdp; + + /* See comment in change_huge_pmd() */ + pmdp_invalidate(vma, addr, pmdp); + if (pmd_dirty(*pmdp)) + pmd = pmd_mkdirty(pmd); + if (pmd_young(*pmdp)) + pmd = pmd_mkyoung(pmd); pmd = pmd_wrprotect(pmd); pmd = pmd_clear_soft_dirty(pmd); diff --git a/fs/stat.c b/fs/stat.c index fa0be59340cc..c6c963b2546b 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -130,9 +130,13 @@ EXPORT_SYMBOL(vfs_getattr); int vfs_statx_fd(unsigned int fd, struct kstat *stat, u32 request_mask, unsigned int query_flags) { - struct fd f = fdget_raw(fd); + struct fd f; int error = -EBADF; + if (query_flags & ~KSTAT_QUERY_FLAGS) + return -EINVAL; + + f = fdget_raw(fd); if (f.file) { error = vfs_getattr(&f.file->f_path, stat, request_mask, query_flags); @@ -155,9 +159,6 @@ EXPORT_SYMBOL(vfs_statx_fd); * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink * at the given name from being referenced. * - * The caller must have preset stat->request_mask as for vfs_getattr(). The - * flags are also used to load up stat->query_flags. - * * 0 will be returned on success, and a -ve error code if unsuccessful. */ int vfs_statx(int dfd, const char __user *filename, int flags, @@ -509,46 +510,38 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, } #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ -static inline int __put_timestamp(struct timespec *kts, - struct statx_timestamp __user *uts) -{ - return (__put_user(kts->tv_sec, &uts->tv_sec ) || - __put_user(kts->tv_nsec, &uts->tv_nsec ) || - __put_user(0, &uts->__reserved )); -} - -/* - * Set the statx results. 
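* (Replaced by cp_statx() below, which fills a zeroed struct statx on
* the stack and copies it out with one copy_to_user() call instead of
* a chain of __put_user() accesses, so the access_ok() precheck in
* the statx() syscall can go as well.)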
- */ -static long statx_set_result(struct kstat *stat, struct statx __user *buffer) +static noinline_for_stack int +cp_statx(const struct kstat *stat, struct statx __user *buffer) { - uid_t uid = from_kuid_munged(current_user_ns(), stat->uid); - gid_t gid = from_kgid_munged(current_user_ns(), stat->gid); - - if (__put_user(stat->result_mask, &buffer->stx_mask ) || - __put_user(stat->mode, &buffer->stx_mode ) || - __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) || - __put_user(stat->nlink, &buffer->stx_nlink ) || - __put_user(uid, &buffer->stx_uid ) || - __put_user(gid, &buffer->stx_gid ) || - __put_user(stat->attributes, &buffer->stx_attributes ) || - __put_user(stat->blksize, &buffer->stx_blksize ) || - __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major ) || - __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor ) || - __put_user(MAJOR(stat->dev), &buffer->stx_dev_major ) || - __put_user(MINOR(stat->dev), &buffer->stx_dev_minor ) || - __put_timestamp(&stat->atime, &buffer->stx_atime ) || - __put_timestamp(&stat->btime, &buffer->stx_btime ) || - __put_timestamp(&stat->ctime, &buffer->stx_ctime ) || - __put_timestamp(&stat->mtime, &buffer->stx_mtime ) || - __put_user(stat->ino, &buffer->stx_ino ) || - __put_user(stat->size, &buffer->stx_size ) || - __put_user(stat->blocks, &buffer->stx_blocks ) || - __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) || - __clear_user(&buffer->__spare2, sizeof(buffer->__spare2))) - return -EFAULT; - - return 0; + struct statx tmp; + + memset(&tmp, 0, sizeof(tmp)); + + tmp.stx_mask = stat->result_mask; + tmp.stx_blksize = stat->blksize; + tmp.stx_attributes = stat->attributes; + tmp.stx_nlink = stat->nlink; + tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid); + tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid); + tmp.stx_mode = stat->mode; + tmp.stx_ino = stat->ino; + tmp.stx_size = stat->size; + tmp.stx_blocks = stat->blocks; + tmp.stx_attributes_mask = stat->attributes_mask; + tmp.stx_atime.tv_sec = stat->atime.tv_sec; + tmp.stx_atime.tv_nsec = stat->atime.tv_nsec; + tmp.stx_btime.tv_sec = stat->btime.tv_sec; + tmp.stx_btime.tv_nsec = stat->btime.tv_nsec; + tmp.stx_ctime.tv_sec = stat->ctime.tv_sec; + tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec; + tmp.stx_mtime.tv_sec = stat->mtime.tv_sec; + tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec; + tmp.stx_rdev_major = MAJOR(stat->rdev); + tmp.stx_rdev_minor = MINOR(stat->rdev); + tmp.stx_dev_major = MAJOR(stat->dev); + tmp.stx_dev_minor = MINOR(stat->dev); + + return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0; } /** @@ -570,10 +563,10 @@ SYSCALL_DEFINE5(statx, struct kstat stat; int error; + if (mask & STATX__RESERVED) + return -EINVAL; if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) return -EINVAL; - if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer))) - return -EFAULT; if (filename) error = vfs_statx(dfd, filename, flags, &stat, mask); @@ -581,7 +574,8 @@ SYSCALL_DEFINE5(statx, error = vfs_statx_fd(dfd, &stat, mask, flags); if (error) return error; - return statx_set_result(&stat, buffer); + + return cp_statx(&stat, buffer); } /* Caller is here responsible for sufficient locking (ie. 
inode->i_lock) */ diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index b803213d1307..39c75a86c67f 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf, { const struct sysfs_ops *ops = sysfs_file_ops(of->kn); struct kobject *kobj = of->kn->parent->priv; - size_t len; + ssize_t len; /* * If buf != of->prealloc_buf, we don't know how @@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf, if (WARN_ON_ONCE(buf != of->prealloc_buf)) return 0; len = ops->show(kobj, of->kn->priv, buf); + if (len < 0) + return len; if (pos) { if (len <= pos) return 0; len -= pos; memmove(buf, buf + pos, len); } - return min(count, len); + return min_t(ssize_t, count, len); } /* kernfs write callback for regular sysfs files */ diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 1d227b0fcf49..f7555fc25877 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -1756,7 +1756,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) * protocols: aa:... bb:... */ seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", - pending, total, UFFD_API, UFFD_API_FEATURES, + pending, total, UFFD_API, ctx->features, UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); } #endif diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h index eb00bc133bca..39f8604f764e 100644 --- a/fs/xfs/libxfs/xfs_dir2_priv.h +++ b/fs/xfs/libxfs/xfs_dir2_priv.h @@ -125,8 +125,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); extern int xfs_dir2_sf_removename(struct xfs_da_args *args); extern int xfs_dir2_sf_replace(struct xfs_da_args *args); -extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp, - int size); +extern int xfs_dir2_sf_verify(struct xfs_inode *ip); /* xfs_dir2_readdir.c */ extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index 96b45cd6c63f..e84af093b2ab 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c @@ -632,36 +632,49 @@ xfs_dir2_sf_check( /* Verify the consistency of an inline directory. */ int xfs_dir2_sf_verify( - struct xfs_mount *mp, - struct xfs_dir2_sf_hdr *sfp, - int size) + struct xfs_inode *ip) { + struct xfs_mount *mp = ip->i_mount; + struct xfs_dir2_sf_hdr *sfp; struct xfs_dir2_sf_entry *sfep; struct xfs_dir2_sf_entry *next_sfep; char *endp; const struct xfs_dir_ops *dops; + struct xfs_ifork *ifp; xfs_ino_t ino; int i; int i8count; int offset; + int size; + int error; __uint8_t filetype; + ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL); + /* + * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops, + * so we can only trust the mountpoint to have the right pointer. + */ dops = xfs_dir_get_ops(mp, NULL); + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data; + size = ifp->if_bytes; + /* * Give up if the directory is way too short. */ - XFS_WANT_CORRUPTED_RETURN(mp, size > - offsetof(struct xfs_dir2_sf_hdr, parent)); - XFS_WANT_CORRUPTED_RETURN(mp, size >= - xfs_dir2_sf_hdr_size(sfp->i8count)); + if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) || + size < xfs_dir2_sf_hdr_size(sfp->i8count)) + return -EFSCORRUPTED; endp = (char *)sfp + size; /* Check .. 
entry */ ino = dops->sf_get_parent_ino(sfp); i8count = ino > XFS_DIR2_MAX_SHORT_INUM; - XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); + error = xfs_dir_ino_validate(mp, ino); + if (error) + return error; offset = dops->data_first_offset; /* Check all reported entries */ @@ -672,12 +685,12 @@ xfs_dir2_sf_verify( * Check the fixed-offset parts of the structure are * within the data buffer. */ - XFS_WANT_CORRUPTED_RETURN(mp, - ((char *)sfep + sizeof(*sfep)) < endp); + if (((char *)sfep + sizeof(*sfep)) >= endp) + return -EFSCORRUPTED; /* Don't allow names with known bad length. */ - XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0); - XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN); + if (sfep->namelen == 0) + return -EFSCORRUPTED; /* * Check that the variable-length part of the structure is @@ -685,33 +698,39 @@ xfs_dir2_sf_verify( * name component, so nextentry is an acceptable test. */ next_sfep = dops->sf_nextentry(sfp, sfep); - XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep); + if (endp < (char *)next_sfep) + return -EFSCORRUPTED; /* Check that the offsets always increase. */ - XFS_WANT_CORRUPTED_RETURN(mp, - xfs_dir2_sf_get_offset(sfep) >= offset); + if (xfs_dir2_sf_get_offset(sfep) < offset) + return -EFSCORRUPTED; /* Check the inode number. */ ino = dops->sf_get_ino(sfp, sfep); i8count += ino > XFS_DIR2_MAX_SHORT_INUM; - XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); + error = xfs_dir_ino_validate(mp, ino); + if (error) + return error; /* Check the file type. */ filetype = dops->sf_get_ftype(sfep); - XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX); + if (filetype >= XFS_DIR3_FT_MAX) + return -EFSCORRUPTED; offset = xfs_dir2_sf_get_offset(sfep) + dops->data_entsize(sfep->namelen); sfep = next_sfep; } - XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count); - XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp); + if (i8count != sfp->i8count) + return -EFSCORRUPTED; + if ((void *)sfep != (void *)endp) + return -EFSCORRUPTED; /* Make sure this whole thing ought to be in local format. */ - XFS_WANT_CORRUPTED_RETURN(mp, offset + - (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + - (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize); + if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + + (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize) + return -EFSCORRUPTED; return 0; } diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index 9653e964eda4..8a37efe04de3 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -212,6 +212,16 @@ xfs_iformat_fork( if (error) return error; + /* Check inline dir contents. 
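The xfs_dir2_sf_verify() rework above replaces the XFS_WANT_CORRUPTED_RETURN macros with explicit -EFSCORRUPTED returns while walking the inline (shortform) directory entries: the fixed part of each entry is validated before its length field is trusted, and the computed end of the variable part is validated before it becomes the new cursor. A minimal userspace model of that walk pattern, assuming a deliberately simplified entry layout (one namelen byte followed by the name bytes) rather than the real on-disk xfs_dir2_sf format; the function and struct names are illustrative only:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sf_entry {
	uint8_t namelen;	/* length of the name that follows */
	char name[];
};

static int verify_entries(const uint8_t *buf, size_t size, int count)
{
	const uint8_t *p = buf, *end = buf + size;
	int i;

	for (i = 0; i < count; i++) {
		const struct sf_entry *e = (const void *)p;
		const uint8_t *next;

		/* fixed part must fit, with room for at least one name byte */
		if (p + sizeof(*e) >= end)
			return -EUCLEAN; /* userspace stand-in for -EFSCORRUPTED */
		if (e->namelen == 0)
			return -EUCLEAN;
		/* variable part must fit; compute next before using it */
		next = p + sizeof(*e) + e->namelen;
		if (next > end)
			return -EUCLEAN;
		p = next;
	}
	/* the walk must consume the buffer exactly */
	return p == end ? 0 : -EUCLEAN;
}

int main(void)
{
	/* two entries: "ab" and "c" */
	uint8_t buf[] = { 2, 'a', 'b', 1, 'c' };

	printf("%d\n", verify_entries(buf, sizeof(buf), 2));     /* 0 = ok */
	printf("%d\n", verify_entries(buf, sizeof(buf) - 1, 2)); /* corrupt */
	return 0;
}

The ordering of the checks is the point: nothing derived from entry contents is used until the bytes it lives in have been shown to be inside the buffer.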
*/ + if (S_ISDIR(VFS_I(ip)->i_mode) && + dip->di_format == XFS_DINODE_FMT_LOCAL) { + error = xfs_dir2_sf_verify(ip); + if (error) { + xfs_idestroy_fork(ip, XFS_DATA_FORK); + return error; + } + } + if (xfs_is_reflink_inode(ip)) { ASSERT(ip->i_cowfp == NULL); xfs_ifork_init_cow(ip); @@ -322,8 +332,6 @@ xfs_iformat_local( int whichfork, int size) { - int error; - /* * If the size is unreasonable, then something * is wrong and we just bail out rather than crash in @@ -339,14 +347,6 @@ xfs_iformat_local( return -EFSCORRUPTED; } - if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) { - error = xfs_dir2_sf_verify(ip->i_mount, - (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip), - size); - if (error) - return error; - } - xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size); return 0; } @@ -867,7 +867,7 @@ xfs_iextents_copy( * In these cases, the format always takes precedence, because the * format indicates the current state of the fork. */ -int +void xfs_iflush_fork( xfs_inode_t *ip, xfs_dinode_t *dip, @@ -877,7 +877,6 @@ xfs_iflush_fork( char *cp; xfs_ifork_t *ifp; xfs_mount_t *mp; - int error; static const short brootflag[2] = { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; static const short dataflag[2] = @@ -886,7 +885,7 @@ xfs_iflush_fork( { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; if (!iip) - return 0; + return; ifp = XFS_IFORK_PTR(ip, whichfork); /* * This can happen if we gave up in iformat in an error path, @@ -894,19 +893,12 @@ xfs_iflush_fork( */ if (!ifp) { ASSERT(whichfork == XFS_ATTR_FORK); - return 0; + return; } cp = XFS_DFORK_PTR(dip, whichfork); mp = ip->i_mount; switch (XFS_IFORK_FORMAT(ip, whichfork)) { case XFS_DINODE_FMT_LOCAL: - if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) { - error = xfs_dir2_sf_verify(mp, - (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data, - ifp->if_bytes); - if (error) - return error; - } if ((iip->ili_fields & dataflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(ifp->if_u1.if_data != NULL); @@ -959,7 +951,6 @@ xfs_iflush_fork( ASSERT(0); break; } - return 0; } /* diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h index 132dc59fdde6..7fb8365326d1 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.h +++ b/fs/xfs/libxfs/xfs_inode_fork.h @@ -140,7 +140,7 @@ typedef struct xfs_ifork { struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state); int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); -int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, +void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, struct xfs_inode_log_item *, int); void xfs_idestroy_fork(struct xfs_inode *, int); void xfs_idata_realloc(struct xfs_inode *, int, int); diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 8b75dcea5966..828532ce0adc 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1311,8 +1311,16 @@ xfs_free_file_space( /* * Now that we've unmap all full blocks we'll have to zero out any * partial block at the beginning and/or end. xfs_zero_range is - * smart enough to skip any holes, including those we just created. + * smart enough to skip any holes, including those we just created, + * but we must take care not to zero beyond EOF and enlarge i_size. 
*/ + + if (offset >= XFS_ISIZE(ip)) + return 0; + + if (offset + len > XFS_ISIZE(ip)) + len = XFS_ISIZE(ip) - offset; + return xfs_zero_range(ip, offset, len, NULL); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index c7fe2c2123ab..7605d8396596 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -50,6 +50,7 @@ #include "xfs_log.h" #include "xfs_bmap_btree.h" #include "xfs_reflink.h" +#include "xfs_dir2_priv.h" kmem_zone_t *xfs_inode_zone; @@ -3475,7 +3476,6 @@ xfs_iflush_int( struct xfs_inode_log_item *iip = ip->i_itemp; struct xfs_dinode *dip; struct xfs_mount *mp = ip->i_mount; - int error; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(xfs_isiflocked(ip)); @@ -3547,6 +3547,12 @@ xfs_iflush_int( if (ip->i_d.di_version < 3) ip->i_d.di_flushiter++; + /* Check the inline directory data. */ + if (S_ISDIR(VFS_I(ip)->i_mode) && + ip->i_d.di_format == XFS_DINODE_FMT_LOCAL && + xfs_dir2_sf_verify(ip)) + goto corrupt_out; + /* * Copy the dirty parts of the inode into the on-disk inode. We always * copy out the core of the inode, because if the inode is dirty at all @@ -3558,14 +3564,9 @@ xfs_iflush_int( if (ip->i_d.di_flushiter == DI_MAX_FLUSH) ip->i_d.di_flushiter = 0; - error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); - if (error) - return error; - if (XFS_IFORK_Q(ip)) { - error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); - if (error) - return error; - } + xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); + if (XFS_IFORK_Q(ip)) + xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); xfs_inobp_check(mp, bp); /* diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 229cc6a6d8ef..ebfc13350f9a 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -516,6 +516,20 @@ xfs_vn_getattr( stat->blocks = XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); + if (ip->i_d.di_version == 3) { + if (request_mask & STATX_BTIME) { + stat->result_mask |= STATX_BTIME; + stat->btime.tv_sec = ip->i_d.di_crtime.t_sec; + stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec; + } + } + + if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) + stat->attributes |= STATX_ATTR_IMMUTABLE; + if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) + stat->attributes |= STATX_ATTR_APPEND; + if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP) + stat->attributes |= STATX_ATTR_NODUMP; switch (inode->i_mode & S_IFMT) { case S_IFBLK: diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 2a6d9b1558e0..26d67ce3c18d 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -583,7 +583,7 @@ xfs_inumbers( return error; bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); - buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); + buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP); do { struct xfs_inobt_rec_incore r; int stat; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 7cdfe167074f..143db9c523e2 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -261,9 +261,9 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ - __start_ro_after_init = .; \ + VMLINUX_SYMBOL(__start_ro_after_init) = .; \ *(.data..ro_after_init) \ - __end_ro_after_init = .; + VMLINUX_SYMBOL(__end_ro_after_init) = .; #endif /* diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index b72dd2ad5f44..c0b3d999c266 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu); void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); int kvm_vgic_map_resources(struct kvm *kvm); int 
kvm_vgic_hyp_init(void); +void kvm_vgic_init_cpu_hardware(void); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, bool level); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b296a9006117..9382c5da7a2e 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -51,6 +51,7 @@ struct blk_mq_hw_ctx { atomic_t nr_active; + struct delayed_work delayed_run_work; struct delayed_work delay_work; struct hlist_node cpuhp_dead; @@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); +void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5a7da607ca04..7548f332121a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -610,7 +610,6 @@ struct request_queue { #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ #define QUEUE_FLAG_DAX 26 /* device supports DAX */ #define QUEUE_FLAG_STATS 27 /* track rq completion times */ -#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f6b43fbb141c..af9c86e958bd 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } +static inline void cgroup_init_kthreadd(void) +{ + /* + * kthreadd is inherited by all kthreads, keep it in the root so + * that the new kthreads are guaranteed to stay in the root until + * initialization is finished. + */ + current->no_cgroup_migration = 1; +} + +static inline void cgroup_kthread_ready(void) +{ + /* + * This kthread finished initialization. The creator should have + * set PF_NO_SETAFFINITY if this kthread should stay in the root. 
+ */ + current->no_cgroup_migration = 0; +} + #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; @@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {} static inline int cgroup_init_early(void) { return 0; } static inline int cgroup_init(void) { return 0; } +static inline void cgroup_init_kthreadd(void) {} +static inline void cgroup_kthread_ready(void) {} static inline bool task_under_cgroup_hierarchy(struct task_struct *task, struct cgroup *ancestor) diff --git a/include/linux/elevator.h b/include/linux/elevator.h index aebecc4ed088..22d39e8d4de1 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *); extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); -extern void elevator_exit(struct elevator_queue *); +extern void elevator_exit(struct request_queue *, struct elevator_queue *); extern int elevator_change(struct request_queue *, const char *); extern bool elv_bio_merge_ok(struct request *, struct bio *); extern struct elevator_queue *elevator_alloc(struct request_queue *, diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index eafc965b3eb8..dc30f3d057eb 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -96,6 +96,9 @@ #define GICH_MISR_EOI (1 << 0) #define GICH_MISR_U (1 << 1) +#define GICV_PMR_PRIORITY_SHIFT 3 +#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT) + #ifndef __ASSEMBLY__ #include <linux/irqdomain.h> diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 51891fb0d3ce..c91b3bcd158f 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) ___pud; \ }) -#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ -({ \ - unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ - pmd_t ___pmd; \ - \ - ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \ - mmu_notifier_invalidate_range(__mm, ___haddr, \ - ___haddr + HPAGE_PMD_SIZE); \ - \ - ___pmd; \ -}) - /* * set_pte_at_notify() sets the pte _after_ running the notifier. 
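The GICV_PMR_PRIORITY_* macros added above describe a 5-bit priority-mask field stored at bits [7:3] of the register, so converting between the packed field and the effective priority value is just a shift by GICV_PMR_PRIORITY_SHIFT. A small sketch of that conversion; the helper names here are illustrative, not kernel API:

#define GICV_PMR_PRIORITY_SHIFT	3
#define GICV_PMR_PRIORITY_MASK	(0x1f << GICV_PMR_PRIORITY_SHIFT)

/* 5-bit field value -> effective priority mask level */
static inline unsigned int gicv_pmr_field_to_priority(unsigned int field)
{
	return (field << GICV_PMR_PRIORITY_SHIFT) & GICV_PMR_PRIORITY_MASK;
}

/* effective priority mask level -> 5-bit field value */
static inline unsigned int gicv_pmr_priority_to_field(unsigned int pmr)
{
	return (pmr & GICV_PMR_PRIORITY_MASK) >> GICV_PMR_PRIORITY_SHIFT;
}

For example, a field value of 0x1f corresponds to priority level 0xf8, the lowest (most permissive) mask expressible in the 5-bit encoding.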
* This is safe to start by updating the secondary MMUs, because the primary MMU @@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define ptep_clear_flush_notify ptep_clear_flush #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush #define pudp_huge_clear_flush_notify pudp_huge_clear_flush -#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear #define set_pte_at_notify set_pte_at #endif /* CONFIG_MMU_NOTIFIER */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index c43d435d4225..9061780b141f 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -64,26 +64,26 @@ enum { * RDMA_QPTYPE field */ enum { - NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ - NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ + NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */ + NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */ }; /* RDMA QP Service Type codes for Discovery Log Page entry TSAS * RDMA_QPTYPE field */ enum { - NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ - NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ - NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */ - NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ - NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ + NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */ + NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */ + NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */ + NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */ + NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */ }; /* RDMA Connection Management Service Type codes for Discovery Log Page * entry TSAS RDMA_CMS field */ enum { - NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */ + NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */ }; #define NVMF_AQ_DEPTH 32 diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 8ce2d87a238b..5e45385c5bdc 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -145,8 +145,9 @@ struct pinctrl_desc { extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data, struct pinctrl_dev **pctldev); +extern int pinctrl_enable(struct pinctrl_dev *pctldev); -/* Please use pinctrl_register_and_init() instead */ +/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */ extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data); diff --git a/include/linux/sched.h b/include/linux/sched.h index d67eee84fd43..4cf9a59a4d08 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -604,6 +604,10 @@ struct task_struct { #ifdef CONFIG_COMPAT_BRK unsigned brk_randomized:1; #endif +#ifdef CONFIG_CGROUPS + /* disallow userland-initiated cgroup migration */ + unsigned no_cgroup_migration:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. 
*/ diff --git a/include/linux/stat.h b/include/linux/stat.h index c76e524fb34b..64b6b3aece21 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -26,6 +26,7 @@ struct kstat { unsigned int nlink; uint32_t blksize; /* Preferred I/O size */ u64 attributes; + u64 attributes_mask; #define KSTAT_ATTR_FS_IOC_FLAGS \ (STATX_ATTR_COMPRESSED | \ STATX_ATTR_IMMUTABLE | \ diff --git a/include/linux/uio.h b/include/linux/uio.h index 804e34c6f981..f2d36a3d3005 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -39,7 +39,10 @@ struct iov_iter { }; union { unsigned long nr_segs; - int idx; + struct { + int idx; + int start_idx; + }; }; }; @@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); +void iov_iter_revert(struct iov_iter *i, size_t bytes); int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 04b0d3f95043..7edfbdb55a99 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -167,6 +167,7 @@ struct virtio_driver { unsigned int feature_table_size; const unsigned int *feature_table_legacy; unsigned int feature_table_size_legacy; + int (*validate)(struct virtio_device *dev); int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 4b784b6e21c0..ccfad0e9c2cd 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -117,6 +117,7 @@ enum transport_state_table { TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_COMPLETE_QF_WP = 18, TRANSPORT_COMPLETE_QF_OK = 19, + TRANSPORT_COMPLETE_QF_ERR = 20, }; /* Used for struct se_cmd->se_cmd_flags */ @@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp { u16 tg_pt_gp_id; int tg_pt_gp_valid_id; int tg_pt_gp_alua_supported_states; - int tg_pt_gp_alua_pending_state; - int tg_pt_gp_alua_previous_state; int tg_pt_gp_alua_access_status; int tg_pt_gp_alua_access_type; int tg_pt_gp_nonop_delay_msecs; @@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp { int tg_pt_gp_pref; int tg_pt_gp_write_metadata; u32 tg_pt_gp_members; - atomic_t tg_pt_gp_alua_access_state; + int tg_pt_gp_alua_access_state; atomic_t tg_pt_gp_ref_cnt; spinlock_t tg_pt_gp_lock; - struct mutex tg_pt_gp_md_mutex; + struct mutex tg_pt_gp_transition_mutex; struct se_device *tg_pt_gp_dev; struct config_group tg_pt_gp_group; struct list_head tg_pt_gp_list; struct list_head tg_pt_gp_lun_list; struct se_lun *tg_pt_gp_alua_lun; struct se_node_acl *tg_pt_gp_alua_nacl; - struct work_struct tg_pt_gp_transition_work; - struct completion *tg_pt_gp_transition_complete; }; struct t10_vpd { @@ -705,6 +702,7 @@ struct se_lun { u64 unpacked_lun; #define SE_LUN_LINK_MAGIC 0xffff7771 u32 lun_link_magic; + bool lun_shutdown; bool lun_access_ro; u32 lun_index; diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index dd9820b1c779..f8d9fed17ba9 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -445,6 +445,7 @@ header-y += unistd.h header-y += unix_diag.h header-y += usbdevice_fs.h header-y += usbip.h +header-y += userio.h header-y += utime.h header-y += utsname.h 
header-y += uuid.h diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 51a6b86e3700..d538897b8e08 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -114,7 +114,7 @@ struct statx { __u64 stx_ino; /* Inode number */ __u64 stx_size; /* File size */ __u64 stx_blocks; /* Number of 512-byte blocks allocated */ - __u64 __spare1[1]; + __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */ /* 0x40 */ struct statx_timestamp stx_atime; /* Last access time */ struct statx_timestamp stx_btime; /* File creation time */ @@ -152,9 +152,10 @@ struct statx { #define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ #define STATX_BTIME 0x00000800U /* Want/got stx_btime */ #define STATX_ALL 0x00000fffU /* All currently supported flags */ +#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ /* - * Attributes to be found in stx_attributes + * Attributes to be found in stx_attributes and masked in stx_attributes_mask. * * These give information about the features or the state of a file that might * be of use to ordinary userspace programs such as GUIs or ls rather than diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 15b4385a2be1..90007a1abcab 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -79,7 +79,7 @@ * configuration space */ #define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 24 : 20) /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ -#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled) +#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 diff --git a/kernel/audit.c b/kernel/audit.c index d54bf5932374..dc202d582aa1 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -160,7 +160,6 @@ static LIST_HEAD(audit_freelist); /* queue msgs to send via kauditd_task */ static struct sk_buff_head audit_queue; -static void kauditd_hold_skb(struct sk_buff *skb); /* queue msgs due to temporary unicast send problems */ static struct sk_buff_head audit_retry_queue; /* queue msgs waiting for new auditd connection */ @@ -454,30 +453,6 @@ static void auditd_set(int pid, u32 portid, struct net *net) } /** - * auditd_reset - Disconnect the auditd connection - * - * Description: - * Break the auditd/kauditd connection and move all the queued records into the - * hold queue in case auditd reconnects. 
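The uapi statx changes above turn __spare1 into stx_attributes_mask and reserve the top mask bit (STATX__RESERVED, which the syscall now rejects with -EINVAL). A hedged userspace sketch of how a caller consumes the new fields, requesting STATX_BTIME and checking both stx_mask and stx_attributes_mask before trusting the results; it uses the raw syscall on the assumption that a libc wrapper may not exist yet, and the exact header mix can vary by libc:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/stat.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct statx stx;

	if (syscall(SYS_statx, AT_FDCWD, path, 0, STATX_BTIME, &stx) < 0) {
		perror("statx");
		return 1;
	}
	/* stx_mask says which requested fields were actually filled in */
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	else
		printf("btime not provided by this filesystem\n");
	/* stx_attributes_mask says which attribute bits are meaningful */
	if (stx.stx_attributes_mask & STATX_ATTR_IMMUTABLE)
		printf("immutable: %s\n",
		       (stx.stx_attributes & STATX_ATTR_IMMUTABLE) ? "yes" : "no");
	return 0;
}

On XFS v3 inodes, per the xfs_vn_getattr() hunk earlier in this merge, STATX_BTIME is filled from di_crtime and the IMMUTABLE/APPEND/NODUMP attribute bits are reported from the inode flags.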
- */ -static void auditd_reset(void) -{ - struct sk_buff *skb; - - /* if it isn't already broken, break the connection */ - rcu_read_lock(); - if (auditd_conn.pid) - auditd_set(0, 0, NULL); - rcu_read_unlock(); - - /* flush all of the main and retry queues to the hold queue */ - while ((skb = skb_dequeue(&audit_retry_queue))) - kauditd_hold_skb(skb); - while ((skb = skb_dequeue(&audit_queue))) - kauditd_hold_skb(skb); -} - -/** * kauditd_print_skb - Print the audit record to the ring buffer * @skb: audit record * @@ -505,9 +480,6 @@ static void kauditd_rehold_skb(struct sk_buff *skb) { /* put the record back in the queue at the same place */ skb_queue_head(&audit_hold_queue, skb); - - /* fail the auditd connection */ - auditd_reset(); } /** @@ -544,9 +516,6 @@ static void kauditd_hold_skb(struct sk_buff *skb) /* we have no other options - drop the message */ audit_log_lost("kauditd hold queue overflow"); kfree_skb(skb); - - /* fail the auditd connection */ - auditd_reset(); } /** @@ -567,6 +536,30 @@ static void kauditd_retry_skb(struct sk_buff *skb) } /** + * auditd_reset - Disconnect the auditd connection + * + * Description: + * Break the auditd/kauditd connection and move all the queued records into the + * hold queue in case auditd reconnects. + */ +static void auditd_reset(void) +{ + struct sk_buff *skb; + + /* if it isn't already broken, break the connection */ + rcu_read_lock(); + if (auditd_conn.pid) + auditd_set(0, 0, NULL); + rcu_read_unlock(); + + /* flush all of the main and retry queues to the hold queue */ + while ((skb = skb_dequeue(&audit_retry_queue))) + kauditd_hold_skb(skb); + while ((skb = skb_dequeue(&audit_queue))) + kauditd_hold_skb(skb); +} + +/** * auditd_send_unicast_skb - Send a record via unicast to auditd * @skb: audit record * @@ -758,6 +751,7 @@ static int kauditd_thread(void *dummy) NULL, kauditd_rehold_skb); if (rc < 0) { sk = NULL; + auditd_reset(); goto main_queue; } @@ -767,6 +761,7 @@ static int kauditd_thread(void *dummy) NULL, kauditd_hold_skb); if (rc < 0) { sk = NULL; + auditd_reset(); goto main_queue; } @@ -775,16 +770,18 @@ main_queue: * unicast, dump failed record sends to the retry queue; if * sk == NULL due to previous failures we will just do the * multicast send and move the record to the retry queue */ - kauditd_send_queue(sk, portid, &audit_queue, 1, - kauditd_send_multicast_skb, - kauditd_retry_skb); + rc = kauditd_send_queue(sk, portid, &audit_queue, 1, + kauditd_send_multicast_skb, + kauditd_retry_skb); + if (sk == NULL || rc < 0) + auditd_reset(); + sk = NULL; /* drop our netns reference, no auditd sends past this line */ if (net) { put_net(net); net = NULL; } - sk = NULL; /* we have processed all the queues so wake everyone */ wake_up(&audit_backlog_wait); diff --git a/kernel/audit.h b/kernel/audit.h index 0f1cf6d1878a..0d87f8ab8778 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -333,13 +333,7 @@ extern u32 audit_sig_sid; extern int audit_filter(int msgtype, unsigned int listtype); #ifdef CONFIG_AUDITSYSCALL -extern int __audit_signal_info(int sig, struct task_struct *t); -static inline int audit_signal_info(int sig, struct task_struct *t) -{ - if (auditd_test_task(t) || (audit_signals && !audit_dummy_context())) - return __audit_signal_info(sig, t); - return 0; -} +extern int audit_signal_info(int sig, struct task_struct *t); extern void audit_filter_inodes(struct task_struct *, struct audit_context *); extern struct list_head *audit_killed_trees(void); #else diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 
e59ffc7fc522..1c2333155893 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -2249,26 +2249,27 @@ void __audit_ptrace(struct task_struct *t) * If the audit subsystem is being terminated, record the task (pid) * and uid that is doing that. */ -int __audit_signal_info(int sig, struct task_struct *t) +int audit_signal_info(int sig, struct task_struct *t) { struct audit_aux_data_pids *axp; struct task_struct *tsk = current; struct audit_context *ctx = tsk->audit_context; kuid_t uid = current_uid(), t_uid = task_uid(t); - if (auditd_test_task(t)) { - if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { - audit_sig_pid = task_tgid_nr(tsk); - if (uid_valid(tsk->loginuid)) - audit_sig_uid = tsk->loginuid; - else - audit_sig_uid = uid; - security_task_getsecid(tsk, &audit_sig_sid); - } - if (!audit_signals || audit_dummy_context()) - return 0; + if (auditd_test_task(t) && + (sig == SIGTERM || sig == SIGHUP || + sig == SIGUSR1 || sig == SIGUSR2)) { + audit_sig_pid = task_tgid_nr(tsk); + if (uid_valid(tsk->loginuid)) + audit_sig_uid = tsk->loginuid; + else + audit_sig_uid = uid; + security_task_getsecid(tsk, &audit_sig_sid); } + if (!audit_signals || audit_dummy_context()) + return 0; + /* optimize the common case by putting first signal recipient directly * in audit_context */ if (!ctx->target_pid) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index f45827e205d3..b4f1cb0c5ac7 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1162,12 +1162,12 @@ out: LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */ off = IMM; load_word: - /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are - * only appearing in the programs where ctx == - * skb. All programs keep 'ctx' in regs[BPF_REG_CTX] - * == BPF_R6, bpf_convert_filter() saves it in BPF_R6, - * internal BPF verifier will check that BPF_R6 == - * ctx. + /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only + * appearing in the programs where ctx == skb + * (see may_access_skb() in the verifier). All programs + * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6, + * bpf_convert_filter() saves it in BPF_R6, internal BPF + * verifier will check that BPF_R6 == ctx. * * BPF_ABS and BPF_IND are wrappers of function calls, * so they scratch BPF_R1-BPF_R5 registers, preserve diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 1dc22f6b49f5..12e19f0636ea 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -1146,7 +1146,7 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, * path is super cold. Let's just sleep a bit and retry. */ pinned_sb = kernfs_pin_sb(root->kf_root, NULL); - if (IS_ERR(pinned_sb) || + if (IS_ERR_OR_NULL(pinned_sb) || !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { mutex_unlock(&cgroup_mutex); if (!IS_ERR_OR_NULL(pinned_sb)) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 48851327a15e..687f5e0194ef 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, tsk = tsk->group_leader; /* - * Workqueue threads may acquire PF_NO_SETAFFINITY and become - * trapped in a cpuset, or RT worker may be born in a cgroup - * with no rt_runtime allocated. Just say no. + * kthreads may acquire PF_NO_SETAFFINITY during initialization. + * If userland migrates such a kthread to a non-root cgroup, it can + * become trapped in a cpuset, or RT kthread may be born in a + * cgroup with no rt_runtime allocated. Just say no. 
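The __cgroup_procs_write() change above rejects migration for any task with no_cgroup_migration or PF_NO_SETAFFINITY set, not just kthreadd itself. A sketch of the userspace-visible effect, writing a kernel thread's pid into a cgroup procs file and expecting EINVAL; the cgroup path and the choice of pid 2 (kthreadd on typical systems) are assumptions about the test machine:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical cgroup created for the test */
	const char *procs = "/sys/fs/cgroup/cpuset/test/cgroup.procs";
	FILE *f = fopen(procs, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* expect the write-back to fail with EINVAL for a kthread */
	if (fprintf(f, "2\n") < 0 || fflush(f) == EOF)
		fprintf(stderr, "migrate rejected: %s\n", strerror(errno));
	fclose(f);
	return 0;
}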
*/ - if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { + if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) { ret = -EINVAL; goto out_unlock_rcu; } diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index 4544b115f5eb..d052947fe785 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk) struct cpumask * irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) { - int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec; + int n, nodes, cpus_per_vec, extra_vecs, curvec; int affv = nvecs - affd->pre_vectors - affd->post_vectors; int last_affv = affv + affd->pre_vectors; nodemask_t nodemsk = NODE_MASK_NONE; @@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) goto done; } - /* Spread the vectors per node */ - vecs_per_node = affv / nodes; - /* Account for rounding errors */ - extra_vecs = affv - (nodes * vecs_per_node); - for_each_node_mask(n, nodemsk) { - int ncpus, v, vecs_to_assign = vecs_per_node; + int ncpus, v, vecs_to_assign, vecs_per_node; + + /* Spread the vectors per node */ + vecs_per_node = (affv - curvec) / nodes; /* Get the cpus on this node which are in the mask */ cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n)); /* Calculate the number of cpus per vector */ ncpus = cpumask_weight(nmsk); + vecs_to_assign = min(vecs_per_node, ncpus); + + /* Account for rounding errors */ + extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign); for (v = 0; curvec < last_affv && v < vecs_to_assign; curvec++, v++) { @@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) /* Account for extra vectors to compensate rounding errors */ if (extra_vecs) { cpus_per_vec++; - if (!--extra_vecs) - vecs_per_node++; + --extra_vecs; } irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec); } if (curvec >= last_affv) break; + --nodes; } done: diff --git a/kernel/kthread.c b/kernel/kthread.c index 2f26adea0f84..26db528c1d88 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -20,6 +20,7 @@ #include <linux/freezer.h> #include <linux/ptrace.h> #include <linux/uaccess.h> +#include <linux/cgroup.h> #include <trace/events/sched.h> static DEFINE_SPINLOCK(kthread_create_lock); @@ -225,6 +226,7 @@ static int kthread(void *_create) ret = -EINTR; if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { + cgroup_kthread_ready(); __kthread_parkme(self); ret = threadfn(data); } @@ -538,6 +540,7 @@ int kthreadd(void *unused) set_mems_allowed(node_states[N_MEMORY]); current->flags |= PF_NOFREEZE; + cgroup_init_kthreadd(); for (;;) { set_current_state(TASK_INTERRUPTIBLE); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 0af928712174..266ddcc1d8bb 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -184,11 +184,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task) WARN_ON(!task->ptrace || task->parent != current); + /* + * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely. + * Recheck state under the lock to close this race. 
+ */ spin_lock_irq(&task->sighand->siglock); - if (__fatal_signal_pending(task)) - wake_up_state(task, __TASK_TRACED); - else - task->state = TASK_TRACED; + if (task->state == __TASK_TRACED) { + if (__fatal_signal_pending(task)) + wake_up_state(task, __TASK_TRACED); + else + task->state = TASK_TRACED; + } spin_unlock_irq(&task->sighand->siglock); } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index acf0a5a06da7..8c8714fcb53c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2133,9 +2133,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp, if (write) { if (*negp) return -EINVAL; + if (*lvalp > UINT_MAX) + return -EINVAL; *valp = *lvalp; } else { unsigned int val = *valp; + *negp = false; *lvalp = (unsigned long)val; } return 0; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 96fc3c043ad6..54e7a90db848 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -4826,9 +4826,9 @@ static __init int test_ringbuffer(void) rb_data[cpu].cnt = cpu; rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], "rbtester/%d", cpu); - if (WARN_ON(!rb_threads[cpu])) { + if (WARN_ON(IS_ERR(rb_threads[cpu]))) { pr_cont("FAILED\n"); - ret = -1; + ret = PTR_ERR(rb_threads[cpu]); goto out_free; } @@ -4838,9 +4838,9 @@ static __init int test_ringbuffer(void) /* Now create the rb hammer! */ rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); - if (WARN_ON(!rb_hammer)) { + if (WARN_ON(IS_ERR(rb_hammer))) { pr_cont("FAILED\n"); - ret = -1; + ret = PTR_ERR(rb_hammer); goto out_free; } diff --git a/lib/iov_iter.c b/lib/iov_iter.c index e68604ae3ced..60abc44385b7 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size) } EXPORT_SYMBOL(iov_iter_advance); +void iov_iter_revert(struct iov_iter *i, size_t unroll) +{ + if (!unroll) + return; + i->count += unroll; + if (unlikely(i->type & ITER_PIPE)) { + struct pipe_inode_info *pipe = i->pipe; + int idx = i->idx; + size_t off = i->iov_offset; + while (1) { + size_t n = off - pipe->bufs[idx].offset; + if (unroll < n) { + off -= (n - unroll); + break; + } + unroll -= n; + if (!unroll && idx == i->start_idx) { + off = 0; + break; + } + if (!idx--) + idx = pipe->buffers - 1; + off = pipe->bufs[idx].offset + pipe->bufs[idx].len; + } + i->iov_offset = off; + i->idx = idx; + pipe_truncate(i); + return; + } + if (unroll <= i->iov_offset) { + i->iov_offset -= unroll; + return; + } + unroll -= i->iov_offset; + if (i->type & ITER_BVEC) { + const struct bio_vec *bvec = i->bvec; + while (1) { + size_t n = (--bvec)->bv_len; + i->nr_segs++; + if (unroll <= n) { + i->bvec = bvec; + i->iov_offset = n - unroll; + return; + } + unroll -= n; + } + } else { /* same logics for iovec and kvec */ + const struct iovec *iov = i->iov; + while (1) { + size_t n = (--iov)->iov_len; + i->nr_segs++; + if (unroll <= n) { + i->iov = iov; + i->iov_offset = n - unroll; + return; + } + unroll -= n; + } + } +} +EXPORT_SYMBOL(iov_iter_revert); + /* * Return the count of just the current iov_iter segment. 
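iov_iter_revert() above undoes a partial advance: if the unroll amount fits inside the current segment it just shrinks iov_offset, otherwise it walks segments backwards, growing nr_segs, until the remainder is absorbed (the pipe case additionally needs the new start_idx and pipe buffer state, and is not modelled here). A self-contained userspace model of the iovec-array case, using simplified advance/revert bookkeeping rather than the kernel's struct iov_iter:

#include <assert.h>
#include <stddef.h>
#include <sys/uio.h>

struct iter {
	const struct iovec *iov;	/* current segment */
	unsigned long nr_segs;		/* segments remaining, incl. current */
	size_t iov_offset;		/* bytes consumed within *iov */
	size_t count;			/* bytes remaining overall */
};

static void iter_advance(struct iter *i, size_t bytes)
{
	i->count -= bytes;
	while (i->nr_segs && bytes >= i->iov->iov_len - i->iov_offset) {
		bytes -= i->iov->iov_len - i->iov_offset;
		i->iov++;
		i->nr_segs--;
		i->iov_offset = 0;
	}
	i->iov_offset += bytes;
}

static void iter_revert(struct iter *i, size_t unroll)
{
	i->count += unroll;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	/* same walk as the patch: step back one segment at a time */
	while (1) {
		size_t n = (--i->iov)->iov_len;

		i->nr_segs++;
		if (unroll <= n) {
			i->iov_offset = n - unroll;
			return;
		}
		unroll -= n;
	}
}

int main(void)
{
	char a[3], b[5];
	struct iovec v[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct iter it = { v, 2, 0, sizeof(a) + sizeof(b) };

	iter_advance(&it, 6);	/* all of a, 3 bytes into b */
	assert(it.iov == &v[1] && it.iov_offset == 3 && it.count == 2);
	iter_revert(&it, 5);	/* back into the first segment */
	assert(it.iov == &v[0] && it.iov_offset == 1 && it.count == 7);
	return 0;
}

This is the same shape the net/core/datagram.c hunks below rely on: callers remember how far the iterator had been advanced and revert exactly that much when a copy faults mid-way.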
*/ @@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction, i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); i->iov_offset = 0; i->count = count; + i->start_idx = i->idx; } EXPORT_SYMBOL(iov_iter_pipe); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1ebc93e179f3..f3c4f9d22821 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -240,18 +240,18 @@ static ssize_t defrag_store(struct kobject *kobj, clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); - } else if (!memcmp("defer", buf, - min(sizeof("defer")-1, count))) { - clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); - clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); - clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); - set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); } else if (!memcmp("defer+madvise", buf, min(sizeof("defer+madvise")-1, count))) { clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); + } else if (!memcmp("defer", buf, + min(sizeof("defer")-1, count))) { + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); } else if (!memcmp("madvise", buf, min(sizeof("madvise")-1, count))) { clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); @@ -1568,8 +1568,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, deactivate_page(page); if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { - orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, - tlb->fullmm); + pmdp_invalidate(vma, addr, pmd); orig_pmd = pmd_mkold(orig_pmd); orig_pmd = pmd_mkclean(orig_pmd); @@ -1724,37 +1723,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, { struct mm_struct *mm = vma->vm_mm; spinlock_t *ptl; - int ret = 0; + pmd_t entry; + bool preserve_write; + int ret; ptl = __pmd_trans_huge_lock(pmd, vma); - if (ptl) { - pmd_t entry; - bool preserve_write = prot_numa && pmd_write(*pmd); - ret = 1; + if (!ptl) + return 0; - /* - * Avoid trapping faults against the zero page. The read-only - * data is likely to be read-cached on the local CPU and - * local/remote hits to the zero page are not interesting. - */ - if (prot_numa && is_huge_zero_pmd(*pmd)) { - spin_unlock(ptl); - return ret; - } + preserve_write = prot_numa && pmd_write(*pmd); + ret = 1; - if (!prot_numa || !pmd_protnone(*pmd)) { - entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); - entry = pmd_modify(entry, newprot); - if (preserve_write) - entry = pmd_mk_savedwrite(entry); - ret = HPAGE_PMD_NR; - set_pmd_at(mm, addr, pmd, entry); - BUG_ON(vma_is_anonymous(vma) && !preserve_write && - pmd_write(entry)); - } - spin_unlock(ptl); - } + /* + * Avoid trapping faults against the zero page. 
The read-only + * data is likely to be read-cached on the local CPU and + * local/remote hits to the zero page are not interesting. + */ + if (prot_numa && is_huge_zero_pmd(*pmd)) + goto unlock; + + if (prot_numa && pmd_protnone(*pmd)) + goto unlock; + + /* + * In case prot_numa, we are under down_read(mmap_sem). It's critical + * to not clear pmd intermittently to avoid race with MADV_DONTNEED + * which is also under down_read(mmap_sem): + * + * CPU0: CPU1: + * change_huge_pmd(prot_numa=1) + * pmdp_huge_get_and_clear_notify() + * madvise_dontneed() + * zap_pmd_range() + * pmd_trans_huge(*pmd) == 0 (without ptl) + * // skip the pmd + * set_pmd_at(); + * // pmd is re-established + * + * The race makes MADV_DONTNEED miss the huge pmd and don't clear it + * which may break userspace. + * + * pmdp_invalidate() is required to make sure we don't miss + * dirty/young flags set by hardware. + */ + entry = *pmd; + pmdp_invalidate(vma, addr, pmd); + /* + * Recover dirty/young flags. It relies on pmdp_invalidate to not + * corrupt them. + */ + if (pmd_dirty(*pmd)) + entry = pmd_mkdirty(entry); + if (pmd_young(*pmd)) + entry = pmd_mkyoung(entry); + + entry = pmd_modify(entry, newprot); + if (preserve_write) + entry = pmd_mk_savedwrite(entry); + ret = HPAGE_PMD_NR; + set_pmd_at(mm, addr, pmd, entry); + BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); +unlock: + spin_unlock(ptl); return ret; } diff --git a/mm/internal.h b/mm/internal.h index ccfc2a2969f4..266efaeaa370 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -481,6 +481,13 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, enum ttu_flags; struct tlbflush_unmap_batch; + +/* + * only for MM internal work items which do not depend on + * any allocations or locks which might depend on allocations + */ +extern struct workqueue_struct *mm_percpu_wq; + #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH void try_to_unmap_flush(void); void try_to_unmap_flush_dirty(void); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 75b2745bac41..37d0b334bfe9 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1529,7 +1529,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode) { - long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); @@ -1538,14 +1537,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { - err = compat_get_bitmap(bm, nmask, nr_bits); + if (compat_get_bitmap(bm, nmask, nr_bits)) + return -EFAULT; nm = compat_alloc_user_space(alloc_size); - err |= copy_to_user(nm, bm, alloc_size); + if (copy_to_user(nm, bm, alloc_size)) + return -EFAULT; } - if (err) - return -EFAULT; - return sys_set_mempolicy(mode, nm, nr_bits+1); } @@ -1553,7 +1551,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, compat_ulong_t, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode, compat_ulong_t, flags) { - long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; nodemask_t bm; @@ -1562,14 +1559,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { - err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); + if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) + return -EFAULT; nm = compat_alloc_user_space(alloc_size); - err |= 
copy_to_user(nm, nodes_addr(bm), alloc_size); + if (copy_to_user(nm, nodes_addr(bm), alloc_size)) + return -EFAULT; } - if (err) - return -EFAULT; - return sys_mbind(start, len, mode, nm, nr_bits+1, flags); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6cbde310abed..f3d603cef2c0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2373,6 +2373,13 @@ void drain_all_pages(struct zone *zone) */ static cpumask_t cpus_with_pcps; + /* + * Make sure nobody triggers this path before mm_percpu_wq is fully + * initialized. + */ + if (WARN_ON_ONCE(!mm_percpu_wq)) + return; + /* Workqueues cannot recurse */ if (current->flags & PF_WQ_WORKER) return; @@ -2422,7 +2429,7 @@ void drain_all_pages(struct zone *zone) for_each_cpu(cpu, &cpus_with_pcps) { struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); INIT_WORK(work, drain_local_pages_wq); - schedule_work_on(cpu, work); + queue_work_on(cpu, mm_percpu_wq, work); } for_each_cpu(cpu, &cpus_with_pcps) flush_work(per_cpu_ptr(&pcpu_drain, cpu)); @@ -4519,13 +4526,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) K(node_page_state(pgdat, NR_FILE_MAPPED)), K(node_page_state(pgdat, NR_FILE_DIRTY)), K(node_page_state(pgdat, NR_WRITEBACK)), + K(node_page_state(pgdat, NR_SHMEM)), #ifdef CONFIG_TRANSPARENT_HUGEPAGE K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR), K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), #endif - K(node_page_state(pgdat, NR_SHMEM)), K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), K(node_page_state(pgdat, NR_UNSTABLE_NFS)), node_page_state(pgdat, NR_PAGES_SCANNED), diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index c4c9def8ffea..de9c40d7304a 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -111,12 +111,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) if (pvmw->pmd && !pvmw->pte) return not_found(pvmw); - /* Only for THP, seek to next pte entry makes sense */ - if (pvmw->pte) { - if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page)) - return not_found(pvmw); + if (pvmw->pte) goto next_pte; - } if (unlikely(PageHuge(pvmw->page))) { /* when pud is not present, pte will be NULL */ @@ -165,9 +161,14 @@ restart: while (1) { if (check_pte(pvmw)) return true; -next_pte: do { +next_pte: + /* Seek to next pte only makes sense for THP */ + if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page)) + return not_found(pvmw); + do { pvmw->address += PAGE_SIZE; - if (pvmw->address >= + if (pvmw->address >= pvmw->vma->vm_end || + pvmw->address >= __vma_address(pvmw->page, pvmw->vma) + hpage_nr_pages(pvmw->page) * PAGE_SIZE) return not_found(pvmw); diff --git a/mm/swap.c b/mm/swap.c index c4910f14f957..5dabf444d724 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -670,30 +670,19 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); -/* - * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM - * workqueue, aiding in getting memory freed. 
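The conversion in the surrounding mm/ hunks retires the private lru_add_drain_wq and vmstat_wq in favour of one shared mm_percpu_wq allocated with WQ_FREEZABLE|WQ_MEM_RECLAIM, with callers queueing one work item per cpu via queue_work_on() and flushing afterwards. A minimal kernel-module sketch of that pattern, offered as an illustration rather than anything in this merge:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/percpu.h>

static struct workqueue_struct *demo_wq;
static DEFINE_PER_CPU(struct work_struct, demo_work);

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work on cpu %d\n", smp_processor_id());
}

static int __init demo_init(void)
{
	int cpu;

	demo_wq = alloc_workqueue("demo_percpu_wq",
				  WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(&demo_work, cpu);

		INIT_WORK(work, demo_fn);
		queue_work_on(cpu, demo_wq, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(&demo_work, cpu));
	put_online_cpus();
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The WQ_MEM_RECLAIM flag is what makes a single shared queue workable here: it guarantees a rescuer thread, so the drain paths keep making progress even when the system is too short on memory to spawn new workers.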
- */ -static struct workqueue_struct *lru_add_drain_wq; - -static int __init lru_init(void) -{ - lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0); - - if (WARN(!lru_add_drain_wq, - "Failed to create workqueue lru_add_drain_wq")) - return -ENOMEM; - - return 0; -} -early_initcall(lru_init); - void lru_add_drain_all(void) { static DEFINE_MUTEX(lock); static struct cpumask has_work; int cpu; + /* + * Make sure nobody triggers this path before mm_percpu_wq is fully + * initialized. + */ + if (WARN_ON(!mm_percpu_wq)) + return; + mutex_lock(&lock); get_online_cpus(); cpumask_clear(&has_work); @@ -707,7 +696,7 @@ void lru_add_drain_all(void) pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || need_activate_page_drain(cpu)) { INIT_WORK(work, lru_add_drain_per_cpu); - queue_work_on(cpu, lru_add_drain_wq, work); + queue_work_on(cpu, mm_percpu_wq, work); cpumask_set_cpu(cpu, &has_work); } } diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 310ac0b8f974..ac6318a064d3 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -201,6 +201,8 @@ void swap_cgroup_swapoff(int type) struct page *page = map[i]; if (page) __free_page(page); + if (!(i % SWAP_CLUSTER_MAX)) + cond_resched(); } vfree(map); } diff --git a/mm/vmstat.c b/mm/vmstat.c index 89f95396ec46..809025ed97ea 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1552,7 +1552,6 @@ static const struct file_operations proc_vmstat_file_operations = { #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_SMP -static struct workqueue_struct *vmstat_wq; static DEFINE_PER_CPU(struct delayed_work, vmstat_work); int sysctl_stat_interval __read_mostly = HZ; @@ -1623,7 +1622,7 @@ static void vmstat_update(struct work_struct *w) * to occur in the future. Keep on running the * update worker thread. */ - queue_delayed_work_on(smp_processor_id(), vmstat_wq, + queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, this_cpu_ptr(&vmstat_work), round_jiffies_relative(sysctl_stat_interval)); } @@ -1702,7 +1701,7 @@ static void vmstat_shepherd(struct work_struct *w) struct delayed_work *dw = &per_cpu(vmstat_work, cpu); if (!delayed_work_pending(dw) && need_update(cpu)) - queue_delayed_work_on(cpu, vmstat_wq, dw, 0); + queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0); } put_online_cpus(); @@ -1718,7 +1717,6 @@ static void __init start_shepherd_timer(void) INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), vmstat_update); - vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); schedule_delayed_work(&shepherd, round_jiffies_relative(sysctl_stat_interval)); } @@ -1764,11 +1762,16 @@ static int vmstat_cpu_dead(unsigned int cpu) #endif +struct workqueue_struct *mm_percpu_wq; + void __init init_mm_internals(void) { -#ifdef CONFIG_SMP - int ret; + int ret __maybe_unused; + mm_percpu_wq = alloc_workqueue("mm_percpu_wq", + WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); + +#ifdef CONFIG_SMP ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead", NULL, vmstat_cpu_dead); if (ret < 0) diff --git a/mm/z3fold.c b/mm/z3fold.c index f9492bccfd79..54f63c4a809a 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr) spin_lock(&zhdr->page_lock); } +/* Try to lock a z3fold page */ +static inline int z3fold_page_trylock(struct z3fold_header *zhdr) +{ + return spin_trylock(&zhdr->page_lock); +} + /* Unlock a z3fold page */ static inline void z3fold_page_unlock(struct z3fold_header *zhdr) { @@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, 
spin_lock(&pool->lock); zhdr = list_first_entry_or_null(&pool->unbuddied[i], struct z3fold_header, buddy); - if (!zhdr) { + if (!zhdr || !z3fold_page_trylock(zhdr)) { spin_unlock(&pool->lock); continue; } @@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, spin_unlock(&pool->lock); page = virt_to_page(zhdr); - z3fold_page_lock(zhdr); if (zhdr->first_chunks == 0) { if (zhdr->middle_chunks != 0 && chunks >= zhdr->start_middle) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index b7ee9c34dbd6..d41edd28298b 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -276,7 +276,7 @@ struct zs_pool { struct zspage { struct { unsigned int fullness:FULLNESS_BITS; - unsigned int class:CLASS_BITS; + unsigned int class:CLASS_BITS + 1; unsigned int isolated:ISOLATED_BITS; unsigned int magic:MAGIC_VAL_BITS; }; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index ea71513fca21..90f49a194249 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -119,6 +119,15 @@ static int br_dev_init(struct net_device *dev) return err; } +static void br_dev_uninit(struct net_device *dev) +{ + struct net_bridge *br = netdev_priv(dev); + + br_multicast_uninit_stats(br); + br_vlan_flush(br); + free_percpu(br->stats); +} + static int br_dev_open(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); @@ -332,6 +341,7 @@ static const struct net_device_ops br_netdev_ops = { .ndo_open = br_dev_open, .ndo_stop = br_dev_stop, .ndo_init = br_dev_init, + .ndo_uninit = br_dev_uninit, .ndo_start_xmit = br_dev_xmit, .ndo_get_stats64 = br_get_stats64, .ndo_set_mac_address = br_set_mac_address, @@ -356,14 +366,6 @@ static const struct net_device_ops br_netdev_ops = { .ndo_features_check = passthru_features_check, }; -static void br_dev_free(struct net_device *dev) -{ - struct net_bridge *br = netdev_priv(dev); - - free_percpu(br->stats); - free_netdev(dev); -} - static struct device_type br_type = { .name = "bridge", }; @@ -376,7 +378,7 @@ void br_dev_setup(struct net_device *dev) ether_setup(dev); dev->netdev_ops = &br_netdev_ops; - dev->destructor = br_dev_free; + dev->destructor = free_netdev; dev->ethtool_ops = &br_ethtool_ops; SET_NETDEV_DEVTYPE(dev, &br_type); dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 6eb52d422dd9..6d273ca0bf7c 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -312,7 +312,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head) br_fdb_delete_by_port(br, NULL, 0, 1); - br_vlan_flush(br); br_multicast_dev_del(br); cancel_delayed_work_sync(&br->gc_work); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index b760f2620abf..faa7261a992f 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -2031,8 +2031,6 @@ void br_multicast_dev_del(struct net_bridge *br) out: spin_unlock_bh(&br->multicast_lock); - - free_percpu(br->mcast_stats); } int br_multicast_set_router(struct net_bridge *br, unsigned long val) @@ -2531,6 +2529,11 @@ int br_multicast_init_stats(struct net_bridge *br) return 0; } +void br_multicast_uninit_stats(struct net_bridge *br) +{ + free_percpu(br->mcast_stats); +} + static void mcast_stats_add_dir(u64 *dst, u64 *src) { dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index e6dea5cd6bd6..650986473577 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1165,11 +1165,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device 
*dev, spin_unlock_bh(&br->lock); } - err = br_changelink(dev, tb, data); + err = register_netdevice(dev); if (err) return err; - return register_netdevice(dev); + err = br_changelink(dev, tb, data); + if (err) + unregister_netdevice(dev); + return err; } static size_t br_get_size(const struct net_device *brdev) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 61368186edea..0d177280aa84 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -620,6 +620,7 @@ void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port, void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p, const struct sk_buff *skb, u8 type, u8 dir); int br_multicast_init_stats(struct net_bridge *br); +void br_multicast_uninit_stats(struct net_bridge *br); void br_multicast_get_stats(const struct net_bridge *br, const struct net_bridge_port *p, struct br_mcast_stats *dest); @@ -760,6 +761,10 @@ static inline int br_multicast_init_stats(struct net_bridge *br) return 0; } +static inline void br_multicast_uninit_stats(struct net_bridge *br) +{ +} + static inline int br_multicast_igmp_type(const struct sk_buff *skb) { return 0; diff --git a/net/core/datagram.c b/net/core/datagram.c index 4608aa245410..15ef99469cfe 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -402,7 +402,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, struct iov_iter *to, int len) { int start = skb_headlen(skb); - int i, copy = start - offset; + int i, copy = start - offset, start_off = offset, n; struct sk_buff *frag_iter; trace_skb_copy_datagram_iovec(skb, len); @@ -411,11 +411,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, if (copy > 0) { if (copy > len) copy = len; - if (copy_to_iter(skb->data + offset, copy, to) != copy) + n = copy_to_iter(skb->data + offset, copy, to); + offset += n; + if (n != copy) goto short_copy; if ((len -= copy) == 0) return 0; - offset += copy; } /* Copy paged appendix. Hmm... why does this look so complicated? 
*/ @@ -429,13 +430,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, if ((copy = end - offset) > 0) { if (copy > len) copy = len; - if (copy_page_to_iter(skb_frag_page(frag), + n = copy_page_to_iter(skb_frag_page(frag), frag->page_offset + offset - - start, copy, to) != copy) + start, copy, to); + offset += n; + if (n != copy) goto short_copy; if (!(len -= copy)) return 0; - offset += copy; } start = end; } @@ -467,6 +469,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, */ fault: + iov_iter_revert(to, offset - start_off); return -EFAULT; short_copy: @@ -617,7 +620,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, __wsum *csump) { int start = skb_headlen(skb); - int i, copy = start - offset; + int i, copy = start - offset, start_off = offset; struct sk_buff *frag_iter; int pos = 0; int n; @@ -627,11 +630,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, if (copy > len) copy = len; n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to); + offset += n; if (n != copy) goto fault; if ((len -= copy) == 0) return 0; - offset += copy; pos = copy; } @@ -653,12 +656,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, offset - start, copy, &csum2, to); kunmap(page); + offset += n; if (n != copy) goto fault; *csump = csum_block_add(*csump, csum2, pos); if (!(len -= copy)) return 0; - offset += copy; pos += copy; } start = end; @@ -691,6 +694,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, return 0; fault: + iov_iter_revert(to, offset - start_off); return -EFAULT; } @@ -775,6 +779,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, } return 0; csum_error: + iov_iter_revert(&msg->msg_iter, chunk); return -EINVAL; fault: return -EFAULT; diff --git a/net/core/dev.c b/net/core/dev.c index ef9fe60ee294..5d33e2baab2b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6754,7 +6754,6 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags) return err; } -EXPORT_SYMBOL(dev_change_xdp_fd); /** * dev_new_index - allocate an ifindex diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index fcbdc0c49b0e..038f293c2376 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -462,7 +462,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par) clusterip_config_put(cipinfo->config); - nf_ct_netns_get(par->net, par->family); + nf_ct_netns_put(par->net, par->family); } #ifdef CONFIG_COMPAT diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 94f0b5b50e0d..04843ae77b9e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2322,6 +2322,7 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_init_send_head(sk); memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); + tcp_saved_syn_free(tp); /* Clean up fastopen related fields */ tcp_free_fastopen_req(tp); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 31f2765ef851..a5838858c362 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1935,6 +1935,7 @@ void tcp_enter_loss(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); struct sk_buff *skb; + bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; bool is_reneg; /* is receiver reneging on SACKs? 
*/ bool mark_lost; @@ -1994,15 +1995,18 @@ void tcp_enter_loss(struct sock *sk) tp->high_seq = tp->snd_nxt; tcp_ecn_queue_cwr(tp); - /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO - * if a previous recovery is underway, otherwise it may incorrectly - * call a timeout spurious if some previously retransmitted packets - * are s/acked (sec 3.2). We do not apply that retriction since - * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS - * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO - * on PTMU discovery to avoid sending new data. + /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous + * loss recovery is underway except recurring timeout(s) on + * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing + * + * In theory F-RTO can be used repeatedly during loss recovery. + * In practice this interacts badly with broken middle-boxes that + * falsely raise the receive window, which results in repeated + * timeouts and stop-and-go behavior. */ - tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size; + tp->frto = sysctl_tcp_frto && + (new_recovery || icsk->icsk_retransmits) && + !inet_csk(sk)->icsk_mtup.probe_size; } /* If ACK arrived pointing to a remembered SACK, it means that our diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 0e807a83c1bc..ffc9274b2706 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2995,6 +2995,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) { struct sk_buff *skb; + TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); + /* NOTE: No TCP options attached and we never retransmit this. */ skb = alloc_skb(MAX_TCP_HEADER, priority); if (!skb) { @@ -3010,8 +3012,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) /* Send it off. */ if (tcp_transmit_skb(sk, skb, 0, priority)) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); - - TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); } /* Send a crossed SYN-ACK during socket establishment. 
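The net/core/datagram.c hunks above make each copy step advance offset by the number of bytes copy_to_iter()/copy_page_to_iter() actually transferred, so that on a short copy the total progress (offset - start_off) is known and iov_iter_revert() can rewind the iterator before returning -EFAULT. A minimal sketch of that revert-on-fault pattern, assuming a hypothetical two-chunk caller (copy_two_chunks is illustrative only, not a kernel function):

#include <linux/errno.h>
#include <linux/uio.h>

/*
 * Illustrative only: mirrors the bookkeeping skb_copy_datagram_iter()
 * adopts above. 'done' tracks exactly how far the iterator advanced,
 * so a failure can rewind it and leave the caller's iov_iter untouched.
 */
static int copy_two_chunks(struct iov_iter *to,
			   const void *a, size_t alen,
			   const void *b, size_t blen)
{
	size_t done = 0;
	size_t n;

	n = copy_to_iter(a, alen, to);	/* may transfer fewer than alen bytes */
	done += n;
	if (n != alen)
		goto fault;

	n = copy_to_iter(b, blen, to);
	done += n;
	if (n != blen)
		goto fault;

	return 0;

fault:
	iov_iter_revert(to, done);	/* undo any partial progress */
	return -EFAULT;
}

The same invariant is what lets skb_copy_and_csum_datagram_msg() revert by chunk on a checksum error: because the iterator advanced by exactly the bytes copied, the revert distance is always known.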
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index b330c2abcb24..d6da0fe5acca 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3646,14 +3646,19 @@ restart: INIT_LIST_HEAD(&del_list); list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { struct rt6_info *rt = NULL; + bool keep; addrconf_del_dad_work(ifa); + keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) && + !addr_is_local(&ifa->addr); + if (!keep) + list_move(&ifa->if_list, &del_list); + write_unlock_bh(&idev->lock); spin_lock_bh(&ifa->lock); - if (keep_addr && (ifa->flags & IFA_F_PERMANENT) && - !addr_is_local(&ifa->addr)) { + if (keep) { /* set state to skip the notifier below */ state = INET6_IFADDR_STATE_DEAD; ifa->state = 0; @@ -3665,8 +3670,6 @@ restart: } else { state = ifa->state; ifa->state = INET6_IFADDR_STATE_DEAD; - - list_move(&ifa->if_list, &del_list); } spin_unlock_bh(&ifa->lock); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 861b255a2d51..32ea0f3d868c 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1383,8 +1383,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, } else err = pppol2tp_session_setsockopt(sk, session, optname, val); - err = 0; - end_put_sess: sock_put(sk); end: @@ -1507,8 +1505,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname, err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); sock_put(ps->tunnel_sock); - } else + if (err) + goto end_put_sess; + } else { err = pppol2tp_session_getsockopt(sk, session, optname, &val); + if (err) + goto end_put_sess; + } err = -EFAULT; if (put_user(len, optlen)) diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index cb29e598605f..a5ca5e426bae 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -57,7 +57,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, hlist_del_rcu(&exp->hnode); net->ct.expect_count--; - hlist_del(&exp->lnode); + hlist_del_rcu(&exp->lnode); master_help->expecting[exp->class]--; nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report); @@ -363,7 +363,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) /* two references : one for hash insert, one for the timer */ refcount_add(2, &exp->use); - hlist_add_head(&exp->lnode, &master_help->expectations); + hlist_add_head_rcu(&exp->lnode, &master_help->expectations); master_help->expecting[exp->class]++; hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 6dc44d9b4190..4eeb3418366a 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -158,16 +158,25 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum) { struct nf_conntrack_helper *h; + rcu_read_lock(); + h = __nf_conntrack_helper_find(name, l3num, protonum); #ifdef CONFIG_MODULES if (h == NULL) { - if (request_module("nfct-helper-%s", name) == 0) + rcu_read_unlock(); + if (request_module("nfct-helper-%s", name) == 0) { + rcu_read_lock(); h = __nf_conntrack_helper_find(name, l3num, protonum); + } else { + return h; + } } #endif if (h != NULL && !try_module_get(h->me)) h = NULL; + rcu_read_unlock(); + return h; } EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); @@ -311,38 +320,36 @@ void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n) } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister); +/* Caller should hold the rcu lock */ 
struct nf_ct_helper_expectfn * nf_ct_helper_expectfn_find_by_name(const char *name) { struct nf_ct_helper_expectfn *cur; bool found = false; - rcu_read_lock(); list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { if (!strcmp(cur->name, name)) { found = true; break; } } - rcu_read_unlock(); return found ? cur : NULL; } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name); +/* Caller should hold the rcu lock */ struct nf_ct_helper_expectfn * nf_ct_helper_expectfn_find_by_symbol(const void *symbol) { struct nf_ct_helper_expectfn *cur; bool found = false; - rcu_read_lock(); list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { if (cur->expectfn == symbol) { found = true; break; } } - rcu_read_unlock(); return found ? cur : NULL; } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index ace824ab2e03..aafd25dff8c0 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1490,11 +1490,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct, * treat the second attempt as a no-op instead of returning * an error. */ - if (help && help->helper && - !strcmp(help->helper->name, helpname)) - return 0; - else - return -EBUSY; + err = -EBUSY; + if (help) { + rcu_read_lock(); + helper = rcu_dereference(help->helper); + if (helper && !strcmp(helper->name, helpname)) + err = 0; + rcu_read_unlock(); + } + + return err; } if (!strcmp(helpname, "")) { @@ -1932,9 +1937,9 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl, err = 0; if (test_bit(IPS_EXPECTED_BIT, &ct->status)) - events = IPCT_RELATED; + events = 1 << IPCT_RELATED; else - events = IPCT_NEW; + events = 1 << IPCT_NEW; if (cda[CTA_LABELS] && ctnetlink_attach_labels(ct, cda) == 0) @@ -2679,8 +2684,8 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) last = (struct nf_conntrack_expect *)cb->args[1]; for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { restart: - hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]], - hnode) { + hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]], + hnode) { if (l3proto && exp->tuple.src.l3num != l3proto) continue; @@ -2731,7 +2736,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); last = (struct nf_conntrack_expect *)cb->args[1]; restart: - hlist_for_each_entry(exp, &help->expectations, lnode) { + hlist_for_each_entry_rcu(exp, &help->expectations, lnode) { if (l3proto && exp->tuple.src.l3num != l3proto) continue; if (cb->args[1]) { @@ -2793,6 +2798,12 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl, return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); + /* No expectation linked to this connection tracking. 
*/ + if (!nfct_help(ct)) { + nf_ct_put(ct); + return 0; + } + c.data = ct; err = netlink_dump_start(ctnl, skb, nlh, &c); @@ -3138,23 +3149,27 @@ ctnetlink_create_expect(struct net *net, return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); + rcu_read_lock(); if (cda[CTA_EXPECT_HELP_NAME]) { const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); helper = __nf_conntrack_helper_find(helpname, u3, nf_ct_protonum(ct)); if (helper == NULL) { + rcu_read_unlock(); #ifdef CONFIG_MODULES if (request_module("nfct-helper-%s", helpname) < 0) { err = -EOPNOTSUPP; goto err_ct; } + rcu_read_lock(); helper = __nf_conntrack_helper_find(helpname, u3, nf_ct_protonum(ct)); if (helper) { err = -EAGAIN; - goto err_ct; + goto err_rcu; } + rcu_read_unlock(); #endif err = -EOPNOTSUPP; goto err_ct; @@ -3164,11 +3179,13 @@ ctnetlink_create_expect(struct net *net, exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); if (IS_ERR(exp)) { err = PTR_ERR(exp); - goto err_ct; + goto err_rcu; } err = nf_ct_expect_related_report(exp, portid, report); nf_ct_expect_put(exp); +err_rcu: + rcu_read_unlock(); err_ct: nf_ct_put(ct); return err; diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c index d43869879fcf..86067560a318 100644 --- a/net/netfilter/nf_nat_redirect.c +++ b/net/netfilter/nf_nat_redirect.c @@ -101,11 +101,13 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, rcu_read_lock(); idev = __in6_dev_get(skb->dev); if (idev != NULL) { + read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { newdst = ifa->addr; addr = true; break; } + read_unlock_bh(&idev->lock); } rcu_read_unlock(); diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index a6a4633725bb..52a5079a91a3 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -21,6 +21,7 @@ struct nft_jhash { enum nft_registers sreg:8; enum nft_registers dreg:8; u8 len; + bool autogen_seed:1; u32 modulus; u32 seed; u32 offset; @@ -102,10 +103,12 @@ static int nft_jhash_init(const struct nft_ctx *ctx, if (priv->offset + priv->modulus - 1 < priv->offset) return -EOVERFLOW; - if (tb[NFTA_HASH_SEED]) + if (tb[NFTA_HASH_SEED]) { priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED])); - else + } else { + priv->autogen_seed = true; get_random_bytes(&priv->seed, sizeof(priv->seed)); + } return nft_validate_register_load(priv->sreg, len) && nft_validate_register_store(ctx, priv->dreg, NULL, @@ -151,7 +154,8 @@ static int nft_jhash_dump(struct sk_buff *skb, goto nla_put_failure; if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus))) goto nla_put_failure; - if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed))) + if (!priv->autogen_seed && + nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed))) goto nla_put_failure; if (priv->offset != 0) if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 27241a767f17..c64aca611ac5 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb, tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); tcp_hdrlen = tcph->doff * 4; - if (len < tcp_hdrlen) + if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr)) return -1; if (info->mss == XT_TCPMSS_CLAMP_PMTU) { @@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, if (len > tcp_hdrlen) return 0; + /* tcph->doff has 4 bits, do not wrap it to 0 */ + if (tcp_hdrlen >= 15 * 4) + return 0; + /* * MSS Option not 
found ?! add it.. */ diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 80cb7babeb64..df7f1df00330 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -393,7 +393,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, rcu_read_lock(); indev = __in6_dev_get(skb->dev); - if (indev) + if (indev) { + read_lock_bh(&indev->lock); list_for_each_entry(ifa, &indev->addr_list, if_list) { if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) continue; @@ -401,6 +402,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, laddr = &ifa->addr; break; } + read_unlock_bh(&indev->lock); + } rcu_read_unlock(); return laddr ? laddr : daddr; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 3e64d23e098c..52a2c55f6d9e 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -794,7 +794,7 @@ static void attach_default_qdiscs(struct net_device *dev) } } #ifdef CONFIG_NET_SCHED - if (dev->qdisc) + if (dev->qdisc != &noop_qdisc) qdisc_hash_add(dev->qdisc, false); #endif } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 8e56df8d175d..f16c8d97b7f3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -7174,6 +7174,9 @@ int sctp_inet_listen(struct socket *sock, int backlog) if (sock->state != SS_UNCONNECTED) goto out; + if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED)) + goto out; + /* If backlog is zero, disable listening. */ if (!backlog) { if (sctp_sstate(sk, CLOSED)) diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c index 8571d766331d..d4d77b09412c 100644 --- a/samples/statx/test-statx.c +++ b/samples/statx/test-statx.c @@ -141,8 +141,8 @@ static void dump_statx(struct statx *stx) if (stx->stx_mask & STATX_BTIME) print_time(" Birth: ", &stx->stx_btime); - if (stx->stx_attributes) { - unsigned char bits; + if (stx->stx_attributes_mask) { + unsigned char bits, mbits; int loop, byte; static char attr_representation[64 + 1] = @@ -160,14 +160,18 @@ static void dump_statx(struct statx *stx) printf("Attributes: %016llx (", stx->stx_attributes); for (byte = 64 - 8; byte >= 0; byte -= 8) { bits = stx->stx_attributes >> byte; + mbits = stx->stx_attributes_mask >> byte; for (loop = 7; loop >= 0; loop--) { int bit = byte + loop; - if (bits & 0x80) + if (!(mbits & 0x80)) + putchar('.'); /* Not supported */ + else if (bits & 0x80) putchar(attr_representation[63 - bit]); else - putchar('-'); + putchar('-'); /* Not set */ bits <<= 1; + mbits <<= 1; } if (byte) putchar(' '); diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 273f21fa32b5..7aa57225cbf7 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -130,6 +130,12 @@ static struct arch architectures[] = { .name = "powerpc", .init = powerpc__annotate_init, }, + { + .name = "s390", + .objdump = { + .comment_char = '#', + }, + }, }; static void ins__delete(struct ins_operands *ops) diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c index 93b0aa74ca03..39c2c7d067bb 100644 --- a/tools/power/cpupower/utils/helpers/cpuid.c +++ b/tools/power/cpupower/utils/helpers/cpuid.c @@ -156,6 +156,7 @@ out: */ case 0x2C: /* Westmere EP - Gulftown */ cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; + break; case 0x2A: /* SNB */ case 0x2D: /* SNB Xeon */ case 0x3A: /* IVB */ diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index fedca3285326..ccf2a69365cc 100644 --- a/tools/power/x86/turbostat/turbostat.8 
+++ b/tools/power/x86/turbostat/turbostat.8 @@ -100,6 +100,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. \fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. +\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms. +\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz. \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. \fBPkgWatt\fP Watts consumed by the whole package. \fBCorWatt\fP Watts consumed by the core part of the package. diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 828dccd3f01e..b11294730771 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1142,7 +1142,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, * it is possible for mperf's non-halted cycles + idle states * to exceed TSC's all cycles: show c1 = 0% in that case. */ - if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) + if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak)) old->c1 = 0; else { /* normal case, derive c1 */ @@ -2485,8 +2485,10 @@ int snapshot_gfx_mhz(void) if (fp == NULL) fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); - else + else { rewind(fp); + fflush(fp); + } retval = fscanf(fp, "%d", &gfx_cur_mhz); if (retval != 1) @@ -3111,7 +3113,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " - "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n", + "(high %d guar %d eff %d low %d)\n", cpu, msr, (unsigned int)HWP_HIGHEST_PERF(msr), (unsigned int)HWP_GUARANTEED_PERF(msr), @@ -3122,7 +3124,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " - "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n", + "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n", cpu, msr, (unsigned int)(((msr) >> 0) & 0xff), (unsigned int)(((msr) >> 8) & 0xff), @@ -3136,7 +3138,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx " - "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n", + "(min %d max %d des %d epp 0x%x window 0x%x)\n", cpu, msr, (unsigned int)(((msr) >> 0) & 0xff), (unsigned int)(((msr) >> 8) & 0xff), @@ -3353,17 +3355,19 @@ void rapl_probe(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ - do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; + do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) {
BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); BIC_PRESENT(BIC_RAM_J); + BIC_PRESENT(BIC_GFX_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_CorWatt); BIC_PRESENT(BIC_RAMWatt); + BIC_PRESENT(BIC_GFXWatt); } break; case INTEL_FAM6_HASWELL_X: /* HSX */ @@ -3478,7 +3482,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model) int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; - unsigned int dts; + unsigned int dts, dts2; int cpu; if (!(do_dts || do_ptm)) @@ -3503,7 +3507,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", cpu, msr, tcc_activation_temp - dts); -#ifdef THERM_DEBUG if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) return 0; @@ -3511,11 +3514,10 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p dts2 = (msr >> 8) & 0x7F; fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); -#endif } - if (do_dts) { + if (do_dts && debug) { unsigned int resolution; if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) @@ -3526,7 +3528,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", cpu, msr, tcc_activation_temp - dts, resolution); -#ifdef THERM_DEBUG if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) return 0; @@ -3534,7 +3535,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p dts2 = (msr >> 8) & 0x7F; fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); -#endif } return 0; @@ -4578,7 +4578,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(outf, "turbostat version 17.02.24" + fprintf(outf, "turbostat version 17.04.12" " - Len Brown <lenb@kernel.org>\n"); } diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index 1c5d0575802e..bf13fc2297aa 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -34,34 +34,34 @@ endif all: $(SUB_DIRS) $(SUB_DIRS): - BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all + BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all include ../lib.mk override define RUN_TESTS @for TARGET in $(SUB_DIRS); do \ - BUILD_TARGET=$$OUTPUT/$$TARGET; \ + BUILD_TARGET=$(OUTPUT)/$$TARGET; \ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ done; endef override define INSTALL_RULE @for TARGET in $(SUB_DIRS); do \ - BUILD_TARGET=$$OUTPUT/$$TARGET; \ + BUILD_TARGET=$(OUTPUT)/$$TARGET; \ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\ done; endef override define EMIT_TESTS @for TARGET in $(SUB_DIRS); do \ - BUILD_TARGET=$$OUTPUT/$$TARGET; \ + BUILD_TARGET=$(OUTPUT)/$$TARGET; \ $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\ done; endef clean: @for TARGET in $(SUB_DIRS); do \ - BUILD_TARGET=$$OUTPUT/$$TARGET; \ + BUILD_TARGET=$(OUTPUT)/$$TARGET; \ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \ done; rm -f tags diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 276139a24e6f..702f8108608d 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -392,6 +392,25 @@ static irqreturn_t 
vgic_maintenance_handler(int irq, void *data) } /** + * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware + * + * For a specific CPU, initialize the GIC VE hardware. + */ +void kvm_vgic_init_cpu_hardware(void) +{ + BUG_ON(preemptible()); + + /* + * We want to make sure the list registers start out clear so that we + * only have to program the used registers. + */ + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_init_lrs(); + else + kvm_call_hyp(__vgic_v3_init_lrs); +} + +/** * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable * according to the host GIC model. Accordingly calls either * vgic_v2/v3_probe which registers the KVM_DEVICE that can be diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index a3ad7ff95c9b..0a4283ed9aa7 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu, val = vmcr.ctlr; break; case GIC_CPU_PRIMASK: - val = vmcr.pmr; + /* + * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the + * PMR field as GICH_VMCR.VMPriMask rather than + * GICC_PMR.Priority, so we expose the upper five bits of + * priority mask to userspace using the lower bits in the + * unsigned long. + */ + val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >> + GICV_PMR_PRIORITY_SHIFT; break; case GIC_CPU_BINPOINT: val = vmcr.bpr; @@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu, vmcr.ctlr = val; break; case GIC_CPU_PRIMASK: - vmcr.pmr = val; + /* + * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the + * PMR field as GICH_VMCR.VMPriMask rather than + * GICC_PMR.Priority, so we expose the upper five bits of + * priority mask to userspace using the lower bits in the + * unsigned long.
+ */ + vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) & + GICV_PMR_PRIORITY_MASK; break; case GIC_CPU_BINPOINT: vmcr.bpr = val; diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index b834ecdf3225..b637d9c7afe3 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val) return (unsigned long *)val; } +static inline void vgic_v2_write_lr(int lr, u32 val) +{ + void __iomem *base = kvm_vgic_global_state.vctrl_base; + + writel_relaxed(val, base + GICH_LR0 + (lr * 4)); +} + +void vgic_v2_init_lrs(void) +{ + int i; + + for (i = 0; i < kvm_vgic_global_state.nr_lr; i++) + vgic_v2_write_lr(i, 0); +} + void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) { struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; @@ -191,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) GICH_VMCR_ALIAS_BINPOINT_MASK; vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK; - vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & - GICH_VMCR_PRIMASK_MASK; + vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) << + GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK; vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; } @@ -207,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) GICH_VMCR_ALIAS_BINPOINT_SHIFT; vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT; - vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> - GICH_VMCR_PRIMASK_SHIFT; + vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >> + GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT; } void vgic_v2_enable(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index db28f7cadab2..6cf557e9f718 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq) return irq->pending_latch || irq->line_level; } +/* + * This struct provides an intermediate representation of the fields contained + * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC + * state to userspace can generate either GICv2 or GICv3 CPU interface + * registers regardless of the hardware-backed GIC used. + */ struct vgic_vmcr { u32 ctlr; u32 abpr; u32 bpr; - u32 pmr; + u32 pmr; /* Priority mask field in the GICC_PMR and + * ICC_PMR_EL1 priority field format */ /* Below member variable are valid only for GICv3 */ u32 grpen0; u32 grpen1; @@ -130,6 +137,8 @@ int vgic_v2_map_resources(struct kvm *kvm); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type); +void vgic_v2_init_lrs(void); + static inline void vgic_get_irq_kref(struct vgic_irq *irq) { if (irq->intid < VGIC_MIN_LPI)
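The vgic changes above keep vgic_vmcr.pmr in the 8-bit GICC_PMR/ICC_PMR_EL1 priority format and convert at the boundaries, since GICH_VMCR.VMPriMask holds only the upper five priority bits and the KVM_DEV_TYPE_ARM_VGIC_V2 userspace ABI presents those five bits in the low bits of the word. A minimal sketch of the two conversions, assuming the GICV_PMR_PRIORITY_* definitions (shift of 3) from include/linux/irqchip/arm-gic.h; the helper names are illustrative, not kernel API:

#include <linux/types.h>

#define GICV_PMR_PRIORITY_SHIFT	3	/* assumed, per arm-gic.h */
#define GICV_PMR_PRIORITY_MASK	(0x1f << GICV_PMR_PRIORITY_SHIFT)

/* 8-bit GICC_PMR.Priority -> 5-bit VMPriMask-style value in bits [4:0] */
static inline u32 pmr_to_vgic_uapi(u32 pmr)
{
	return (pmr & GICV_PMR_PRIORITY_MASK) >> GICV_PMR_PRIORITY_SHIFT;
}

/* 5-bit userspace value in bits [4:0] -> 8-bit GICC_PMR.Priority */
static inline u32 vgic_uapi_to_pmr(u32 val)
{
	return (val << GICV_PMR_PRIORITY_SHIFT) & GICV_PMR_PRIORITY_MASK;
}

With these definitions a userspace value of 0x1f round-trips to a priority mask of 0xf8; that is, the value must be shifted left by three places to recover the actual mask level.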