-rw-r--r--  CREDITS | 8
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst | 9
-rw-r--r--  Documentation/vm/hmm.rst | 8
-rw-r--r--  MAINTAINERS | 24
-rw-r--r--  arch/arm/boot/compressed/decompress.c | 1
-rw-r--r--  arch/mips/kvm/mips.c | 3
-rw-r--r--  arch/parisc/configs/c8000_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 2
-rw-r--r--  arch/powerpc/kernel/kexec_elf_64.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 36
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 48
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 14
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c | 55
-rw-r--r--  arch/powerpc/kvm/book3s_xive.h | 1
-rw-r--r--  arch/powerpc/kvm/book3s_xive_native.c | 100
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 3
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 6
-rw-r--r--  arch/powerpc/perf/power8-pmu.c | 3
-rw-r--r--  arch/powerpc/perf/power9-pmu.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-imc.c | 4
-rw-r--r--  arch/s390/crypto/aes_s390.c | 156
-rw-r--r--  arch/s390/crypto/des_s390.c | 7
-rw-r--r--  arch/s390/include/asm/ap.h | 4
-rw-r--r--  arch/s390/include/asm/cpacf.h | 4
-rw-r--r--  arch/s390/include/asm/pci_clp.h | 25
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 1
-rw-r--r--  arch/s390/mm/fault.c | 4
-rw-r--r--  arch/s390/pci/pci.c | 5
-rw-r--r--  arch/s390/pci/pci_clp.c | 6
-rw-r--r--  arch/x86/kernel/ima_arch.c | 5
-rw-r--r--  arch/x86/kvm/x86.c | 3
-rw-r--r--  block/blk-core.c | 81
-rw-r--r--  block/blk-mq-cpumap.c | 10
-rw-r--r--  block/blk-mq-pci.c | 2
-rw-r--r--  block/blk-mq-rdma.c | 4
-rw-r--r--  block/blk-mq-virtio.c | 4
-rw-r--r--  block/blk-mq.c | 5
-rw-r--r--  block/blk-rq-qos.c | 7
-rw-r--r--  block/blk-sysfs.c | 47
-rw-r--r--  block/blk-throttle.c | 2
-rw-r--r--  block/blk.h | 1
-rw-r--r--  block/bsg-lib.c | 1
-rw-r--r--  block/genhd.c | 4
-rw-r--r--  block/partitions/ldm.c | 2
-rw-r--r--  drivers/acpi/device_pm.c | 4
-rw-r--r--  drivers/acpi/sleep.c | 39
-rw-r--r--  drivers/block/loop.c | 18
-rw-r--r--  drivers/i2c/busses/i2c-mlxcpld.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-synquacer.c | 2
-rw-r--r--  drivers/i2c/i2c-dev.c | 1
-rw-r--r--  drivers/iommu/intel-iommu.c | 3
-rw-r--r--  drivers/leds/led-core.c | 5
-rw-r--r--  drivers/leds/trigger/ledtrig-timer.c | 5
-rw-r--r--  drivers/pci/pci-acpi.c | 3
-rw-r--r--  drivers/pci/pci-driver.c | 17
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 26
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 3
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 17
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 9
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 55
-rw-r--r--  drivers/s390/scsi/zfcp_unit.c | 8
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 6
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 3
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 2
-rw-r--r--  drivers/thermal/qcom/tsens-common.c | 14
-rw-r--r--  drivers/thermal/qcom/tsens-v0_1.c | 1
-rw-r--r--  drivers/thermal/qcom/tsens-v2.c | 1
-rw-r--r--  drivers/thermal/qcom/tsens.c | 5
-rw-r--r--  drivers/thermal/qcom/tsens.h | 1
-rw-r--r--  drivers/xen/pvcalls-front.c | 4
-rw-r--r--  drivers/xen/xenbus/xenbus.h | 3
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 18
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 7
-rw-r--r--  fs/block_dev.c | 25
-rw-r--r--  fs/cifs/dfs_cache.c | 4
-rw-r--r--  fs/cifs/file.c | 4
-rw-r--r--  fs/cifs/smb2pdu.c | 9
-rw-r--r--  fs/io_uring.c | 2
-rw-r--r--  fs/lockd/xdr.c | 4
-rw-r--r--  fs/lockd/xdr4.c | 4
-rw-r--r--  fs/ocfs2/filecheck.c | 1
-rw-r--r--  include/linux/cgroup-defs.h | 5
-rw-r--r--  include/linux/generic-radix-tree.h | 2
-rw-r--r--  include/linux/list_lru.h | 1
-rw-r--r--  include/linux/memcontrol.h | 10
-rw-r--r--  include/linux/pci.h | 1
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  kernel/cgroup/cgroup.c | 16
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  kernel/power/hibernate.c | 4
-rw-r--r--  kernel/signal.c | 2
-rw-r--r--  kernel/sys.c | 62
-rw-r--r--  lib/sort.c | 15
-rw-r--r--  mm/compaction.c | 2
-rw-r--r--  mm/gup.c | 15
-rw-r--r--  mm/kasan/common.c | 2
-rw-r--r--  mm/list_lru.c | 8
-rw-r--r--  mm/util.c | 4
-rw-r--r--  mm/vmalloc.c | 2
-rw-r--r--  mm/z3fold.c | 11
-rw-r--r--  scripts/gcc-plugins/gcc-common.h | 4
-rw-r--r--  scripts/gdb/linux/constants.py.in | 3
-rwxr-xr-x  scripts/spdxcheck.py | 7
-rw-r--r--  security/integrity/evm/evm_crypto.c | 3
-rw-r--r--  security/integrity/ima/ima_policy.c | 28
-rw-r--r--  virt/kvm/arm/arm.c | 3
-rw-r--r--  virt/kvm/kvm_main.c | 4
111 files changed, 813 insertions(+), 473 deletions(-)
diff --git a/CREDITS b/CREDITS
index 8e0342620a06..681335f42491 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3364,6 +3364,14 @@ S: Braunschweiger Strasse 79
S: 31134 Hildesheim
S: Germany
+N: Martin Schwidefsky
+D: Martin was the most significant contributor to the initial s390
+D: port of the Linux Kernel and later the maintainer of the s390
+D: architecture backend for almost two decades.
+D: He passed away in 2019, and will be greatly missed.
+S: Germany
+W: https://lwn.net/Articles/789028/
+
N: Marcel Selhorst
E: tpmdd@selhorst.net
D: TPM driver
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 88e746074252..cf88c1f98270 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -177,6 +177,15 @@ cgroup v2 currently supports the following mount options.
ignored on non-init namespace mounts. Please refer to the
Delegation section for details.
+ memory_localevents
+
+ Only populate memory.events with data for the current cgroup,
+ and not any subtrees. This is legacy behaviour, the default
+ behaviour without this option is to include subtree counts.
+ This option is system wide and can only be set on mount or
+ modified through remount from the init namespace. The mount
+ option is ignored on non-init namespace mounts.
+
Organizing Processes and Threads
--------------------------------
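
[A usage sketch, not part of the patch: the new memory_localevents flag is passed like any other cgroup2 mount option. A minimal C illustration via mount(2), with a hypothetical /sys/fs/cgroup mount point::

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Request the legacy, non-hierarchical memory.events behaviour. */
            if (mount("none", "/sys/fs/cgroup", "cgroup2", 0,
                      "memory_localevents"))
                    perror("mount cgroup2");
            return 0;
    }
]
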
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index ec1efa32af3c..7cdf7282e022 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -288,15 +288,17 @@ For instance if the device flags for device entries are:
WRITE (1 << 62)
Now let say that device driver wants to fault with at least read a range then
-it does set:
- range->default_flags = (1 << 63)
+it does set::
+
+ range->default_flags = (1 << 63);
range->pfn_flags_mask = 0;
and calls hmm_range_fault() as described above. This will fill fault all page
in the range with at least read permission.
Now let say driver wants to do the same except for one page in the range for
-which its want to have write. Now driver set:
+which its want to have write. Now driver set::
+
range->default_flags = (1 << 63);
range->pfn_flags_mask = (1 << 62);
range->pfns[index_of_write] = (1 << 62);
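
[Pulling the two snippets together into one hedged sketch, assuming the two-argument hmm_range_fault(range, block) prototype of this series and the example device flag bits above (VALID = 1 << 63, WRITE = 1 << 62)::

    /* Fault the whole range with at least read permission, and
     * additionally require write permission on one page. */
    range->default_flags = (1ULL << 63);
    range->pfn_flags_mask = (1ULL << 62);
    range->pfns[index_of_write] = (1ULL << 62);
    ret = hmm_range_fault(range, true); /* true: allow blocking on faults */
]
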
diff --git a/MAINTAINERS b/MAINTAINERS
index 429c6c624861..a6954776a37e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -696,6 +696,7 @@ F: drivers/input/mouse/alps.*
ALTERA I2C CONTROLLER DRIVER
M: Thor Thayer <thor.thayer@linux.intel.com>
S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-altera.txt
F: drivers/i2c/busses/i2c-altera.c
ALTERA MAILBOX DRIVER
@@ -1174,6 +1175,7 @@ S: Maintained
F: Documentation/devicetree/bindings/arm/arm-boards
F: Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
F: Documentation/devicetree/bindings/clock/arm-integrator.txt
+F: Documentation/devicetree/bindings/i2c/i2c-versatile.txt
F: Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
F: Documentation/devicetree/bindings/mtd/arm-versatile.txt
F: arch/arm/mach-integrator/
@@ -1781,6 +1783,7 @@ ARM/LPC18XX ARCHITECTURE
M: Vladimir Zapolskiy <vz@mleia.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-lpc2k.txt
F: arch/arm/boot/dts/lpc43*
F: drivers/i2c/busses/i2c-lpc2k.c
F: drivers/memory/pl172.c
@@ -1794,6 +1797,7 @@ M: Sylvain Lemieux <slemieux.tyco@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/vzapolskiy/linux-lpc32xx.git
S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-pnx.txt
F: arch/arm/boot/dts/lpc32*
F: arch/arm/mach-lpc32xx/
F: drivers/i2c/busses/i2c-pnx.c
@@ -1918,6 +1922,8 @@ ARM/NOMADIK/U300/Ux500 ARCHITECTURES
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-nomadik.txt
+F: Documentation/devicetree/bindings/i2c/i2c-stu300.txt
F: arch/arm/mach-nomadik/
F: arch/arm/mach-u300/
F: arch/arm/mach-ux500/
@@ -2140,6 +2146,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-rockchip@lists.infradead.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
F: arch/arm/boot/dts/rk3*
F: arch/arm/boot/dts/rv1108*
F: arch/arm/mach-rockchip/
@@ -2275,6 +2282,7 @@ M: Patrice Chotard <patrice.chotard@st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.stlinux.com
S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-st.txt
F: arch/arm/mach-sti/
F: arch/arm/boot/dts/sti*
F: drivers/char/hw_random/st-rng.c
@@ -2466,6 +2474,7 @@ ARM/VT8500 ARM ARCHITECTURE
M: Tony Prisk <linux@prisktech.co.nz>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt
F: arch/arm/mach-vt8500/
F: drivers/clocksource/timer-vt8500.c
F: drivers/i2c/busses/i2c-wmt.c
@@ -2531,6 +2540,8 @@ F: drivers/cpuidle/cpuidle-zynq.c
F: drivers/block/xsysace.c
N: zynq
N: xilinx
+F: Documentation/devicetree/bindings/i2c/i2c-cadence.txt
+F: Documentation/devicetree/bindings/i2c/i2c-xiic.txt
F: drivers/clocksource/timer-cadence-ttc.c
F: drivers/i2c/busses/i2c-cadence.c
F: drivers/mmc/host/sdhci-of-arasan.c
@@ -3049,8 +3060,9 @@ S: Maintained
F: arch/riscv/net/
BPF JIT for S390
-M: Martin Schwidefsky <schwidefsky@de.ibm.com>
M: Heiko Carstens <heiko.carstens@de.ibm.com>
+M: Vasily Gorbik <gor@linux.ibm.com>
+M: Christian Borntraeger <borntraeger@de.ibm.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
@@ -7341,6 +7353,7 @@ I2C MV64XXX MARVELL AND ALLWINNER DRIVER
M: Gregory CLEMENT <gregory.clement@bootlin.com>
L: linux-i2c@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
F: drivers/i2c/busses/i2c-mv64xxx.c
I2C OVER PARALLEL PORT
@@ -11724,6 +11737,7 @@ M: Peter Korsgaard <peter@korsgaard.com>
M: Andrew Lunn <andrew@lunn.ch>
L: linux-i2c@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-ocores.txt
F: Documentation/i2c/busses/i2c-ocores
F: drivers/i2c/busses/i2c-ocores.c
F: include/linux/platform_data/i2c-ocores.h
@@ -13363,6 +13377,7 @@ F: drivers/clk/renesas/
RENESAS EMEV2 I2C DRIVER
M: Wolfram Sang <wsa+renesas@sang-engineering.com>
S: Supported
+F: Documentation/devicetree/bindings/i2c/i2c-emev2.txt
F: drivers/i2c/busses/i2c-emev2.c
RENESAS ETHERNET DRIVERS
@@ -13384,6 +13399,8 @@ F: drivers/iio/adc/rcar-gyroadc.c
RENESAS R-CAR I2C DRIVERS
M: Wolfram Sang <wsa+renesas@sang-engineering.com>
S: Supported
+F: Documentation/devicetree/bindings/i2c/i2c-rcar.txt
+F: Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
F: drivers/i2c/busses/i2c-rcar.c
F: drivers/i2c/busses/i2c-sh_mobile.c
@@ -13614,8 +13631,9 @@ S: Maintained
F: drivers/video/fbdev/savage/
S390
-M: Martin Schwidefsky <schwidefsky@de.ibm.com>
M: Heiko Carstens <heiko.carstens@de.ibm.com>
+M: Vasily Gorbik <gor@linux.ibm.com>
+M: Christian Borntraeger <borntraeger@de.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
@@ -15672,6 +15690,7 @@ R: Bartosz Golaszewski <bgolaszewski@baylibre.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
S: Supported
+F: Documentation/devicetree/bindings/i2c/i2c-davinci.txt
F: arch/arm/mach-davinci/
F: drivers/i2c/busses/i2c-davinci.c
F: arch/arm/boot/dts/da850*
@@ -17375,6 +17394,7 @@ M: Jan Glauber <jglauber@cavium.com>
L: linux-i2c@vger.kernel.org
W: http://www.cavium.com
S: Supported
+F: Documentation/devicetree/bindings/i2c/i2c-xlp9xx.txt
F: drivers/i2c/busses/i2c-xlp9xx.c
XRA1403 GPIO EXPANDER
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index c16c1829a5e4..aa075d8372ea 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -32,6 +32,7 @@
extern char * strstr(const char * s1, const char *s2);
extern size_t strlen(const char *s);
extern int memcmp(const void *cs, const void *ct, size_t count);
+extern char * strchrnul(const char *, int);
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 6d0517ac18e5..0369f26ab96d 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1122,6 +1122,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ case KVM_CAP_MAX_VCPU_ID:
+ r = KVM_MAX_VCPU_ID;
+ break;
case KVM_CAP_MIPS_FPU:
/* We don't handle systems with inconsistent cpu_has_fpu */
r = !!raw_cpu_has_fpu;
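
[The same KVM_CAP_MAX_VCPU_ID case is added for powerpc, s390 and x86 further down in this series. Userspace queries it through the existing KVM_CHECK_EXTENSION ioctl; a minimal sketch, where vm_fd is assumed to be an already-open VM file descriptor::

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static void print_max_vcpu_id(int vm_fd)
    {
            /* KVM_CHECK_EXTENSION returns the capability value; for
             * KVM_CAP_MAX_VCPU_ID that is the largest vcpu id accepted
             * by KVM_CREATE_VCPU. */
            int max_id = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPU_ID);

            printf("max vcpu id: %d\n", max_id);
    }
]
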
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 088ab948a5ca..900b00084953 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -225,7 +225,6 @@ CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_SLAB_LEAK=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_PANIC_ON_OOPS=y
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 013c76a0a03e..d10df677c452 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -309,6 +309,7 @@ struct kvm_arch {
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
struct list_head rtas_tokens;
+ struct mutex rtas_token_lock;
DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC
@@ -325,6 +326,7 @@ struct kvm_arch {
#endif
struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */
u64 l1_ptcr;
int max_nested_lpid;
struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
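
[The "nests inside vcpu mutexes" note documents the lock ordering for the new mmu_setup_lock. A hedged illustration of that ordering, with callers and error handling elided and names as in the patch::

    /* Per the comment above: take vcpu->mutex first, then
     * kvm->arch.mmu_setup_lock, and release in reverse order. */
    mutex_lock(&vcpu->mutex);
    mutex_lock(&vcpu->kvm->arch.mmu_setup_lock);
    /* ... MMU setup work ... */
    mutex_unlock(&vcpu->kvm->arch.mmu_setup_lock);
    mutex_unlock(&vcpu->mutex);
]
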
diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kernel/kexec_elf_64.c
index ba4f18a43ee8..52a29fc73730 100644
--- a/arch/powerpc/kernel/kexec_elf_64.c
+++ b/arch/powerpc/kernel/kexec_elf_64.c
@@ -547,6 +547,7 @@ static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr,
kbuf.memsz = phdr->p_memsz;
kbuf.buf_align = phdr->p_align;
kbuf.buf_min = phdr->p_paddr + base;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
goto out;
@@ -581,7 +582,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
struct kexec_buf kbuf = { .image = image, .buf_min = 0,
.buf_max = ppc64_rma_size };
struct kexec_buf pbuf = { .image = image, .buf_min = 0,
- .buf_max = ppc64_rma_size, .top_down = true };
+ .buf_max = ppc64_rma_size, .top_down = true,
+ .mem = KEXEC_BUF_MEM_UNKNOWN };
ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
if (ret)
@@ -606,6 +608,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
kbuf.bufsz = kbuf.memsz = initrd_len;
kbuf.buf_align = PAGE_SIZE;
kbuf.top_down = false;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
goto out;
@@ -638,6 +641,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
kbuf.bufsz = kbuf.memsz = fdt_size;
kbuf.buf_align = PAGE_SIZE;
kbuf.top_down = true;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
goto out;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 61a212d0daf0..ac5664845aca 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -902,6 +902,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
#ifdef CONFIG_PPC64
INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+ mutex_init(&kvm->arch.rtas_token_lock);
#endif
return kvm->arch.kvm_ops->init_vm(kvm);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ab3d484c5e2e..51971311e6c9 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -63,7 +63,7 @@ struct kvm_resize_hpt {
struct work_struct work;
u32 order;
- /* These fields protected by kvm->lock */
+ /* These fields protected by kvm->arch.mmu_setup_lock */
/* Possible values and their usage:
* <0 an error occurred during allocation,
@@ -73,7 +73,7 @@ struct kvm_resize_hpt {
int error;
/* Private to the work thread, until error != -EBUSY,
- * then protected by kvm->lock.
+ * then protected by kvm->arch.mmu_setup_lock.
*/
struct kvm_hpt_info hpt;
};
@@ -139,7 +139,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
long err = -EBUSY;
struct kvm_hpt_info info;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.mmu_setup_lock);
if (kvm->arch.mmu_ready) {
kvm->arch.mmu_ready = 0;
/* order mmu_ready vs. vcpus_running */
@@ -183,7 +183,7 @@ out:
/* Ensure that each vcpu will flush its TLB on next entry. */
cpumask_setall(&kvm->arch.need_tlb_flush);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
return err;
}
@@ -1447,7 +1447,7 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
{
- if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+ if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
return;
if (!resize)
@@ -1474,14 +1474,14 @@ static void resize_hpt_prepare_work(struct work_struct *work)
if (WARN_ON(resize->error != -EBUSY))
return;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.mmu_setup_lock);
/* Request is still current? */
if (kvm->arch.resize_hpt == resize) {
/* We may request large allocations here:
- * do not sleep with kvm->lock held for a while.
+ * do not sleep with kvm->arch.mmu_setup_lock held for a while.
*/
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
resize->order);
@@ -1494,9 +1494,9 @@ static void resize_hpt_prepare_work(struct work_struct *work)
if (WARN_ON(err == -EBUSY))
err = -EINPROGRESS;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.mmu_setup_lock);
/* It is possible that kvm->arch.resize_hpt != resize
- * after we grab kvm->lock again.
+ * after we grab kvm->arch.mmu_setup_lock again.
*/
}
@@ -1505,7 +1505,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
if (kvm->arch.resize_hpt != resize)
resize_hpt_release(kvm, resize);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
}
long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
@@ -1522,7 +1522,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
if (shift && ((shift < 18) || (shift > 46)))
return -EINVAL;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.mmu_setup_lock);
resize = kvm->arch.resize_hpt;
@@ -1565,7 +1565,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
ret = 100; /* estimated time in ms */
out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
return ret;
}
@@ -1588,7 +1588,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
if (shift && ((shift < 18) || (shift > 46)))
return -EINVAL;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.mmu_setup_lock);
resize = kvm->arch.resize_hpt;
@@ -1625,7 +1625,7 @@ out:
smp_mb();
out_no_hpt:
resize_hpt_release(kvm, resize);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
return ret;
}
@@ -1868,7 +1868,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
return -EINVAL;
/* lock out vcpus from running while we're doing this */
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.mmu_setup_lock);
mmu_ready = kvm->arch.mmu_ready;
if (mmu_ready) {
kvm->arch.mmu_ready = 0; /* temporarily */
@@ -1876,7 +1876,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
smp_mb();
if (atomic_read(&kvm->arch.vcpus_running)) {
kvm->arch.mmu_ready = 1;
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
return -EBUSY;
}
}
@@ -1963,7 +1963,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
/* Order HPTE updates vs. mmu_ready */
smp_wmb();
kvm->arch.mmu_ready = mmu_ready;
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
if (err)
return err;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index d5fc624e0655..5e840113eda4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -446,12 +446,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
- struct kvm_vcpu *ret;
-
- mutex_lock(&kvm->lock);
- ret = kvm_get_vcpu_by_id(kvm, id);
- mutex_unlock(&kvm->lock);
- return ret;
+ return kvm_get_vcpu_by_id(kvm, id);
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1583,7 +1578,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
- mutex_lock(&kvm->lock);
spin_lock(&vc->lock);
/*
* If ILE (interrupt little-endian) has changed, update the
@@ -1623,7 +1617,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask &= 0xFFFFFFFF;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
- mutex_unlock(&kvm->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
@@ -2338,11 +2331,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
pr_devel("KVM: collision on id %u", id);
vcore = NULL;
} else if (!vcore) {
+ /*
+ * Take mmu_setup_lock for mutual exclusion
+ * with kvmppc_update_lpcr().
+ */
err = -ENOMEM;
vcore = kvmppc_vcore_create(kvm,
id & ~(kvm->arch.smt_mode - 1));
+ mutex_lock(&kvm->arch.mmu_setup_lock);
kvm->arch.vcores[core] = vcore;
kvm->arch.online_vcores++;
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
}
}
mutex_unlock(&kvm->lock);
@@ -3663,6 +3662,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vc->in_guest = 0;
mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
+ mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
kvmhv_load_host_pmu();
@@ -3859,7 +3859,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
int r = 0;
struct kvm *kvm = vcpu->kvm;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.mmu_setup_lock);
if (!kvm->arch.mmu_ready) {
if (!kvm_is_radix(kvm))
r = kvmppc_hv_setup_htab_rma(vcpu);
@@ -3869,7 +3869,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
kvm->arch.mmu_ready = 1;
}
}
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
return r;
}
@@ -4091,16 +4091,20 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
}
- trace_hardirqs_on();
guest_enter_irqoff();
srcu_idx = srcu_read_lock(&kvm->srcu);
this_cpu_disable_ftrace();
+ /* Tell lockdep that we're about to enable interrupts */
+ trace_hardirqs_on();
+
trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
vcpu->arch.trap = trap;
+ trace_hardirqs_off();
+
this_cpu_enable_ftrace();
srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -4110,7 +4114,6 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
isync();
}
- trace_hardirqs_off();
set_irq_happened(trap);
kvmppc_set_host_core(pcpu);
@@ -4478,7 +4481,8 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
/*
* Update LPCR values in kvm->arch and in vcores.
- * Caller must hold kvm->lock.
+ * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
+ * of kvm->arch.lpcr update).
*/
void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
{
@@ -4530,7 +4534,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm)
/*
* Set up HPT (hashed page table) and RMA (real-mode area).
- * Must be called with kvm->lock held.
+ * Must be called with kvm->arch.mmu_setup_lock held.
*/
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
@@ -4618,7 +4622,10 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
goto out_srcu;
}
-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
{
if (nesting_enabled(kvm))
@@ -4635,7 +4642,10 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
return 0;
}
-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
{
int err;
@@ -4740,6 +4750,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
char buf[32];
int ret;
+ mutex_init(&kvm->arch.mmu_setup_lock);
+
/* Allocate the guest's logical partition ID */
lpid = kvmppc_alloc_lpid();
@@ -5265,7 +5277,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
if (kvmhv_on_pseries() && !radix)
return -EINVAL;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.mmu_setup_lock);
if (radix != kvm_is_radix(kvm)) {
if (kvm->arch.mmu_ready) {
kvm->arch.mmu_ready = 0;
@@ -5293,7 +5305,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
err = 0;
out_unlock:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.mmu_setup_lock);
return err;
}
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 4e178c4c1ea5..b7ae3dfbf00e 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
{
struct rtas_token_definition *d, *tmp;
- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.rtas_token_lock);
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
if (rtas_name_matches(d->handler->name, name)) {
@@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
bool found;
int i;
- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.rtas_token_lock);
list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
if (d->token == token)
@@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
if (copy_from_user(&args, argp, sizeof(args)))
return -EFAULT;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.rtas_token_lock);
if (args.token)
rc = rtas_token_define(kvm, args.name, args.token);
else
rc = rtas_token_undefine(kvm, args.name);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.rtas_token_lock);
return rc;
}
@@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
orig_rets = args.rets;
args.rets = &args.args[be32_to_cpu(args.nargs)];
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
rc = -ENOENT;
list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
@@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
}
}
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
if (rc == 0) {
args.rets = orig_rets;
@@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
{
struct rtas_token_definition *d, *tmp;
- lockdep_assert_held(&kvm->lock);
-
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
list_del(&d->list);
kfree(d);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 4953957333b7..922fd62bcd2a 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -271,14 +271,14 @@ static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
return rc;
}
-/* Called with kvm_lock held */
+/* Called with xive->lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
struct kvmppc_xive *xive = kvm->arch.xive;
struct kvm_vcpu *vcpu;
int i, rc;
- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&xive->lock);
/* Already provisioned ? */
if (xive->qmap & (1 << prio))
@@ -621,9 +621,12 @@ int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
irq, server, priority);
/* First, check provisioning of queues */
- if (priority != MASKED)
+ if (priority != MASKED) {
+ mutex_lock(&xive->lock);
rc = xive_check_provisioning(xive->kvm,
xive_prio_from_guest(priority));
+ mutex_unlock(&xive->lock);
+ }
if (rc) {
pr_devel(" provisioning failure %d !\n", rc);
return rc;
@@ -1199,7 +1202,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
return -ENOMEM;
/* We need to synchronize with queue provisioning */
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&xive->lock);
vcpu->arch.xive_vcpu = xc;
xc->xive = xive;
xc->vcpu = vcpu;
@@ -1283,7 +1286,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
bail:
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&xive->lock);
if (r) {
kvmppc_xive_cleanup_vcpu(vcpu);
return r;
@@ -1527,13 +1530,12 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
struct kvmppc_xive *xive, int irq)
{
- struct kvm *kvm = xive->kvm;
struct kvmppc_xive_src_block *sb;
int i, bid;
bid = irq >> KVMPPC_XICS_ICS_SHIFT;
- mutex_lock(&kvm->lock);
+ mutex_lock(&xive->lock);
/* block already exists - somebody else got here first */
if (xive->src_blocks[bid])
@@ -1560,7 +1562,7 @@ struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
xive->max_sbid = bid;
out:
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&xive->lock);
return xive->src_blocks[bid];
}
@@ -1670,9 +1672,9 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
/* If we have a priority target the interrupt */
if (act_prio != MASKED) {
/* First, check provisioning of queues */
- mutex_lock(&xive->kvm->lock);
+ mutex_lock(&xive->lock);
rc = xive_check_provisioning(xive->kvm, act_prio);
- mutex_unlock(&xive->kvm->lock);
+ mutex_unlock(&xive->lock);
/* Target interrupt */
if (rc == 0)
@@ -1826,7 +1828,6 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
xive_native_configure_irq(hw_num, 0, MASKED, 0);
- xive_cleanup_irq_data(xd);
}
void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
@@ -1840,9 +1841,10 @@ void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
continue;
kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
+ xive_cleanup_irq_data(&state->ipi_data);
xive_native_free_irq(state->ipi_number);
- /* Pass-through, cleanup too */
+ /* Pass-through, cleanup too but keep IRQ hw data */
if (state->pt_number)
kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
@@ -1859,21 +1861,10 @@ static void kvmppc_xive_release(struct kvm_device *dev)
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
int i;
- int was_ready;
pr_devel("Releasing xive device\n");
- debugfs_remove(xive->dentry);
-
/*
- * Clearing mmu_ready temporarily while holding kvm->lock
- * is a way of ensuring that no vcpus can enter the guest
- * until we drop kvm->lock. Doing kick_all_cpus_sync()
- * ensures that any vcpu executing inside the guest has
- * exited the guest. Once kick_all_cpus_sync() has finished,
- * we know that no vcpu can be executing the XIVE push or
- * pull code, or executing a XICS hcall.
- *
* Since this is the device release function, we know that
* userspace does not have any open fd referring to the
* device. Therefore there can not be any of the device
@@ -1881,9 +1872,8 @@ static void kvmppc_xive_release(struct kvm_device *dev)
* and similarly, the connect_vcpu and set/clr_mapped
* functions also cannot be being executed.
*/
- was_ready = kvm->arch.mmu_ready;
- kvm->arch.mmu_ready = 0;
- kick_all_cpus_sync();
+
+ debugfs_remove(xive->dentry);
/*
* We should clean up the vCPU interrupt presenters first.
@@ -1892,12 +1882,22 @@ static void kvmppc_xive_release(struct kvm_device *dev)
/*
* Take vcpu->mutex to ensure that no one_reg get/set ioctl
* (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
+ * Holding the vcpu->mutex also means that the vcpu cannot
+ * be executing the KVM_RUN ioctl, and therefore it cannot
+ * be executing the XIVE push or pull code or accessing
+ * the XIVE MMIO regions.
*/
mutex_lock(&vcpu->mutex);
kvmppc_xive_cleanup_vcpu(vcpu);
mutex_unlock(&vcpu->mutex);
}
+ /*
+ * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
+ * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
+ * against xive code getting called during vcpu execution or
+ * set/get one_reg operations.
+ */
kvm->arch.xive = NULL;
/* Mask and free interrupts */
@@ -1911,8 +1911,6 @@ static void kvmppc_xive_release(struct kvm_device *dev)
if (xive->vp_base != XIVE_INVALID_VP)
xive_native_free_vp_block(xive->vp_base);
- kvm->arch.mmu_ready = was_ready;
-
/*
* A reference of the kvmppc_xive pointer is now kept under
* the xive_devices struct of the machine for reuse. It is
@@ -1967,6 +1965,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
dev->private = xive;
xive->dev = dev;
xive->kvm = kvm;
+ mutex_init(&xive->lock);
/* Already there ? */
if (kvm->arch.xive)
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 426146332984..862c2c9650ae 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -141,6 +141,7 @@ struct kvmppc_xive {
struct kvmppc_xive_ops *ops;
struct address_space *mapping;
struct mutex mapping_lock;
+ struct mutex lock;
};
#define KVMPPC_XIVE_Q_COUNT 8
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 6a8e698c4b6e..5596c8ec221a 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -109,12 +109,12 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
return -EPERM;
if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY;
- if (server_num >= KVM_MAX_VCPUS) {
+ if (server_num >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
pr_devel("Out of bounds !\n");
return -EINVAL;
}
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&xive->lock);
if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
pr_devel("Duplicate !\n");
@@ -159,7 +159,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
/* TODO: reset all queues to a clean state ? */
bail:
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&xive->lock);
if (rc)
kvmppc_xive_native_cleanup_vcpu(vcpu);
@@ -172,6 +172,7 @@ bail:
static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
{
struct kvmppc_xive *xive = kvm->arch.xive;
+ pgoff_t esb_pgoff = KVM_XIVE_ESB_PAGE_OFFSET + irq * 2;
if (irq >= KVMPPC_XIVE_NR_IRQS)
return -EINVAL;
@@ -185,7 +186,7 @@ static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
mutex_lock(&xive->mapping_lock);
if (xive->mapping)
unmap_mapping_range(xive->mapping,
- irq * (2ull << PAGE_SHIFT),
+ esb_pgoff << PAGE_SHIFT,
2ull << PAGE_SHIFT, 1);
mutex_unlock(&xive->mapping_lock);
return 0;
@@ -535,6 +536,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
struct xive_q *q;
gfn_t gfn;
unsigned long page_size;
+ int srcu_idx;
/*
* Demangle priority/server tuple from the EQ identifier
@@ -565,24 +567,6 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
__func__, server, priority, kvm_eq.flags,
kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
- /*
- * sPAPR specifies a "Unconditional Notify (n) flag" for the
- * H_INT_SET_QUEUE_CONFIG hcall which forces notification
- * without using the coalescing mechanisms provided by the
- * XIVE END ESBs. This is required on KVM as notification
- * using the END ESBs is not supported.
- */
- if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
- pr_err("invalid flags %d\n", kvm_eq.flags);
- return -EINVAL;
- }
-
- rc = xive_native_validate_queue_size(kvm_eq.qshift);
- if (rc) {
- pr_err("invalid queue size %d\n", kvm_eq.qshift);
- return rc;
- }
-
/* reset queue and disable queueing */
if (!kvm_eq.qshift) {
q->guest_qaddr = 0;
@@ -604,26 +588,48 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
return 0;
}
+ /*
+ * sPAPR specifies a "Unconditional Notify (n) flag" for the
+ * H_INT_SET_QUEUE_CONFIG hcall which forces notification
+ * without using the coalescing mechanisms provided by the
+ * XIVE END ESBs. This is required on KVM as notification
+ * using the END ESBs is not supported.
+ */
+ if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
+ pr_err("invalid flags %d\n", kvm_eq.flags);
+ return -EINVAL;
+ }
+
+ rc = xive_native_validate_queue_size(kvm_eq.qshift);
+ if (rc) {
+ pr_err("invalid queue size %d\n", kvm_eq.qshift);
+ return rc;
+ }
+
if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
1ull << kvm_eq.qshift);
return -EINVAL;
}
+ srcu_idx = srcu_read_lock(&kvm->srcu);
gfn = gpa_to_gfn(kvm_eq.qaddr);
page = gfn_to_page(kvm, gfn);
if (is_error_page(page)) {
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
return -EINVAL;
}
page_size = kvm_host_page_size(kvm, gfn);
if (1ull << kvm_eq.qshift > page_size) {
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
pr_warn("Incompatible host page size %lx!\n", page_size);
return -EINVAL;
}
qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
/*
* Backup the queue page guest address to the mark EQ page
@@ -772,7 +778,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive)
pr_devel("%s\n", __func__);
- mutex_lock(&kvm->lock);
+ mutex_lock(&xive->lock);
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -810,7 +816,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive)
}
}
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&xive->lock);
return 0;
}
@@ -854,6 +860,7 @@ static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
unsigned int prio;
+ int srcu_idx;
if (!xc)
return -ENOENT;
@@ -865,7 +872,9 @@ static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
continue;
/* Mark EQ page dirty for migration */
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
}
return 0;
}
@@ -878,7 +887,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
pr_devel("%s\n", __func__);
- mutex_lock(&kvm->lock);
+ mutex_lock(&xive->lock);
for (i = 0; i <= xive->max_sbid; i++) {
struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
@@ -892,7 +901,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
kvm_for_each_vcpu(i, vcpu, kvm) {
kvmppc_xive_native_vcpu_eq_sync(vcpu);
}
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&xive->lock);
return 0;
}
@@ -965,7 +974,7 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
}
/*
- * Called when device fd is closed
+ * Called when device fd is closed. kvm->lock is held.
*/
static void kvmppc_xive_native_release(struct kvm_device *dev)
{
@@ -973,21 +982,18 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
int i;
- int was_ready;
-
- debugfs_remove(xive->dentry);
pr_devel("Releasing xive native device\n");
/*
- * Clearing mmu_ready temporarily while holding kvm->lock
- * is a way of ensuring that no vcpus can enter the guest
- * until we drop kvm->lock. Doing kick_all_cpus_sync()
- * ensures that any vcpu executing inside the guest has
- * exited the guest. Once kick_all_cpus_sync() has finished,
- * we know that no vcpu can be executing the XIVE push or
- * pull code or accessing the XIVE MMIO regions.
- *
+ * Clear the KVM device file address_space which is used to
+ * unmap the ESB pages when a device is passed-through.
+ */
+ mutex_lock(&xive->mapping_lock);
+ xive->mapping = NULL;
+ mutex_unlock(&xive->mapping_lock);
+
+ /*
* Since this is the device release function, we know that
* userspace does not have any open fd or mmap referring to
* the device. Therefore there can not be any of the
@@ -996,9 +1002,8 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
* connect_vcpu and set/clr_mapped functions also cannot
* be being executed.
*/
- was_ready = kvm->arch.mmu_ready;
- kvm->arch.mmu_ready = 0;
- kick_all_cpus_sync();
+
+ debugfs_remove(xive->dentry);
/*
* We should clean up the vCPU interrupt presenters first.
@@ -1007,12 +1012,22 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
/*
* Take vcpu->mutex to ensure that no one_reg get/set ioctl
* (i.e. kvmppc_xive_native_[gs]et_vp) can be being done.
+ * Holding the vcpu->mutex also means that the vcpu cannot
+ * be executing the KVM_RUN ioctl, and therefore it cannot
+ * be executing the XIVE push or pull code or accessing
+ * the XIVE MMIO regions.
*/
mutex_lock(&vcpu->mutex);
kvmppc_xive_native_cleanup_vcpu(vcpu);
mutex_unlock(&vcpu->mutex);
}
+ /*
+ * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
+ * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
+ * against xive code getting called during vcpu execution or
+ * set/get one_reg operations.
+ */
kvm->arch.xive = NULL;
for (i = 0; i <= xive->max_sbid; i++) {
@@ -1025,8 +1040,6 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
if (xive->vp_base != XIVE_INVALID_VP)
xive_native_free_vp_block(xive->vp_base);
- kvm->arch.mmu_ready = was_ready;
-
/*
* A reference of the kvmppc_xive pointer is now kept under
* the xive_devices struct of the machine for reuse. It is
@@ -1060,6 +1073,7 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
xive->kvm = kvm;
kvm->arch.xive = xive;
mutex_init(&xive->mapping_lock);
+ mutex_init(&xive->lock);
/*
* Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3393b166817a..aa3a678711be 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -657,6 +657,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ case KVM_CAP_MAX_VCPU_ID:
+ r = KVM_MAX_VCPU_ID;
+ break;
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_PPC_GET_SMMU_INFO:
r = 1;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 7e129f62cd67..ca92e01d0bd1 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1846,6 +1846,7 @@ static int power_pmu_event_init(struct perf_event *event)
int n;
int err;
struct cpu_hw_events *cpuhw;
+ u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
@@ -1951,13 +1952,14 @@ static int power_pmu_event_init(struct perf_event *event)
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
- if (cpuhw->bhrb_filter == -1) {
+ if (bhrb_filter == -1) {
put_cpu_var(cpu_hw_events);
return -EOPNOTSUPP;
}
+ cpuhw->bhrb_filter = bhrb_filter;
}
put_cpu_var(cpu_hw_events);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index b47e9fb5e899..3a5fcc20ff31 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -25,6 +25,7 @@ enum {
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
+#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
/*
* Raw event encoding for PowerISA v2.07 (Power8):
@@ -239,6 +240,8 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type)
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
+ pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;
+
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 738ed26c538d..08c3ef796198 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -88,6 +88,7 @@ enum {
#define POWER9_MMCRA_IFM1 0x0000000040000000UL
#define POWER9_MMCRA_IFM2 0x0000000080000000UL
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
+#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
@@ -296,6 +297,8 @@ static u64 power9_bhrb_filter_map(u64 branch_sample_type)
static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
+ pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;
+
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 1b6932890a73..186109bdd41b 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -157,6 +157,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
struct imc_pmu *pmu_ptr;
u32 offset;
+ /* Return for unknown domain */
+ if (domain < 0)
+ return -EINVAL;
+
/* memory for pmu */
pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
if (!pmu_ptr)
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index dd456725189f..d00f84add5f4 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -27,14 +27,14 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
kma_functions;
@@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
unsigned int n, nbytes;
int ret, locked;
- locked = spin_trylock(&ctrblk_lock);
+ locked = mutex_trylock(&ctrblk_lock);
ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (locked)
- spin_unlock(&ctrblk_lock);
+ mutex_unlock(&ctrblk_lock);
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
@@ -826,19 +826,45 @@ static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
return 0;
}
-static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
- unsigned int len)
+static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+ unsigned int len)
{
memset(gw, 0, sizeof(*gw));
gw->walk_bytes_remain = len;
scatterwalk_start(&gw->walk, sg);
}
-static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
+{
+ struct scatterlist *nextsg;
+
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+ while (!gw->walk_bytes) {
+ nextsg = sg_next(gw->walk.sg);
+ if (!nextsg)
+ return 0;
+ scatterwalk_start(&gw->walk, nextsg);
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+ gw->walk_bytes_remain);
+ }
+ gw->walk_ptr = scatterwalk_map(&gw->walk);
+ return gw->walk_bytes;
+}
+
+static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
+ unsigned int nbytes)
+{
+ gw->walk_bytes_remain -= nbytes;
+ scatterwalk_unmap(&gw->walk);
+ scatterwalk_advance(&gw->walk, nbytes);
+ scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+ gw->walk_ptr = NULL;
+}
+
+static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
int n;
- /* minbytesneeded <= AES_BLOCK_SIZE */
if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
gw->ptr = gw->buf;
gw->nbytes = gw->buf_bytes;
@@ -851,13 +877,11 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
goto out;
}
- gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
- if (!gw->walk_bytes) {
- scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
- gw->walk_bytes = scatterwalk_clamp(&gw->walk,
- gw->walk_bytes_remain);
+ if (!_gcm_sg_clamp_and_map(gw)) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
}
- gw->walk_ptr = scatterwalk_map(&gw->walk);
if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
gw->ptr = gw->walk_ptr;
@@ -869,51 +893,90 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
gw->buf_bytes += n;
- gw->walk_bytes_remain -= n;
- scatterwalk_unmap(&gw->walk);
- scatterwalk_advance(&gw->walk, n);
- scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
-
+ _gcm_sg_unmap_and_advance(gw, n);
if (gw->buf_bytes >= minbytesneeded) {
gw->ptr = gw->buf;
gw->nbytes = gw->buf_bytes;
goto out;
}
-
- gw->walk_bytes = scatterwalk_clamp(&gw->walk,
- gw->walk_bytes_remain);
- if (!gw->walk_bytes) {
- scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
- gw->walk_bytes = scatterwalk_clamp(&gw->walk,
- gw->walk_bytes_remain);
+ if (!_gcm_sg_clamp_and_map(gw)) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
}
- gw->walk_ptr = scatterwalk_map(&gw->walk);
}
out:
return gw->nbytes;
}
-static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
- int n;
+ if (gw->walk_bytes_remain == 0) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
+ }
+ if (!_gcm_sg_clamp_and_map(gw)) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
+ }
+
+ if (gw->walk_bytes >= minbytesneeded) {
+ gw->ptr = gw->walk_ptr;
+ gw->nbytes = gw->walk_bytes;
+ goto out;
+ }
+
+ scatterwalk_unmap(&gw->walk);
+ gw->walk_ptr = NULL;
+
+ gw->ptr = gw->buf;
+ gw->nbytes = sizeof(gw->buf);
+
+out:
+ return gw->nbytes;
+}
+
+static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
if (gw->ptr == NULL)
- return;
+ return 0;
if (gw->ptr == gw->buf) {
- n = gw->buf_bytes - bytesdone;
+ int n = gw->buf_bytes - bytesdone;
if (n > 0) {
memmove(gw->buf, gw->buf + bytesdone, n);
- gw->buf_bytes -= n;
+ gw->buf_bytes = n;
} else
gw->buf_bytes = 0;
- } else {
- gw->walk_bytes_remain -= bytesdone;
- scatterwalk_unmap(&gw->walk);
- scatterwalk_advance(&gw->walk, bytesdone);
- scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
- }
+ } else
+ _gcm_sg_unmap_and_advance(gw, bytesdone);
+
+ return bytesdone;
+}
+
+static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
+ int i, n;
+
+ if (gw->ptr == NULL)
+ return 0;
+
+ if (gw->ptr == gw->buf) {
+ for (i = 0; i < bytesdone; i += n) {
+ if (!_gcm_sg_clamp_and_map(gw))
+ return i;
+ n = min(gw->walk_bytes, bytesdone - i);
+ memcpy(gw->walk_ptr, gw->buf + i, n);
+ _gcm_sg_unmap_and_advance(gw, n);
+ }
+ } else
+ _gcm_sg_unmap_and_advance(gw, bytesdone);
+
+ return bytesdone;
}
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
@@ -926,7 +989,7 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
unsigned int pclen = req->cryptlen;
int ret = 0;
- unsigned int len, in_bytes, out_bytes,
+ unsigned int n, len, in_bytes, out_bytes,
min_bytes, bytes, aad_bytes, pc_bytes;
struct gcm_sg_walk gw_in, gw_out;
u8 tag[GHASH_DIGEST_SIZE];
@@ -963,14 +1026,14 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
*(u32 *)(param.j0 + ivsize) = 1;
memcpy(param.k, ctx->key, ctx->key_len);
- gcm_sg_walk_start(&gw_in, req->src, len);
- gcm_sg_walk_start(&gw_out, req->dst, len);
+ gcm_walk_start(&gw_in, req->src, len);
+ gcm_walk_start(&gw_out, req->dst, len);
do {
min_bytes = min_t(unsigned int,
aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
- in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
- out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
+ in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
+ out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
bytes = min(in_bytes, out_bytes);
if (aadlen + pclen <= bytes) {
@@ -997,8 +1060,11 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
gw_in.ptr + aad_bytes, pc_bytes,
gw_in.ptr, aad_bytes);
- gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
- gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
+ n = aad_bytes + pc_bytes;
+ if (gcm_in_walk_done(&gw_in, n) != n)
+ return -ENOMEM;
+ if (gcm_out_walk_done(&gw_out, n) != n)
+ return -ENOMEM;
aadlen -= aad_bytes;
pclen -= pc_bytes;
} while (aadlen + pclen > 0);
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 1f9ab24dc048..374b42fc7637 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -14,6 +14,7 @@
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/fips.h>
+#include <linux/mutex.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <asm/cpacf.h>
@@ -21,7 +22,7 @@
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
@@ -374,7 +375,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
unsigned int n, nbytes;
int ret, locked;
- locked = spin_trylock(&ctrblk_lock);
+ locked = mutex_trylock(&ctrblk_lock);
ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
@@ -391,7 +392,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (locked)
- spin_unlock(&ctrblk_lock);
+ mutex_unlock(&ctrblk_lock);
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index e94a0a28b5eb..aea32dda3d14 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -160,8 +160,8 @@ struct ap_config_info {
unsigned char Nd; /* max # of Domains - 1 */
unsigned char _reserved3[10];
unsigned int apm[8]; /* AP ID mask */
- unsigned int aqm[8]; /* AP queue mask */
- unsigned int adm[8]; /* AP domain mask */
+ unsigned int aqm[8]; /* AP (usage) queue mask */
+ unsigned int adm[8]; /* AP (control) domain mask */
unsigned char _reserved4[16];
} __aligned(8);
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 27696755daa9..e3d53eb6bcf5 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -178,7 +178,7 @@ static inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
: "cc");
}
-static inline int __cpacf_check_opcode(unsigned int opcode)
+static __always_inline int __cpacf_check_opcode(unsigned int opcode)
{
switch (opcode) {
case CPACF_KMAC:
@@ -218,7 +218,7 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
}
-static inline int cpacf_query_func(unsigned int opcode, unsigned int func)
+static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func)
{
cpacf_mask_t mask;
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index 3ec52a05d500..50359172cc48 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -70,6 +70,17 @@ struct clp_rsp_list_pci {
struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
} __packed;
+struct mio_info {
+ u32 valid : 6;
+ u32 : 26;
+ u32 : 32;
+ struct {
+ u64 wb;
+ u64 wt;
+ } addr[PCI_BAR_COUNT];
+ u32 reserved[6];
+} __packed;
+
/* Query PCI function request */
struct clp_req_query_pci {
struct clp_req_hdr hdr;
@@ -100,14 +111,7 @@ struct clp_rsp_query_pci {
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
u32 reserved2[16];
- u32 mio_valid : 6;
- u32 : 26;
- u32 : 32;
- struct {
- u64 wb;
- u64 wt;
- } addr[PCI_BAR_COUNT];
- u32 reserved3[6];
+ struct mio_info mio;
} __packed;
/* Query PCI function group request */
@@ -155,8 +159,9 @@ struct clp_req_set_pci {
struct clp_rsp_set_pci {
struct clp_rsp_hdr hdr;
u32 fh; /* function handle */
- u32 reserved3;
- u64 reserved4;
+ u32 reserved1;
+ u64 reserved2;
+ struct mio_info mio;
} __packed;
/* Combined request/response block structures used by clp insn */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e5e8eb29e68e..28ebd647784c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -539,6 +539,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
+ case KVM_CAP_MAX_VCPU_ID:
r = KVM_S390_BSCA_CPU_SLOTS;
if (!kvm_s390_use_sca_entries())
r = KVM_MAX_VCPUS;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index c220399ae196..91ce03fd0c84 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -85,7 +85,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
* Find out which address space caused the exception.
* Access register mode is impossible, ignore space == 3.
*/
-static inline enum fault_type get_fault_type(struct pt_regs *regs)
+static enum fault_type get_fault_type(struct pt_regs *regs)
{
unsigned long trans_exc_code;
@@ -211,6 +211,8 @@ static void dump_fault_info(struct pt_regs *regs)
asce = S390_lowcore.kernel_asce;
pr_cont("kernel ");
break;
+ default:
+ unreachable();
}
pr_cont("ASCE.\n");
dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 0ebb7c405a25..86ca7f88fb22 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -528,7 +528,10 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
if (zdev->bars[i].val & 4)
flags |= IORESOURCE_MEM_64;
- addr = ZPCI_ADDR(entry);
+ if (static_branch_likely(&have_mio))
+ addr = (unsigned long) zdev->bars[i].mio_wb;
+ else
+ addr = ZPCI_ADDR(entry);
size = 1UL << zdev->bars[i].size;
res = __alloc_res(zdev, addr, size, flags);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 3a36b07a5571..d03631dba7c2 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -165,11 +165,11 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
}
zdev->mio_capable = response->mio_addr_avail;
for (i = 0; i < PCI_BAR_COUNT; i++) {
- if (!(response->mio_valid & (1 << (PCI_BAR_COUNT - i - 1))))
+ if (!(response->mio.valid & (1 << (PCI_BAR_COUNT - i - 1))))
continue;
- zdev->bars[i].mio_wb = (void __iomem *) response->addr[i].wb;
- zdev->bars[i].mio_wt = (void __iomem *) response->addr[i].wt;
+ zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
+ zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
}
return 0;
}
diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
index 85de790583f9..64b973f0e985 100644
--- a/arch/x86/kernel/ima_arch.c
+++ b/arch/x86/kernel/ima_arch.c
@@ -18,6 +18,11 @@ static enum efi_secureboot_mode get_sb_mode(void)
size = sizeof(secboot);
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_info("ima: secureboot mode unknown, no efi\n");
+ return efi_secureboot_mode_unknown;
+ }
+
/* Get variable contents into buffer */
status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
NULL, &size, &secboot);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index acb179f78fdc..83aefd759846 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3122,6 +3122,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ case KVM_CAP_MAX_VCPU_ID:
+ r = KVM_MAX_VCPU_ID;
+ break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
diff --git a/block/blk-core.c b/block/blk-core.c
index 1bf83a0df0f6..ee1b35fe8572 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -282,35 +282,6 @@ void blk_set_queue_dying(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
-void blk_exit_queue(struct request_queue *q)
-{
- /*
- * Since the I/O scheduler exit code may access cgroup information,
- * perform I/O scheduler exit before disassociating from the block
- * cgroup controller.
- */
- if (q->elevator) {
- ioc_clear_queue(q);
- elevator_exit(q, q->elevator);
- q->elevator = NULL;
- }
-
- /*
- * Remove all references to @q from the block cgroup controller before
- * restoring @q->queue_lock to avoid that restoring this pointer causes
- * e.g. blkcg_print_blkgs() to crash.
- */
- blkcg_exit_queue(q);
-
- /*
- * Since the cgroup code may dereference the @q->backing_dev_info
- * pointer, only decrease its reference count after having removed the
- * association with the block cgroup controller.
- */
- bdi_put(q->backing_dev_info);
-}
-
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -346,14 +317,6 @@ void blk_cleanup_queue(struct request_queue *q)
del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
- /*
- * I/O scheduler exit is only safe after the sysfs scheduler attribute
- * has been removed.
- */
- WARN_ON_ONCE(q->kobj.state_in_sysfs);
-
- blk_exit_queue(q);
-
if (queue_is_mq(q))
blk_mq_exit_queue(q);
@@ -994,22 +957,8 @@ blk_qc_t generic_make_request(struct bio *bio)
* yet.
*/
struct bio_list bio_list_on_stack[2];
- blk_mq_req_flags_t flags = 0;
- struct request_queue *q = bio->bi_disk->queue;
blk_qc_t ret = BLK_QC_T_NONE;
- if (bio->bi_opf & REQ_NOWAIT)
- flags = BLK_MQ_REQ_NOWAIT;
- if (bio_flagged(bio, BIO_QUEUE_ENTERED))
- blk_queue_enter_live(q);
- else if (blk_queue_enter(q, flags) < 0) {
- if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
- bio_wouldblock_error(bio);
- else
- bio_io_error(bio);
- return ret;
- }
-
if (!generic_make_request_checks(bio))
goto out;
@@ -1046,22 +995,11 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
- bool enter_succeeded = true;
-
- if (unlikely(q != bio->bi_disk->queue)) {
- if (q)
- blk_queue_exit(q);
- q = bio->bi_disk->queue;
- flags = 0;
- if (bio->bi_opf & REQ_NOWAIT)
- flags = BLK_MQ_REQ_NOWAIT;
- if (blk_queue_enter(q, flags) < 0) {
- enter_succeeded = false;
- q = NULL;
- }
- }
+ struct request_queue *q = bio->bi_disk->queue;
+ blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
+ BLK_MQ_REQ_NOWAIT : 0;
- if (enter_succeeded) {
+ if (likely(blk_queue_enter(q, flags) == 0)) {
struct bio_list lower, same;
/* Create a fresh bio_list for all subordinate requests */
@@ -1069,6 +1007,8 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
+ blk_queue_exit(q);
+
/* sort new bios into those for a lower level
* and those for the same level
*/
@@ -1095,8 +1035,6 @@ blk_qc_t generic_make_request(struct bio *bio)
current->bio_list = NULL; /* deactivate */
out:
- if (q)
- blk_queue_exit(q);
return ret;
}
EXPORT_SYMBOL(generic_make_request);
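The rework drops the queue reference that was carried from iteration to iteration and instead brackets every make_request_fn call with its own enter/exit pair. A simplified userspace sketch of that discipline, with a plain refcount standing in for blk_queue_enter()/blk_queue_exit() (all names assumed):

#include <stdbool.h>
#include <stdio.h>

struct queue { int refs; bool dying; };

static bool queue_enter(struct queue *q)
{
	if (q->dying)
		return false;	/* mirrors blk_queue_enter() failing */
	q->refs++;
	return true;
}

static void queue_exit(struct queue *q) { q->refs--; }

static void submit(struct queue *q, int bio)
{
	/* One enter per bio, released right after dispatch; nothing
	 * is carried across loop iterations any more. */
	if (!queue_enter(q)) {
		printf("bio %d: error, queue dying\n", bio);
		return;
	}
	printf("bio %d dispatched (refs=%d)\n", bio, q->refs);
	queue_exit(q);
}

int main(void)
{
	struct queue q = { 0, false };

	submit(&q, 1);
	q.dying = true;
	submit(&q, 2);
	return 0;
}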
@@ -1200,7 +1138,9 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
struct request *rq)
{
if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
- printk(KERN_ERR "%s: over max size limit.\n", __func__);
+ printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
+ __func__, blk_rq_sectors(rq),
+ blk_queue_get_max_sectors(q, req_op(rq)));
return -EIO;
}
@@ -1212,7 +1152,8 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
*/
blk_recalc_rq_segments(rq);
if (rq->nr_phys_segments > queue_max_segments(q)) {
- printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+ printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
+ __func__, rq->nr_phys_segments, queue_max_segments(q));
return -EIO;
}
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 48bebf00a5f3..f945621a0e8f 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -42,8 +42,8 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
/*
* First do sequential mapping between CPUs and queues.
* In case we still have CPUs to map, and we have some number of
- * threads per cores then map sibling threads to the same queue for
- * performace optimizations.
+	 * threads per core, then map sibling threads to the same queue
+ * for performance optimizations.
*/
if (cpu < nr_queues) {
map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
@@ -60,7 +60,11 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
-/*
+/**
+ * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
+ * @qmap: CPU to hardware queue map.
+ * @index: hardware queue index.
+ *
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
*/
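For intuition, the default mapping walks CPUs in order and wraps modulo the queue count; the sibling-thread grouping refines this when CPUs are left over. A stripped-down sketch of the wrap-around case only (numbers chosen arbitrarily):

#include <stdio.h>

int main(void)
{
	unsigned int nr_cpus = 8, nr_queues = 3, map[8];

	/* Sequential mapping, wrapping once CPUs outnumber queues. */
	for (unsigned int cpu = 0; cpu < nr_cpus; cpu++) {
		map[cpu] = cpu % nr_queues;
		printf("cpu %u -> hw queue %u\n", cpu, map[cpu]);
	}
	return 0;
}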
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index ad4545a2a98b..b595a94c4d16 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -13,7 +13,7 @@
/**
* blk_mq_pci_map_queues - provide a default queue mapping for PCI device
- * @set: tagset to provide the mapping for
+ * @qmap: CPU to hardware queue map.
* @pdev: PCI device associated with @set.
* @offset: Offset to use for the pci irq vector
*
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index cc921e6ba709..14f968e58b8f 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -8,8 +8,8 @@
/**
* blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
- * @set: tagset to provide the mapping for
- * @dev: rdma device associated with @set.
+ * @map: CPU to hardware queue map.
+ * @dev: rdma device to provide a mapping for.
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the rdma device @dev has at least as many available
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index 75a52c18a8f6..488341628256 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -11,8 +11,8 @@
/**
* blk_mq_virtio_map_queues - provide a default queue mapping for virtio device
- * @set: tagset to provide the mapping for
- * @vdev: virtio device associated with @set.
+ * @qmap: CPU to hardware queue map.
+ * @vdev: virtio device to provide a mapping for.
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the virtio device @vdev has at least as many available
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 32b8ad3d341b..ce0f5f4ede70 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2865,7 +2865,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
goto err_exit;
if (blk_mq_alloc_ctxs(q))
- goto err_exit;
+ goto err_poll;
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
@@ -2929,6 +2929,9 @@ err_hctxs:
kfree(q->queue_hw_ctx);
err_sys_init:
blk_mq_sysfs_deinit(q);
+err_poll:
+ blk_stat_free_callback(q->poll_cb);
+ q->poll_cb = NULL;
err_exit:
q->mq_ops = NULL;
return ERR_PTR(-ENOMEM);
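The new err_poll label extends the usual label-per-resource unwind so the poll callback allocated earlier is freed on this failure path too. The idiom in miniature (a hedged sketch, nothing kernel-specific):

#include <stdio.h>
#include <stdlib.h>

static int init_all(void)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto err_free_a;	/* free exactly what exists so far */

	free(b);
	free(a);
	return 0;

err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	printf("init_all: %d\n", init_all());
	return 0;
}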
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 3f55b56f24bc..659ccb8b693f 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -209,9 +209,10 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
/**
* rq_qos_wait - throttle on a rqw if we need to
- * @private_data - caller provided specific data
- * @acquire_inflight_cb - inc the rqw->inflight counter if we can
- * @cleanup_cb - the callback to cleanup in case we race with a waker
+ * @rqw: rqw to throttle on
+ * @private_data: caller provided specific data
+ * @acquire_inflight_cb: inc the rqw->inflight counter if we can
+ * @cleanup_cb: the callback to cleanup in case we race with a waker
*
* This provides a uniform place for the rq_qos users to do their throttling.
* Since you can end up with a lot of things sleeping at once, this manages the
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a16a02c52a85..75b5281cc577 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -840,6 +840,36 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
kmem_cache_free(blk_requestq_cachep, q);
}
+/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
+static void blk_exit_queue(struct request_queue *q)
+{
+ /*
+ * Since the I/O scheduler exit code may access cgroup information,
+ * perform I/O scheduler exit before disassociating from the block
+ * cgroup controller.
+ */
+ if (q->elevator) {
+ ioc_clear_queue(q);
+ elevator_exit(q, q->elevator);
+ q->elevator = NULL;
+ }
+
+ /*
+ * Remove all references to @q from the block cgroup controller before
+	 * restoring @q->queue_lock, so that restoring this pointer cannot make
+	 * e.g. blkcg_print_blkgs() crash.
+ */
+ blkcg_exit_queue(q);
+
+ /*
+ * Since the cgroup code may dereference the @q->backing_dev_info
+ * pointer, only decrease its reference count after having removed the
+ * association with the block cgroup controller.
+ */
+ bdi_put(q->backing_dev_info);
+}
+
/**
* __blk_release_queue - release a request queue
* @work: pointer to the release_work member of the request queue to be released
@@ -860,23 +890,10 @@ static void __blk_release_queue(struct work_struct *work)
blk_stat_remove_callback(q, q->poll_cb);
blk_stat_free_callback(q->poll_cb);
- if (!blk_queue_dead(q)) {
- /*
- * Last reference was dropped without having called
- * blk_cleanup_queue().
- */
- WARN_ONCE(blk_queue_init_done(q),
- "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
- q);
- blk_exit_queue(q);
- }
-
- WARN(blk_queue_root_blkg(q),
- "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
- q);
-
blk_free_queue_stats(q->stats);
+ blk_exit_queue(q);
+
blk_queue_free_zone_bitmaps(q);
if (queue_is_mq(q))
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1b97a73d2fb1..9ea7c0ecad10 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1220,7 +1220,7 @@ static bool throtl_can_upgrade(struct throtl_data *td,
struct throtl_grp *this_tg);
/**
* throtl_pending_timer_fn - timer function for service_queue->pending_timer
- * @arg: the throtl_service_queue being serviced
+ * @t: the pending_timer member of the throtl_service_queue being serviced
*
* This timer is armed when a child throtl_grp with active bio's become
* pending and queued on the service_queue's pending_tree and expires when
diff --git a/block/blk.h b/block/blk.h
index e27fd1512e4b..91b3581b7c7a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -50,7 +50,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
-void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_freeze_queue(struct request_queue *q);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index b898a1cdf872..785dd58947f1 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -354,6 +354,7 @@ static const struct blk_mq_ops bsg_mq_ops = {
* @dev: device to attach bsg device to
* @name: device to give bsg device
* @job_fn: bsg job handler
+ * @timeout: timeout handler function pointer
* @dd_job_size: size of LLD data needed for each job
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
diff --git a/block/genhd.c b/block/genhd.c
index ad6826628e79..24654e1d83e6 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -532,8 +532,8 @@ void blk_free_devt(dev_t devt)
}
}
-/**
- * We invalidate devt by assigning NULL pointer for devt in idr.
+/*
+ * We invalidate devt by assigning NULL pointer for devt in idr.
*/
void blk_invalidate_devt(dev_t devt)
{
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 6db573f33219..fe5d970e2e60 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -19,7 +19,7 @@
#include "check.h"
#include "msdos.h"
-/**
+/*
* ldm_debug/info/error/crit - Output an error message
* @f: A printf format string containing the message
* @...: Variables to substitute into @f
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index adc8979b02b6..e54956ae93d3 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -944,8 +944,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
u32 sys_target = acpi_target_system_state();
int ret, state;
- if (!pm_runtime_suspended(dev) || !adev ||
- device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
+ if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid &&
+ device_may_wakeup(dev) != !!adev->wakeup.prepare_count))
return true;
if (sys_target == ACPI_STATE_S0)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index e52f1238d2d6..a34deccd7317 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1132,15 +1132,19 @@ void __init acpi_no_s4_hw_signature(void)
nosigcheck = true;
}
-static int acpi_hibernation_begin(void)
+static int acpi_hibernation_begin(pm_message_t stage)
{
- int error;
+ if (!nvs_nosave) {
+ int error = suspend_nvs_alloc();
+ if (error)
+ return error;
+ }
- error = nvs_nosave ? 0 : suspend_nvs_alloc();
- if (!error)
- acpi_pm_start(ACPI_STATE_S4);
+ if (stage.event == PM_EVENT_HIBERNATE)
+ pm_set_suspend_via_firmware();
- return error;
+ acpi_pm_start(ACPI_STATE_S4);
+ return 0;
}
static int acpi_hibernation_enter(void)
@@ -1200,7 +1204,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops = {
* function is used if the pre-ACPI 2.0 suspend ordering has been
* requested.
*/
-static int acpi_hibernation_begin_old(void)
+static int acpi_hibernation_begin_old(pm_message_t stage)
{
int error;
/*
@@ -1211,16 +1215,21 @@ static int acpi_hibernation_begin_old(void)
acpi_sleep_tts_switch(ACPI_STATE_S4);
error = acpi_sleep_prepare(ACPI_STATE_S4);
+ if (error)
+ return error;
- if (!error) {
- if (!nvs_nosave)
- error = suspend_nvs_alloc();
- if (!error) {
- acpi_target_sleep_state = ACPI_STATE_S4;
- acpi_scan_lock_acquire();
- }
+ if (!nvs_nosave) {
+ error = suspend_nvs_alloc();
+ if (error)
+ return error;
}
- return error;
+
+ if (stage.event == PM_EVENT_HIBERNATE)
+ pm_set_suspend_via_firmware();
+
+ acpi_target_sleep_state = ACPI_STATE_S4;
+ acpi_scan_lock_acquire();
+ return 0;
}
/*
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 102d79575895..f11b7dc16e9d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -945,9 +945,20 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!file)
goto out;
+ /*
+	 * If we don't hold an exclusive handle for the device, upgrade to
+	 * it here to avoid the device changing under an exclusive owner.
+ */
+ if (!(mode & FMODE_EXCL)) {
+ bdgrab(bdev);
+ error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
+ if (error)
+ goto out_putf;
+ }
+
error = mutex_lock_killable(&loop_ctl_mutex);
if (error)
- goto out_putf;
+ goto out_bdev;
error = -EBUSY;
if (lo->lo_state != Lo_unbound)
@@ -1012,10 +1023,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mutex_unlock(&loop_ctl_mutex);
if (partscan)
loop_reread_partitions(lo, bdev);
+ if (!(mode & FMODE_EXCL))
+ blkdev_put(bdev, mode | FMODE_EXCL);
return 0;
out_unlock:
mutex_unlock(&loop_ctl_mutex);
+out_bdev:
+ if (!(mode & FMODE_EXCL))
+ blkdev_put(bdev, mode | FMODE_EXCL);
out_putf:
fput(file);
out:
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 745ed43a22d6..2fd717d8dd30 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -503,6 +503,7 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->dev = &pdev->dev;
+ priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR;
/* Register with i2c layer */
mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO);
@@ -518,7 +519,6 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
mlxcpld_i2c_adapter.nr = pdev->id;
priv->adap = mlxcpld_i2c_adapter;
priv->adap.dev.parent = &pdev->dev;
- priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR;
i2c_set_adapdata(&priv->adap, priv);
err = i2c_add_numbered_adapter(&priv->adap);
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index f14d4b3fab44..f724c8e6b360 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -351,7 +351,7 @@ static int synquacer_i2c_doxfer(struct synquacer_i2c *i2c,
/* wait 2 clock periods to ensure the stop has been through the bus */
udelay(DIV_ROUND_UP(2 * 1000, i2c->speed_khz));
- return 0;
+ return ret;
}
static irqreturn_t synquacer_i2c_isr(int irq, void *dev_id)
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index e83bf56c1d1c..2ea4585d18c5 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -275,6 +275,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
msgs[i].len < msgs[i].buf[0] +
I2C_SMBUS_BLOCK_MAX) {
+ i++;
res = -EINVAL;
break;
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a209199f3af6..09b8ff0d856a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3034,7 +3034,8 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
{
struct pci_dev *pdev = NULL;
struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
+ /* To avoid a -Wunused-but-set-variable warning. */
+ struct intel_iommu *iommu __maybe_unused;
struct device *dev;
int i;
int ret = 0;
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index e9ae7f87ab90..e3da7c03da1b 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -164,11 +164,6 @@ static void led_blink_setup(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- /*
- * If "set brightness to 0" is pending in workqueue, we don't
- * want that to be reordered after blink_set()
- */
- flush_work(&led_cdev->set_brightness_work);
if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) &&
led_cdev->blink_set &&
!led_cdev->blink_set(led_cdev, delay_on, delay_off))
diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index ca898c1383be..427fc3c303d5 100644
--- a/drivers/leds/trigger/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -113,6 +113,11 @@ static int timer_trig_activate(struct led_classdev *led_cdev)
led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
}
+ /*
+ * If "set brightness to 0" is pending in workqueue, we don't
+ * want that to be reordered after blink_set()
+ */
+ flush_work(&led_cdev->set_brightness_work);
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
&led_cdev->blink_delay_off);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index c5e1a097d7e3..1897847ceb0c 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -733,7 +733,8 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
if (!adev || !acpi_device_power_manageable(adev))
return false;
- if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
+ if (adev->wakeup.flags.valid &&
+ device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
return true;
if (acpi_target_system_state() == ACPI_STATE_S0)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index cae630fe6387..5eadbc3d0969 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -734,6 +734,8 @@ static int pci_pm_suspend(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ pci_dev->skip_bus_pm = false;
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_SUSPEND);
@@ -827,7 +829,20 @@ static int pci_pm_suspend_noirq(struct device *dev)
}
}
- if (!pci_dev->state_saved) {
+ if (pci_dev->skip_bus_pm) {
+ /*
+ * The function is running for the second time in a row without
+ * going through full resume, which is possible only during
+ * suspend-to-idle in a spurious wakeup case. Moreover, the
+ * device was originally left in D0, so its power state should
+ * not be changed here and the device register values saved
+ * originally should be restored on resume again.
+ */
+ pci_dev->state_saved = true;
+ } else if (pci_dev->state_saved) {
+ if (pci_dev->current_state == PCI_D0)
+ pci_dev->skip_bus_pm = true;
+ } else {
pci_save_state(pci_dev);
if (pci_power_manageable(pci_dev))
pci_prepare_to_sleep(pci_dev);
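The skip_bus_pm flag keys off whether the first suspend pass left the device in D0; if the noirq callback then runs again without a full resume (a spurious wakeup during suspend-to-idle), the originally saved register state is reused instead of being overwritten. A toy walk through that decision (flag names assumed):

#include <stdbool.h>
#include <stdio.h>

struct dev { bool skip_bus_pm, state_saved, in_d0; };

static void suspend_noirq(struct dev *d)
{
	if (d->skip_bus_pm) {
		d->state_saved = true;	/* reuse the original snapshot */
		printf("second pass: keep D0, restore saved state later\n");
	} else if (d->state_saved) {
		if (d->in_d0)
			d->skip_bus_pm = true;
		printf("first pass: state already saved, device in D0\n");
	} else {
		d->state_saved = true;	/* stands in for pci_save_state() */
		printf("first pass: saving state\n");
	}
}

int main(void)
{
	struct dev d = { false, true, true };

	suspend_noirq(&d);	/* normal suspend-to-idle pass */
	suspend_noirq(&d);	/* spurious wakeup, suspended again */
	return 0;
}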
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index cc30e4f07fff..b9fc502c58c2 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -254,19 +254,37 @@ static inline int ap_test_config_card_id(unsigned int id)
}
/*
- * ap_test_config_domain(): Test, whether an AP usage domain is configured.
+ * ap_test_config_usage_domain(): Test whether an AP usage domain
+ * is configured.
* @domain AP usage domain ID
*
* Returns 0 if the usage domain is not configured
* 1 if the usage domain is configured or
* if the configuration information is not available
*/
-static inline int ap_test_config_domain(unsigned int domain)
+int ap_test_config_usage_domain(unsigned int domain)
{
if (!ap_configuration) /* QCI not supported */
return domain < 16;
return ap_test_config(ap_configuration->aqm, domain);
}
+EXPORT_SYMBOL(ap_test_config_usage_domain);
+
+/*
+ * ap_test_config_ctrl_domain(): Test whether an AP control domain
+ * is configured.
+ * @domain AP control domain ID
+ *
+ * Returns 1 if the control domain is configured
+ * 0 in all other cases
+ */
+int ap_test_config_ctrl_domain(unsigned int domain)
+{
+ if (!ap_configuration) /* QCI not supported */
+ return 0;
+ return ap_test_config(ap_configuration->adm, domain);
+}
+EXPORT_SYMBOL(ap_test_config_ctrl_domain);
/**
* ap_query_queue(): Check if an AP queue is available.
@@ -1267,7 +1285,7 @@ static void ap_select_domain(void)
best_domain = -1;
max_count = 0;
for (i = 0; i < AP_DOMAINS; i++) {
- if (!ap_test_config_domain(i) ||
+ if (!ap_test_config_usage_domain(i) ||
!test_bit_inv(i, ap_perms.aqm))
continue;
count = 0;
@@ -1442,7 +1460,7 @@ static void _ap_scan_bus_adapter(int id)
(void *)(long) qid,
__match_queue_device_with_qid);
aq = dev ? to_ap_queue(dev) : NULL;
- if (!ap_test_config_domain(dom)) {
+ if (!ap_test_config_usage_domain(dom)) {
if (dev) {
/* Queue device exists but has been
* removed from configuration.
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 15a98a673c5c..6f3cf37776ca 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -251,6 +251,9 @@ void ap_wait(enum ap_wait wait);
void ap_request_timeout(struct timer_list *t);
void ap_bus_force_rescan(void);
+int ap_test_config_usage_domain(unsigned int domain);
+int ap_test_config_ctrl_domain(unsigned int domain);
+
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
void ap_queue_prepare_remove(struct ap_queue *aq);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 852b8c2299c1..1058b4b5cc1e 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -822,7 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
struct ap_message ap_msg;
unsigned int weight, pref_weight;
unsigned int func_code;
- unsigned short *domain;
+ unsigned short *domain, tdom;
int qid = 0, rc = -ENODEV;
struct module *mod;
@@ -834,6 +834,17 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
if (rc)
goto out;
+ /*
+ * If a valid target domain is set and this domain is NOT a usage
+	 * domain but a control-only domain, use the default domain as target.
+ */
+ tdom = *domain;
+ if (tdom >= 0 && tdom < AP_DOMAINS &&
+ !ap_test_config_usage_domain(tdom) &&
+ ap_test_config_ctrl_domain(tdom) &&
+ ap_domain_index >= 0)
+ tdom = ap_domain_index;
+
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
@@ -856,8 +867,8 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
/* check if device is online and eligible */
if (!zq->online ||
!zq->ops->send_cprb ||
- ((*domain != (unsigned short) AUTOSELECT) &&
- (*domain != AP_QID_QUEUE(zq->queue->qid))))
+ (tdom != (unsigned short) AUTOSELECT &&
+ tdom != AP_QID_QUEUE(zq->queue->qid)))
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c6acca521ffe..31e8a7240fd7 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -167,6 +167,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */
extern int zfcp_unit_add(struct zfcp_port *, u64);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 221d0dfb8493..e9ded2befa0d 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -129,6 +129,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
zfcp_sdev->erp_action.port = port;
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (zfcp_sysfs_port_is_removing(port)) {
+ /* port is already gone */
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
+ return -ENXIO;
+ }
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index b277be6f7611..af197e2b3e69 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -235,6 +235,53 @@ static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+static void zfcp_sysfs_port_set_removing(struct zfcp_port *const port)
+{
+ lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
+ atomic_set(&port->units, -1);
+}
+
+bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port)
+{
+ lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
+ return atomic_read(&port->units) == -1;
+}
+
+static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port)
+{
+ struct zfcp_adapter *const adapter = port->adapter;
+ unsigned long flags;
+ struct scsi_device *sdev;
+ bool in_use = true;
+
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (atomic_read(&port->units) > 0)
+ goto unlock_port_units_mutex; /* zfcp_unit(s) under port */
+
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host) {
+ const struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL)
+ continue;
+ if (zsdev->port != port)
+ continue;
+ /* alive scsi_device under port of interest */
+ goto unlock_host_lock;
+ }
+
+ /* port is about to be removed, so no more unit_add or slave_alloc */
+ zfcp_sysfs_port_set_removing(port);
+ in_use = false;
+
+unlock_host_lock:
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+unlock_port_units_mutex:
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ return in_use;
+}
+
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -257,15 +304,11 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
else
retval = 0;
- mutex_lock(&zfcp_sysfs_port_units_mutex);
- if (atomic_read(&port->units) > 0) {
+ if (zfcp_sysfs_port_in_use(port)) {
retval = -EBUSY;
- mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
goto out;
}
- /* port is about to be removed, so no more unit_add */
- atomic_set(&port->units, -1);
- mutex_unlock(&zfcp_sysfs_port_units_mutex);
write_lock_irq(&adapter->port_list_lock);
list_del(&port->list);
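All of this rests on one convention: the units counter doubles as a removal sentinel (-1), and both marking and testing happen under zfcp_sysfs_port_units_mutex. A compact userspace sketch of the check-then-mark pattern, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t units_mutex = PTHREAD_MUTEX_INITIALIZER;
static int units;	/* > 0: units present, -1: port being removed */

static bool try_mark_removing(void)
{
	bool ok = false;

	pthread_mutex_lock(&units_mutex);
	if (units == 0) {	/* nothing left under the port */
		units = -1;	/* blocks further unit_add/slave_alloc */
		ok = true;
	}
	pthread_mutex_unlock(&units_mutex);
	return ok;
}

static bool is_removing(void)
{
	bool r;

	pthread_mutex_lock(&units_mutex);
	r = (units == -1);
	pthread_mutex_unlock(&units_mutex);
	return r;
}

int main(void)
{
	printf("marked: %d, removing now: %d\n",
	       try_mark_removing(), is_removing());
	return 0;
}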
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 1bf0a0984a09..e67bf7388cae 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -124,7 +124,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
int retval = 0;
mutex_lock(&zfcp_sysfs_port_units_mutex);
- if (atomic_read(&port->units) == -1) {
+ if (zfcp_sysfs_port_is_removing(port)) {
/* port is already gone */
retval = -ENODEV;
goto out;
@@ -168,8 +168,14 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
+ /*
+ * lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex
+ * due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc()
+ */
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
zfcp_unit_scsi_scan(unit);
+ return retval;
out:
mutex_unlock(&zfcp_sysfs_port_units_mutex);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 8b915d4ed98d..7d43e014bd21 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -639,6 +639,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
if (ndev->flags & IFF_LOOPBACK) {
ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
+ if (!ndev) {
+ err = -ENETUNREACH;
+ goto rel_neigh;
+ }
mtu = ndev->mtu;
pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
n->dev->name, ndev->name, mtu);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a7a4a772f501..f0066f8a1786 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1160,10 +1160,8 @@ static int __init alua_init(void)
int r;
kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
- if (!kaluad_wq) {
- /* Temporary failure, bypass */
- return SCSI_DH_DEV_TEMP_BUSY;
- }
+ if (!kaluad_wq)
+ return -ENOMEM;
r = scsi_register_device_handler(&alua_dh);
if (r != 0) {
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 83f2fd70ce76..9f7e2457360e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1019,6 +1019,8 @@ static struct domain_device *sas_ex_discover_expander(
list_del(&child->dev_list_node);
spin_unlock_irq(&parent->port->dev_list_lock);
sas_put_device(child);
+ sas_port_delete(phy->port);
+ phy->port = NULL;
return NULL;
}
list_add_tail(&child->siblings, &parent->ex_dev.children);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index e030e1452136..b71f5ac6c7dc 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -35,7 +35,6 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- phy->in_shutdown = 0;
phy->error = 0;
sas_deform_port(phy, 1);
}
@@ -45,7 +44,6 @@ static void sas_phye_oob_done(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- phy->in_shutdown = 0;
phy->error = 0;
}
@@ -126,6 +124,7 @@ static void sas_phye_shutdown(struct work_struct *work)
ret);
} else
pr_notice("phy%d is not enabled, cannot shutdown\n", phy->id);
+ phy->in_shutdown = 0;
}
/* ---------- Phy class registration ---------- */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b17761eafca9..d6be4e8f4a8f 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -7291,7 +7291,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
else
mask = DMA_BIT_MASK(32);
- rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
+ rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
goto disable_device;
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index 928e8e81ba69..528df8801254 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -64,20 +64,6 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
}
}
-bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id)
-{
- u32 val;
- int ret;
-
- if ((hw_id > (priv->num_sensors - 1)) || (hw_id < 0))
- return -EINVAL;
- ret = regmap_field_read(priv->rf[SENSOR_EN], &val);
- if (ret)
- return ret;
-
- return val & (1 << hw_id);
-}
-
static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
{
int degc, num, den;
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index a319283c223f..6f26fadf4c27 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -334,7 +334,6 @@ static const struct reg_field tsens_v0_1_regfields[MAX_REGFIELDS] = {
/* CTRL_OFFSET */
[TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
[TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
- [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 13),
/* ----- TM ------ */
/* INTERRUPT ENABLE */
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 1099069f2aa3..0a4f2b8fcab6 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -44,7 +44,6 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
/* CTRL_OFF */
[TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
[TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
- [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 18),
/* ----- TM ------ */
/* INTERRUPT ENABLE */
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 36b0b52db524..0627d8615c30 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -85,11 +85,6 @@ static int tsens_register(struct tsens_priv *priv)
struct thermal_zone_device *tzd;
for (i = 0; i < priv->num_sensors; i++) {
- if (!is_sensor_enabled(priv, priv->sensor[i].hw_id)) {
- dev_err(priv->dev, "sensor %d: disabled\n",
- priv->sensor[i].hw_id);
- continue;
- }
priv->sensor[i].priv = priv;
priv->sensor[i].id = i;
tzd = devm_thermal_zone_of_sensor_register(priv->dev, i,
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index eefe3844fb4e..2fd94997245b 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -315,7 +315,6 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mo
int init_common(struct tsens_priv *priv);
int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp);
int get_temp_common(struct tsens_priv *priv, int i, int *temp);
-bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id);
/* TSENS target */
extern const struct tsens_plat_data data_8960;
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index c75549928656..57592a6b5c9e 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -531,7 +531,6 @@ out:
int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
- struct pvcalls_bedata *bedata;
struct sock_mapping *map;
int sent, tot_sent = 0;
int count = 0, flags;
@@ -543,7 +542,6 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
map = pvcalls_enter_sock(sock);
if (IS_ERR(map))
return PTR_ERR(map);
- bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
mutex_lock(&map->active.out_mutex);
if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
@@ -626,7 +624,6 @@ out:
int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
- struct pvcalls_bedata *bedata;
int ret;
struct sock_mapping *map;
@@ -636,7 +633,6 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
map = pvcalls_enter_sock(sock);
if (IS_ERR(map))
return PTR_ERR(map);
- bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
mutex_lock(&map->active.in_mutex);
if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 092981171df1..d75a2385b37c 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -83,6 +83,7 @@ struct xb_req_data {
int num_vecs;
int err;
enum xb_req_state state;
+ bool user_req;
void (*cb)(struct xb_req_data *);
void *par;
};
@@ -133,4 +134,6 @@ void xenbus_ring_ops_init(void);
int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par);
void xenbus_dev_queue_reply(struct xb_req_data *req);
+extern unsigned int xb_dev_generation_id;
+
#endif
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index faf452d0edf0..08adc590f631 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -62,6 +62,8 @@
#include "xenbus.h"
+unsigned int xb_dev_generation_id;
+
/*
* An element of a list of outstanding transactions, for which we're
* still waiting a reply.
@@ -69,6 +71,7 @@
struct xenbus_transaction_holder {
struct list_head list;
struct xenbus_transaction handle;
+ unsigned int generation_id;
};
/*
@@ -441,6 +444,7 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
+ trans->generation_id = xb_dev_generation_id;
list_add(&trans->list, &u->transactions);
} else if (msg->hdr.tx_id != 0 &&
!xenbus_get_transaction(u, msg->hdr.tx_id))
@@ -449,6 +453,20 @@ static int xenbus_write_transaction(unsigned msg_type,
!(msg->hdr.len == 2 &&
(!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
return xenbus_command_reply(u, XS_ERROR, "EINVAL");
+ else if (msg_type == XS_TRANSACTION_END) {
+ trans = xenbus_get_transaction(u, msg->hdr.tx_id);
+ if (trans && trans->generation_id != xb_dev_generation_id) {
+ list_del(&trans->list);
+ kfree(trans);
+ if (!strcmp(msg->body, "T"))
+ return xenbus_command_reply(u, XS_ERROR,
+ "EAGAIN");
+ else
+ return xenbus_command_reply(u,
+ XS_TRANSACTION_END,
+ "OK");
+ }
+ }
rc = xenbus_dev_request_and_reply(&msg->hdr, u);
if (rc && trans) {
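The generation counter exists so the frontend can spot transactions that straddle a suspend/resume cycle: the global is bumped on resume, and an XS_TRANSACTION_END whose recorded generation no longer matches is answered locally, with EAGAIN for a commit. A minimal sketch of that staleness test (types and names assumed):

#include <stdio.h>

static unsigned int generation;	/* bumped on every resume */

struct transaction { unsigned int generation_id; };

static const char *end_transaction(const struct transaction *t, int commit)
{
	if (t->generation_id != generation)
		return commit ? "EAGAIN" : "OK";	/* stale transaction */
	return "forwarded to xenstore";
}

int main(void)
{
	struct transaction t = { .generation_id = generation };

	printf("before suspend: %s\n", end_transaction(&t, 1));
	generation++;	/* what xs_suspend_exit() now does */
	printf("after resume:   %s\n", end_transaction(&t, 1));
	return 0;
}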
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 49a3874ae6bb..ddc18da61834 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -105,6 +105,7 @@ static void xs_suspend_enter(void)
static void xs_suspend_exit(void)
{
+ xb_dev_generation_id++;
spin_lock(&xs_state_lock);
xs_suspend_active--;
spin_unlock(&xs_state_lock);
@@ -125,7 +126,7 @@ static uint32_t xs_request_enter(struct xb_req_data *req)
spin_lock(&xs_state_lock);
}
- if (req->type == XS_TRANSACTION_START)
+ if (req->type == XS_TRANSACTION_START && !req->user_req)
xs_state_users++;
xs_state_users++;
rq_id = xs_request_id++;
@@ -140,7 +141,7 @@ void xs_request_exit(struct xb_req_data *req)
spin_lock(&xs_state_lock);
xs_state_users--;
if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
- (req->type == XS_TRANSACTION_END &&
+ (req->type == XS_TRANSACTION_END && !req->user_req &&
!WARN_ON_ONCE(req->msg.type == XS_ERROR &&
!strcmp(req->body, "ENOENT"))))
xs_state_users--;
@@ -286,6 +287,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
req->num_vecs = 1;
req->cb = xenbus_dev_queue_reply;
req->par = par;
+ req->user_req = true;
xs_send(req, msg);
@@ -313,6 +315,7 @@ static void *xs_talkv(struct xenbus_transaction t,
req->vec = iovec;
req->num_vecs = num_vecs;
req->cb = xs_wake_up;
+ req->user_req = false;
msg.req_id = 0;
msg.tx_id = t.id;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e6886c93c89d..749f5984425d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1406,20 +1406,27 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
*/
int revalidate_disk(struct gendisk *disk)
{
- struct block_device *bdev;
int ret = 0;
if (disk->fops->revalidate_disk)
ret = disk->fops->revalidate_disk(disk);
- bdev = bdget_disk(disk, 0);
- if (!bdev)
- return ret;
- mutex_lock(&bdev->bd_mutex);
- check_disk_size_change(disk, bdev, ret == 0);
- bdev->bd_invalidated = 0;
- mutex_unlock(&bdev->bd_mutex);
- bdput(bdev);
+ /*
+	 * Hidden disks don't have an associated bdev, so there's no point
+	 * in revalidating them.
+ */
+ if (!(disk->flags & GENHD_FL_HIDDEN)) {
+ struct block_device *bdev = bdget_disk(disk, 0);
+
+ if (!bdev)
+ return ret;
+
+ mutex_lock(&bdev->bd_mutex);
+ check_disk_size_change(disk, bdev, ret == 0);
+ bdev->bd_invalidated = 0;
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdev);
+ }
return ret;
}
EXPORT_SYMBOL(revalidate_disk);
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 85dc89d3a203..e3e1c13df439 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -132,7 +132,7 @@ static inline void flush_cache_ent(struct dfs_cache_entry *ce)
return;
hlist_del_init_rcu(&ce->ce_hlist);
- kfree(ce->ce_path);
+ kfree_const(ce->ce_path);
free_tgts(ce);
dfs_cache_count--;
call_rcu(&ce->ce_rcu, free_cache_entry);
@@ -422,7 +422,7 @@ alloc_cache_entry(const char *path, const struct dfs_info3_param *refs,
rc = copy_ref_data(refs, numrefs, ce, NULL);
if (rc) {
- kfree(ce->ce_path);
+ kfree_const(ce->ce_path);
kmem_cache_free(dfs_cache_slab, ce);
ce = ERR_PTR(rc);
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ce9a5be11df5..06e27ac6d82c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3216,7 +3216,9 @@ cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
}
if (rc) {
- for (i = 0; i < nr_pages; i++) {
+ unsigned int nr_page_failed = i;
+
+ for (i = 0; i < nr_page_failed; i++) {
put_page(rdata->pages[i]);
rdata->pages[i] = NULL;
}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 710ceb875161..29b699d532ef 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1054,7 +1054,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
* not supported error. Client should accept it.
*/
cifs_dbg(VFS, "Server does not support validate negotiate\n");
- return 0;
+ rc = 0;
+ goto out_free_inbuf;
} else if (rc != 0) {
cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
rc = -EIO;
@@ -2619,10 +2620,12 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
void
SMB2_ioctl_free(struct smb_rqst *rqst)
{
+ int i;
if (rqst && rqst->rq_iov) {
cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
- if (rqst->rq_iov[1].iov_len)
- kfree(rqst->rq_iov[1].iov_base);
+ for (i = 1; i < rqst->rq_nvec; i++)
+ if (rqst->rq_iov[i].iov_base != smb2_padding)
+ kfree(rqst->rq_iov[i].iov_base);
}
}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 310f8d17c53e..0fbb486a320e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2616,7 +2616,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
ret = io_copy_iov(ctx, &iov, arg, i);
if (ret)
- break;
+ goto err;
/*
* Don't impose further limits on the size and buffer
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 9846f7e95282..7147e4aebecc 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -127,7 +127,7 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
locks_init_lock(fl);
fl->fl_owner = current->files;
- fl->fl_pid = current->tgid;
+ fl->fl_pid = (pid_t)lock->svid;
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK; /* as good as anything else */
start = ntohl(*p++);
@@ -269,7 +269,7 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
memset(lock, 0, sizeof(*lock));
locks_init_lock(&lock->fl);
lock->svid = ~(u32) 0;
- lock->fl.fl_pid = current->tgid;
+ lock->fl.fl_pid = (pid_t)lock->svid;
if (!(p = nlm_decode_cookie(p, &argp->cookie))
|| !(p = xdr_decode_string_inplace(p, &lock->caller,
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 70154f376695..7ed9edf9aed4 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -119,7 +119,7 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
locks_init_lock(fl);
fl->fl_owner = current->files;
- fl->fl_pid = current->tgid;
+ fl->fl_pid = (pid_t)lock->svid;
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK; /* as good as anything else */
p = xdr_decode_hyper(p, &start);
@@ -266,7 +266,7 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
memset(lock, 0, sizeof(*lock));
locks_init_lock(&lock->fl);
lock->svid = ~(u32) 0;
- lock->fl.fl_pid = current->tgid;
+ lock->fl.fl_pid = (pid_t)lock->svid;
if (!(p = nlm4_decode_cookie(p, &argp->cookie))
|| !(p = xdr_decode_string_inplace(p, &lock->caller,
diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
index f65f2b2f594d..1906cc962c4d 100644
--- a/fs/ocfs2/filecheck.c
+++ b/fs/ocfs2/filecheck.c
@@ -193,6 +193,7 @@ int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb)
ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck,
NULL, "filecheck");
if (ret) {
+ kobject_put(&entry->fs_kobj);
kfree(fcheck);
return ret;
}
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 77258d276f93..11e215d7937e 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -89,6 +89,11 @@ enum {
* Enable cpuset controller in v1 cgroup to use v2 behavior.
*/
CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
+
+ /*
+ * Enable legacy local memory.events.
+ */
+ CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),
};
/* cftype->flags */
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index 3a91130a4fbd..02393c0c98f9 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -2,7 +2,7 @@
#define _LINUX_GENERIC_RADIX_TREE_H
/**
- * DOC: Generic radix trees/sparse arrays:
+ * DOC: Generic radix trees/sparse arrays
*
* Very simple and minimalistic, supporting arbitrary size entries up to
* PAGE_SIZE.
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index aa5efd9351eb..d5ceb2839a2d 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -54,6 +54,7 @@ struct list_lru {
#ifdef CONFIG_MEMCG_KMEM
struct list_head list;
int shrinker_id;
+ bool memcg_aware;
#endif
};
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 73fe0a700911..edf9e8f32d70 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -737,8 +737,14 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
static inline void memcg_memory_event(struct mem_cgroup *memcg,
enum memcg_memory_event event)
{
- atomic_long_inc(&memcg->memory_events[event]);
- cgroup_file_notify(&memcg->events_file);
+ do {
+ atomic_long_inc(&memcg->memory_events[event]);
+ cgroup_file_notify(&memcg->events_file);
+
+ if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+ break;
+ } while ((memcg = parent_mem_cgroup(memcg)) &&
+ !mem_cgroup_is_root(memcg));
}
static inline void memcg_memory_event_mm(struct mm_struct *mm,
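The loop makes the counters hierarchical: an event is charged to the cgroup it happened in and to every ancestor, unless memory_localevents restores the old single-level accounting. A userspace sketch of the walk (parent pointers and the flag are assumed; the kernel additionally stops before the root cgroup):

#include <stdbool.h>
#include <stdio.h>

struct cgroup { const char *name; long events; struct cgroup *parent; };

static bool local_events;	/* stands in for CGRP_ROOT_MEMORY_LOCAL_EVENTS */

static void memory_event(struct cgroup *cg)
{
	do {
		cg->events++;
		printf("notify %s (events=%ld)\n", cg->name, cg->events);
		if (local_events)
			break;	/* legacy: count only where it happened */
	} while ((cg = cg->parent));	/* otherwise walk toward the root */
}

int main(void)
{
	struct cgroup root = { "root", 0, NULL };
	struct cgroup child = { "child", 0, &root };

	memory_event(&child);	/* hierarchical: both counters move */
	local_events = true;
	memory_event(&child);	/* local: only the child moves */
	return 0;
}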
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4a5a84d7bdd4..dd436da7eccc 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -344,6 +344,7 @@ struct pci_dev {
D3cold, not set for devices
powered on/off by the
corresponding bridge */
+ unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
controlled exclusively by
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 6b3ea9ea6a9e..4a2ffd678887 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -359,7 +359,7 @@ extern void mark_free_pages(struct zone *zone);
* platforms which require special recovery actions in that situation.
*/
struct platform_hibernation_ops {
- int (*begin)(void);
+ int (*begin)(pm_message_t stage);
void (*end)(void);
int (*pre_snapshot)(void);
void (*finish)(void);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 217cec4e22c6..426a0026225c 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1810,11 +1810,13 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
enum cgroup2_param {
Opt_nsdelegate,
+ Opt_memory_localevents,
nr__cgroup2_params
};
static const struct fs_parameter_spec cgroup2_param_specs[] = {
- fsparam_flag ("nsdelegate", Opt_nsdelegate),
+ fsparam_flag("nsdelegate", Opt_nsdelegate),
+ fsparam_flag("memory_localevents", Opt_memory_localevents),
{}
};
@@ -1837,6 +1839,9 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
case Opt_nsdelegate:
ctx->flags |= CGRP_ROOT_NS_DELEGATE;
return 0;
+ case Opt_memory_localevents:
+ ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
+ return 0;
}
return -EINVAL;
}
@@ -1848,6 +1853,11 @@ static void apply_cgroup_root_flags(unsigned int root_flags)
cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
else
cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
+
+ if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+ cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
+ else
+ cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
}
}
@@ -1855,6 +1865,8 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
{
if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
seq_puts(seq, ",nsdelegate");
+ if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+ seq_puts(seq, ",memory_localevents");
return 0;
}
@@ -6325,7 +6337,7 @@ static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);
static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "nsdelegate\n");
+ return snprintf(buf, PAGE_SIZE, "nsdelegate\nmemory_localevents\n");
}
static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
diff --git a/kernel/fork.c b/kernel/fork.c
index b2b87d450b80..75675b9bf6df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -123,7 +123,7 @@
unsigned long total_forks; /* Handle normal Linux uptimes. */
int nr_threads; /* The idle threads do not count.. */
-int max_threads; /* tunable limit on nr_threads */
+static int max_threads; /* tunable limit on nr_threads */
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c8c272df7154..97522630b1b6 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -129,7 +129,7 @@ static int hibernation_test(int level) { return 0; }
static int platform_begin(int platform_mode)
{
return (platform_mode && hibernation_ops) ?
- hibernation_ops->begin() : 0;
+ hibernation_ops->begin(PMSG_FREEZE) : 0;
}
/**
@@ -542,7 +542,7 @@ int hibernation_platform_enter(void)
* hibernation_ops->finish() before saving the image, so we should let
* the firmware know that we're going to enter the sleep state after all
*/
- error = hibernation_ops->begin();
+ error = hibernation_ops->begin(PMSG_HIBERNATE);
if (error)
goto Close;
diff --git a/kernel/signal.c b/kernel/signal.c
index d7b9d14ac80d..328a01e1a2f0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2485,6 +2485,8 @@ relock:
if (signal_group_exit(signal)) {
ksig->info.si_signo = signr = SIGKILL;
sigdelset(&current->pending.signal, SIGKILL);
+ trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
+ &sighand->action[SIGKILL - 1]);
recalc_sigpending();
goto fatal;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index bdbfe8d37418..2969304c29fe 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1882,13 +1882,14 @@ exit_err:
}
/*
+ * Check arithmetic relations of passed addresses.
+ *
* WARNING: we don't require any capability here so be very careful
* in what is allowed for modification from userspace.
*/
-static int validate_prctl_map(struct prctl_mm_map *prctl_map)
+static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
unsigned long mmap_max_addr = TASK_SIZE;
- struct mm_struct *mm = current->mm;
int error = -EINVAL, i;
static const unsigned char offsets[] = {
@@ -1949,24 +1950,6 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
prctl_map->start_data))
goto out;
- /*
- * Someone is trying to cheat the auxv vector.
- */
- if (prctl_map->auxv_size) {
- if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
- goto out;
- }
-
- /*
- * Finally, make sure the caller has the rights to
- * change /proc/pid/exe link: only local sys admin should
- * be allowed to.
- */
- if (prctl_map->exe_fd != (u32)-1) {
- if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
- goto out;
- }
-
error = 0;
out:
return error;
@@ -1993,11 +1976,18 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
return -EFAULT;
- error = validate_prctl_map(&prctl_map);
+ error = validate_prctl_map_addr(&prctl_map);
if (error)
return error;
if (prctl_map.auxv_size) {
+ /*
+ * Someone is trying to cheat the auxv vector.
+ */
+ if (!prctl_map.auxv ||
+ prctl_map.auxv_size > sizeof(mm->saved_auxv))
+ return -EINVAL;
+
memset(user_auxv, 0, sizeof(user_auxv));
if (copy_from_user(user_auxv,
(const void __user *)prctl_map.auxv,
@@ -2010,6 +2000,14 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
}
if (prctl_map.exe_fd != (u32)-1) {
+ /*
+ * Make sure the caller has the rights to
+ * change /proc/pid/exe link: only local sys admin should
+ * be allowed to.
+ */
+ if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
+ return -EINVAL;
+
error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
if (error)
return error;
@@ -2097,7 +2095,11 @@ static int prctl_set_mm(int opt, unsigned long addr,
unsigned long arg4, unsigned long arg5)
{
struct mm_struct *mm = current->mm;
- struct prctl_mm_map prctl_map;
+ struct prctl_mm_map prctl_map = {
+ .auxv = NULL,
+ .auxv_size = 0,
+ .exe_fd = -1,
+ };
struct vm_area_struct *vma;
int error;
@@ -2125,9 +2127,15 @@ static int prctl_set_mm(int opt, unsigned long addr,
error = -EINVAL;
- down_write(&mm->mmap_sem);
+ /*
+	 * arg_lock protects concurrent updates of arg boundaries; we need
+ * mmap_sem for a) concurrent sys_brk, b) finding VMA for addr
+ * validation.
+ */
+ down_read(&mm->mmap_sem);
vma = find_vma(mm, addr);
+ spin_lock(&mm->arg_lock);
prctl_map.start_code = mm->start_code;
prctl_map.end_code = mm->end_code;
prctl_map.start_data = mm->start_data;
@@ -2139,9 +2147,6 @@ static int prctl_set_mm(int opt, unsigned long addr,
prctl_map.arg_end = mm->arg_end;
prctl_map.env_start = mm->env_start;
prctl_map.env_end = mm->env_end;
- prctl_map.auxv = NULL;
- prctl_map.auxv_size = 0;
- prctl_map.exe_fd = -1;
switch (opt) {
case PR_SET_MM_START_CODE:
@@ -2181,7 +2186,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
goto out;
}
- error = validate_prctl_map(&prctl_map);
+ error = validate_prctl_map_addr(&prctl_map);
if (error)
goto out;
@@ -2218,7 +2223,8 @@ static int prctl_set_mm(int opt, unsigned long addr,
error = 0;
out:
- up_write(&mm->mmap_sem);
+ spin_unlock(&mm->arg_lock);
+ up_read(&mm->mmap_sem);
return error;
}
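A hedged userspace sketch of the PR_SET_MM_MAP path reworked above; it needs CONFIG_CHECKPOINT_RESTORE and CAP_SYS_RESOURCE, and the field values are entirely the caller's responsibility:

    /* Hypothetical caller of the path patched above. auxv may be NULL
     * with auxv_size == 0, and exe_fd == -1 skips the /proc/pid/exe
     * update together with its CAP_SYS_ADMIN check. */
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    static int set_mm_map(const struct prctl_mm_map *map)
    {
            return prctl(PR_SET_MM, PR_SET_MM_MAP,
                         (unsigned long)map, sizeof(*map), 0);
    }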
diff --git a/lib/sort.c b/lib/sort.c
index 50855ea8c262..cf408aec3733 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -43,8 +43,9 @@ static bool is_aligned(const void *base, size_t size, unsigned char align)
/**
* swap_words_32 - swap two elements in 32-bit chunks
- * @a, @b: pointers to the elements
- * @size: element size (must be a multiple of 4)
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 4)
*
* Exchange the two objects in memory. This exploits base+index addressing,
* which basically all CPUs have, to minimize loop overhead computations.
@@ -65,8 +66,9 @@ static void swap_words_32(void *a, void *b, size_t n)
/**
* swap_words_64 - swap two elements in 64-bit chunks
- * @a, @b: pointers to the elements
- * @size: element size (must be a multiple of 8)
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 8)
*
* Exchange the two objects in memory. This exploits base+index
* addressing, which basically all CPUs have, to minimize loop overhead
@@ -100,8 +102,9 @@ static void swap_words_64(void *a, void *b, size_t n)
/**
* swap_bytes - swap two elements a byte at a time
- * @a, @b: pointers to the elements
- * @size: element size
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size
*
* This is the fallback if alignment doesn't allow using larger chunks.
*/
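For reference, a sketch of the swap loop these kernel-doc blocks describe, consistent with the renamed @n parameter; not necessarily the exact in-tree body:

    /* Swap two objects in 32-bit chunks, counting n down in 4-byte
     * steps so the compiler can use base+index addressing. */
    static void swap_words_32_sketch(void *a, void *b, size_t n)
    {
            do {
                    u32 t = *(u32 *)(a + (n -= 4));
                    *(u32 *)(a + n) = *(u32 *)(b + n);
                    *(u32 *)(b + n) = t;
            } while (n);
    }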
diff --git a/mm/compaction.c b/mm/compaction.c
index 9febc8cc84e7..9e1b9acb116b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1399,7 +1399,7 @@ fast_isolate_freepages(struct compact_control *cc)
page = pfn_to_page(highest);
cc->free_pfn = highest;
} else {
- if (cc->direct_compaction) {
+ if (cc->direct_compaction && pfn_valid(min_pfn)) {
page = pfn_to_page(min_pfn);
cc->free_pfn = min_pfn;
}
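The guard added here, in isolation: a pfn computed by rounding can land in a memory hole with no backing struct page, so it must be validated before translation (sketch):

    /* Sketch: never call pfn_to_page() on a pfn that may sit in a
     * hole; pfn_valid() confirms a struct page exists for it. */
    if (pfn_valid(min_pfn))
            page = pfn_to_page(min_pfn);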
diff --git a/mm/gup.c b/mm/gup.c
index f173fcbaf1b2..ddde097cf9e4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1042,10 +1042,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
BUG_ON(ret >= nr_pages);
}
- if (!pages)
- /* If it's a prefault don't insist harder */
- return ret;
-
if (ret > 0) {
nr_pages -= ret;
pages_done += ret;
@@ -1061,8 +1057,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
pages_done = ret;
break;
}
- /* VM_FAULT_RETRY triggered, so seek to the faulting offset */
- pages += ret;
+ /*
+ * VM_FAULT_RETRY triggered, so seek to the faulting offset.
+ * For the prefault case (!pages) we only update counts.
+ */
+ if (likely(pages))
+ pages += ret;
start += ret << PAGE_SHIFT;
/*
@@ -1085,7 +1085,8 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
pages_done++;
if (!nr_pages)
break;
- pages++;
+ if (likely(pages))
+ pages++;
start += PAGE_SIZE;
}
if (lock_dropped && *locked) {
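A hedged view of the prefault case the new comments describe: with a NULL pages array the loop advances only start and the counters. The call below is illustrative, not taken from this patch:

    /* Hypothetical prefault-style call: populate the range without
     * collecting struct page pointers (pages == NULL). */
    long ret = get_user_pages(start, nr_pages, FOLL_WRITE,
                              NULL /* pages */, NULL /* vmas */);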
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 36afcf64e016..242fdc01aaa9 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -464,7 +464,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
{
unsigned long redzone_start;
unsigned long redzone_end;
- u8 tag;
+ u8 tag = 0xff;
if (gfpflags_allow_blocking(flags))
quarantine_reduce();
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 0bdf3152735e..e4709fdaa8e6 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -38,11 +38,7 @@ static int lru_shrinker_id(struct list_lru *lru)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
- /*
- * This needs node 0 to be always present, even
- * in the systems supporting sparse numa ids.
- */
- return !!lru->node[0].memcg_lrus;
+ return lru->memcg_aware;
}
static inline struct list_lru_one *
@@ -452,6 +448,8 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
int i;
+ lru->memcg_aware = memcg_aware;
+
if (!memcg_aware)
return 0;
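A sketch of the state the rewritten helper reads: the flag is recorded once at init time, so the memcg-awareness check no longer dereferences node[0], which systems with sparse NUMA ids may not populate. Field placement below is assumed, not quoted:

    struct list_lru {
            struct list_lru_node *node;
            /* ... */
            bool memcg_aware;       /* assumed placement of the new flag */
    };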
diff --git a/mm/util.c b/mm/util.c
index 91682a2090ee..9834c4ab7d8e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -718,12 +718,12 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
if (!mm->arg_end)
goto out_mm; /* Shh! No looking before we're done */
- down_read(&mm->mmap_sem);
+ spin_lock(&mm->arg_lock);
arg_start = mm->arg_start;
arg_end = mm->arg_end;
env_start = mm->env_start;
env_end = mm->env_end;
- up_read(&mm->mmap_sem);
+ spin_unlock(&mm->arg_lock);
len = arg_end - arg_start;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 233af6936c93..7350a124524b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -815,7 +815,7 @@ find_vmap_lowest_match(unsigned long size,
}
/*
- * OK. We roll back and find the fist right sub-tree,
+ * OK. We roll back and find the first right sub-tree,
* that will satisfy the search criteria. It can happen
* only once due to "vstart" restriction.
*/
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 99be52c5ca45..985732c8b025 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -190,10 +190,11 @@ static int size_to_chunks(size_t size)
static void compact_page_work(struct work_struct *w);
-static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
+static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
+ gfp_t gfp)
{
struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
- GFP_KERNEL);
+ gfp);
if (slots) {
memset(slots->slot, 0, sizeof(slots->slot));
@@ -295,10 +296,10 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
- struct z3fold_pool *pool)
+ struct z3fold_pool *pool, gfp_t gfp)
{
struct z3fold_header *zhdr = page_address(page);
- struct z3fold_buddy_slots *slots = alloc_slots(pool);
+ struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
if (!slots)
return NULL;
@@ -912,7 +913,7 @@ retry:
if (!page)
return -ENOMEM;
- zhdr = init_z3fold_page(page, pool);
+ zhdr = init_z3fold_page(page, pool, gfp);
if (!zhdr) {
__free_page(page);
return -ENOMEM;
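A hedged caller-side view of why gfp now threads through: z3fold is normally reached via the zpool API, and a non-sleeping request must not block inside the handle-slot allocation. The names below are placeholders:

    /* Hypothetical atomic-context request: with gfp propagated into
     * alloc_slots(), GFP_NOWAIT can no longer turn into a sleeping
     * GFP_KERNEL kmem_cache_alloc() for the handle slots. */
    unsigned long handle;
    int err = zpool_malloc(zpool, size, GFP_NOWAIT, &handle);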
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 552d5efd7cb7..17f06079a712 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -150,8 +150,12 @@ void print_gimple_expr(FILE *, gimple, int, int);
void dump_gimple_stmt(pretty_printer *, gimple, int, int);
#endif
+#ifndef __unused
#define __unused __attribute__((__unused__))
+#endif
+#ifndef __visible
#define __visible __attribute__((visibility("default")))
+#endif
#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index 1d73083da6cb..2efbec6b6b8d 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -40,7 +40,8 @@
import gdb
/* linux/clk-provider.h */
-LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
+if IS_BUILTIN(CONFIG_COMMON_CLK):
+ LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
/* linux/fs.h */
LX_VALUE(SB_RDONLY)
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 33df646618e2..6374e078a5f2 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -32,7 +32,8 @@ class SPDXdata(object):
def read_spdxdata(repo):
# The subdirectories of LICENSES in the kernel source
- license_dirs = [ "preferred", "deprecated", "exceptions", "dual" ]
+ # Note: exceptions must be parsed as the last directory.
+ license_dirs = [ "preferred", "dual", "deprecated", "exceptions" ]
lictree = repo.head.commit.tree['LICENSES']
spdx = SPDXdata()
@@ -58,13 +59,13 @@ def read_spdxdata(repo):
elif l.startswith('SPDX-Licenses:'):
for lic in l.split(':')[1].upper().strip().replace(' ', '').replace('\t', '').split(','):
if not lic in spdx.licenses:
- raise SPDXException(None, 'Exception %s missing license %s' %(ex, lic))
+ raise SPDXException(None, 'Exception %s missing license %s' %(exception, lic))
spdx.exceptions[exception].append(lic)
elif l.startswith("License-Text:"):
if exception:
if not len(spdx.exceptions[exception]):
- raise SPDXException(el, 'Exception %s is missing SPDX-Licenses' %excid)
+ raise SPDXException(el, 'Exception %s is missing SPDX-Licenses' %exception)
spdx.exception_files += 1
else:
spdx.license_files += 1
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index e11564eb645b..82a38e801ee4 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -89,6 +89,9 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
tfm = &hmac_tfm;
algo = evm_hmac;
} else {
+ if (hash_algo >= HASH_ALGO__LAST)
+ return ERR_PTR(-EINVAL);
+
tfm = &evm_tfm[hash_algo];
algo = hash_algo_name[hash_algo];
}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index e0cc323f948f..1cc822a59054 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -498,10 +498,11 @@ static void add_rules(struct ima_rule_entry *entries, int count,
list_add_tail(&entry->list, &ima_policy_rules);
}
- if (entries[i].action == APPRAISE)
+ if (entries[i].action == APPRAISE) {
temp_ima_appraise |= ima_appraise_flag(entries[i].func);
- if (entries[i].func == POLICY_CHECK)
- temp_ima_appraise |= IMA_APPRAISE_POLICY;
+ if (entries[i].func == POLICY_CHECK)
+ temp_ima_appraise |= IMA_APPRAISE_POLICY;
+ }
}
}
@@ -1146,10 +1147,10 @@ enum {
};
static const char *const mask_tokens[] = {
- "MAY_EXEC",
- "MAY_WRITE",
- "MAY_READ",
- "MAY_APPEND"
+ "^MAY_EXEC",
+ "^MAY_WRITE",
+ "^MAY_READ",
+ "^MAY_APPEND"
};
#define __ima_hook_stringify(str) (#str),
@@ -1209,6 +1210,7 @@ int ima_policy_show(struct seq_file *m, void *v)
struct ima_rule_entry *entry = v;
int i;
char tbuf[64] = {0,};
+ int offset = 0;
rcu_read_lock();
@@ -1232,15 +1234,17 @@ int ima_policy_show(struct seq_file *m, void *v)
if (entry->flags & IMA_FUNC)
policy_func_show(m, entry->func);
- if (entry->flags & IMA_MASK) {
+ if ((entry->flags & IMA_MASK) || (entry->flags & IMA_INMASK)) {
+ if (entry->flags & IMA_MASK)
+ offset = 1;
if (entry->mask & MAY_EXEC)
- seq_printf(m, pt(Opt_mask), mt(mask_exec));
+ seq_printf(m, pt(Opt_mask), mt(mask_exec) + offset);
if (entry->mask & MAY_WRITE)
- seq_printf(m, pt(Opt_mask), mt(mask_write));
+ seq_printf(m, pt(Opt_mask), mt(mask_write) + offset);
if (entry->mask & MAY_READ)
- seq_printf(m, pt(Opt_mask), mt(mask_read));
+ seq_printf(m, pt(Opt_mask), mt(mask_read) + offset);
if (entry->mask & MAY_APPEND)
- seq_printf(m, pt(Opt_mask), mt(mask_append));
+ seq_printf(m, pt(Opt_mask), mt(mask_append) + offset);
seq_puts(m, " ");
}
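An illustration of the offset trick above, with hypothetical values: the shared token table now stores the '^' used by in-mask rules, and exact-match printing skips it by starting one byte into the string:

    /* Illustration only: one token string serves both rule kinds. */
    static const char *tok = "^MAY_EXEC";
    const char *exact  = tok + 1;   /* "MAY_EXEC"  for IMA_MASK   */
    const char *inmask = tok;       /* "^MAY_EXEC" for IMA_INMASK */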
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 90cedebaeb94..7eeebe5e9da2 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -224,6 +224,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ case KVM_CAP_MAX_VCPU_ID:
+ r = KVM_MAX_VCPU_ID;
+ break;
case KVM_CAP_MSI_DEVID:
if (!kvm)
r = -EINVAL;
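A hedged userspace probe of the capability now answered per-architecture above; vm_fd is a placeholder for an open KVM VM file descriptor:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* KVM_CHECK_EXTENSION returns the capability's value directly. */
    int max_vcpu_id = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPU_ID);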
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 134ec0283a8a..ca54b09adf5b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1795,8 +1795,10 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
if (map->page)
kunmap(map->page);
+#ifdef CONFIG_HAS_IOMEM
else
memunmap(map->hva);
+#endif
if (dirty) {
kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
@@ -3149,8 +3151,6 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_MULTI_ADDRESS_SPACE:
return KVM_ADDRESS_SPACE_NUM;
#endif
- case KVM_CAP_MAX_VCPU_ID:
- return KVM_MAX_VCPU_ID;
case KVM_CAP_NR_MEMSLOTS:
return KVM_USER_MEM_SLOTS;
default: