-rw-r--r--  Documentation/ABI/testing/sysfs-devices-platform-dock | 39
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu | 77
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-dptf | 40
-rw-r--r--  Documentation/atomic_bitops.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/power/mti,mips-cpc.txt | 8
-rw-r--r--  Documentation/features/sched/membarrier-sync-core/arch-support.txt | 62
-rw-r--r--  Documentation/locking/mutex-design.txt | 49
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/arm64/include/asm/cputype.h | 2
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 26
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 4
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 44
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 23
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 9
-rw-r--r--  arch/arm64/kernel/efi.c | 2
-rw-r--r--  arch/arm64/kernel/hibernate.c | 148
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 4
-rw-r--r--  arch/arm64/mm/dump.c | 54
-rw-r--r--  arch/arm64/mm/fault.c | 44
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 94
-rw-r--r--  arch/arm64/mm/kasan_init.c | 70
-rw-r--r--  arch/arm64/mm/mmu.c | 282
-rw-r--r--  arch/arm64/mm/pageattr.c | 32
-rw-r--r--  arch/arm64/mm/proc.S | 14
-rw-r--r--  arch/ia64/kernel/Makefile | 1
-rw-r--r--  arch/mips/kernel/mips-cpc.c | 13
-rw-r--r--  arch/mips/kernel/setup.c | 16
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h | 3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-64k.h | 16
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h | 13
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgalloc.h | 16
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 4
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 2
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 12
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 6
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h | 1
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h | 1
-rw-r--r--  arch/powerpc/include/asm/topology.h | 13
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 6
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 6
-rw-r--r--  arch/powerpc/mm/drmem.c | 8
-rw-r--r--  arch/powerpc/mm/hash64_4k.c | 4
-rw-r--r--  arch/powerpc/mm/hash64_64k.c | 8
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 1
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c | 10
-rw-r--r--  arch/powerpc/mm/init-common.c | 4
-rw-r--r--  arch/powerpc/mm/numa.c | 5
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c | 117
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 4
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 9
-rw-r--r--  arch/powerpc/platforms/powernv/opal-imc.c | 6
-rw-r--r--  arch/powerpc/platforms/powernv/vas-window.c | 16
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 31
-rw-r--r--  arch/powerpc/sysdev/xive/spapr.c | 16
-rw-r--r--  arch/sparc/Kconfig | 2
-rw-r--r--  arch/x86/.gitignore | 1
-rw-r--r--  arch/x86/Kconfig | 75
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c | 10
-rw-r--r--  arch/x86/entry/calling.h | 107
-rw-r--r--  arch/x86/entry/entry_64.S | 92
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 30
-rw-r--r--  arch/x86/events/intel/core.c | 2
-rw-r--r--  arch/x86/events/intel/lbr.c | 2
-rw-r--r--  arch/x86/events/intel/p6.c | 2
-rw-r--r--  arch/x86/include/asm/acpi.h | 2
-rw-r--r--  arch/x86/include/asm/barrier.h | 2
-rw-r--r--  arch/x86/include/asm/bug.h | 19
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 79
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 14
-rw-r--r--  arch/x86/include/asm/page_64.h | 4
-rw-r--r--  arch/x86/include/asm/paravirt.h | 4
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable_32.h | 2
-rw-r--r--  arch/x86/include/asm/processor.h | 7
-rw-r--r--  arch/x86/include/asm/smp.h | 1
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 27
-rw-r--r--  arch/x86/kernel/amd_nb.c | 2
-rw-r--r--  arch/x86/kernel/apic/apic.c | 6
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 15
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c | 2
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 28
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 34
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 4
-rw-r--r--  arch/x86/kernel/cpu/common.c | 10
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 2
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 31
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-internal.h | 15
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 19
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 4
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 8
-rw-r--r--  arch/x86/kernel/head_32.S | 4
-rw-r--r--  arch/x86/kernel/mpparse.c | 2
-rw-r--r--  arch/x86/kernel/paravirt.c | 6
-rw-r--r--  arch/x86/kernel/smpboot.c | 11
-rw-r--r--  arch/x86/kernel/traps.c | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 10
-rw-r--r--  arch/x86/kvm/vmx.c | 9
-rw-r--r--  arch/x86/lib/cpu.c | 2
-rw-r--r--  arch/x86/lib/error-inject.c | 1
-rw-r--r--  arch/x86/mm/init_64.c | 6
-rw-r--r--  arch/x86/mm/ioremap.c | 2
-rw-r--r--  arch/x86/mm/kmmio.c | 2
-rw-r--r--  arch/x86/mm/pgtable_32.c | 2
-rw-r--r--  arch/x86/mm/tlb.c | 6
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 2
-rw-r--r--  arch/x86/xen/mmu_pv.c | 6
-rw-r--r--  arch/x86/xen/smp.c | 2
-rw-r--r--  block/blk-mq.c | 1
-rw-r--r--  crypto/sha3_generic.c | 218
-rw-r--r--  drivers/acpi/bus.c | 75
-rw-r--r--  drivers/acpi/ec.c | 6
-rw-r--r--  drivers/acpi/property.c | 4
-rw-r--r--  drivers/acpi/spcr.c | 1
-rw-r--r--  drivers/base/core.c | 3
-rw-r--r--  drivers/base/power/wakeirq.c | 6
-rw-r--r--  drivers/base/property.c | 5
-rw-r--r--  drivers/char/hw_random/via-rng.c | 2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/longhaul.c | 6
-rw-r--r--  drivers/cpufreq/p4-clockmod.c | 2
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 2
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c | 4
-rw-r--r--  drivers/cpufreq/speedstep-lib.c | 6
-rw-r--r--  drivers/crypto/caam/ctrl.c | 8
-rw-r--r--  drivers/crypto/padlock-aes.c | 2
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 6
-rw-r--r--  drivers/crypto/talitos.c | 4
-rw-r--r--  drivers/edac/amd64_edac.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cflgt3.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_cnl.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 231
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 105
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | 6
-rw-r--r--  drivers/hwmon/coretemp.c | 6
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 2
-rw-r--r--  drivers/hwmon/k10temp.c | 7
-rw-r--r--  drivers/hwmon/k8temp.c | 2
-rw-r--r--  drivers/macintosh/macio_asic.c | 1
-rw-r--r--  drivers/md/dm.c | 3
-rw-r--r--  drivers/misc/ocxl/file.c | 2
-rw-r--r--  drivers/mmc/host/bcm2835.c | 3
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 19
-rw-r--r--  drivers/mtd/nand/Kconfig | 2
-rw-r--r--  drivers/mtd/nand/vf610_nfc.c | 6
-rw-r--r--  drivers/nvme/host/core.c | 45
-rw-r--r--  drivers/nvme/host/fabrics.h | 9
-rw-r--r--  drivers/nvme/host/fc.c | 157
-rw-r--r--  drivers/nvme/host/nvme.h | 3
-rw-r--r--  drivers/nvme/host/pci.c | 39
-rw-r--r--  drivers/nvme/host/rdma.c | 16
-rw-r--r--  drivers/nvme/target/io-cmd.c | 7
-rw-r--r--  drivers/of/property.c | 4
-rw-r--r--  drivers/opp/cpu.c | 2
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 20
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 2
-rw-r--r--  drivers/platform/x86/wmi.c | 2
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 29
-rw-r--r--  drivers/usb/Kconfig | 6
-rw-r--r--  drivers/usb/host/Kconfig | 8
-rw-r--r--  drivers/video/fbdev/geode/video_gx.c | 2
-rw-r--r--  drivers/xen/pvcalls-front.c | 197
-rw-r--r--  drivers/xen/xenbus/xenbus.h | 1
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.c | 1
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 3
-rw-r--r--  fs/btrfs/backref.c | 11
-rw-r--r--  fs/btrfs/delayed-ref.c | 3
-rw-r--r--  fs/btrfs/extent-tree.c | 4
-rw-r--r--  fs/btrfs/inode.c | 41
-rw-r--r--  fs/btrfs/qgroup.c | 9
-rw-r--r--  fs/btrfs/tree-log.c | 32
-rw-r--r--  fs/btrfs/volumes.c | 1
-rw-r--r--  fs/gfs2/bmap.c | 43
-rw-r--r--  fs/proc/kcore.c | 4
-rw-r--r--  include/asm-generic/bitops/lock.h | 3
-rw-r--r--  include/linux/acpi.h | 4
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/compiler-gcc.h | 7
-rw-r--r--  include/linux/compiler.h | 4
-rw-r--r--  include/linux/cpuidle.h | 2
-rw-r--r--  include/linux/cpumask.h | 2
-rw-r--r--  include/linux/dma-mapping.h | 2
-rw-r--r--  include/linux/fwnode.h | 4
-rw-r--r--  include/linux/kcore.h | 1
-rw-r--r--  include/linux/mm_inline.h | 6
-rw-r--r--  include/linux/nospec.h | 36
-rw-r--r--  include/linux/property.h | 2
-rw-r--r--  include/linux/semaphore.h | 2
-rw-r--r--  include/sound/ac97/regs.h | 2
-rw-r--r--  include/trace/events/xen.h | 2
-rw-r--r--  kernel/locking/qspinlock.c | 21
-rw-r--r--  kernel/sched/core.c | 27
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 2
-rw-r--r--  kernel/sched/deadline.c | 6
-rw-r--r--  kernel/sched/rt.c | 3
-rw-r--r--  lib/dma-direct.c | 5
-rw-r--r--  mm/memory-failure.c | 2
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  net/9p/trans_virtio.c | 3
-rw-r--r--  sound/ac97/Kconfig | 1
-rw-r--r--  sound/core/seq/seq_clientmgr.c | 8
-rw-r--r--  sound/pci/hda/patch_realtek.c | 72
-rw-r--r--  sound/usb/mixer.c | 18
-rw-r--r--  sound/usb/pcm.c | 9
-rw-r--r--  sound/usb/quirks.c | 7
-rw-r--r--  tools/objtool/check.c | 53
-rw-r--r--  tools/objtool/check.h | 1
-rw-r--r--  tools/testing/selftests/powerpc/alignment/alignment_handler.c | 2
-rw-r--r--  tools/testing/selftests/x86/Makefile | 24
-rw-r--r--  tools/testing/selftests/x86/mpx-mini-test.c | 32
-rw-r--r--  tools/testing/selftests/x86/protection_keys.c | 28
-rw-r--r--  tools/testing/selftests/x86/single_step_syscall.c | 5
-rw-r--r--  tools/testing/selftests/x86/test_mremap_vdso.c | 4
-rw-r--r--  tools/testing/selftests/x86/test_vdso.c | 55
-rw-r--r--  tools/testing/selftests/x86/test_vsyscall.c | 11
234 files changed, 2656 insertions, 1715 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-dock b/Documentation/ABI/testing/sysfs-devices-platform-dock
new file mode 100644
index 000000000000..1d8c18f905c7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-platform-dock
@@ -0,0 +1,39 @@
+What: /sys/devices/platform/dock.N/docked
+Date: Dec, 2006
+KernelVersion: 2.6.19
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (RO) Value 1 or 0 indicates whether the software believes the
+ laptop is docked in a docking station.
+
+What: /sys/devices/platform/dock.N/undock
+Date: Dec, 2006
+KernelVersion: 2.6.19
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (WO) Writing to this file causes the software to initiate an
+ undock request to the firmware.
+
+What: /sys/devices/platform/dock.N/uid
+Date: Feb, 2007
+KernelVersion: v2.6.21
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (RO) Displays the docking station the laptop is docked to.
+
+What: /sys/devices/platform/dock.N/flags
+Date: May, 2007
+KernelVersion: v2.6.21
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (RO) Show dock station flags, useful for checking if undock
+ request has been made by the user (from the immediate_undock
+ option).
+
+What: /sys/devices/platform/dock.N/type
+Date: Aug, 2008
+KernelVersion: v2.6.27
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (RO) Display the dock station type: dock_station, ata_bay or
+ battery_bay.
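
(A minimal userspace sketch of driving the attributes above; not part of
the patch, and "dock.0" is a hypothetical stand-in for the dock.N
instance, which varies per system.)

    #include <stdio.h>
    #include <stdlib.h>

    #define DOCKED_ATTR "/sys/devices/platform/dock.0/docked"
    #define UNDOCK_ATTR "/sys/devices/platform/dock.0/undock"

    int main(void)
    {
        FILE *f = fopen(DOCKED_ATTR, "r");
        int docked = 0;

        if (!f)
            return EXIT_FAILURE;
        if (fscanf(f, "%d", &docked) != 1)
            docked = 0;
        fclose(f);

        if (docked) {
            /* any write to 'undock' asks the firmware to undock */
            f = fopen(UNDOCK_ATTR, "w");
            if (!f)
                return EXIT_FAILURE;
            fputs("1\n", f);
            fclose(f);
        }
        return EXIT_SUCCESS;
    }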
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index bfd29bc8d37a..4ed63b6cfb15 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -108,6 +108,8 @@ Description: CPU topology files that describe a logical CPU's relationship
What: /sys/devices/system/cpu/cpuidle/current_driver
/sys/devices/system/cpu/cpuidle/current_governor_ro
+ /sys/devices/system/cpu/cpuidle/available_governors
+ /sys/devices/system/cpu/cpuidle/current_governor
Date: September 2007
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Discover cpuidle policy and mechanism
@@ -119,13 +121,84 @@ Description: Discover cpuidle policy and mechanism
Idle policy (governor) is differentiated from idle mechanism
(driver)
- current_driver: displays current idle mechanism
+ current_driver: (RO) displays current idle mechanism
- current_governor_ro: displays current idle policy
+ current_governor_ro: (RO) displays current idle policy
+
+ With the cpuidle_sysfs_switch boot option enabled (meant for
+ developer testing), the following three attributes are visible
+ instead:
+
+ current_driver: same as described above
+
+ available_governors: (RO) displays a space separated list of
+ available governors
+
+ current_governor: (RW) displays current idle policy. Users can
+ switch the governor at runtime by writing to this file.
See files in Documentation/cpuidle/ for more information.
+What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/name
+ /sys/devices/system/cpu/cpuX/cpuidle/stateN/latency
+ /sys/devices/system/cpu/cpuX/cpuidle/stateN/power
+ /sys/devices/system/cpu/cpuX/cpuidle/stateN/time
+ /sys/devices/system/cpu/cpuX/cpuidle/stateN/usage
+Date: September 2007
+KernelVersion: v2.6.24
+Contact: Linux power management list <linux-pm@vger.kernel.org>
+Description:
+ The directory /sys/devices/system/cpu/cpuX/cpuidle contains per
+ logical CPU specific cpuidle information for each online cpu X.
+ The processor idle states which are available for use have the
+ following attributes:
+
+ name: (RO) Name of the idle state (string).
+
+ latency: (RO) The latency to exit out of this idle state (in
+ microseconds).
+
+ power: (RO) The power consumed while in this idle state (in
+ milliwatts).
+
+ time: (RO) The total time spent in this idle state (in microseconds).
+
+ usage: (RO) Number of times this state was entered (a count).
+
+
+What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/desc
+Date: February 2008
+KernelVersion: v2.6.25
+Contact: Linux power management list <linux-pm@vger.kernel.org>
+Description:
+ (RO) A small description about the idle state (string).
+
+
+What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/disable
+Date: March 2012
+KernelVersion: v3.10
+Contact: Linux power management list <linux-pm@vger.kernel.org>
+Description:
+ (RW) Option to disable this idle state (bool). The behavior and
+ the effect of the disable variable depend on the implementation
+ of a particular governor. In the ladder governor, for example,
+ it is not coherent: disabling a light state also disables all
+ deeper states, but the disable variable does not reflect that.
+ Likewise, enabling a deep state while a lighter state is still
+ disabled has no effect.
+
+
+What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/residency
+Date: March 2014
+KernelVersion: v3.15
+Contact: Linux power management list <linux-pm@vger.kernel.org>
+Description:
+ (RO) Display the target residency i.e. the minimum amount of
+ time (in microseconds) this cpu should spend in this idle state
+ to make the transition worth the effort.
+
+
What: /sys/devices/system/cpu/cpu#/cpufreq/*
Date: pre-git history
Contact: linux-pm@vger.kernel.org
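
(The latency, residency and disable attributes above encode the basic
trade-off a cpuidle governor makes. A simplified, hypothetical sketch of
that check - not the actual menu or ladder governor code - might look
like this:)

    #include <stdbool.h>

    struct idle_state {
        unsigned int exit_latency_us;     /* 'latency' attribute */
        unsigned int target_residency_us; /* 'residency' attribute */
        bool disabled;                    /* 'disable' attribute */
    };

    /*
     * A state is only worth entering if the CPU is predicted to stay
     * idle at least as long as the target residency, and waking from
     * it does not exceed the current latency constraint.
     */
    static bool state_is_usable(const struct idle_state *s,
                                unsigned int predicted_idle_us,
                                unsigned int latency_req_us)
    {
        if (s->disabled)
            return false;
        if (s->exit_latency_us > latency_req_us)
            return false;
        return predicted_idle_us >= s->target_residency_us;
    }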
diff --git a/Documentation/ABI/testing/sysfs-platform-dptf b/Documentation/ABI/testing/sysfs-platform-dptf
new file mode 100644
index 000000000000..325dc0667dbb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-dptf
@@ -0,0 +1,40 @@
+What: /sys/bus/platform/devices/INT3407:00/dptf_power/charger_type
+Date: Jul, 2016
+KernelVersion: v4.10
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (RO) The charger type - Traditional, Hybrid or NVDC.
+
+What: /sys/bus/platform/devices/INT3407:00/dptf_power/adapter_rating_mw
+Date: Jul, 2016
+KernelVersion: v4.10
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (RO) Adapter rating in milliwatts (the maximum adapter power).
+ Must be 0 if no AC adapter is plugged in.
+
+What: /sys/bus/platform/devices/INT3407:00/dptf_power/max_platform_power_mw
+Date: Jul, 2016
+KernelVersion: v4.10
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (RO) Maximum platform power that can be supported by the battery
+ in milliwatts.
+
+What: /sys/bus/platform/devices/INT3407:00/dptf_power/platform_power_source
+Date: Jul, 2016
+KernelVersion: v4.10
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (RO) Display the platform power source
+ 0x00 = DC
+ 0x01 = AC
+ 0x02 = USB
+ 0x03 = Wireless Charger
+
+What: /sys/bus/platform/devices/INT3407:00/dptf_power/battery_steady_power
+Date: Jul, 2016
+KernelVersion: v4.10
+Contact: linux-acpi@vger.kernel.org
+Description:
+ (RO) The maximum sustained power for battery in milliwatts.
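
(A minimal userspace sketch decoding platform_power_source as documented
above; not part of the patch, and the INT3407:00 instance name is an
assumption that varies by system.)

    #include <stdio.h>

    static const char *power_source_name(unsigned int v)
    {
        switch (v) {
        case 0x00: return "DC";
        case 0x01: return "AC";
        case 0x02: return "USB";
        case 0x03: return "Wireless Charger";
        default:   return "unknown";
        }
    }

    int main(void)
    {
        FILE *f = fopen("/sys/bus/platform/devices/INT3407:00/"
                        "dptf_power/platform_power_source", "r");
        unsigned int v;

        if (!f)
            return 1;
        /* values 0x00-0x03 parse identically as hex or decimal */
        if (fscanf(f, "%x", &v) != 1) {
            fclose(f);
            return 1;
        }
        fclose(f);
        printf("power source: %s\n", power_source_name(v));
        return 0;
    }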
diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt
index 5550bfdcce5f..be70b32c95d9 100644
--- a/Documentation/atomic_bitops.txt
+++ b/Documentation/atomic_bitops.txt
@@ -58,7 +58,12 @@ Like with atomic_t, the rule of thumb is:
- RMW operations that have a return value are fully ordered.
-Except for test_and_set_bit_lock() which has ACQUIRE semantics and
+ - RMW operations that are conditional are unordered on FAILURE,
+ otherwise the above rules apply. In the case of test_and_{}_bit() operations,
+ if the bit in memory is unchanged by the operation then it is deemed to have
+ failed.
+
+Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
clear_bit_unlock() which has RELEASE semantics.
Since a platform only has a single means of achieving atomic operations
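
(A minimal kernel-style sketch of the usage pattern these ordering rules
enable - a simple bit lock. Illustrative only; see <linux/bitops.h> for
the real primitives.)

    /*
     * test_and_set_bit_lock() succeeds (returns 0) with ACQUIRE
     * semantics, so the critical section cannot move up across it;
     * clear_bit_unlock() has RELEASE semantics, so the critical
     * section cannot move down across it.
     */
    static inline void example_bit_lock(unsigned long *word)
    {
        while (test_and_set_bit_lock(0, word))
            cpu_relax();    /* spin until we observe the bit clear */
    }

    static inline void example_bit_unlock(unsigned long *word)
    {
        clear_bit_unlock(0, word);
    }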
diff --git a/Documentation/devicetree/bindings/power/mti,mips-cpc.txt b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
new file mode 100644
index 000000000000..c6b82511ae8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt
@@ -0,0 +1,8 @@
+Binding for MIPS Cluster Power Controller (CPC).
+
+This binding allows a system to specify where the CPC registers are
+located.
+
+Required properties:
+compatible : Should be "mti,mips-cpc".
+regs: Should describe the address & size of the CPC register region.
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
new file mode 100644
index 000000000000..2c815a7f1ba7
--- /dev/null
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -0,0 +1,62 @@
+#
+# Feature name: membarrier-sync-core
+# Kconfig: ARCH_HAS_MEMBARRIER_SYNC_CORE
+# description: arch supports core serializing membarrier
+#
+# Architecture requirements
+#
+# * arm64
+#
+# Rely on eret context synchronization when returning from IPI handler, and
+# when returning to user-space.
+#
+# * x86
+#
+# x86-32 uses IRET as return from interrupt, which takes care of the IPI.
+# However, it uses both IRET and SYSEXIT to go back to user-space. The IRET
+# instruction is core serializing, but not SYSEXIT.
+#
+# x86-64 uses IRET as return from interrupt, which takes care of the IPI.
+# However, it can return to user-space through either SYSRETL (compat code),
+# SYSRETQ, or IRET.
+#
+# Given that neither SYSRET{L,Q}, nor SYSEXIT, are core serializing, we rely
+# instead on write_cr3() performed by switch_mm() to provide core serialization
+# after changing the current mm, and deal with the special case of kthread ->
+# uthread (temporarily keeping current mm into active_mm) by issuing a
+# sync_core_before_usermode() in that specific case.
+#
+ -----------------------
+ | arch |status|
+ -----------------------
+ | alpha: | TODO |
+ | arc: | TODO |
+ | arm: | TODO |
+ | arm64: | ok |
+ | blackfin: | TODO |
+ | c6x: | TODO |
+ | cris: | TODO |
+ | frv: | TODO |
+ | h8300: | TODO |
+ | hexagon: | TODO |
+ | ia64: | TODO |
+ | m32r: | TODO |
+ | m68k: | TODO |
+ | metag: | TODO |
+ | microblaze: | TODO |
+ | mips: | TODO |
+ | mn10300: | TODO |
+ | nios2: | TODO |
+ | openrisc: | TODO |
+ | parisc: | TODO |
+ | powerpc: | TODO |
+ | s390: | TODO |
+ | score: | TODO |
+ | sh: | TODO |
+ | sparc: | TODO |
+ | tile: | TODO |
+ | um: | TODO |
+ | unicore32: | TODO |
+ | x86: | ok |
+ | xtensa: | TODO |
+ -----------------------
diff --git a/Documentation/locking/mutex-design.txt b/Documentation/locking/mutex-design.txt
index 60c482df1a38..818aca19612f 100644
--- a/Documentation/locking/mutex-design.txt
+++ b/Documentation/locking/mutex-design.txt
@@ -21,37 +21,23 @@ Implementation
--------------
Mutexes are represented by 'struct mutex', defined in include/linux/mutex.h
-and implemented in kernel/locking/mutex.c. These locks use a three
-state atomic counter (->count) to represent the different possible
-transitions that can occur during the lifetime of a lock:
-
- 1: unlocked
- 0: locked, no waiters
- negative: locked, with potential waiters
-
-In its most basic form it also includes a wait-queue and a spinlock
-that serializes access to it. CONFIG_SMP systems can also include
-a pointer to the lock task owner (->owner) as well as a spinner MCS
-lock (->osq), both described below in (ii).
+and implemented in kernel/locking/mutex.c. These locks use an atomic variable
+(->owner) to keep track of the lock state during its lifetime. The owner field
+holds a 'struct task_struct *' pointing to the current lock owner, and is
+therefore NULL if the lock is not currently owned. Since task_struct pointers
+are aligned to at least L1_CACHE_BYTES, the low three bits are used to store
+extra state (e.g., whether the waiter list is non-empty). In its most basic
+form a mutex also includes a wait-queue and a spinlock that serializes access
+to it. Furthermore, CONFIG_MUTEX_SPIN_ON_OWNER=y systems use a spinner MCS
+lock (->osq), described below in (ii).
When acquiring a mutex, there are three possible paths that can be
taken, depending on the state of the lock:
-(i) fastpath: tries to atomically acquire the lock by decrementing the
- counter. If it was already taken by another task it goes to the next
- possible path. This logic is architecture specific. On x86-64, the
- locking fastpath is 2 instructions:
-
- 0000000000000e10 <mutex_lock>:
- e21: f0 ff 0b lock decl (%rbx)
- e24: 79 08 jns e2e <mutex_lock+0x1e>
-
- the unlocking fastpath is equally tight:
-
- 0000000000000bc0 <mutex_unlock>:
- bc8: f0 ff 07 lock incl (%rdi)
- bcb: 7f 0a jg bd7 <mutex_unlock+0x17>
-
+(i) fastpath: tries to atomically acquire the lock by cmpxchg()ing the owner with
+ the current task. This only works in the uncontended case (cmpxchg() checks
+ against 0UL, so all 3 state bits above have to be 0). If the lock is
+ contended it goes to the next possible path.
(ii) midpath: aka optimistic spinning, tries to spin for acquisition
while the lock owner is running and there are no other tasks ready
@@ -143,11 +129,10 @@ Test if the mutex is taken:
Disadvantages
-------------
-Unlike its original design and purpose, 'struct mutex' is larger than
-most locks in the kernel. E.g: on x86-64 it is 40 bytes, almost twice
-as large as 'struct semaphore' (24 bytes) and tied, along with rwsems,
-for the largest lock in the kernel. Larger structure sizes mean more
-CPU cache and memory footprint.
+Unlike its original design and purpose, 'struct mutex' is among the largest
+locks in the kernel. E.g: on x86-64 it is 32 bytes, where 'struct semaphore'
+is 24 bytes and rw_semaphore is 40 bytes. Larger structure sizes mean more CPU
+cache and memory footprint.
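
(A conceptual sketch of the fastpath described in (i) above - not the
kernel's actual implementation, which uses
atomic_long_try_cmpxchg_acquire() on lock->owner:)

    /*
     * The owner word is 0UL (NULL task pointer, all three low state
     * bits clear) only when the mutex is free and uncontended, so a
     * single cmpxchg() both checks for contention and records
     * ownership.
     */
    static inline bool example_mutex_trylock_fast(atomic_long_t *owner)
    {
        unsigned long curr = (unsigned long)current;

        return atomic_long_cmpxchg_acquire(owner, 0UL, curr) == 0UL;
    }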
When to use mutexes
-------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 3bdc260e36b7..9a7f76eadae9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9206,6 +9206,7 @@ MIPS GENERIC PLATFORM
M: Paul Burton <paul.burton@mips.com>
L: linux-mips@linux-mips.org
S: Supported
+F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt
F: arch/mips/generic/
F: arch/mips/tools/generic-board-config.sh
@@ -9945,6 +9946,7 @@ F: drivers/nfc/nxp-nci
OBJTOOL
M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Peter Zijlstra <peterz@infradead.org>
S: Supported
F: tools/objtool/
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index be7bd19c87ec..eda8c5f629fc 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -20,7 +20,7 @@
#define MPIDR_UP_BITMASK (0x1 << 30)
#define MPIDR_MT_BITMASK (0x1 << 24)
-#define MPIDR_HWID_BITMASK 0xff00ffffff
+#define MPIDR_HWID_BITMASK 0xff00ffffffUL
#define MPIDR_LEVEL_BITS_SHIFT 3
#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 1dca41bea16a..e73f68569624 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -22,7 +22,7 @@
static inline pte_t huge_ptep_get(pte_t *ptep)
{
- return *ptep;
+ return READ_ONCE(*ptep);
}
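
(This hunk is representative of the whole arm64 series below: live page
table entries may be updated concurrently by hardware or other CPUs, so
walkers must snapshot them with a single-copy-atomic READ_ONCE() rather
than a plain dereference that the compiler may tear, cache, or re-read.
A minimal sketch of the pattern, assuming the usual arm64 helpers:)

    static inline bool pte_valid_snapshot(pte_t *ptep)
    {
        /* one atomic load; every later check sees the same value */
        pte_t pte = READ_ONCE(*ptep);

        return pte_valid(pte);
    }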
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 9679067a1574..7faed6e48b46 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -185,42 +185,42 @@ static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
return pmd;
}
-static inline void kvm_set_s2pte_readonly(pte_t *pte)
+static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
pteval_t old_pteval, pteval;
- pteval = READ_ONCE(pte_val(*pte));
+ pteval = READ_ONCE(pte_val(*ptep));
do {
old_pteval = pteval;
pteval &= ~PTE_S2_RDWR;
pteval |= PTE_S2_RDONLY;
- pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval);
+ pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
} while (pteval != old_pteval);
}
-static inline bool kvm_s2pte_readonly(pte_t *pte)
+static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
- return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
+ return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}
-static inline bool kvm_s2pte_exec(pte_t *pte)
+static inline bool kvm_s2pte_exec(pte_t *ptep)
{
- return !(pte_val(*pte) & PTE_S2_XN);
+ return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}
-static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
- kvm_set_s2pte_readonly((pte_t *)pmd);
+ kvm_set_s2pte_readonly((pte_t *)pmdp);
}
-static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
- return kvm_s2pte_readonly((pte_t *)pmd);
+ return kvm_s2pte_readonly((pte_t *)pmdp);
}
-static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
- return !(pmd_val(*pmd) & PMD_S2_XN);
+ return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}
static inline bool kvm_page_empty(void *ptr)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 8d3331985d2e..39ec0b8a689e 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -141,13 +141,13 @@ static inline void cpu_install_idmap(void)
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated.
*/
-static inline void cpu_replace_ttbr1(pgd_t *pgd)
+static inline void cpu_replace_ttbr1(pgd_t *pgdp)
{
typedef void (ttbr_replace_func)(phys_addr_t);
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
ttbr_replace_func *replace_phys;
- phys_addr_t pgd_phys = virt_to_phys(pgd);
+ phys_addr_t pgd_phys = virt_to_phys(pgdp);
replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index e9d9f1b006ef..2e05bcd944c8 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -36,23 +36,23 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
return (pmd_t *)__get_free_page(PGALLOC_GFP);
}
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
{
- BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- free_page((unsigned long)pmd);
+ BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
+ free_page((unsigned long)pmdp);
}
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
{
- set_pud(pud, __pud(__phys_to_pud_val(pmd) | prot));
+ set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
}
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
{
- __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+ __pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
}
#else
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
{
BUILD_BUG();
}
@@ -65,30 +65,30 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
return (pud_t *)__get_free_page(PGALLOC_GFP);
}
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
{
- BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
- free_page((unsigned long)pud);
+ BUG_ON((unsigned long)pudp & (PAGE_SIZE-1));
+ free_page((unsigned long)pudp);
}
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
{
- set_pgd(pgdp, __pgd(__phys_to_pgd_val(pud) | prot));
+ set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot));
}
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
{
- __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+ __pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
}
#else
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
{
BUILD_BUG();
}
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
@@ -114,10 +114,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
/*
* Free a PTE table.
*/
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
{
- if (pte)
- free_page((unsigned long)pte);
+ if (ptep)
+ free_page((unsigned long)ptep);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -126,10 +126,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
-static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
pmdval_t prot)
{
- set_pmd(pmdp, __pmd(__phys_to_pmd_val(pte) | prot));
+ set_pmd(pmdp, __pmd(__phys_to_pmd_val(ptep) | prot));
}
/*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 094374c82db0..7e2c27e63cd8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -218,7 +218,7 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
static inline void set_pte(pte_t *ptep, pte_t pte)
{
- *ptep = pte;
+ WRITE_ONCE(*ptep, pte);
/*
* Only if the new pte is valid and kernel, otherwise TLB maintenance
@@ -250,6 +250,8 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
+ pte_t old_pte;
+
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
__sync_icache_dcache(pte, addr);
@@ -258,14 +260,15 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
* hardware updates of the pte (ptep_set_access_flags safely changes
* valid ptes without going through an invalid entry).
*/
- if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
+ old_pte = READ_ONCE(*ptep);
+ if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
(mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
VM_WARN_ONCE(!pte_young(pte),
"%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
- __func__, pte_val(*ptep), pte_val(pte));
- VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+ __func__, pte_val(old_pte), pte_val(pte));
+ VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
"%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
- __func__, pte_val(*ptep), pte_val(pte));
+ __func__, pte_val(old_pte), pte_val(pte));
}
set_pte(ptep, pte);
@@ -431,7 +434,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
- *pmdp = pmd;
+ WRITE_ONCE(*pmdp, pmd);
dsb(ishst);
isb();
}
@@ -482,7 +485,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
static inline void set_pud(pud_t *pudp, pud_t pud)
{
- *pudp = pud;
+ WRITE_ONCE(*pudp, pud);
dsb(ishst);
isb();
}
@@ -500,7 +503,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
/* Find an entry in the second-level page table. */
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset_phys(dir, addr) (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
#define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
@@ -535,7 +538,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
- *pgdp = pgd;
+ WRITE_ONCE(*pgdp, pgd);
dsb(ishst);
}
@@ -552,7 +555,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
/* Find an entry in the first-level page table. */
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset_phys(dir, addr) (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset_phys(dir, addr) (pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr))))
#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 07823595b7f0..52f15cd896e1 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -408,6 +408,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+ .enable = qcom_enable_link_stack_sanitization,
+ },
+ {
+ .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
.enable = enable_smccc_arch_workaround_1,
},
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index f85ac58d08a3..a8bf1c892b90 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -90,7 +90,7 @@ static int __init set_permissions(pte_t *ptep, pgtable_t token,
unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
- pte_t pte = *ptep;
+ pte_t pte = READ_ONCE(*ptep);
if (md->attribute & EFI_MEMORY_RO)
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index f20cf7e99249..1ec5f28c39fc 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -202,10 +202,10 @@ static int create_safe_exec_page(void *src_start, size_t length,
gfp_t mask)
{
int rc = 0;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
unsigned long dst = (unsigned long)allocator(mask);
if (!dst) {
@@ -216,38 +216,38 @@ static int create_safe_exec_page(void *src_start, size_t length,
memcpy((void *)dst, src_start, length);
flush_icache_range(dst, dst + length);
- pgd = pgd_offset_raw(allocator(mask), dst_addr);
- if (pgd_none(*pgd)) {
- pud = allocator(mask);
- if (!pud) {
+ pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+ if (pgd_none(READ_ONCE(*pgdp))) {
+ pudp = allocator(mask);
+ if (!pudp) {
rc = -ENOMEM;
goto out;
}
- pgd_populate(&init_mm, pgd, pud);
+ pgd_populate(&init_mm, pgdp, pudp);
}
- pud = pud_offset(pgd, dst_addr);
- if (pud_none(*pud)) {
- pmd = allocator(mask);
- if (!pmd) {
+ pudp = pud_offset(pgdp, dst_addr);
+ if (pud_none(READ_ONCE(*pudp))) {
+ pmdp = allocator(mask);
+ if (!pmdp) {
rc = -ENOMEM;
goto out;
}
- pud_populate(&init_mm, pud, pmd);
+ pud_populate(&init_mm, pudp, pmdp);
}
- pmd = pmd_offset(pud, dst_addr);
- if (pmd_none(*pmd)) {
- pte = allocator(mask);
- if (!pte) {
+ pmdp = pmd_offset(pudp, dst_addr);
+ if (pmd_none(READ_ONCE(*pmdp))) {
+ ptep = allocator(mask);
+ if (!ptep) {
rc = -ENOMEM;
goto out;
}
- pmd_populate_kernel(&init_mm, pmd, pte);
+ pmd_populate_kernel(&init_mm, pmdp, ptep);
}
- pte = pte_offset_kernel(pmd, dst_addr);
- set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
+ ptep = pte_offset_kernel(pmdp, dst_addr);
+ set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
/*
* Load our new page tables. A strict BBM approach requires that we
@@ -263,7 +263,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
*/
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
- write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
+ write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
isb();
*phys_dst_addr = virt_to_phys((void *)dst);
@@ -320,9 +320,9 @@ int swsusp_arch_suspend(void)
return ret;
}
-static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
- pte_t pte = *src_pte;
+ pte_t pte = READ_ONCE(*src_ptep);
if (pte_valid(pte)) {
/*
@@ -330,7 +330,7 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
* read only (code, rodata). Clear the RDONLY bit from
* the temporary mappings we use during restore.
*/
- set_pte(dst_pte, pte_mkwrite(pte));
+ set_pte(dst_ptep, pte_mkwrite(pte));
} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
/*
* debug_pagealloc will have removed the PTE_VALID bit if
@@ -343,112 +343,116 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
*/
BUG_ON(!pfn_valid(pte_pfn(pte)));
- set_pte(dst_pte, pte_mkpresent(pte_mkwrite(pte)));
+ set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
}
}
-static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
+static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
unsigned long end)
{
- pte_t *src_pte;
- pte_t *dst_pte;
+ pte_t *src_ptep;
+ pte_t *dst_ptep;
unsigned long addr = start;
- dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
- if (!dst_pte)
+ dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
+ if (!dst_ptep)
return -ENOMEM;
- pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
- dst_pte = pte_offset_kernel(dst_pmd, start);
+ pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
+ dst_ptep = pte_offset_kernel(dst_pmdp, start);
- src_pte = pte_offset_kernel(src_pmd, start);
+ src_ptep = pte_offset_kernel(src_pmdp, start);
do {
- _copy_pte(dst_pte, src_pte, addr);
- } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+ _copy_pte(dst_ptep, src_ptep, addr);
+ } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
return 0;
}
-static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
+static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
unsigned long end)
{
- pmd_t *src_pmd;
- pmd_t *dst_pmd;
+ pmd_t *src_pmdp;
+ pmd_t *dst_pmdp;
unsigned long next;
unsigned long addr = start;
- if (pud_none(*dst_pud)) {
- dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
- if (!dst_pmd)
+ if (pud_none(READ_ONCE(*dst_pudp))) {
+ dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
+ if (!dst_pmdp)
return -ENOMEM;
- pud_populate(&init_mm, dst_pud, dst_pmd);
+ pud_populate(&init_mm, dst_pudp, dst_pmdp);
}
- dst_pmd = pmd_offset(dst_pud, start);
+ dst_pmdp = pmd_offset(dst_pudp, start);
- src_pmd = pmd_offset(src_pud, start);
+ src_pmdp = pmd_offset(src_pudp, start);
do {
+ pmd_t pmd = READ_ONCE(*src_pmdp);
+
next = pmd_addr_end(addr, end);
- if (pmd_none(*src_pmd))
+ if (pmd_none(pmd))
continue;
- if (pmd_table(*src_pmd)) {
- if (copy_pte(dst_pmd, src_pmd, addr, next))
+ if (pmd_table(pmd)) {
+ if (copy_pte(dst_pmdp, src_pmdp, addr, next))
return -ENOMEM;
} else {
- set_pmd(dst_pmd,
- __pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
+ set_pmd(dst_pmdp,
+ __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
}
- } while (dst_pmd++, src_pmd++, addr = next, addr != end);
+ } while (dst_pmdp++, src_pmdp++, addr = next, addr != end);
return 0;
}
-static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
+static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
unsigned long end)
{
- pud_t *dst_pud;
- pud_t *src_pud;
+ pud_t *dst_pudp;
+ pud_t *src_pudp;
unsigned long next;
unsigned long addr = start;
- if (pgd_none(*dst_pgd)) {
- dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
- if (!dst_pud)
+ if (pgd_none(READ_ONCE(*dst_pgdp))) {
+ dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
+ if (!dst_pudp)
return -ENOMEM;
- pgd_populate(&init_mm, dst_pgd, dst_pud);
+ pgd_populate(&init_mm, dst_pgdp, dst_pudp);
}
- dst_pud = pud_offset(dst_pgd, start);
+ dst_pudp = pud_offset(dst_pgdp, start);
- src_pud = pud_offset(src_pgd, start);
+ src_pudp = pud_offset(src_pgdp, start);
do {
+ pud_t pud = READ_ONCE(*src_pudp);
+
next = pud_addr_end(addr, end);
- if (pud_none(*src_pud))
+ if (pud_none(pud))
continue;
- if (pud_table(*(src_pud))) {
- if (copy_pmd(dst_pud, src_pud, addr, next))
+ if (pud_table(pud)) {
+ if (copy_pmd(dst_pudp, src_pudp, addr, next))
return -ENOMEM;
} else {
- set_pud(dst_pud,
- __pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
+ set_pud(dst_pudp,
+ __pud(pud_val(pud) & ~PMD_SECT_RDONLY));
}
- } while (dst_pud++, src_pud++, addr = next, addr != end);
+ } while (dst_pudp++, src_pudp++, addr = next, addr != end);
return 0;
}
-static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
+static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
unsigned long end)
{
unsigned long next;
unsigned long addr = start;
- pgd_t *src_pgd = pgd_offset_k(start);
+ pgd_t *src_pgdp = pgd_offset_k(start);
- dst_pgd = pgd_offset_raw(dst_pgd, start);
+ dst_pgdp = pgd_offset_raw(dst_pgdp, start);
do {
next = pgd_addr_end(addr, end);
- if (pgd_none(*src_pgd))
+ if (pgd_none(READ_ONCE(*src_pgdp)))
continue;
- if (copy_pud(dst_pgd, src_pgd, addr, next))
+ if (copy_pud(dst_pgdp, src_pgdp, addr, next))
return -ENOMEM;
- } while (dst_pgd++, src_pgd++, addr = next, addr != end);
+ } while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
return 0;
}
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 116252a8d3a5..870f4b1587f9 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -407,8 +407,10 @@ again:
u32 midr = read_cpuid_id();
/* Apply BTAC predictors mitigation to all Falkor chips */
- if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
+ if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+ ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
__qcom_hyp_sanitize_btac_predictors();
+ }
}
fp_enabled = __fpsimd_enabled();
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 7b60d62ac593..65dfc8571bf8 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -286,48 +286,52 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
}
-static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
{
- pte_t *pte = pte_offset_kernel(pmd, 0UL);
+ pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
unsigned long addr;
unsigned i;
- for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+ for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
addr = start + i * PAGE_SIZE;
- note_page(st, addr, 4, pte_val(*pte));
+ note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
}
}
-static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
{
- pmd_t *pmd = pmd_offset(pud, 0UL);
+ pmd_t *pmdp = pmd_offset(pudp, 0UL);
unsigned long addr;
unsigned i;
- for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+ for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+ pmd_t pmd = READ_ONCE(*pmdp);
+
addr = start + i * PMD_SIZE;
- if (pmd_none(*pmd) || pmd_sect(*pmd)) {
- note_page(st, addr, 3, pmd_val(*pmd));
+ if (pmd_none(pmd) || pmd_sect(pmd)) {
+ note_page(st, addr, 3, pmd_val(pmd));
} else {
- BUG_ON(pmd_bad(*pmd));
- walk_pte(st, pmd, addr);
+ BUG_ON(pmd_bad(pmd));
+ walk_pte(st, pmdp, addr);
}
}
}
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
{
- pud_t *pud = pud_offset(pgd, 0UL);
+ pud_t *pudp = pud_offset(pgdp, 0UL);
unsigned long addr;
unsigned i;
- for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+ for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
+ pud_t pud = READ_ONCE(*pudp);
+
addr = start + i * PUD_SIZE;
- if (pud_none(*pud) || pud_sect(*pud)) {
- note_page(st, addr, 2, pud_val(*pud));
+ if (pud_none(pud) || pud_sect(pud)) {
+ note_page(st, addr, 2, pud_val(pud));
} else {
- BUG_ON(pud_bad(*pud));
- walk_pmd(st, pud, addr);
+ BUG_ON(pud_bad(pud));
+ walk_pmd(st, pudp, addr);
}
}
}
@@ -335,17 +339,19 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
unsigned long start)
{
- pgd_t *pgd = pgd_offset(mm, 0UL);
+ pgd_t *pgdp = pgd_offset(mm, 0UL);
unsigned i;
unsigned long addr;
- for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+ for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
+ pgd_t pgd = READ_ONCE(*pgdp);
+
addr = start + i * PGDIR_SIZE;
- if (pgd_none(*pgd)) {
- note_page(st, addr, 1, pgd_val(*pgd));
+ if (pgd_none(pgd)) {
+ note_page(st, addr, 1, pgd_val(pgd));
} else {
- BUG_ON(pgd_bad(*pgd));
- walk_pud(st, pgd, addr);
+ BUG_ON(pgd_bad(pgd));
+ walk_pud(st, pgdp, addr);
}
}
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f76bb2c3c943..bff11553eb05 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -130,7 +130,8 @@ static void mem_abort_decode(unsigned int esr)
void show_pte(unsigned long addr)
{
struct mm_struct *mm;
- pgd_t *pgd;
+ pgd_t *pgdp;
+ pgd_t pgd;
if (addr < TASK_SIZE) {
/* TTBR0 */
@@ -149,33 +150,37 @@ void show_pte(unsigned long addr)
return;
}
- pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgd = %p\n",
+ pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
VA_BITS, mm->pgd);
- pgd = pgd_offset(mm, addr);
- pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));
+ pgdp = pgd_offset(mm, addr);
+ pgd = READ_ONCE(*pgdp);
+ pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
do {
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
- if (pgd_none(*pgd) || pgd_bad(*pgd))
+ if (pgd_none(pgd) || pgd_bad(pgd))
break;
- pud = pud_offset(pgd, addr);
- pr_cont(", *pud=%016llx", pud_val(*pud));
- if (pud_none(*pud) || pud_bad(*pud))
+ pudp = pud_offset(pgdp, addr);
+ pud = READ_ONCE(*pudp);
+ pr_cont(", pud=%016llx", pud_val(pud));
+ if (pud_none(pud) || pud_bad(pud))
break;
- pmd = pmd_offset(pud, addr);
- pr_cont(", *pmd=%016llx", pmd_val(*pmd));
- if (pmd_none(*pmd) || pmd_bad(*pmd))
+ pmdp = pmd_offset(pudp, addr);
+ pmd = READ_ONCE(*pmdp);
+ pr_cont(", pmd=%016llx", pmd_val(pmd));
+ if (pmd_none(pmd) || pmd_bad(pmd))
break;
- pte = pte_offset_map(pmd, addr);
- pr_cont(", *pte=%016llx", pte_val(*pte));
- pte_unmap(pte);
+ ptep = pte_offset_map(pmdp, addr);
+ pte = READ_ONCE(*ptep);
+ pr_cont(", pte=%016llx", pte_val(pte));
+ pte_unmap(ptep);
} while(0);
pr_cont("\n");
@@ -196,8 +201,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
pte_t entry, int dirty)
{
pteval_t old_pteval, pteval;
+ pte_t pte = READ_ONCE(*ptep);
- if (pte_same(*ptep, entry))
+ if (pte_same(pte, entry))
return 0;
/* only preserve the access flags and write permission */
@@ -210,7 +216,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* (calculated as: a & b == ~(~a | ~b)).
*/
pte_val(entry) ^= PTE_RDONLY;
- pteval = READ_ONCE(pte_val(*ptep));
+ pteval = pte_val(pte);
do {
old_pteval = pteval;
pteval ^= PTE_RDONLY;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 6cb0fa92a651..ecc6818191df 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -54,14 +54,14 @@ static inline pgprot_t pte_pgprot(pte_t pte)
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, size_t *pgsize)
{
- pgd_t *pgd = pgd_offset(mm, addr);
- pud_t *pud;
- pmd_t *pmd;
+ pgd_t *pgdp = pgd_offset(mm, addr);
+ pud_t *pudp;
+ pmd_t *pmdp;
*pgsize = PAGE_SIZE;
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
- if ((pte_t *)pmd == ptep) {
+ pudp = pud_offset(pgdp, addr);
+ pmdp = pmd_offset(pudp, addr);
+ if ((pte_t *)pmdp == ptep) {
*pgsize = PMD_SIZE;
return CONT_PMDS;
}
@@ -181,11 +181,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
clear_flush(mm, addr, ptep, pgsize, ncontig);
- for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) {
- pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
- pte_val(pfn_pte(pfn, hugeprot)));
+ for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
- }
}
void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -203,20 +200,20 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
- pgd_t *pgd;
- pud_t *pud;
- pte_t *pte = NULL;
-
- pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
- if (!pud)
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep = NULL;
+
+ pgdp = pgd_offset(mm, addr);
+ pudp = pud_alloc(mm, pgdp, addr);
+ if (!pudp)
return NULL;
if (sz == PUD_SIZE) {
- pte = (pte_t *)pud;
+ ptep = (pte_t *)pudp;
} else if (sz == (PAGE_SIZE * CONT_PTES)) {
- pmd_t *pmd = pmd_alloc(mm, pud, addr);
+ pmdp = pmd_alloc(mm, pudp, addr);
WARN_ON(addr & (sz - 1));
/*
@@ -226,60 +223,55 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
* will be no pte_unmap() to correspond with this
* pte_alloc_map().
*/
- pte = pte_alloc_map(mm, pmd, addr);
+ ptep = pte_alloc_map(mm, pmdp, addr);
} else if (sz == PMD_SIZE) {
if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
- pud_none(*pud))
- pte = huge_pmd_share(mm, addr, pud);
+ pud_none(READ_ONCE(*pudp)))
+ ptep = huge_pmd_share(mm, addr, pudp);
else
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
} else if (sz == (PMD_SIZE * CONT_PMDS)) {
- pmd_t *pmd;
-
- pmd = pmd_alloc(mm, pud, addr);
+ pmdp = pmd_alloc(mm, pudp, addr);
WARN_ON(addr & (sz - 1));
- return (pte_t *)pmd;
+ return (pte_t *)pmdp;
}
- pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
- sz, pte, pte_val(*pte));
- return pte;
+ return ptep;
}
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ pgd_t *pgdp;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
- pgd = pgd_offset(mm, addr);
- pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
- if (!pgd_present(*pgd))
+ pgdp = pgd_offset(mm, addr);
+ if (!pgd_present(READ_ONCE(*pgdp)))
return NULL;
- pud = pud_offset(pgd, addr);
- if (sz != PUD_SIZE && pud_none(*pud))
+ pudp = pud_offset(pgdp, addr);
+ pud = READ_ONCE(*pudp);
+ if (sz != PUD_SIZE && pud_none(pud))
return NULL;
/* hugepage or swap? */
- if (pud_huge(*pud) || !pud_present(*pud))
- return (pte_t *)pud;
+ if (pud_huge(pud) || !pud_present(pud))
+ return (pte_t *)pudp;
/* table; check the next level */
if (sz == CONT_PMD_SIZE)
addr &= CONT_PMD_MASK;
- pmd = pmd_offset(pud, addr);
+ pmdp = pmd_offset(pudp, addr);
+ pmd = READ_ONCE(*pmdp);
if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
- pmd_none(*pmd))
+ pmd_none(pmd))
return NULL;
- if (pmd_huge(*pmd) || !pmd_present(*pmd))
- return (pte_t *)pmd;
+ if (pmd_huge(pmd) || !pmd_present(pmd))
+ return (pte_t *)pmdp;
- if (sz == CONT_PTE_SIZE) {
- pte_t *pte = pte_offset_kernel(pmd, (addr & CONT_PTE_MASK));
- return pte;
- }
+ if (sz == CONT_PTE_SIZE)
+ return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));
return NULL;
}
@@ -367,7 +359,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
size_t pgsize;
pte_t pte;
- if (!pte_cont(*ptep)) {
+ if (!pte_cont(READ_ONCE(*ptep))) {
ptep_set_wrprotect(mm, addr, ptep);
return;
}
@@ -391,7 +383,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
size_t pgsize;
int ncontig;
- if (!pte_cont(*ptep)) {
+ if (!pte_cont(READ_ONCE(*ptep))) {
ptep_clear_flush(vma, addr, ptep);
return;
}
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 6e02e6fb4c7b..dabfc1ecda3d 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -44,92 +44,92 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
return __pa(p);
}
-static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
+static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
bool early)
{
- if (pmd_none(*pmd)) {
+ if (pmd_none(READ_ONCE(*pmdp))) {
phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
: kasan_alloc_zeroed_page(node);
- __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+ __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
}
- return early ? pte_offset_kimg(pmd, addr)
- : pte_offset_kernel(pmd, addr);
+ return early ? pte_offset_kimg(pmdp, addr)
+ : pte_offset_kernel(pmdp, addr);
}
-static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
+static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
bool early)
{
- if (pud_none(*pud)) {
+ if (pud_none(READ_ONCE(*pudp))) {
phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
: kasan_alloc_zeroed_page(node);
- __pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
+ __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
}
- return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
+ return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}
-static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
+static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
bool early)
{
- if (pgd_none(*pgd)) {
+ if (pgd_none(READ_ONCE(*pgdp))) {
phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
: kasan_alloc_zeroed_page(node);
- __pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
+ __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
}
- return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
+ return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
}
-static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
+static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
- pte_t *pte = kasan_pte_offset(pmd, addr, node, early);
+ pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
do {
phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
: kasan_alloc_zeroed_page(node);
next = addr + PAGE_SIZE;
- set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
- } while (pte++, addr = next, addr != end && pte_none(*pte));
+ set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
+ } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}
-static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
+static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
- pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);
+ pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
do {
next = pmd_addr_end(addr, end);
- kasan_pte_populate(pmd, addr, next, node, early);
- } while (pmd++, addr = next, addr != end && pmd_none(*pmd));
+ kasan_pte_populate(pmdp, addr, next, node, early);
+ } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}
-static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
+static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
- pud_t *pud = kasan_pud_offset(pgd, addr, node, early);
+ pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
do {
next = pud_addr_end(addr, end);
- kasan_pmd_populate(pud, addr, next, node, early);
- } while (pud++, addr = next, addr != end && pud_none(*pud));
+ kasan_pmd_populate(pudp, addr, next, node, early);
+ } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
int node, bool early)
{
unsigned long next;
- pgd_t *pgd;
+ pgd_t *pgdp;
- pgd = pgd_offset_k(addr);
+ pgdp = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- kasan_pud_populate(pgd, addr, next, node, early);
- } while (pgd++, addr = next, addr != end);
+ kasan_pud_populate(pgdp, addr, next, node, early);
+ } while (pgdp++, addr = next, addr != end);
}
/* The early shadow maps everything to a single page of zeroes */
@@ -155,14 +155,14 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
*/
void __init kasan_copy_shadow(pgd_t *pgdir)
{
- pgd_t *pgd, *pgd_new, *pgd_end;
+ pgd_t *pgdp, *pgdp_new, *pgdp_end;
- pgd = pgd_offset_k(KASAN_SHADOW_START);
- pgd_end = pgd_offset_k(KASAN_SHADOW_END);
- pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+ pgdp = pgd_offset_k(KASAN_SHADOW_START);
+ pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
+ pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
do {
- set_pgd(pgd_new, *pgd);
- } while (pgd++, pgd_new++, pgd != pgd_end);
+ set_pgd(pgdp_new, READ_ONCE(*pgdp));
+ } while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}
static void __init clear_pgds(unsigned long start,
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4694cda823c9..3161b853f29e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -125,45 +125,48 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
return ((old ^ new) & ~mask) == 0;
}
-static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
+static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot)
{
- pte_t *pte;
+ pte_t *ptep;
- pte = pte_set_fixmap_offset(pmd, addr);
+ ptep = pte_set_fixmap_offset(pmdp, addr);
do {
- pte_t old_pte = *pte;
+ pte_t old_pte = READ_ONCE(*ptep);
- set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot));
+ set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
/*
* After the PTE entry has been populated once, we
* only allow updates to the permission attributes.
*/
- BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+ BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
+ READ_ONCE(pte_val(*ptep))));
phys += PAGE_SIZE;
- } while (pte++, addr += PAGE_SIZE, addr != end);
+ } while (ptep++, addr += PAGE_SIZE, addr != end);
pte_clear_fixmap();
}
-static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
+static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void),
int flags)
{
unsigned long next;
+ pmd_t pmd = READ_ONCE(*pmdp);
- BUG_ON(pmd_sect(*pmd));
- if (pmd_none(*pmd)) {
+ BUG_ON(pmd_sect(pmd));
+ if (pmd_none(pmd)) {
phys_addr_t pte_phys;
BUG_ON(!pgtable_alloc);
pte_phys = pgtable_alloc();
- __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+ __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+ pmd = READ_ONCE(*pmdp);
}
- BUG_ON(pmd_bad(*pmd));
+ BUG_ON(pmd_bad(pmd));
do {
pgprot_t __prot = prot;
@@ -175,67 +178,69 @@ static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
(flags & NO_CONT_MAPPINGS) == 0)
__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
- init_pte(pmd, addr, next, phys, __prot);
+ init_pte(pmdp, addr, next, phys, __prot);
phys += next - addr;
} while (addr = next, addr != end);
}
-static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void), int flags)
{
unsigned long next;
- pmd_t *pmd;
+ pmd_t *pmdp;
- pmd = pmd_set_fixmap_offset(pud, addr);
+ pmdp = pmd_set_fixmap_offset(pudp, addr);
do {
- pmd_t old_pmd = *pmd;
+ pmd_t old_pmd = READ_ONCE(*pmdp);
next = pmd_addr_end(addr, end);
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
(flags & NO_BLOCK_MAPPINGS) == 0) {
- pmd_set_huge(pmd, phys, prot);
+ pmd_set_huge(pmdp, phys, prot);
/*
* After the PMD entry has been populated once, we
* only allow updates to the permission attributes.
*/
BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
- pmd_val(*pmd)));
+ READ_ONCE(pmd_val(*pmdp))));
} else {
- alloc_init_cont_pte(pmd, addr, next, phys, prot,
+ alloc_init_cont_pte(pmdp, addr, next, phys, prot,
pgtable_alloc, flags);
BUG_ON(pmd_val(old_pmd) != 0 &&
- pmd_val(old_pmd) != pmd_val(*pmd));
+ pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
}
phys += next - addr;
- } while (pmd++, addr = next, addr != end);
+ } while (pmdp++, addr = next, addr != end);
pmd_clear_fixmap();
}
-static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
+static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void), int flags)
{
unsigned long next;
+ pud_t pud = READ_ONCE(*pudp);
/*
* Check for initial section mappings in the pgd/pud.
*/
- BUG_ON(pud_sect(*pud));
- if (pud_none(*pud)) {
+ BUG_ON(pud_sect(pud));
+ if (pud_none(pud)) {
phys_addr_t pmd_phys;
BUG_ON(!pgtable_alloc);
pmd_phys = pgtable_alloc();
- __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
+ __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+ pud = READ_ONCE(*pudp);
}
- BUG_ON(pud_bad(*pud));
+ BUG_ON(pud_bad(pud));
do {
pgprot_t __prot = prot;
@@ -247,7 +252,7 @@ static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
(flags & NO_CONT_MAPPINGS) == 0)
__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
- init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags);
+ init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
phys += next - addr;
} while (addr = next, addr != end);
@@ -265,25 +270,27 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
return true;
}
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
- phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void),
- int flags)
+static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
+ phys_addr_t phys, pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(void),
+ int flags)
{
- pud_t *pud;
unsigned long next;
+ pud_t *pudp;
+ pgd_t pgd = READ_ONCE(*pgdp);
- if (pgd_none(*pgd)) {
+ if (pgd_none(pgd)) {
phys_addr_t pud_phys;
BUG_ON(!pgtable_alloc);
pud_phys = pgtable_alloc();
- __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
+ __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
+ pgd = READ_ONCE(*pgdp);
}
- BUG_ON(pgd_bad(*pgd));
+ BUG_ON(pgd_bad(pgd));
- pud = pud_set_fixmap_offset(pgd, addr);
+ pudp = pud_set_fixmap_offset(pgdp, addr);
do {
- pud_t old_pud = *pud;
+ pud_t old_pud = READ_ONCE(*pudp);
next = pud_addr_end(addr, end);
@@ -292,23 +299,23 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
*/
if (use_1G_block(addr, next, phys) &&
(flags & NO_BLOCK_MAPPINGS) == 0) {
- pud_set_huge(pud, phys, prot);
+ pud_set_huge(pudp, phys, prot);
/*
* After the PUD entry has been populated once, we
* only allow updates to the permission attributes.
*/
BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
- pud_val(*pud)));
+ READ_ONCE(pud_val(*pudp))));
} else {
- alloc_init_cont_pmd(pud, addr, next, phys, prot,
+ alloc_init_cont_pmd(pudp, addr, next, phys, prot,
pgtable_alloc, flags);
BUG_ON(pud_val(old_pud) != 0 &&
- pud_val(old_pud) != pud_val(*pud));
+ pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
}
phys += next - addr;
- } while (pud++, addr = next, addr != end);
+ } while (pudp++, addr = next, addr != end);
pud_clear_fixmap();
}
@@ -320,7 +327,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
int flags)
{
unsigned long addr, length, end, next;
- pgd_t *pgd = pgd_offset_raw(pgdir, virt);
+ pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
/*
* If the virtual and physical address don't have the same offset
@@ -336,10 +343,10 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
+ alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
flags);
phys += next - addr;
- } while (pgd++, addr = next, addr != end);
+ } while (pgdp++, addr = next, addr != end);
}
static phys_addr_t pgd_pgtable_alloc(void)
@@ -401,10 +408,10 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
flush_tlb_kernel_range(virt, virt + size);
}
-static void __init __map_memblock(pgd_t *pgd, phys_addr_t start,
+static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
phys_addr_t end, pgprot_t prot, int flags)
{
- __create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start,
+ __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
prot, early_pgtable_alloc, flags);
}
@@ -418,7 +425,7 @@ void __init mark_linear_text_alias_ro(void)
PAGE_KERNEL_RO);
}
-static void __init map_mem(pgd_t *pgd)
+static void __init map_mem(pgd_t *pgdp)
{
phys_addr_t kernel_start = __pa_symbol(_text);
phys_addr_t kernel_end = __pa_symbol(__init_begin);
@@ -451,7 +458,7 @@ static void __init map_mem(pgd_t *pgd)
if (memblock_is_nomap(reg))
continue;
- __map_memblock(pgd, start, end, PAGE_KERNEL, flags);
+ __map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
}
/*
@@ -464,7 +471,7 @@ static void __init map_mem(pgd_t *pgd)
* Note that contiguous mappings cannot be remapped in this way,
* so we should avoid them here.
*/
- __map_memblock(pgd, kernel_start, kernel_end,
+ __map_memblock(pgdp, kernel_start, kernel_end,
PAGE_KERNEL, NO_CONT_MAPPINGS);
memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
@@ -475,7 +482,7 @@ static void __init map_mem(pgd_t *pgd)
* through /sys/kernel/kexec_crash_size interface.
*/
if (crashk_res.end) {
- __map_memblock(pgd, crashk_res.start, crashk_res.end + 1,
+ __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
PAGE_KERNEL,
NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
memblock_clear_nomap(crashk_res.start,
@@ -499,7 +506,7 @@ void mark_rodata_ro(void)
debug_checkwx();
}
-static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
+static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
pgprot_t prot, struct vm_struct *vma,
int flags, unsigned long vm_flags)
{
@@ -509,7 +516,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(pa_start));
BUG_ON(!PAGE_ALIGNED(size));
- __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
+ __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
early_pgtable_alloc, flags);
if (!(vm_flags & VM_NO_GUARD))
@@ -562,7 +569,7 @@ core_initcall(map_entry_trampoline);
/*
* Create fine-grained mappings for the kernel.
*/
-static void __init map_kernel(pgd_t *pgd)
+static void __init map_kernel(pgd_t *pgdp)
{
static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
vmlinux_initdata, vmlinux_data;
@@ -578,24 +585,24 @@ static void __init map_kernel(pgd_t *pgd)
* Only rodata will be remapped with different permissions later on,
* all other segments are allowed to use contiguous mappings.
*/
- map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
+ map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
VM_NO_GUARD);
- map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
+ map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
&vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
- map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
+ map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
&vmlinux_inittext, 0, VM_NO_GUARD);
- map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
+ map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
&vmlinux_initdata, 0, VM_NO_GUARD);
- map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
+ map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
- if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
+ if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
/*
* The fixmap falls in a separate pgd to the kernel, and doesn't
* live in the carveout for the swapper_pg_dir. We can simply
* re-use the existing dir for the fixmap.
*/
- set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
- *pgd_offset_k(FIXADDR_START));
+ set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
+ READ_ONCE(*pgd_offset_k(FIXADDR_START)));
} else if (CONFIG_PGTABLE_LEVELS > 3) {
/*
* The fixmap shares its top level pgd entry with the kernel
@@ -604,14 +611,15 @@ static void __init map_kernel(pgd_t *pgd)
* entry instead.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START),
+ pud_populate(&init_mm,
+ pud_set_fixmap_offset(pgdp, FIXADDR_START),
lm_alias(bm_pmd));
pud_clear_fixmap();
} else {
BUG();
}
- kasan_copy_shadow(pgd);
+ kasan_copy_shadow(pgdp);
}
/*
@@ -621,10 +629,10 @@ static void __init map_kernel(pgd_t *pgd)
void __init paging_init(void)
{
phys_addr_t pgd_phys = early_pgtable_alloc();
- pgd_t *pgd = pgd_set_fixmap(pgd_phys);
+ pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
- map_kernel(pgd);
- map_mem(pgd);
+ map_kernel(pgdp);
+ map_mem(pgdp);
/*
* We want to reuse the original swapper_pg_dir so we don't have to
@@ -635,7 +643,7 @@ void __init paging_init(void)
* To do this we need to go via a temporary pgd.
*/
cpu_replace_ttbr1(__va(pgd_phys));
- memcpy(swapper_pg_dir, pgd, PGD_SIZE);
+ memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
pgd_clear_fixmap();
@@ -655,37 +663,40 @@ void __init paging_init(void)
*/
int kern_addr_valid(unsigned long addr)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ pgd_t *pgdp;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
if ((((long)addr) >> VA_BITS) != -1UL)
return 0;
- pgd = pgd_offset_k(addr);
- if (pgd_none(*pgd))
+ pgdp = pgd_offset_k(addr);
+ if (pgd_none(READ_ONCE(*pgdp)))
return 0;
- pud = pud_offset(pgd, addr);
- if (pud_none(*pud))
+ pudp = pud_offset(pgdp, addr);
+ pud = READ_ONCE(*pudp);
+ if (pud_none(pud))
return 0;
- if (pud_sect(*pud))
- return pfn_valid(pud_pfn(*pud));
+ if (pud_sect(pud))
+ return pfn_valid(pud_pfn(pud));
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ pmdp = pmd_offset(pudp, addr);
+ pmd = READ_ONCE(*pmdp);
+ if (pmd_none(pmd))
return 0;
- if (pmd_sect(*pmd))
- return pfn_valid(pmd_pfn(*pmd));
+ if (pmd_sect(pmd))
+ return pfn_valid(pmd_pfn(pmd));
- pte = pte_offset_kernel(pmd, addr);
- if (pte_none(*pte))
+ ptep = pte_offset_kernel(pmdp, addr);
+ pte = READ_ONCE(*ptep);
+ if (pte_none(pte))
return 0;
- return pfn_valid(pte_pfn(*pte));
+ return pfn_valid(pte_pfn(pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
@@ -700,32 +711,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
{
unsigned long addr = start;
unsigned long next;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
do {
next = pmd_addr_end(addr, end);
- pgd = vmemmap_pgd_populate(addr, node);
- if (!pgd)
+ pgdp = vmemmap_pgd_populate(addr, node);
+ if (!pgdp)
return -ENOMEM;
- pud = vmemmap_pud_populate(pgd, addr, node);
- if (!pud)
+ pudp = vmemmap_pud_populate(pgdp, addr, node);
+ if (!pudp)
return -ENOMEM;
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd)) {
+ pmdp = pmd_offset(pudp, addr);
+ if (pmd_none(READ_ONCE(*pmdp))) {
void *p = NULL;
p = vmemmap_alloc_block_buf(PMD_SIZE, node);
if (!p)
return -ENOMEM;
- pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));
+ pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
} else
- vmemmap_verify((pte_t *)pmd, node, addr, next);
+ vmemmap_verify((pte_t *)pmdp, node, addr, next);
} while (addr = next, addr != end);
return 0;
@@ -739,20 +750,22 @@ void vmemmap_free(unsigned long start, unsigned long end,
static inline pud_t * fixmap_pud(unsigned long addr)
{
- pgd_t *pgd = pgd_offset_k(addr);
+ pgd_t *pgdp = pgd_offset_k(addr);
+ pgd_t pgd = READ_ONCE(*pgdp);
- BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+ BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
- return pud_offset_kimg(pgd, addr);
+ return pud_offset_kimg(pgdp, addr);
}
static inline pmd_t * fixmap_pmd(unsigned long addr)
{
- pud_t *pud = fixmap_pud(addr);
+ pud_t *pudp = fixmap_pud(addr);
+ pud_t pud = READ_ONCE(*pudp);
- BUG_ON(pud_none(*pud) || pud_bad(*pud));
+ BUG_ON(pud_none(pud) || pud_bad(pud));
- return pmd_offset_kimg(pud, addr);
+ return pmd_offset_kimg(pudp, addr);
}
static inline pte_t * fixmap_pte(unsigned long addr)
@@ -768,30 +781,31 @@ static inline pte_t * fixmap_pte(unsigned long addr)
*/
void __init early_fixmap_init(void)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ pgd_t *pgdp, pgd;
+ pud_t *pudp;
+ pmd_t *pmdp;
unsigned long addr = FIXADDR_START;
- pgd = pgd_offset_k(addr);
+ pgdp = pgd_offset_k(addr);
+ pgd = READ_ONCE(*pgdp);
if (CONFIG_PGTABLE_LEVELS > 3 &&
- !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
+ !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
/*
* We only end up here if the kernel mapping and the fixmap
* share the top level pgd entry, which should only happen on
* 16k/4 levels configurations.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- pud = pud_offset_kimg(pgd, addr);
+ pudp = pud_offset_kimg(pgdp, addr);
} else {
- if (pgd_none(*pgd))
- __pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
- pud = fixmap_pud(addr);
+ if (pgd_none(pgd))
+ __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
+ pudp = fixmap_pud(addr);
}
- if (pud_none(*pud))
- __pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
- pmd = fixmap_pmd(addr);
- __pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
+ if (pud_none(READ_ONCE(*pudp)))
+ __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
+ pmdp = fixmap_pmd(addr);
+ __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
/*
* The boot-ioremap range spans multiple pmds, for which
@@ -800,11 +814,11 @@ void __init early_fixmap_init(void)
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
- if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
- || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+ if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
+ || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
WARN_ON(1);
- pr_warn("pmd %p != %p, %p\n",
- pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
+ pr_warn("pmdp %p != %p, %p\n",
+ pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
fix_to_virt(FIX_BTMAP_BEGIN));
@@ -824,16 +838,16 @@ void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
unsigned long addr = __fix_to_virt(idx);
- pte_t *pte;
+ pte_t *ptep;
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
- pte = fixmap_pte(addr);
+ ptep = fixmap_pte(addr);
if (pgprot_val(flags)) {
- set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+ set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
} else {
- pte_clear(&init_mm, addr, pte);
+ pte_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
}
}
@@ -915,36 +929,36 @@ int __init arch_ioremap_pmd_supported(void)
return 1;
}
-int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
BUG_ON(phys & ~PUD_MASK);
- set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot));
+ set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
return 1;
}
-int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
pgprot_val(mk_sect_prot(prot)));
BUG_ON(phys & ~PMD_MASK);
- set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot));
+ set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
return 1;
}
-int pud_clear_huge(pud_t *pud)
+int pud_clear_huge(pud_t *pudp)
{
- if (!pud_sect(*pud))
+ if (!pud_sect(READ_ONCE(*pudp)))
return 0;
- pud_clear(pud);
+ pud_clear(pudp);
return 1;
}
-int pmd_clear_huge(pmd_t *pmd)
+int pmd_clear_huge(pmd_t *pmdp)
{
- if (!pmd_sect(*pmd))
+ if (!pmd_sect(READ_ONCE(*pmdp)))
return 0;
- pmd_clear(pmd);
+ pmd_clear(pmdp);
return 1;
}
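
The common thread in the mmu.c hunks above is that every load from a live page-table entry now goes through READ_ONCE(), so the compiler can neither tear the access nor silently reload it while the hardware walker or another CPU may be writing the entry. A minimal stand-alone sketch of the pattern follows; the pte_t layout, the valid-bit position and the READ_ONCE() definition are illustrative stand-ins, not the kernel's own:

#include <stdint.h>

typedef struct { uint64_t val; } pte_t;

/* Force a single, untorn load through a volatile access (GCC typeof). */
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

static int pte_is_valid(pte_t *ptep)
{
	pte_t pte = READ_ONCE(*ptep);	/* snapshot the entry once */

	return pte.val & 1;		/* then test only the snapshot */
}

This is exactly why kern_addr_valid() and kernel_page_present() above take one snapshot (pud, pmd, pte) and feed the same value to both the _none() and _sect() checks.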
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index a682a0a2a0fa..a56359373d8b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -29,7 +29,7 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
void *data)
{
struct page_change_data *cdata = data;
- pte_t pte = *ptep;
+ pte_t pte = READ_ONCE(*ptep);
pte = clear_pte_bit(pte, cdata->clear_mask);
pte = set_pte_bit(pte, cdata->set_mask);
@@ -156,30 +156,32 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
*/
bool kernel_page_present(struct page *page)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ pgd_t *pgdp;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
- pgd = pgd_offset_k(addr);
- if (pgd_none(*pgd))
+ pgdp = pgd_offset_k(addr);
+ if (pgd_none(READ_ONCE(*pgdp)))
return false;
- pud = pud_offset(pgd, addr);
- if (pud_none(*pud))
+ pudp = pud_offset(pgdp, addr);
+ pud = READ_ONCE(*pudp);
+ if (pud_none(pud))
return false;
- if (pud_sect(*pud))
+ if (pud_sect(pud))
return true;
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ pmdp = pmd_offset(pudp, addr);
+ pmd = READ_ONCE(*pmdp);
+ if (pmd_none(pmd))
return false;
- if (pmd_sect(*pmd))
+ if (pmd_sect(pmd))
return true;
- pte = pte_offset_kernel(pmd, addr);
- return pte_valid(*pte);
+ ptep = pte_offset_kernel(pmdp, addr);
+ return pte_valid(READ_ONCE(*ptep));
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 71baed7e592a..c0af47617299 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -205,7 +205,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
dc cvac, cur_\()\type\()p // Ensure any existing dirty
dmb sy // lines are written back before
ldr \type, [cur_\()\type\()p] // loading the entry
- tbz \type, #0, next_\()\type // Skip invalid entries
+ tbz \type, #0, skip_\()\type // Skip invalid and
+ tbnz \type, #11, skip_\()\type // non-global entries
.endm
.macro __idmap_kpti_put_pgtable_ent_ng, type
@@ -265,8 +266,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
do_pgd: __idmap_kpti_get_pgtable_ent pgd
tbnz pgd, #1, walk_puds
- __idmap_kpti_put_pgtable_ent_ng pgd
next_pgd:
+ __idmap_kpti_put_pgtable_ent_ng pgd
+skip_pgd:
add cur_pgdp, cur_pgdp, #8
cmp cur_pgdp, end_pgdp
b.ne do_pgd
@@ -294,8 +296,9 @@ walk_puds:
add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
do_pud: __idmap_kpti_get_pgtable_ent pud
tbnz pud, #1, walk_pmds
- __idmap_kpti_put_pgtable_ent_ng pud
next_pud:
+ __idmap_kpti_put_pgtable_ent_ng pud
+skip_pud:
add cur_pudp, cur_pudp, 8
cmp cur_pudp, end_pudp
b.ne do_pud
@@ -314,8 +317,9 @@ walk_pmds:
add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
do_pmd: __idmap_kpti_get_pgtable_ent pmd
tbnz pmd, #1, walk_ptes
- __idmap_kpti_put_pgtable_ent_ng pmd
next_pmd:
+ __idmap_kpti_put_pgtable_ent_ng pmd
+skip_pmd:
add cur_pmdp, cur_pmdp, #8
cmp cur_pmdp, end_pmdp
b.ne do_pmd
@@ -333,7 +337,7 @@ walk_ptes:
add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
do_pte: __idmap_kpti_get_pgtable_ent pte
__idmap_kpti_put_pgtable_ent_ng pte
-next_pte:
+skip_pte:
add cur_ptep, cur_ptep, #8
cmp cur_ptep, end_ptep
b.ne do_pte
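
For context on the proc.S hunk: the walker used to reach __idmap_kpti_put_pgtable_ent_ng via the next_* labels even for entries it should have left alone. With the new skip_* labels an entry is left untouched when it is invalid (bit 0 clear) or already non-global (bit 11, nG, set). A hedged C rendering of the per-entry decision the tbz/tbnz pair implements (names and macros here are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define ENT_VALID (1ULL << 0)	/* tbz  entry, #0,  skip_... */
#define ENT_NG    (1ULL << 11)	/* tbnz entry, #11, skip_... */

/* Only valid, still-global entries are rewritten as non-global. */
static bool needs_ng_rewrite(uint64_t entry)
{
	return (entry & ENT_VALID) && !(entry & ENT_NG);
}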
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 0b4c65a1af25..498f3da3f225 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -41,7 +41,6 @@ ifneq ($(CONFIG_IA64_ESI),)
obj-y += esi_stub.o # must be in kernel proper
endif
obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o
-obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_BINFMT_ELF) += elfcore.o
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 19c88d770054..fcf9af492d60 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -10,6 +10,8 @@
#include <linux/errno.h>
#include <linux/percpu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <asm/mips-cps.h>
@@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
phys_addr_t __weak mips_cpc_default_phys_base(void)
{
+ struct device_node *cpc_node;
+ struct resource res;
+ int err;
+
+ cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
+ if (cpc_node) {
+ err = of_address_to_resource(cpc_node, 0, &res);
+ if (!err)
+ return res.start;
+ }
+
return 0;
}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 85bc601e9a0d..5f8b0a9e30b3 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -375,6 +375,7 @@ static void __init bootmem_init(void)
unsigned long reserved_end;
unsigned long mapstart = ~0UL;
unsigned long bootmap_size;
+ phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
bool bootmap_valid = false;
int i;
@@ -395,7 +396,8 @@ static void __init bootmem_init(void)
max_low_pfn = 0;
/*
- * Find the highest page frame number we have available.
+ * Find the highest page frame number we have available
+ * and the lowest used RAM address.
*/
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
@@ -407,6 +409,8 @@ static void __init bootmem_init(void)
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
+ ramstart = min(ramstart, boot_mem_map.map[i].addr);
+
#ifndef CONFIG_HIGHMEM
/*
* Skip highmem here so we get an accurate max_low_pfn if low
@@ -436,6 +440,13 @@ static void __init bootmem_init(void)
mapstart = max(reserved_end, start);
}
+ /*
+ * Reserve any memory between the start of RAM and PHYS_OFFSET
+ */
+ if (ramstart > PHYS_OFFSET)
+ add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
+ BOOT_MEM_RESERVED);
+
if (min_low_pfn >= max_low_pfn)
panic("Incorrect memory mapping !!!");
if (min_low_pfn > ARCH_PFN_OFFSET) {
@@ -664,9 +675,6 @@ static int __init early_parse_mem(char *p)
add_memory_region(start, size, BOOT_MEM_RAM);
- if (start && start > PHYS_OFFSET)
- add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
- BOOT_MEM_RESERVED);
return 0;
}
early_param("mem", early_parse_mem);
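
The setup.c change replaces a per-"mem=" reservation with a single global one: bootmem_init() tracks the lowest address across all boot_mem_map entries and, after the scan, reserves everything from PHYS_OFFSET up to that point. A toy sketch of the same min-then-reserve logic (memory-map values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys_offset = 0x0;
	uint64_t ramstart = UINT64_MAX;	/* as in the patch's ULLONG_MAX */
	uint64_t map[][2] = {		/* { addr, size } pairs */
		{ 0x80000000, 0x10000000 },
		{ 0x20000000, 0x10000000 },
	};

	for (unsigned int i = 0; i < 2; i++)
		if (map[i][0] < ramstart)
			ramstart = map[i][0];

	if (ramstart > phys_offset)	/* reserve [PHYS_OFFSET, ramstart) */
		printf("reserve %#llx bytes at %#llx\n",
		       (unsigned long long)(ramstart - phys_offset),
		       (unsigned long long)phys_offset);
	return 0;
}

With the lowest region at 0x20000000, the hole below RAM is reserved once, no matter how many mem= arguments were parsed.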
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 87dcac2447c8..9d41732a9146 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -572,7 +572,7 @@ asmlinkage void __weak plat_wired_tlb_setup(void)
*/
}
-void __init bmips_cpu_setup(void)
+void bmips_cpu_setup(void)
{
void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
u32 __maybe_unused cfg;
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 30a155c0a6b0..c615abdce119 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -16,6 +16,7 @@
#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
#define PMD_CACHE_INDEX PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 949d691094a4..67c5475311ee 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -63,7 +63,8 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
* keeping the prototype consistent across the two formats.
*/
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
- unsigned int subpg_index, unsigned long hidx)
+ unsigned int subpg_index, unsigned long hidx,
+ int offset)
{
return (hidx << H_PAGE_F_GIX_SHIFT) &
(H_PAGE_F_SECOND | H_PAGE_F_GIX);
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 338b7da468ce..3bcf269f8f55 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -45,7 +45,7 @@
* generic accessors and iterators here
*/
#define __real_pte __real_pte
-static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
{
real_pte_t rpte;
unsigned long *hidxp;
@@ -59,7 +59,7 @@ static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
*/
smp_rmb();
- hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+ hidxp = (unsigned long *)(ptep + offset);
rpte.hidx = *hidxp;
return rpte;
}
@@ -86,9 +86,10 @@ static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
* expected to modify the PTE bits accordingly and commit the PTE to memory.
*/
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
- unsigned int subpg_index, unsigned long hidx)
+ unsigned int subpg_index,
+ unsigned long hidx, int offset)
{
- unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+ unsigned long *hidxp = (unsigned long *)(ptep + offset);
rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
*hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);
@@ -140,13 +141,18 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
}
#define H_PTE_TABLE_SIZE PTE_FRAG_SIZE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
#define H_PMD_TABLE_SIZE ((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
(sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
+#ifdef CONFIG_HUGETLB_PAGE
+#define H_PUD_TABLE_SIZE ((sizeof(pud_t) << PUD_INDEX_SIZE) + \
+ (sizeof(unsigned long) << PUD_INDEX_SIZE))
+#else
#define H_PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
+#endif
#define H_PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0920eff731b3..935adcd92a81 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -23,7 +23,8 @@
H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
+#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
+ defined(CONFIG_PPC_64K_PAGES)
/*
* only with hash 64k we need to use the second half of pmd page table
* to store pointer to deposited pgtable_t
@@ -33,6 +34,16 @@
#define H_PMD_CACHE_INDEX H_PMD_INDEX_SIZE
#endif
/*
+ * We store the slot details in the second half of the page table.
+ * Increase the PUD-level table size so that hugetlb PTEs can be
+ * stored at the PUD level.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
+#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE + 1)
+#else
+#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE)
+#endif
+/*
* Define the address range of the kernel non-linear virtual area
*/
#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
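
Across the hash-64k.h and hash.h hunks and the mm/hash64_*.c hunks below, the idea is the same: with 64K pages the per-subpage slot hints (hidx) live in the second half of the page-table page, `offset` entries past the PTE itself, and that offset is now a parameter (PTRS_PER_PTE, PTRS_PER_PMD or PTRS_PER_PUD) instead of a hard-coded PTRS_PER_PTE. A hedged sketch of the addressing with an invented toy geometry:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pte_t;

/* The hidx word for *ptep lives `offset` entries further on, i.e. in
 * the second half of the same (double-sized) table. */
static uint64_t *hidx_slot(pte_t *ptep, int offset)
{
	return (uint64_t *)(ptep + offset);
}

int main(void)
{
	enum { PTRS_PER_PTE = 8 };		/* toy table geometry */
	pte_t table[2 * PTRS_PER_PTE] = { 0 };	/* PTEs, then hidx words */

	*hidx_slot(&table[3], PTRS_PER_PTE) = 0xf;
	printf("hidx for entry 3: %#llx\n",
	       (unsigned long long)table[3 + PTRS_PER_PTE].val);
	return 0;
}

This is also why H_PUD_TABLE_SIZE doubles when CONFIG_HUGETLB_PAGE is set: a PUD-level hugepage needs room for its hidx words in the second half of the PUD table.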
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 1fcfa425cefa..4746bc68d446 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -73,10 +73,16 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
+ pgd_t *pgd;
+
if (radix_enabled())
return radix__pgd_alloc(mm);
- return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
- pgtable_gfp_flags(mm, GFP_KERNEL));
+
+ pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+ memset(pgd, 0, PGD_TABLE_SIZE);
+
+ return pgd;
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,13 +99,13 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+ return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
pgtable_gfp_flags(mm, GFP_KERNEL));
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
- kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+ kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -115,7 +121,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
* ahead and flush the page walk cache
*/
flush_tlb_pgtable(tlb, address);
- pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
+ pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 51017726d495..a6b9f1d74600 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -232,11 +232,13 @@ extern unsigned long __pmd_index_size;
extern unsigned long __pud_index_size;
extern unsigned long __pgd_index_size;
extern unsigned long __pmd_cache_index;
+extern unsigned long __pud_cache_index;
#define PTE_INDEX_SIZE __pte_index_size
#define PMD_INDEX_SIZE __pmd_index_size
#define PUD_INDEX_SIZE __pud_index_size
#define PGD_INDEX_SIZE __pgd_index_size
#define PMD_CACHE_INDEX __pmd_cache_index
+#define PUD_CACHE_INDEX __pud_cache_index
/*
* Because of the use of PTE fragments and THP, the size of the page
* tables is not always derived from the index sizes above.
@@ -348,7 +350,7 @@ extern unsigned long pci_io_base;
*/
#ifndef __real_pte
-#define __real_pte(e,p) ((real_pte_t){(e)})
+#define __real_pte(e, p, o) ((real_pte_t){(e)})
#define __rpte_to_pte(r) ((r).pte)
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 176dfb73d42c..471b2274fbeb 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -645,7 +645,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
EXC_HV, SOFTEN_TEST_HV, bitmask)
#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask) \
- MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec, bitmask);\
+ MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
/*
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 88e5e8f17e98..855e17d158b1 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -30,6 +30,16 @@
#define PACA_IRQ_PMI 0x40
/*
+ * Some soft-masked interrupts must be hard masked until they are replayed
+ * (e.g., because the soft-masked handler does not clear the exception).
+ */
+#ifdef CONFIG_PPC_BOOK3S
+#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI)
+#else
+#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
+#endif
+
+/*
* flags for paca->irq_soft_mask
*/
#define IRQS_ENABLED 0
@@ -244,7 +254,7 @@ static inline bool lazy_irq_pending(void)
static inline void may_hard_irq_enable(void)
{
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
- if (!(get_paca()->irq_happened & PACA_IRQ_EE))
+ if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
__hard_irq_enable();
}
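
The effect of the hw_irq.h hunks is that may_hard_irq_enable() re-enables MSR[EE] only when no pending source has to stay hard-masked; on Book3S that set includes PMIs, which keep asserting until they are replayed. A self-contained sketch of the predicate (the flag values are illustrative, patterned on the PACA_IRQ_PMI definition above):

#include <stdbool.h>
#include <stdint.h>

#define PACA_IRQ_EE	0x04	/* illustrative values */
#define PACA_IRQ_PMI	0x40

#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE | PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

/* True when it is safe to turn MSR[EE] back on. */
static bool may_hard_enable(uint8_t irq_happened)
{
	return !(irq_happened & PACA_IRQ_MUST_HARD_MASK);
}

The exceptions-64s.S hunk below is the assembly twin of this test: the masked-interrupt path now checks PACA_IRQ_MUST_HARD_MASK directly instead of the inverted (PACA_IRQ_DBELL|PACA_IRQ_HMI) test.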
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 9dcbfa6bbb91..d8b1e8e7e035 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -140,6 +140,12 @@ static inline bool kdump_in_progress(void)
return false;
}
+static inline void crash_ipi_callback(struct pt_regs *regs) { }
+
+static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+{
+}
+
#endif /* CONFIG_KEXEC_CORE */
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 504a3c36ce5c..03bbd1149530 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -24,6 +24,7 @@ extern int icache_44x_need_flush;
#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
#define PMD_CACHE_INDEX PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index abddf5830ad5..5c5f75d005ad 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -27,6 +27,7 @@
#else
#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#endif
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
/*
* Define the address range of the kernel non-linear virtual area
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 88187c285c70..9f421641a35c 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid);
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
extern int numa_update_cpu_topology(bool cpus_locked);
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+{
+ numa_cpu_lookup_table[cpu] = node;
+}
+
static inline int early_cpu_to_node(int cpu)
{
int nid;
@@ -76,12 +81,16 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
{
return 0;
}
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
#endif /* CONFIG_NUMA */
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
extern int start_topology_update(void);
extern int stop_topology_update(void);
extern int prrn_is_enabled(void);
+extern int find_and_online_cpu_nid(int cpu);
#else
static inline int start_topology_update(void)
{
@@ -95,6 +104,10 @@ static inline int prrn_is_enabled(void)
{
return 0;
}
+static inline int find_and_online_cpu_nid(int cpu)
+{
+ return 0;
+}
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index ee832d344a5a..9b6e653e501a 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -943,6 +943,8 @@ kernel_dbg_exc:
/*
* An interrupt came in while soft-disabled; we mark paca->irq_happened
* accordingly and if the interrupt is level sensitive, we hard disable.
+ * Hard disabling (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
+ * keep these in sync.
*/
.macro masked_interrupt_book3e paca_irq full_mask
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 243d072a225a..3ac87e53b3da 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1426,7 +1426,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
* triggered and won't automatically refire.
* - If it was an HMI we return immediately since we handled it in realmode
* and it won't refire.
- * - else we hard disable and return.
+ * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
* This is called with r10 containing the value to OR to the paca field.
*/
#define MASKED_INTERRUPT(_H) \
@@ -1441,8 +1441,8 @@ masked_##_H##interrupt: \
ori r10,r10,0xffff; \
mtspr SPRN_DEC,r10; \
b MASKED_DEC_HANDLER_LABEL; \
-1: andi. r10,r10,(PACA_IRQ_DBELL|PACA_IRQ_HMI); \
- bne 2f; \
+1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK; \
+ beq 2f; \
mfspr r10,SPRN_##_H##SRR1; \
xori r10,r10,MSR_EE; /* clear MSR_EE */ \
mtspr SPRN_##_H##SRR1,r10; \
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 5a8bfee6e187..04d0bbd7a1dd 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -788,7 +788,8 @@ static int register_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
device_create_file(s, &dev_attr_pir);
- if (cpu_has_feature(CPU_FTR_ARCH_206))
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ !firmware_has_feature(FW_FEATURE_LPAR))
device_create_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */
@@ -873,7 +874,8 @@ static int unregister_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
device_remove_file(s, &dev_attr_pir);
- if (cpu_has_feature(CPU_FTR_ARCH_206))
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ !firmware_has_feature(FW_FEATURE_LPAR))
device_remove_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index 1604110c4238..916844f99c64 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -216,6 +216,8 @@ static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
u32 i, n_lmbs;
n_lmbs = of_read_number(prop++, 1);
+ if (n_lmbs == 0)
+ return;
for (i = 0; i < n_lmbs; i++) {
read_drconf_v1_cell(&lmb, &prop);
@@ -245,6 +247,8 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
u32 i, j, lmb_sets;
lmb_sets = of_read_number(prop++, 1);
+ if (lmb_sets == 0)
+ return;
for (i = 0; i < lmb_sets; i++) {
read_drconf_v2_cell(&dr_cell, &prop);
@@ -354,6 +358,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
struct drmem_lmb *lmb;
drmem_info->n_lmbs = of_read_number(prop++, 1);
+ if (drmem_info->n_lmbs == 0)
+ return;
drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
GFP_KERNEL);
@@ -373,6 +379,8 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
int lmb_index;
lmb_sets = of_read_number(prop++, 1);
+ if (lmb_sets == 0)
+ return;
/* first pass, calculate the number of LMBs */
p = prop;
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 5a69b51d08a3..d573d7d07f25 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
* need to add in 0x1 if it's a read-only user page
*/
rflags = htab_convert_pte_flags(new_pte);
- rpte = __real_pte(__pte(old_pte), ptep);
+ rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -117,7 +117,7 @@ repeat:
return -1;
}
new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
- new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+ new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
}
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
return 0;
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 2253bbc6a599..e601d95c3b20 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -86,7 +86,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
vpn = hpt_vpn(ea, vsid, ssize);
- rpte = __real_pte(__pte(old_pte), ptep);
+ rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
/*
* None of the sub 4K pages is hashed
*/
@@ -214,7 +214,7 @@ repeat:
return -1;
}
- new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot);
+ new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
new_pte |= H_PAGE_HASHPTE;
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
@@ -262,7 +262,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
rflags = htab_convert_pte_flags(new_pte);
- rpte = __real_pte(__pte(old_pte), ptep);
+ rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -327,7 +327,7 @@ repeat:
}
new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
- new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+ new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
}
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
return 0;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 7d07c7e17db6..cf290d415dcd 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1008,6 +1008,7 @@ void __init hash__early_init_mmu(void)
__pmd_index_size = H_PMD_INDEX_SIZE;
__pud_index_size = H_PUD_INDEX_SIZE;
__pgd_index_size = H_PGD_INDEX_SIZE;
+ __pud_cache_index = H_PUD_CACHE_INDEX;
__pmd_cache_index = H_PMD_CACHE_INDEX;
__pte_table_size = H_PTE_TABLE_SIZE;
__pmd_table_size = H_PMD_TABLE_SIZE;
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 12511f5a015f..b320f5097a06 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -27,7 +27,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
unsigned long vpn;
unsigned long old_pte, new_pte;
unsigned long rflags, pa, sz;
- long slot;
+ long slot, offset;
BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
@@ -63,7 +63,11 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
rflags = htab_convert_pte_flags(new_pte);
- rpte = __real_pte(__pte(old_pte), ptep);
+ if (unlikely(mmu_psize == MMU_PAGE_16G))
+ offset = PTRS_PER_PUD;
+ else
+ offset = PTRS_PER_PMD;
+ rpte = __real_pte(__pte(old_pte), ptep, offset);
sz = ((1UL) << shift);
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -104,7 +108,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
return -1;
}
- new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+ new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
}
/*
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index eb8c6c8c4851..2b656e67f2ea 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -100,6 +100,6 @@ void pgtable_cache_init(void)
* same size as either the pgd or pmd index except with THP enabled
* on book3s 64
*/
- if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
- pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+ if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX))
+ pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor);
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 314d19ab9385..edd8d0bc9364 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -143,11 +143,6 @@ static void reset_numa_cpu_lookup_table(void)
numa_cpu_lookup_table[cpu] = -1;
}
-static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
-{
- numa_cpu_lookup_table[cpu] = node;
-}
-
static void map_cpu_to_node(int cpu, int node)
{
update_numa_cpu_lookup_table(cpu, node);
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 573a9a2ee455..2e10a964e290 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -17,9 +17,11 @@
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/string_helpers.h>
+#include <linux/stop_machine.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
@@ -333,6 +335,22 @@ static void __init radix_init_pgtable(void)
"r" (TLBIEL_INVAL_SET_LPID), "r" (0));
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
+
+ /*
+ * The init_mm context is given the first available (non-zero) PID,
+ * which is the "guard PID" and contains no page table. PIDR should
+ * never be set to zero because that duplicates the kernel address
+ * space at the 0x0... offset (quadrant 0)!
+ *
+ * An arbitrary PID that may later be allocated by the PID allocator
+ * for userspace processes must not be used either, because that
+ * would cause stale user mappings for that PID on CPUs outside of
+ * the TLB invalidation scheme (because it won't be in mm_cpumask).
+ *
+ * So permanently carve out one PID for the purpose of a guard PID.
+ */
+ init_mm.context.id = mmu_base_pid;
+ mmu_base_pid++;
}
static void __init radix_init_partition_table(void)
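
The guard-PID comment above reduces to a small invariant: init_mm permanently owns the first non-zero PID and the allocator starts above it, so PIDR never holds 0 (which would alias the kernel at quadrant 0) and never collides with a future userspace PID. A hedged sketch, with mmu_base_pid standing in for the kernel's allocator base and the names purely illustrative:

static unsigned int mmu_base_pid = 1;	/* illustrative starting value */
static unsigned int init_mm_id;

static void reserve_guard_pid(void)
{
	init_mm_id = mmu_base_pid;	/* guard PID: backed by no page table */
	mmu_base_pid++;			/* user PIDs are handed out above it */
}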
@@ -535,6 +553,7 @@ void __init radix__early_init_mmu(void)
__pmd_index_size = RADIX_PMD_INDEX_SIZE;
__pud_index_size = RADIX_PUD_INDEX_SIZE;
__pgd_index_size = RADIX_PGD_INDEX_SIZE;
+ __pud_cache_index = RADIX_PUD_INDEX_SIZE;
__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
__pte_table_size = RADIX_PTE_TABLE_SIZE;
__pmd_table_size = RADIX_PMD_TABLE_SIZE;
@@ -579,7 +598,8 @@ void __init radix__early_init_mmu(void)
radix_init_iamr();
radix_init_pgtable();
-
+ /* Switch to the guard PID before turning on MMU */
+ radix__switch_mmu_context(NULL, &init_mm);
if (cpu_has_feature(CPU_FTR_HVMODE))
tlbiel_all();
}
@@ -604,6 +624,7 @@ void radix__early_init_mmu_secondary(void)
}
radix_init_iamr();
+ radix__switch_mmu_context(NULL, &init_mm);
if (cpu_has_feature(CPU_FTR_HVMODE))
tlbiel_all();
}
@@ -666,6 +687,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
pud_clear(pud);
}
+struct change_mapping_params {
+ pte_t *pte;
+ unsigned long start;
+ unsigned long end;
+ unsigned long aligned_start;
+ unsigned long aligned_end;
+};
+
+static int stop_machine_change_mapping(void *data)
+{
+ struct change_mapping_params *params =
+ (struct change_mapping_params *)data;
+
+ if (!data)
+ return -1;
+
+ spin_unlock(&init_mm.page_table_lock);
+ pte_clear(&init_mm, params->aligned_start, params->pte);
+ create_physical_mapping(params->aligned_start, params->start);
+ create_physical_mapping(params->end, params->aligned_end);
+ spin_lock(&init_mm.page_table_lock);
+ return 0;
+}
+
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
unsigned long end)
{
@@ -694,6 +739,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
}
}
+/*
+ * Helper to clear the PTE and, if needed, split the surrounding mapping
+ */
+static void split_kernel_mapping(unsigned long addr, unsigned long end,
+ unsigned long size, pte_t *pte)
+{
+ unsigned long mask = ~(size - 1);
+ unsigned long aligned_start = addr & mask;
+ unsigned long aligned_end = addr + size;
+ struct change_mapping_params params;
+ bool split_region = false;
+
+ if ((end - addr) < size) {
+ /*
+ * We're going to clear the PTE, but we have not yet
+ * flushed the mapping, so we must remap and flush. If
+ * the effects became visible outside this processor,
+ * or if we were running in code close to the mapping
+ * we cleared, we would be in trouble.
+ */
+ if (overlaps_kernel_text(aligned_start, addr) ||
+ overlaps_kernel_text(end, aligned_end)) {
+ /*
+ * Hack, just return, don't pte_clear
+ */
+ WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
+ "text, not splitting\n", addr, end);
+ return;
+ }
+ split_region = true;
+ }
+
+ if (split_region) {
+ params.pte = pte;
+ params.start = addr;
+ params.end = end;
+ params.aligned_start = addr & ~(size - 1);
+ params.aligned_end = min_t(unsigned long, aligned_end,
+ (unsigned long)__va(memblock_end_of_DRAM()));
+ stop_machine(stop_machine_change_mapping, &params, NULL);
+ return;
+ }
+
+ pte_clear(&init_mm, addr, pte);
+}
+
static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
unsigned long end)
{
@@ -709,13 +800,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
continue;
if (pmd_huge(*pmd)) {
- if (!IS_ALIGNED(addr, PMD_SIZE) ||
- !IS_ALIGNED(next, PMD_SIZE)) {
- WARN_ONCE(1, "%s: unaligned range\n", __func__);
- continue;
- }
-
- pte_clear(&init_mm, addr, (pte_t *)pmd);
+ split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
continue;
}
@@ -740,13 +825,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
continue;
if (pud_huge(*pud)) {
- if (!IS_ALIGNED(addr, PUD_SIZE) ||
- !IS_ALIGNED(next, PUD_SIZE)) {
- WARN_ONCE(1, "%s: unaligned range\n", __func__);
- continue;
- }
-
- pte_clear(&init_mm, addr, (pte_t *)pud);
+ split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
continue;
}
@@ -772,13 +851,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
continue;
if (pgd_huge(*pgd)) {
- if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
- !IS_ALIGNED(next, PGDIR_SIZE)) {
- WARN_ONCE(1, "%s: unaligned range\n", __func__);
- continue;
- }
-
- pte_clear(&init_mm, addr, (pte_t *)pgd);
+ split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
continue;
}
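
The net behavior of the three remove_*_table() hunks: a hot-unplug range that only partially covers a huge mapping is no longer skipped with a warning; split_kernel_mapping() clears the huge PTE under stop_machine() and re-creates the two surviving pieces either side of [start, end). A hedged arithmetic sketch of the ranges involved, mirroring the params the patch computes (toy 2 MB size, addresses invented):

#include <stdio.h>

int main(void)
{
	unsigned long size = 2UL << 20;		/* e.g. a PMD-sized mapping */
	unsigned long start = 0x40000000UL;	/* range being unplugged */
	unsigned long end = 0x40080000UL;
	unsigned long aligned_start = start & ~(size - 1);
	unsigned long aligned_end = start + size;	/* as in the patch */

	/* The huge mapping is torn down, then the remainders return: */
	printf("remap low : %#lx-%#lx\n", aligned_start, start);
	printf("remap high: %#lx-%#lx\n", end, aligned_end);
	return 0;
}

Here start is already aligned, so the low remap range is empty and only [end, aligned_end) gets recreated with smaller pages.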
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index c9a623c2d8a2..28c980eb4422 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -82,6 +82,8 @@ unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
+unsigned long __pud_cache_index;
+EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
@@ -471,6 +473,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
if (old & PATB_HR) {
asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
"r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+ asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+ "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
} else {
asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 881ebd53ffc2..9b23f12e863c 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -51,7 +51,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
unsigned int psize;
int ssize;
real_pte_t rpte;
- int i;
+ int i, offset;
i = batch->index;
@@ -67,6 +67,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
psize = get_slice_psize(mm, addr);
/* Mask the address for the correct page size */
addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+ if (unlikely(psize == MMU_PAGE_16G))
+ offset = PTRS_PER_PUD;
+ else
+ offset = PTRS_PER_PMD;
#else
BUG();
psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
@@ -78,6 +82,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
* support 64k pages, this might be different from the
* hardware page size encoded in the slice table. */
addr &= PAGE_MASK;
+ offset = PTRS_PER_PTE;
}
@@ -91,7 +96,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
}
WARN_ON(vsid == 0);
vpn = hpt_vpn(addr, vsid, ssize);
- rpte = __real_pte(__pte(pte), ptep);
+ rpte = __real_pte(__pte(pte), ptep, offset);
/*
* Check if we have an active batch on this CPU. If not, just
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index dd4c9b8b8a81..f6f55ab4980e 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -199,9 +199,11 @@ static void disable_nest_pmu_counters(void)
const struct cpumask *l_cpumask;
get_online_cpus();
- for_each_online_node(nid) {
+ for_each_node_with_cpus(nid) {
l_cpumask = cpumask_of_node(nid);
- cpu = cpumask_first(l_cpumask);
+ cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ continue;
opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
get_hard_smp_processor_id(cpu));
}
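
The opal-imc fix is a generic cpumask pattern: a node can be online while owning no CPUs, and a node's CPUs may all be offline, so the node mask must be intersected with the online mask; cpumask_first_and() signals an empty intersection by returning nr_cpu_ids. A toy model of that convention (helper name and NR_CPU_IDS value invented):

#include <stdio.h>

#define NR_CPU_IDS 8	/* toy value */

/* First bit set in (a & b), or NR_CPU_IDS when the intersection is
 * empty — the same convention as the kernel's cpumask_first_and(). */
static int first_and(unsigned int a, unsigned int b)
{
	unsigned int m = a & b;

	for (int i = 0; i < NR_CPU_IDS; i++)
		if (m & (1u << i))
			return i;
	return NR_CPU_IDS;
}

int main(void)
{
	unsigned int node_cpus = 0x30, online = 0x0f;	/* disjoint sets */

	if (first_and(node_cpus, online) >= NR_CPU_IDS)
		printf("node has no online CPU, skip it\n");
	return 0;
}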
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 2b3eb01ab110..b7c53a51c31b 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -1063,16 +1063,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
rc = PTR_ERR(txwin->paste_kaddr);
goto free_window;
}
+ } else {
+ /*
+ * A user mapping must ensure that a context switch issues
+ * CP_ABORT for this thread.
+ */
+ rc = set_thread_uses_vas();
+ if (rc)
+ goto free_window;
}
- /*
- * Now that we have a send window, ensure context switch issues
- * CP_ABORT for this thread.
- */
- rc = -EINVAL;
- if (set_thread_uses_vas() < 0)
- goto free_window;
-
set_vinst_win(vinst, txwin);
return txwin;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index dceb51454d8d..652d3e96b812 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -36,6 +36,7 @@
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>
+#include <asm/topology.h>
#include "pseries.h"
#include "offline_states.h"
@@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np)
BUG_ON(cpu_online(cpu));
set_cpu_present(cpu, false);
set_hard_smp_processor_id(cpu, -1);
+ update_numa_cpu_lookup_table(cpu, -1);
break;
}
if (cpu >= nr_cpu_ids)
@@ -340,8 +342,6 @@ static void pseries_remove_processor(struct device_node *np)
cpu_maps_update_done();
}
-extern int find_and_online_cpu_nid(int cpu);
-
static int dlpar_online_cpu(struct device_node *dn)
{
int rc = 0;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 81d8614e7379..5e1ef9150182 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -49,6 +49,28 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
/*
+ * Enable hotplug interrupts late because processing them may touch other
+ * devices or systems (e.g. hugepages) that have not been initialized at the
+ * subsys stage.
+ */
+int __init init_ras_hotplug_IRQ(void)
+{
+ struct device_node *np;
+
+ /* Hotplug Events */
+ np = of_find_node_by_path("/event-sources/hot-plug-events");
+ if (np != NULL) {
+ if (dlpar_workqueue_init() == 0)
+ request_event_sources_irqs(np, ras_hotplug_interrupt,
+ "RAS_HOTPLUG");
+ of_node_put(np);
+ }
+
+ return 0;
+}
+machine_late_initcall(pseries, init_ras_hotplug_IRQ);
+
+/*
* Initialize handlers for the set of interrupts caused by hardware errors
* and power system events.
*/
@@ -66,15 +88,6 @@ static int __init init_ras_IRQ(void)
of_node_put(np);
}
- /* Hotplug Events */
- np = of_find_node_by_path("/event-sources/hot-plug-events");
- if (np != NULL) {
- if (dlpar_workqueue_init() == 0)
- request_event_sources_irqs(np, ras_hotplug_interrupt,
- "RAS_HOTPLUG");
- of_node_put(np);
- }
-
/* EPOW Events */
np = of_find_node_by_path("/event-sources/epow-events");
if (np != NULL) {
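[Note: the move above defers hotplug-event IRQ registration from the subsys-level init_ras_IRQ() to a machine_late_initcall(), so the handler can no longer run before things like hugepages exist. A toy model of level-ordered initcalls; the level numbers and names are invented for the demo.]

    #include <stdio.h>

    struct initcall { int level; void (*fn)(void); };

    static void init_hugepages(void)   { printf("hugepages ready\n"); }
    static void init_ras_hotplug(void) { printf("hotplug IRQ armed\n"); }

    /* Lower level runs first; "late" initcalls get a higher number. */
    static struct initcall calls[] = {
        { 7, init_ras_hotplug },   /* late   */
        { 4, init_hugepages },     /* subsys */
    };

    int main(void)
    {
        for (int lvl = 0; lvl <= 7; lvl++)
            for (unsigned i = 0; i < sizeof(calls)/sizeof(calls[0]); i++)
                if (calls[i].level == lvl)
                    calls[i].fn();
        return 0;
    }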
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index d9c4c9366049..091f1d0d0af1 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
if (rc) {
- pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+ pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
+ target, prio);
rc = -EIO;
goto fail;
}
@@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
/* Configure and enable the queue in HW */
rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
if (rc) {
- pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+ pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
+ target, prio);
rc = -EIO;
} else {
q->qpage = qpage;
@@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
if (IS_ERR(qpage))
return PTR_ERR(qpage);
- return xive_spapr_configure_queue(cpu, q, prio, qpage,
- xive_queue_shift);
+ return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
+ q, prio, qpage, xive_queue_shift);
}
static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
@@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
struct xive_q *q = &xc->queue[prio];
unsigned int alloc_order;
long rc;
+ int hw_cpu = get_hard_smp_processor_id(cpu);
- rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
+ rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
if (rc)
- pr_err("Error %ld setting queue for prio %d\n", rc, prio);
+ pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
+ hw_cpu, prio);
alloc_order = xive_alloc_order(xive_queue_shift);
free_pages((unsigned long)q->qpage, alloc_order);
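[Note: all three hunks in this file make the same correction — the hypervisor queue calls want a hardware thread id, while the callers were passing Linux's logical CPU number. A hedged sketch of the translation step, with a made-up lookup table standing in for get_hard_smp_processor_id().]

    #include <stdio.h>

    /* Hypothetical logical->hardware thread id map (e.g. SMT4 cores). */
    static const int hard_id[] = { 0, 4, 8, 12 };

    static int hv_set_queue(int hw_cpu, int prio)
    {
        printf("hcall: configure queue for hw cpu %d prio %d\n", hw_cpu, prio);
        return 0;
    }

    int main(void)
    {
        int cpu = 2;                       /* logical id used by the kernel */
        return hv_set_queue(hard_id[cpu],  /* what the hypervisor expects   */
                            7);
    }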
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6bf594ace663..8767e45f1b2b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -430,6 +430,8 @@ config SPARC_LEON
depends on SPARC32
select USB_EHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_DESC
+ select USB_UHCI_BIG_ENDIAN_MMIO
+ select USB_UHCI_BIG_ENDIAN_DESC
---help---
Say Y here if you are running on a SPARC-LEON processor.
The LEON processor is a synthesizable VHDL model of the
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
index aff152c87cf4..5a82bac5e0bc 100644
--- a/arch/x86/.gitignore
+++ b/arch/x86/.gitignore
@@ -1,6 +1,7 @@
boot/compressed/vmlinux
tools/test_get_len
tools/insn_sanity
+tools/insn_decoder_test
purgatory/kexec-purgatory.c
purgatory/purgatory.ro
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 63bf349b2b24..a528c14d45a5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -423,12 +423,6 @@ config X86_MPPARSE
For old SMP systems that do not have proper ACPI support. On newer systems
(especially with 64-bit CPUs) with ACPI support, MADT and DSDT will override it
-config X86_BIGSMP
- bool "Support for big SMP systems with more than 8 CPUs"
- depends on X86_32 && SMP
- ---help---
- This option is needed for systems that have more than 8 CPUs
-
config GOLDFISH
def_bool y
depends on X86_GOLDFISH
@@ -460,6 +454,12 @@ config INTEL_RDT
Say N if unsure.
if X86_32
+config X86_BIGSMP
+ bool "Support for big SMP systems with more than 8 CPUs"
+ depends on SMP
+ ---help---
+ This option is needed for systems that have more than 8 CPUs
+
config X86_EXTENDED_PLATFORM
bool "Support for extended (non-PC) x86 platforms"
default y
@@ -949,25 +949,66 @@ config MAXSMP
Enable the maximum number of CPUs and NUMA nodes for this architecture.
If unsure, say N.
+#
+# The maximum number of CPUs supported:
+#
+# The main config value is NR_CPUS, which defaults to NR_CPUS_DEFAULT,
+# and which can be configured interactively in the
+# [NR_CPUS_RANGE_BEGIN ... NR_CPUS_RANGE_END] range.
+#
+# The ranges are different on 32-bit and 64-bit kernels, depending on
+# hardware capabilities and scalability features of the kernel.
+#
+# ( If MAXSMP is enabled we just use the highest possible value and disable
+# interactive configuration. )
+#
+
+config NR_CPUS_RANGE_BEGIN
+ int
+ default NR_CPUS_RANGE_END if MAXSMP
+ default 1 if !SMP
+ default 2
+
+config NR_CPUS_RANGE_END
+ int
+ depends on X86_32
+ default 64 if SMP && X86_BIGSMP
+ default 8 if SMP && !X86_BIGSMP
+ default 1 if !SMP
+
+config NR_CPUS_RANGE_END
+ int
+ depends on X86_64
+ default 8192 if SMP && (MAXSMP || CPUMASK_OFFSTACK)
+ default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK)
+ default 1 if !SMP
+
+config NR_CPUS_DEFAULT
+ int
+ depends on X86_32
+ default 32 if X86_BIGSMP
+ default 8 if SMP
+ default 1 if !SMP
+
+config NR_CPUS_DEFAULT
+ int
+ depends on X86_64
+ default 8192 if MAXSMP
+ default 64 if SMP
+ default 1 if !SMP
+
config NR_CPUS
int "Maximum number of CPUs" if SMP && !MAXSMP
- range 2 8 if SMP && X86_32 && !X86_BIGSMP
- range 2 64 if SMP && X86_32 && X86_BIGSMP
- range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
- range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
- default "1" if !SMP
- default "8192" if MAXSMP
- default "32" if SMP && X86_BIGSMP
- default "8" if SMP && X86_32
- default "64" if SMP
+ range NR_CPUS_RANGE_BEGIN NR_CPUS_RANGE_END
+ default NR_CPUS_DEFAULT
---help---
This allows you to specify the maximum number of CPUs which this
kernel will support. If CPUMASK_OFFSTACK is enabled, the maximum
supported value is 8192, otherwise the maximum value is 512. The
minimum value which makes sense is 2.
- This is purely to save memory - each supported CPU adds
- approximately eight kilobytes to the kernel image.
+ This is purely to save memory: each supported CPU adds about 8KB
+ to the kernel image.
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
index 36870b26067a..d08805032f01 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
@@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
{
unsigned int j;
- state->lens[0] = 0;
- state->lens[1] = 1;
- state->lens[2] = 2;
- state->lens[3] = 3;
+ /* initially all lanes are unused */
+ state->lens[0] = 0xFFFFFFFF00000000;
+ state->lens[1] = 0xFFFFFFFF00000001;
+ state->lens[2] = 0xFFFFFFFF00000002;
+ state->lens[3] = 0xFFFFFFFF00000003;
+
state->unused_lanes = 0xFF03020100;
for (j = 0; j < 4; j++)
state->ldata[j].job_in_lane = NULL;
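[Note: seeding each lens[] slot with 0xFFFFFFFF in the upper half makes idle lanes compare as "infinitely long", so a minimum-length scan can never pick an unused lane over one holding real work. A compilable sketch of why the sentinel matters; the lane layout here is assumed for illustration, not taken from the driver.]

    #include <stdio.h>
    #include <stdint.h>

    #define UNUSED_LANE(i) (0xFFFFFFFF00000000ULL | (i))

    int main(void)
    {
        uint64_t lens[4] = { UNUSED_LANE(0), UNUSED_LANE(1),
                             UNUSED_LANE(2), UNUSED_LANE(3) };

        /* Lane 2 gets real work: length 5 blocks in the upper half. */
        lens[2] = (5ULL << 32) | 2;

        uint64_t min = lens[0];
        for (int i = 1; i < 4; i++)
            if (lens[i] < min)
                min = lens[i];

        /* With the sentinel, the busy lane wins; with 0..3 it would not. */
        printf("flush lane %u (len %u)\n",
               (uint32_t)min, (uint32_t)(min >> 32));
        return 0;
    }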
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 3f48f695d5e6..dce7092ab24a 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -97,80 +97,69 @@ For 32-bit we have the following conventions - kernel is built with
#define SIZEOF_PTREGS 21*8
- .macro ALLOC_PT_GPREGS_ON_STACK
- addq $-(15*8), %rsp
- .endm
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
+ /*
+ * Push registers and sanitize registers of values that a
+ * speculation attack might otherwise want to exploit. The
+ * lower registers are likely clobbered well before they
+ * could be put to use in a speculative execution gadget.
+ * Interleave XOR with PUSH for better uop scheduling:
+ */
+ pushq %rdi /* pt_regs->di */
+ pushq %rsi /* pt_regs->si */
+ pushq \rdx /* pt_regs->dx */
+ pushq %rcx /* pt_regs->cx */
+ pushq \rax /* pt_regs->ax */
+ pushq %r8 /* pt_regs->r8 */
+ xorq %r8, %r8 /* nospec r8 */
+ pushq %r9 /* pt_regs->r9 */
+ xorq %r9, %r9 /* nospec r9 */
+ pushq %r10 /* pt_regs->r10 */
+ xorq %r10, %r10 /* nospec r10 */
+ pushq %r11 /* pt_regs->r11 */
+ xorq %r11, %r11 /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
+ pushq %rbp /* pt_regs->rbp */
+ xorl %ebp, %ebp /* nospec rbp */
+ pushq %r12 /* pt_regs->r12 */
+ xorq %r12, %r12 /* nospec r12 */
+ pushq %r13 /* pt_regs->r13 */
+ xorq %r13, %r13 /* nospec r13 */
+ pushq %r14 /* pt_regs->r14 */
+ xorq %r14, %r14 /* nospec r14 */
+ pushq %r15 /* pt_regs->r15 */
+ xorq %r15, %r15 /* nospec r15 */
+ UNWIND_HINT_REGS
+.endm
- .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
- .if \r11
- movq %r11, 6*8+\offset(%rsp)
- .endif
- .if \r8910
- movq %r10, 7*8+\offset(%rsp)
- movq %r9, 8*8+\offset(%rsp)
- movq %r8, 9*8+\offset(%rsp)
- .endif
- .if \rax
- movq %rax, 10*8+\offset(%rsp)
- .endif
- .if \rcx
- movq %rcx, 11*8+\offset(%rsp)
- .endif
- movq %rdx, 12*8+\offset(%rsp)
- movq %rsi, 13*8+\offset(%rsp)
- movq %rdi, 14*8+\offset(%rsp)
- UNWIND_HINT_REGS offset=\offset extra=0
- .endm
- .macro SAVE_C_REGS offset=0
- SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
- .endm
- .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
- SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
- .endm
- .macro SAVE_C_REGS_EXCEPT_R891011
- SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
- .endm
- .macro SAVE_C_REGS_EXCEPT_RCX_R891011
- SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
- .endm
- .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
- SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
- .endm
-
- .macro SAVE_EXTRA_REGS offset=0
- movq %r15, 0*8+\offset(%rsp)
- movq %r14, 1*8+\offset(%rsp)
- movq %r13, 2*8+\offset(%rsp)
- movq %r12, 3*8+\offset(%rsp)
- movq %rbp, 4*8+\offset(%rsp)
- movq %rbx, 5*8+\offset(%rsp)
- UNWIND_HINT_REGS offset=\offset
- .endm
-
- .macro POP_EXTRA_REGS
+.macro POP_REGS pop_rdi=1 skip_r11rcx=0
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
- .endm
-
- .macro POP_C_REGS
+ .if \skip_r11rcx
+ popq %rsi
+ .else
popq %r11
+ .endif
popq %r10
popq %r9
popq %r8
popq %rax
+ .if \skip_r11rcx
+ popq %rsi
+ .else
popq %rcx
+ .endif
popq %rdx
popq %rsi
+ .if \pop_rdi
popq %rdi
- .endm
-
- .macro icebp
- .byte 0xf1
- .endm
+ .endif
+.endm
/*
* This is a sneaky trick to help the unwinder find pt_regs on the stack. The
@@ -178,7 +167,7 @@ For 32-bit we have the following conventions - kernel is built with
* is just setting the LSB, which makes it an invalid stack address and is also
* a signal to the unwinder that it's a pt_regs pointer in disguise.
*
- * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
+ * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts
* the original rbp.
*/
.macro ENCODE_FRAME_POINTER ptregs_offset=0
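[Note: PUSH_AND_CLEAR_REGS both saves the interrupted context and zeroes the live registers, so no user-controlled value survives into kernel code where a speculative-execution gadget could consume it. The real thing is interleaved pushq/xor assembly; this is only a C-level sketch of the save-then-wipe idea with a toy register struct.]

    #include <stdio.h>
    #include <string.h>

    struct toy_regs { unsigned long di, si, dx, cx, ax, r8_r15[8]; };

    static struct toy_regs saved;

    /* Entry sketch: capture the user's values, then wipe the live copies
     * so later (possibly speculative) code cannot reuse them. */
    static void entry(struct toy_regs *live)
    {
        saved = *live;                    /* the "pushq" side */
        memset(live, 0, sizeof(*live));   /* the "xorq" side  */
    }

    int main(void)
    {
        struct toy_regs live = { .di = 0xdeadbeef };
        entry(&live);
        printf("saved di=%#lx, live di=%#lx\n", saved.di, live.di);
        return 0;
    }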
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 30c8c5344c4a..8971bd64d515 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -213,7 +213,7 @@ ENTRY(entry_SYSCALL_64)
swapgs
/*
- * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
+ * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
* is not required to switch CR3.
*/
movq %rsp, PER_CPU_VAR(rsp_scratch)
@@ -227,22 +227,8 @@ ENTRY(entry_SYSCALL_64)
pushq %rcx /* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
pushq %rax /* pt_regs->orig_ax */
- pushq %rdi /* pt_regs->di */
- pushq %rsi /* pt_regs->si */
- pushq %rdx /* pt_regs->dx */
- pushq %rcx /* pt_regs->cx */
- pushq $-ENOSYS /* pt_regs->ax */
- pushq %r8 /* pt_regs->r8 */
- pushq %r9 /* pt_regs->r9 */
- pushq %r10 /* pt_regs->r10 */
- pushq %r11 /* pt_regs->r11 */
- pushq %rbx /* pt_regs->rbx */
- pushq %rbp /* pt_regs->rbp */
- pushq %r12 /* pt_regs->r12 */
- pushq %r13 /* pt_regs->r13 */
- pushq %r14 /* pt_regs->r14 */
- pushq %r15 /* pt_regs->r15 */
- UNWIND_HINT_REGS
+
+ PUSH_AND_CLEAR_REGS rax=$-ENOSYS
TRACE_IRQS_OFF
@@ -321,15 +307,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
UNWIND_HINT_EMPTY
- POP_EXTRA_REGS
- popq %rsi /* skip r11 */
- popq %r10
- popq %r9
- popq %r8
- popq %rax
- popq %rsi /* skip rcx */
- popq %rdx
- popq %rsi
+ POP_REGS pop_rdi=0 skip_r11rcx=1
/*
* Now all regs are restored except RSP and RDI.
@@ -559,9 +537,7 @@ END(irq_entries_start)
call switch_to_thread_stack
1:
- ALLOC_PT_GPREGS_ON_STACK
- SAVE_C_REGS
- SAVE_EXTRA_REGS
+ PUSH_AND_CLEAR_REGS
ENCODE_FRAME_POINTER
testb $3, CS(%rsp)
@@ -622,15 +598,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
ud2
1:
#endif
- POP_EXTRA_REGS
- popq %r11
- popq %r10
- popq %r9
- popq %r8
- popq %rax
- popq %rcx
- popq %rdx
- popq %rsi
+ POP_REGS pop_rdi=0
/*
* The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
@@ -688,8 +656,7 @@ GLOBAL(restore_regs_and_return_to_kernel)
ud2
1:
#endif
- POP_EXTRA_REGS
- POP_C_REGS
+ POP_REGS
addq $8, %rsp /* skip regs->orig_ax */
/*
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -908,7 +875,9 @@ ENTRY(\sym)
pushq $-1 /* ORIG_RAX: no syscall to restart */
.endif
- ALLOC_PT_GPREGS_ON_STACK
+ /* Save all registers in pt_regs */
+ PUSH_AND_CLEAR_REGS
+ ENCODE_FRAME_POINTER
.if \paranoid < 2
testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
@@ -1121,9 +1090,7 @@ ENTRY(xen_failsafe_callback)
addq $0x30, %rsp
UNWIND_HINT_IRET_REGS
pushq $-1 /* orig_ax = -1 => not a system call */
- ALLOC_PT_GPREGS_ON_STACK
- SAVE_C_REGS
- SAVE_EXTRA_REGS
+ PUSH_AND_CLEAR_REGS
ENCODE_FRAME_POINTER
jmp error_exit
END(xen_failsafe_callback)
@@ -1163,16 +1130,13 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
#endif
/*
- * Save all registers in pt_regs, and switch gs if needed.
+ * Switch gs if needed.
* Use slow, but surefire "are we in kernel?" check.
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/
ENTRY(paranoid_entry)
UNWIND_HINT_FUNC
cld
- SAVE_C_REGS 8
- SAVE_EXTRA_REGS 8
- ENCODE_FRAME_POINTER 8
movl $1, %ebx
movl $MSR_GS_BASE, %ecx
rdmsr
@@ -1211,21 +1175,18 @@ ENTRY(paranoid_exit)
jmp .Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
TRACE_IRQS_IRETQ_DEBUG
+ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
.Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
END(paranoid_exit)
/*
- * Save all registers in pt_regs, and switch gs if needed.
+ * Switch gs if needed.
* Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
- UNWIND_HINT_FUNC
+ UNWIND_HINT_REGS offset=8
cld
- SAVE_C_REGS 8
- SAVE_EXTRA_REGS 8
- ENCODE_FRAME_POINTER 8
- xorl %ebx, %ebx
testb $3, CS+8(%rsp)
jz .Lerror_kernelspace
@@ -1406,22 +1367,7 @@ ENTRY(nmi)
pushq 1*8(%rdx) /* pt_regs->rip */
UNWIND_HINT_IRET_REGS
pushq $-1 /* pt_regs->orig_ax */
- pushq %rdi /* pt_regs->di */
- pushq %rsi /* pt_regs->si */
- pushq (%rdx) /* pt_regs->dx */
- pushq %rcx /* pt_regs->cx */
- pushq %rax /* pt_regs->ax */
- pushq %r8 /* pt_regs->r8 */
- pushq %r9 /* pt_regs->r9 */
- pushq %r10 /* pt_regs->r10 */
- pushq %r11 /* pt_regs->r11 */
- pushq %rbx /* pt_regs->rbx */
- pushq %rbp /* pt_regs->rbp */
- pushq %r12 /* pt_regs->r12 */
- pushq %r13 /* pt_regs->r13 */
- pushq %r14 /* pt_regs->r14 */
- pushq %r15 /* pt_regs->r15 */
- UNWIND_HINT_REGS
+ PUSH_AND_CLEAR_REGS rdx=(%rdx)
ENCODE_FRAME_POINTER
/*
@@ -1631,7 +1577,8 @@ end_repeat_nmi:
* frame to point back to repeat_nmi.
*/
pushq $-1 /* ORIG_RAX: no syscall to restart */
- ALLOC_PT_GPREGS_ON_STACK
+ PUSH_AND_CLEAR_REGS
+ ENCODE_FRAME_POINTER
/*
* Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
@@ -1655,8 +1602,7 @@ end_repeat_nmi:
nmi_swapgs:
SWAPGS_UNSAFE_STACK
nmi_restore:
- POP_EXTRA_REGS
- POP_C_REGS
+ POP_REGS
/*
* Skip orig_ax and the "outermost" frame to point RSP at the "iret"
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 98d5358e4041..fd65e016e413 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat)
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 = 0 */
+ xorq %r8, %r8 /* nospec r8 */
pushq $0 /* pt_regs->r9 = 0 */
+ xorq %r9, %r9 /* nospec r9 */
pushq $0 /* pt_regs->r10 = 0 */
+ xorq %r10, %r10 /* nospec r10 */
pushq $0 /* pt_regs->r11 = 0 */
+ xorq %r11, %r11 /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp (will be overwritten) */
+ xorl %ebp, %ebp /* nospec rbp */
pushq $0 /* pt_regs->r12 = 0 */
+ xorq %r12, %r12 /* nospec r12 */
pushq $0 /* pt_regs->r13 = 0 */
+ xorq %r13, %r13 /* nospec r13 */
pushq $0 /* pt_regs->r14 = 0 */
+ xorq %r14, %r14 /* nospec r14 */
pushq $0 /* pt_regs->r15 = 0 */
+ xorq %r15, %r15 /* nospec r15 */
cld
/*
@@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
pushq %rbp /* pt_regs->cx (stashed in bp) */
pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 = 0 */
+ xorq %r8, %r8 /* nospec r8 */
pushq $0 /* pt_regs->r9 = 0 */
+ xorq %r9, %r9 /* nospec r9 */
pushq $0 /* pt_regs->r10 = 0 */
+ xorq %r10, %r10 /* nospec r10 */
pushq $0 /* pt_regs->r11 = 0 */
+ xorq %r11, %r11 /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp (will be overwritten) */
+ xorl %ebp, %ebp /* nospec rbp */
pushq $0 /* pt_regs->r12 = 0 */
+ xorq %r12, %r12 /* nospec r12 */
pushq $0 /* pt_regs->r13 = 0 */
+ xorq %r13, %r13 /* nospec r13 */
pushq $0 /* pt_regs->r14 = 0 */
+ xorq %r14, %r14 /* nospec r14 */
pushq $0 /* pt_regs->r15 = 0 */
+ xorq %r15, %r15 /* nospec r15 */
/*
* User mode is traced as though IRQs are on, and SYSENTER
@@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat)
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 = 0 */
+ xorq %r8, %r8 /* nospec r8 */
pushq $0 /* pt_regs->r9 = 0 */
+ xorq %r9, %r9 /* nospec r9 */
pushq $0 /* pt_regs->r10 = 0 */
+ xorq %r10, %r10 /* nospec r10 */
pushq $0 /* pt_regs->r11 = 0 */
+ xorq %r11, %r11 /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp */
+ xorl %ebp, %ebp /* nospec rbp */
pushq %r12 /* pt_regs->r12 */
+ xorq %r12, %r12 /* nospec r12 */
pushq %r13 /* pt_regs->r13 */
+ xorq %r13, %r13 /* nospec r13 */
pushq %r14 /* pt_regs->r14 */
+ xorq %r14, %r14 /* nospec r14 */
pushq %r15 /* pt_regs->r15 */
+ xorq %r15, %r15 /* nospec r15 */
cld
/*
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 731153a4681e..56457cb73448 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu)
break;
case INTEL_FAM6_SANDYBRIDGE_X:
- switch (cpu_data(cpu).x86_mask) {
+ switch (cpu_data(cpu).x86_stepping) {
case 6: rev = 0x618; break;
case 7: rev = 0x70c; break;
}
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index ae64d0b69729..cf372b90557e 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void)
* on PMU interrupt
*/
if (boot_cpu_data.x86_model == 28
- && boot_cpu_data.x86_mask < 10) {
+ && boot_cpu_data.x86_stepping < 10) {
pr_cont("LBR disabled due to erratum");
return;
}
diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
index a5604c352930..408879b0c0d4 100644
--- a/arch/x86/events/intel/p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = {
static __init void p6_pmu_rdpmc_quirk(void)
{
- if (boot_cpu_data.x86_mask < 9) {
+ if (boot_cpu_data.x86_stepping < 9) {
/*
* PPro erratum 26; fixed in stepping 9 and above.
*/
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 44f5d79d5105..11881726ed37 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
if (boot_cpu_data.x86 == 0x0F &&
boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86_model <= 0x05 &&
- boot_cpu_data.x86_mask < 0x0A)
+ boot_cpu_data.x86_stepping < 0x0A)
return 1;
else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
return 1;
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 30d406146016..e1259f043ae9 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
asm ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
- :"r"(size),"r" (index)
+ :"g"(size),"r" (index)
:"cc");
return mask;
}
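[Note: the constraint change ("r" to "g") only gives the compiler more freedom for the size operand; the usage pattern is unchanged — mask the index before using it so a mispredicted bounds check cannot read out of range. A portable C sketch with the mask computed arithmetically instead of via cmp/sbb; an arithmetic right shift of a negative value is assumed, as with gcc.]

    #include <stdio.h>

    /* All-ones if index < size, 0 otherwise (branchless, like cmp;sbb). */
    static unsigned long mask_nospec(unsigned long index, unsigned long size)
    {
        return (unsigned long)((long)(index - size) >>
                               (sizeof(long) * 8 - 1));
    }

    int main(void)
    {
        unsigned long table[8] = { 0 }, idx = 9;
        idx &= mask_nospec(idx, 8);   /* clamps the out-of-range idx to 0 */
        printf("%lu\n", table[idx]);
        return 0;
    }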
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 34d99af43994..6804d6642767 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -5,23 +5,20 @@
#include <linux/stringify.h>
/*
- * Since some emulators terminate on UD2, we cannot use it for WARN.
- * Since various instruction decoders disagree on the length of UD1,
- * we cannot use it either. So use UD0 for WARN.
+ * Although some emulators terminate on UD2, we use it for WARN().
*
- * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas
- * our kernel decoder thinks it takes a ModRM byte, which seems consistent
- * with various things like the Intel SDM instruction encoding rules)
+ * Various instruction decoders/specs still disagree on the encoding of
+ * UD0/UD1, which is why those two are not used.
*/
-#define ASM_UD0 ".byte 0x0f, 0xff"
+#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */
#define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */
#define ASM_UD2 ".byte 0x0f, 0x0b"
#define INSN_UD0 0xff0f
#define INSN_UD2 0x0b0f
-#define LEN_UD0 2
+#define LEN_UD2 2
#ifdef CONFIG_GENERIC_BUG
@@ -77,7 +74,11 @@ do { \
unreachable(); \
} while (0)
-#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags))
+#define __WARN_FLAGS(flags) \
+do { \
+ _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
+ annotate_reachable(); \
+} while (0)
#include <asm-generic/bug.h>
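[Note: with WARN() now planting UD2, the #UD handler identifies a warning by fetching the opcode at the faulting IP and, on a match, advancing the IP by LEN_UD2 so execution resumes after the trap point. A userland sketch of that decode step; the function name echoes fixup_bug(), but the body is illustrative only.]

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define INSN_UD2 0x0b0f
    #define LEN_UD2  2

    static int fixup_bug_sketch(const uint8_t *text, uint64_t *ip)
    {
        uint16_t opcode;
        memcpy(&opcode, text + *ip, sizeof(opcode)); /* little-endian fetch */
        if (opcode == INSN_UD2) {
            *ip += LEN_UD2;   /* report the WARN, then resume after UD2 */
            return 1;
        }
        return 0;             /* a real bug: let it BUG() */
    }

    int main(void)
    {
        uint8_t text[] = { 0x0f, 0x0b, 0x90 };
        uint64_t ip = 0;
        int handled = fixup_bug_sketch(text, &ip);
        printf("handled=%d new ip=%llu\n", handled, (unsigned long long)ip);
        return 0;
    }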
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 70eddb3922ff..736771c9822e 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -148,45 +148,46 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
*/
static __always_inline __pure bool _static_cpu_has(u16 bit)
{
- asm_volatile_goto("1: jmp 6f\n"
- "2:\n"
- ".skip -(((5f-4f) - (2b-1b)) > 0) * "
- "((5f-4f) - (2b-1b)),0x90\n"
- "3:\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n" /* src offset */
- " .long 4f - .\n" /* repl offset */
- " .word %P1\n" /* always replace */
- " .byte 3b - 1b\n" /* src len */
- " .byte 5f - 4f\n" /* repl len */
- " .byte 3b - 2b\n" /* pad len */
- ".previous\n"
- ".section .altinstr_replacement,\"ax\"\n"
- "4: jmp %l[t_no]\n"
- "5:\n"
- ".previous\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n" /* src offset */
- " .long 0\n" /* no replacement */
- " .word %P0\n" /* feature bit */
- " .byte 3b - 1b\n" /* src len */
- " .byte 0\n" /* repl len */
- " .byte 0\n" /* pad len */
- ".previous\n"
- ".section .altinstr_aux,\"ax\"\n"
- "6:\n"
- " testb %[bitnum],%[cap_byte]\n"
- " jnz %l[t_yes]\n"
- " jmp %l[t_no]\n"
- ".previous\n"
- : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
- [bitnum] "i" (1 << (bit & 7)),
- [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
- : : t_yes, t_no);
- t_yes:
- return true;
- t_no:
- return false;
+ asm_volatile_goto("1: jmp 6f\n"
+ "2:\n"
+ ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+ "((5f-4f) - (2b-1b)),0x90\n"
+ "3:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 4f - .\n" /* repl offset */
+ " .word %P[always]\n" /* always replace */
+ " .byte 3b - 1b\n" /* src len */
+ " .byte 5f - 4f\n" /* repl len */
+ " .byte 3b - 2b\n" /* pad len */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "4: jmp %l[t_no]\n"
+ "5:\n"
+ ".previous\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 0\n" /* no replacement */
+ " .word %P[feature]\n" /* feature bit */
+ " .byte 3b - 1b\n" /* src len */
+ " .byte 0\n" /* repl len */
+ " .byte 0\n" /* pad len */
+ ".previous\n"
+ ".section .altinstr_aux,\"ax\"\n"
+ "6:\n"
+ " testb %[bitnum],%[cap_byte]\n"
+ " jnz %l[t_yes]\n"
+ " jmp %l[t_no]\n"
+ ".previous\n"
+ : : [feature] "i" (bit),
+ [always] "i" (X86_FEATURE_ALWAYS),
+ [bitnum] "i" (1 << (bit & 7)),
+ [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
+ : : t_yes, t_no);
+t_yes:
+ return true;
+t_no:
+ return false;
}
#define static_cpu_has(bit) \
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 4d57894635f2..76b058533e47 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -6,6 +6,7 @@
#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
#ifdef __ASSEMBLY__
@@ -164,10 +165,15 @@ static inline void vmexit_fill_RSB(void)
static inline void indirect_branch_prediction_barrier(void)
{
- alternative_input("",
- "call __ibp_barrier",
- X86_FEATURE_USE_IBPB,
- ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
+ asm volatile(ALTERNATIVE("",
+ "movl %[msr], %%ecx\n\t"
+ "movl %[val], %%eax\n\t"
+ "movl $0, %%edx\n\t"
+ "wrmsr",
+ X86_FEATURE_USE_IBPB)
+ : : [msr] "i" (MSR_IA32_PRED_CMD),
+ [val] "i" (PRED_CMD_IBPB)
+ : "eax", "ecx", "edx", "memory");
}
#endif /* __ASSEMBLY__ */
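[Note: the inline alternative replaces the old __ibp_barrier() call with an open-coded WRMSR of PRED_CMD_IBPB. Stripped of the alternatives machinery, the barrier is a single privileged register write; a sketch with the documented MSR number — this faults with #GP outside ring 0, so it is kernel-context only.]

    #include <stdint.h>

    #define MSR_IA32_PRED_CMD 0x49
    #define PRED_CMD_IBPB     (1ULL << 0)

    /* WRMSR takes the value in edx:eax and the MSR number in ecx. */
    static inline void wrmsr64(uint32_t msr, uint64_t val)
    {
        asm volatile("wrmsr"
                     : : "c"(msr), "a"((uint32_t)val),
                         "d"((uint32_t)(val >> 32))
                     : "memory");
    }

    static inline void ibpb(void)
    {
        /* Flush indirect branch prediction state on this CPU. */
        wrmsr64(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
    }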
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 4baa6bceb232..d652a3808065 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -52,10 +52,6 @@ static inline void clear_page(void *page)
void copy_page(void *to, void *from);
-#ifdef CONFIG_X86_MCE
-#define arch_unmap_kpfn arch_unmap_kpfn
-#endif
-
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_VSYSCALL_EMULATION
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 892df375b615..554841fab717 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void)
{
PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
-static inline void __flush_tlb_single(unsigned long addr)
+static inline void __flush_tlb_one_user(unsigned long addr)
{
- PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
+ PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
}
static inline void flush_tlb_others(const struct cpumask *cpumask,
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 6ec54d01972d..f624f1f10316 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -217,7 +217,7 @@ struct pv_mmu_ops {
/* TLB operations */
void (*flush_tlb_user)(void);
void (*flush_tlb_kernel)(void);
- void (*flush_tlb_single)(unsigned long addr);
+ void (*flush_tlb_one_user)(unsigned long addr);
void (*flush_tlb_others)(const struct cpumask *cpus,
const struct flush_tlb_info *info);
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index e67c0620aec2..e55466760ff8 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -61,7 +61,7 @@ void paging_init(void);
#define kpte_clear_flush(ptep, vaddr) \
do { \
pte_clear(&init_mm, (vaddr), (ptep)); \
- __flush_tlb_one((vaddr)); \
+ __flush_tlb_one_kernel((vaddr)); \
} while (0)
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 793bae7e7ce3..1bd9ed87606f 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -91,7 +91,7 @@ struct cpuinfo_x86 {
__u8 x86; /* CPU family */
__u8 x86_vendor; /* CPU vendor */
__u8 x86_model;
- __u8 x86_mask;
+ __u8 x86_stepping;
#ifdef CONFIG_X86_64
/* Number of 4K pages in DTLB/ITLB combined(in pages): */
int x86_tlbsize;
@@ -109,7 +109,7 @@ struct cpuinfo_x86 {
char x86_vendor_id[16];
char x86_model_id[64];
/* in KB - valid for CPUS which support this call: */
- int x86_cache_size;
+ unsigned int x86_cache_size;
int x86_cache_alignment; /* In bytes */
/* Cache QoS architectural values: */
int x86_cache_max_rmid; /* max index */
@@ -977,7 +977,4 @@ bool xen_set_default_idle(void);
void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
-
-void __ibp_barrier(void);
-
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 461f53d27708..a4189762b266 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -129,6 +129,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
+void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus);
void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 2b8f18ca5874..84137c22fdfa 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
-#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
#endif
static inline bool tlb_defer_switch_to_init_mm(void)
@@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void)
/*
* flush one page in the user mapping
*/
-static inline void __native_flush_tlb_single(unsigned long addr)
+static inline void __native_flush_tlb_one_user(unsigned long addr)
{
u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
@@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void)
/*
* flush one page in the kernel mapping
*/
-static inline void __flush_tlb_one(unsigned long addr)
+static inline void __flush_tlb_one_kernel(unsigned long addr)
{
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
- __flush_tlb_single(addr);
+
+ /*
+ * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
+ * paravirt equivalent. Even with PCID, this is sufficient: we only
+ * use PCID if we also use global PTEs for the kernel mapping, and
+ * INVLPG flushes global translations across all address spaces.
+ *
+ * If PTI is on, then the kernel is mapped with non-global PTEs, and
+ * __flush_tlb_one_user() will flush the given address for the current
+ * kernel address space and for its usermode counterpart, but it does
+ * not flush it for other address spaces.
+ */
+ __flush_tlb_one_user(addr);
if (!static_cpu_has(X86_FEATURE_PTI))
return;
/*
- * __flush_tlb_single() will have cleared the TLB entry for this ASID,
- * but since kernel space is replicated across all, we must also
- * invalidate all others.
+ * See above. We need to propagate the flush to all other address
+ * spaces. In principle, we only need to propagate it to kernelmode
+ * address spaces, but the extra bookkeeping we would need is not
+ * worth it.
*/
invalidate_other_asid();
}
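[Note: the renamed helpers encode the new invariant — a user-address flush touches only the current address space (plus its user twin under PTI), while a kernel-address flush must also invalidate every other ASID because PTI makes kernel PTEs non-global. A toy model of the dispatch, with stub helpers in place of the real flush primitives.]

    #include <stdbool.h>
    #include <stdio.h>

    static bool pti_enabled = true;

    static void flush_one_user(unsigned long addr)
    { printf("INVLPG %#lx (current ASID + user twin)\n", addr); }

    static void invalidate_other_asids(void)
    { printf("mark all other ASIDs stale\n"); }

    static void flush_one_kernel(unsigned long addr)
    {
        flush_one_user(addr);         /* always flush the current space */
        if (pti_enabled)
            invalidate_other_asids(); /* kernel PTEs are non-global now */
    }

    int main(void) { flush_one_kernel(0xffff888000000000UL); return 0; }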
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 6db28f17ff28..c88e0b127810 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -235,7 +235,7 @@ int amd_cache_northbridges(void)
if (boot_cpu_data.x86 == 0x10 &&
boot_cpu_data.x86_model >= 0x8 &&
(boot_cpu_data.x86_model > 0x9 ||
- boot_cpu_data.x86_mask >= 0x1))
+ boot_cpu_data.x86_stepping >= 0x1))
amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
if (boot_cpu_data.x86 == 0x15)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 25ddf02598d2..b203af0855b5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -546,7 +546,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
static u32 hsx_deadline_rev(void)
{
- switch (boot_cpu_data.x86_mask) {
+ switch (boot_cpu_data.x86_stepping) {
case 0x02: return 0x3a; /* EP */
case 0x04: return 0x0f; /* EX */
}
@@ -556,7 +556,7 @@ static u32 hsx_deadline_rev(void)
static u32 bdx_deadline_rev(void)
{
- switch (boot_cpu_data.x86_mask) {
+ switch (boot_cpu_data.x86_stepping) {
case 0x02: return 0x00000011;
case 0x03: return 0x0700000e;
case 0x04: return 0x0f00000c;
@@ -568,7 +568,7 @@ static u32 bdx_deadline_rev(void)
static u32 skx_deadline_rev(void)
{
- switch (boot_cpu_data.x86_mask) {
+ switch (boot_cpu_data.x86_stepping) {
case 0x03: return 0x01000136;
case 0x04: return 0x02000014;
}
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 46b675aaf20b..f11910b44638 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1176,16 +1176,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
uv_gre_table = gre;
for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
+ unsigned long size = ((unsigned long)(gre->limit - lgre)
+ << UV_GAM_RANGE_SHFT);
+ int order = 0;
+ char suffix[] = " KMGTPE";
+
+ while (size > 9999 && order < sizeof(suffix)) {
+ size /= 1024;
+ order++;
+ }
+
if (!index) {
pr_info("UV: GAM Range Table...\n");
pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
}
- pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n",
+ pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
index++,
(unsigned long)lgre << UV_GAM_RANGE_SHFT,
(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
- ((unsigned long)(gre->limit - lgre)) >>
- (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
+ size, suffix[order],
gre->type, gre->nasid, gre->sockid, gre->pnode);
lgre = gre->limit;
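[Note: the printout change replaces a fixed shift-to-gigabytes conversion with a scaling loop over the " KMGTPE" suffix string, so small and huge ranges both print readably. The loop in isolation, compilable as-is; the input unit is chosen arbitrarily for the demo.]

    #include <stdio.h>

    int main(void)
    {
        unsigned long size = 41943040;   /* say, a range size in bytes */
        char suffix[] = " KMGTPE";
        int order = 0;

        /* Scale until the number fits in 4 digits, bumping the suffix. */
        while (size > 9999 && order < (int)sizeof(suffix)) {
            size /= 1024;
            order++;
        }
        printf("%lu%c\n", size, suffix[order]);   /* prints "40M" */
        return 0;
    }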
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index fa1261eefa16..f91ba53e06c8 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -18,7 +18,7 @@ void foo(void)
OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
- OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
+ OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping);
OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5bddbdcbc4a3..f0e6456ca7d3 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
return;
}
- if (c->x86_model == 6 && c->x86_mask == 1) {
+ if (c->x86_model == 6 && c->x86_stepping == 1) {
const int K6_BUG_LOOP = 1000000;
int n;
void (*f_vide)(void);
@@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
/* K6 with old style WHCR */
if (c->x86_model < 8 ||
- (c->x86_model == 8 && c->x86_mask < 8)) {
+ (c->x86_model == 8 && c->x86_stepping < 8)) {
/* We can only write allocate on the low 508Mb */
if (mbytes > 508)
mbytes = 508;
@@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
return;
}
- if ((c->x86_model == 8 && c->x86_mask > 7) ||
+ if ((c->x86_model == 8 && c->x86_stepping > 7) ||
c->x86_model == 9 || c->x86_model == 13) {
/* The more serious chips .. */
@@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
* are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
* As per AMD technical note 27212 0.2
*/
- if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+ if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
rdmsr(MSR_K7_CLK_CTL, l, h);
if ((l & 0xfff00000) != 0x20000000) {
pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
@@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
* but they are not certified as MP capable.
*/
/* Athlon 660/661 is valid. */
- if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
- (c->x86_mask == 1)))
+ if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
+ (c->x86_stepping == 1)))
return;
/* Duron 670 is valid */
- if ((c->x86_model == 7) && (c->x86_mask == 0))
+ if ((c->x86_model == 7) && (c->x86_stepping == 0))
return;
/*
@@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
* See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
* more.
*/
- if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
- ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+ if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
+ ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
(c->x86_model > 7))
if (cpu_has(c, X86_FEATURE_MP))
return;
@@ -628,7 +628,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
/* Set MTRR capability flag if appropriate */
if (c->x86 == 5)
if (c->x86_model == 13 || c->x86_model == 9 ||
- (c->x86_model == 8 && c->x86_mask >= 8))
+ (c->x86_model == 8 && c->x86_stepping >= 8))
set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
@@ -795,7 +795,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
* Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
* all up to and including B1.
*/
- if (c->x86_model <= 1 && c->x86_mask <= 1)
+ if (c->x86_model <= 1 && c->x86_stepping <= 1)
set_cpu_cap(c, X86_FEATURE_CPB);
}
@@ -906,11 +906,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
/* AMD errata T13 (order #21922) */
if ((c->x86 == 6)) {
/* Duron Rev A0 */
- if (c->x86_model == 3 && c->x86_mask == 0)
+ if (c->x86_model == 3 && c->x86_stepping == 0)
size = 64;
/* Tbird rev A1/A2 */
if (c->x86_model == 4 &&
- (c->x86_mask == 0 || c->x86_mask == 1))
+ (c->x86_stepping == 0 || c->x86_stepping == 1))
size = 256;
}
return size;
@@ -1047,7 +1047,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
}
/* OSVW unavailable or ID unknown, match family-model-stepping range */
- ms = (cpu->x86_model << 4) | cpu->x86_mask;
+ ms = (cpu->x86_model << 4) | cpu->x86_stepping;
while ((range = *erratum++))
if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
(ms >= AMD_MODEL_RANGE_START(range)) &&
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 71949bf2de5a..d71c8b54b696 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
return SPECTRE_V2_CMD_NONE;
else {
- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
- sizeof(arg));
+ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
if (ret < 0)
return SPECTRE_V2_CMD_AUTO;
@@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
}
if (i >= ARRAY_SIZE(mitigation_options)) {
- pr_err("unknown option (%s). Switching to AUTO select\n",
- mitigation_options[i].option);
+ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
return SPECTRE_V2_CMD_AUTO;
}
}
@@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
!IS_ENABLED(CONFIG_RETPOLINE)) {
- pr_err("%s selected but not compiled in. Switching to AUTO select\n",
- mitigation_options[i].option);
+ pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
@@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void)
goto retpoline_auto;
break;
}
- pr_err("kernel not compiled with retpoline; no mitigation available!");
+ pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
return;
retpoline_auto:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
retpoline_amd:
if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
- pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
goto retpoline_generic;
}
mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
@@ -281,7 +278,7 @@ retpoline_auto:
pr_info("%s\n", spectre_v2_strings[mode]);
/*
- * If neither SMEP or KPTI are available, there is a risk of
+ * If neither SMEP nor PTI are available, there is a risk of
* hitting userspace addresses in the RSB after a context switch
* from a shallow call stack to a deeper one. To prevent this fill
* the entire RSB, even when using IBRS.
@@ -295,21 +292,20 @@ retpoline_auto:
if ((!boot_cpu_has(X86_FEATURE_PTI) &&
!boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Filling RSB on context switch\n");
+ pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
}
/* Initialize Indirect Branch Prediction Barrier if supported */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
- pr_info("Enabling Indirect Branch Prediction Barrier\n");
+ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
}
}
#undef pr_fmt
#ifdef CONFIG_SYSFS
-ssize_t cpu_show_meltdown(struct device *dev,
- struct device_attribute *attr, char *buf)
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
return sprintf(buf, "Not affected\n");
@@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev,
return sprintf(buf, "Vulnerable\n");
}
-ssize_t cpu_show_spectre_v1(struct device *dev,
- struct device_attribute *attr, char *buf)
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
return sprintf(buf, "Not affected\n");
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
-ssize_t cpu_show_spectre_v2(struct device *dev,
- struct device_attribute *attr, char *buf)
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return sprintf(buf, "Not affected\n");
@@ -337,9 +331,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
spectre_v2_module_string());
}
#endif
-
-void __ibp_barrier(void)
-{
- __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
-}
-EXPORT_SYMBOL_GPL(__ibp_barrier);
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index c578cd29c2d2..e5ec0f11c0de 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -140,7 +140,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
clear_cpu_cap(c, X86_FEATURE_TSC);
break;
case 8:
- switch (c->x86_mask) {
+ switch (c->x86_stepping) {
default:
name = "2";
break;
@@ -215,7 +215,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
* - Note, it seems this may only be in engineering samples.
*/
if ((c->x86 == 6) && (c->x86_model == 9) &&
- (c->x86_mask == 1) && (size == 65))
+ (c->x86_stepping == 1) && (size == 65))
size -= 1;
return size;
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d63f4b5706e4..824aee0117bb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -731,7 +731,7 @@ void cpu_detect(struct cpuinfo_x86 *c)
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
c->x86 = x86_family(tfms);
c->x86_model = x86_model(tfms);
- c->x86_mask = x86_stepping(tfms);
+ c->x86_stepping = x86_stepping(tfms);
if (cap0 & (1<<19)) {
c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
@@ -1184,9 +1184,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
int i;
c->loops_per_jiffy = loops_per_jiffy;
- c->x86_cache_size = -1;
+ c->x86_cache_size = 0;
c->x86_vendor = X86_VENDOR_UNKNOWN;
- c->x86_model = c->x86_mask = 0; /* So far unknown... */
+ c->x86_model = c->x86_stepping = 0; /* So far unknown... */
c->x86_vendor_id[0] = '\0'; /* Unset */
c->x86_model_id[0] = '\0'; /* Unset */
c->x86_max_cores = 1;
@@ -1378,8 +1378,8 @@ void print_cpu_info(struct cpuinfo_x86 *c)
pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
- if (c->x86_mask || c->cpuid_level >= 0)
- pr_cont(", stepping: 0x%x)\n", c->x86_mask);
+ if (c->x86_stepping || c->cpuid_level >= 0)
+ pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
else
pr_cont(")\n");
}
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 6b4bb335641f..8949b7ae6d92 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
/* common case step number/rev -- exceptions handled below */
c->x86_model = (dir1 >> 4) + 1;
- c->x86_mask = dir1 & 0xf;
+ c->x86_stepping = dir1 & 0xf;
/* Now cook; the original recipe is by Channing Corn, from Cyrix.
* We do the same thing for each generation: we work out
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 319bf989fad1..d19e903214b4 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -116,14 +116,13 @@ struct sku_microcode {
u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
- { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
- { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
- { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
{ INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
{ INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
- { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
{ INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
{ INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
{ INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
@@ -136,8 +135,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
{ INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
{ INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
{ INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
- /* Updated in the 20180108 release; blacklist until we know otherwise */
- { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
/* Observed in the wild */
{ INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
{ INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
@@ -149,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model &&
- c->x86_mask == spectre_bad_microcodes[i].stepping)
+ c->x86_stepping == spectre_bad_microcodes[i].stepping)
return (c->microcode <= spectre_bad_microcodes[i].microcode);
}
return false;
@@ -196,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
* need the microcode to have already been loaded... so if it is
* not, recommend a BIOS update and disable large pages.
*/
- if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
+ if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
c->microcode < 0x20e) {
pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
clear_cpu_cap(c, X86_FEATURE_PSE);
@@ -212,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
/* CPUID workaround for 0F33/0F34 CPU */
if (c->x86 == 0xF && c->x86_model == 0x3
- && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
+ && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
c->x86_phys_bits = 36;
/*
@@ -310,7 +307,7 @@ int ppro_with_ram_bug(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 1 &&
- boot_cpu_data.x86_mask < 8) {
+ boot_cpu_data.x86_stepping < 8) {
pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
return 1;
}
@@ -327,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
* Mask B, Pentium, but not Pentium MMX
*/
if (c->x86 == 5 &&
- c->x86_mask >= 1 && c->x86_mask <= 4 &&
+ c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
c->x86_model <= 3) {
/*
* Remember we have B step Pentia with bugs
@@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
* model 3 mask 3
*/
- if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
+ if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
clear_cpu_cap(c, X86_FEATURE_SEP);
/*
@@ -388,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
* P4 Xeon erratum 037 workaround.
* Hardware prefetcher may cause stale data to be loaded into the cache.
*/
- if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
+ if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
if (msr_set_bit(MSR_IA32_MISC_ENABLE,
MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
pr_info("CPU: C0 stepping P4 Xeon detected.\n");
@@ -403,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
* Specification Update").
*/
if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
- (c->x86_mask < 0x6 || c->x86_mask == 0xb))
+ (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
set_cpu_bug(c, X86_BUG_11AP);
@@ -650,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c)
case 6:
if (l2 == 128)
p = "Celeron (Mendocino)";
- else if (c->x86_mask == 0 || c->x86_mask == 5)
+ else if (c->x86_stepping == 0 || c->x86_stepping == 5)
p = "Celeron-A";
break;
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 410629f10ad3..589b948e6e01 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -819,7 +819,7 @@ static __init void rdt_quirks(void)
cache_alloc_hsw_probe();
break;
case INTEL_FAM6_SKYLAKE_X:
- if (boot_cpu_data.x86_mask <= 4)
+ if (boot_cpu_data.x86_stepping <= 4)
set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
}
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index aa0d5df9dc60..e956eb267061 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
extern struct mca_config mca_cfg;
+#ifndef CONFIG_X86_64
+/*
+ * On 32-bit systems it would be difficult to safely unmap a poison page
+ * from the kernel 1:1 map because there are no non-canonical addresses that
+ * we can use to refer to the address without risking a speculative access.
+ * However, this isn't much of an issue because:
+ * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM, which
+ * is only mapped into the kernel as needed
+ * 2) Few people would run a 32-bit kernel on a machine that supports
+ * recoverable errors because they have too much memory to boot 32-bit.
+ */
+static inline void mce_unmap_kpfn(unsigned long pfn) {}
+#define mce_unmap_kpfn mce_unmap_kpfn
+#endif
+
#endif /* __X86_MCE_INTERNAL_H__ */
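[Note: the 32-bit stub relies on a define-to-override idiom — the header supplies an inline no-op and then defines a macro of the same name, so the "#ifndef mce_unmap_kpfn" block in mce.c compiles the real declaration only when no stub exists. The pattern in miniature, with toy names.]

    #include <stdio.h>

    /* "header": on configs that can't unmap, provide a no-op and mark it. */
    #ifndef CONFIG_64BIT_DEMO
    static inline void unmap_pfn(unsigned long pfn)
    { (void)pfn; /* nothing to do on this config */ }
    #define unmap_pfn unmap_pfn
    #endif

    /* ".c file": declare the real version only if no stub was provided. */
    #ifndef unmap_pfn
    static void unmap_pfn(unsigned long pfn)
    { printf("really unmapping pfn %lu\n", pfn); }
    #endif

    int main(void)
    {
        unmap_pfn(42);
        printf("done\n");
        return 0;
    }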
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 3a8e88a611eb..8ff94d1e2dce 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -105,6 +105,10 @@ static struct irq_work mce_irq_work;
static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn);
+#endif
+
/*
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
@@ -234,7 +238,7 @@ static void __print_mce(struct mce *m)
m->cs, m->ip);
if (m->cs == __KERNEL_CS)
- pr_cont("{%pS}", (void *)m->ip);
+ pr_cont("{%pS}", (void *)(unsigned long)m->ip);
pr_cont("\n");
}
@@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
pfn = mce->addr >> PAGE_SHIFT;
- memory_failure(pfn, 0);
+ if (!memory_failure(pfn, 0))
+ mce_unmap_kpfn(pfn);
}
return NOTIFY_OK;
@@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m)
ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
if (ret)
pr_err("Memory error not recovered");
+ else
+ mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
return ret;
}
-#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
-
-void arch_unmap_kpfn(unsigned long pfn)
+#ifndef mce_unmap_kpfn
+static void mce_unmap_kpfn(unsigned long pfn)
{
unsigned long decoy_addr;
@@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn)
* We would like to just call:
* set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
* but doing that would radically increase the odds of a
- * speculative access to the posion page because we'd have
+ * speculative access to the poison page because we'd have
* the virtual address of the kernel 1:1 mapping sitting
* around in registers.
* Instead we get tricky. We create a non-canonical address
@@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn)
if (set_memory_np(decoy_addr, 1))
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-
}
#endif
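[Note: mce_unmap_kpfn() deliberately computes a non-canonical alias of the poison page by XOR-ing bit 63 into the 1:1-map base, so the true virtual address never sits in a register where a speculative load might dereference it; set_memory_np() masks the decoy back internally. An arithmetic sketch with invented constants.]

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_OFFSET 0xffff888000000000UL   /* example 1:1 map base */

    int main(void)
    {
        unsigned long pfn = 0x12345;
        /* Flip bit 63: same low bits, but not a canonical pointer, so a
         * stray speculative dereference of it cannot hit the page. */
        unsigned long decoy = (pfn << PAGE_SHIFT) +
                              (PAGE_OFFSET ^ (1UL << 63));

        printf("real  %#lx\n", (pfn << PAGE_SHIFT) + PAGE_OFFSET);
        printf("decoy %#lx\n", decoy);
        return 0;
    }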
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index f7c55b0e753a..a15db2b4e0d6 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu)
*/
if (c->x86 == 6 &&
c->x86_model == INTEL_FAM6_BROADWELL_X &&
- c->x86_mask == 0x01 &&
+ c->x86_stepping == 0x01 &&
llc_size_per_core > 2621440 &&
c->microcode < 0x0b000021) {
pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
@@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
return UCODE_NFOUND;
sprintf(name, "intel-ucode/%02x-%02x-%02x",
- c->x86, c->x86_model, c->x86_mask);
+ c->x86, c->x86_model, c->x86_stepping);
if (request_firmware_direct(&firmware, name, device)) {
pr_debug("data file %s load failed\n", name);
@@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = {
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
- u64 llc_size = c->x86_cache_size * 1024;
+ u64 llc_size = c->x86_cache_size * 1024ULL;
do_div(llc_size, c->x86_max_cores);
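[Note: the 1024ULL suffix is the entire fix — x86_cache_size * 1024 was a 32-bit multiply that can wrap for very large caches before the product ever reaches the u64, misreporting the LLC size. A two-line demonstration of the wrap.]

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int cache_kb = 4u * 1024 * 1024;   /* a 4GB LLC, in KB */
        uint64_t bad  = cache_kb * 1024;            /* 32-bit wrap: 0   */
        uint64_t good = cache_kb * 1024ULL;         /* 64-bit multiply  */
        printf("bad=%llu good=%llu\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }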
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index fdc55215d44d..e12ee86906c6 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
*/
if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
boot_cpu_data.x86_model == 1 &&
- boot_cpu_data.x86_mask <= 7) {
+ boot_cpu_data.x86_stepping <= 7) {
if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
return -EINVAL;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 40d5a8a75212..7468de429087 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -711,8 +711,8 @@ void __init mtrr_bp_init(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 0xF &&
boot_cpu_data.x86_model == 0x3 &&
- (boot_cpu_data.x86_mask == 0x3 ||
- boot_cpu_data.x86_mask == 0x4))
+ (boot_cpu_data.x86_stepping == 0x3 ||
+ boot_cpu_data.x86_stepping == 0x4))
phys_addr = 36;
size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index e7ecedafa1c8..2c8522a39ed5 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
c->x86_model,
c->x86_model_id[0] ? c->x86_model_id : "unknown");
- if (c->x86_mask || c->cpuid_level >= 0)
- seq_printf(m, "stepping\t: %d\n", c->x86_mask);
+ if (c->x86_stepping || c->cpuid_level >= 0)
+ seq_printf(m, "stepping\t: %d\n", c->x86_stepping);
else
seq_puts(m, "stepping\t: unknown\n");
if (c->microcode)
@@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
/* Cache size */
- if (c->x86_cache_size >= 0)
- seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
+ if (c->x86_cache_size)
+ seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size);
show_cpuinfo_core(m, c, cpu);
show_cpuinfo_misc(m, c);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c29020907886..b59e4fb40fd9 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -37,7 +37,7 @@
#define X86 new_cpu_data+CPUINFO_x86
#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL new_cpu_data+CPUINFO_x86_model
-#define X86_MASK new_cpu_data+CPUINFO_x86_mask
+#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping
#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
@@ -332,7 +332,7 @@ ENTRY(startup_32_smp)
shrb $4,%al
movb %al,X86_MODEL
andb $0x0f,%cl # mask stepping
- movb %cl,X86_MASK
+ movb %cl,X86_STEPPING
movl %edx,X86_CAPABILITY
.Lis486:
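The startup_32_smp fragment above unpacks the CPUID leaf 1 signature that was loaded into %eax: the low nibble is the stepping, the next nibble the model, bits 11:8 the family. The same decode in C, as a sketch (the extended family/model fields that x86_family()/x86_model() in arch/x86/lib/cpu.c below also fold in are omitted here, just as they are in this early-boot path):

    #include <stdint.h>

    struct x86_sig {
            uint8_t family;    /* bits 11:8 */
            uint8_t model;     /* bits  7:4 */
            uint8_t stepping;  /* bits  3:0 */
    };

    static struct x86_sig decode_sig(uint32_t eax)
    {
            struct x86_sig s;

            s.stepping = eax & 0x0f;
            s.model    = (eax >> 4) & 0x0f;
            s.family   = (eax >> 8) & 0x0f;
            return s;
    }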
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 27d0a1712663..f1c5eb99d445 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -410,7 +410,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
processor.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
processor.cpuflag = CPU_ENABLED;
processor.cpufeature = (boot_cpu_data.x86 << 8) |
- (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping;
processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
processor.reserved[0] = 0;
processor.reserved[1] = 0;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 041096bdef86..99dc79e76bdc 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -200,9 +200,9 @@ static void native_flush_tlb_global(void)
__native_flush_tlb_global();
}
-static void native_flush_tlb_single(unsigned long addr)
+static void native_flush_tlb_one_user(unsigned long addr)
{
- __native_flush_tlb_single(addr);
+ __native_flush_tlb_one_user(addr);
}
struct static_key paravirt_steal_enabled;
@@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
.flush_tlb_user = native_flush_tlb,
.flush_tlb_kernel = native_flush_tlb_global,
- .flush_tlb_single = native_flush_tlb_single,
+ .flush_tlb_one_user = native_flush_tlb_one_user,
.flush_tlb_others = native_flush_tlb_others,
.pgd_alloc = __paravirt_pgd_alloc,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6f27facbaa9b..9eee25d07586 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1281,11 +1281,10 @@ void __init native_smp_prepare_boot_cpu(void)
cpu_set_state_online(me);
}
-void __init native_smp_cpus_done(unsigned int max_cpus)
+void __init calculate_max_logical_packages(void)
{
int ncpus;
- pr_debug("Boot done\n");
/*
* Today neither Intel nor AMD supports heterogeneous systems so
* extrapolate the boot cpu's data to all packages.
@@ -1293,6 +1292,13 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
__max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
pr_info("Max logical packages: %u\n", __max_logical_packages);
+}
+
+void __init native_smp_cpus_done(unsigned int max_cpus)
+{
+ pr_debug("Boot done\n");
+
+ calculate_max_logical_packages();
if (x86_has_numa_in_package)
set_sched_topology(x86_numa_in_package_topology);
@@ -1430,7 +1436,6 @@ static void remove_siblinginfo(int cpu)
cpumask_clear(cpu_llc_shared_mask(cpu));
cpumask_clear(topology_sibling_cpumask(cpu));
cpumask_clear(topology_core_cpumask(cpu));
- c->phys_proc_id = 0;
c->cpu_core_id = 0;
cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
recompute_smt_state();
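Splitting calculate_max_logical_packages() out of native_smp_cpus_done() lets the Xen PV path, which never runs the native boot-done hook, compute __max_logical_packages as well (see the arch/x86/xen/smp.c hunk below). The estimate itself is only a rounded-up division; a sketch with hypothetical numbers:

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    static unsigned int max_packages(unsigned int nr_cpu_ids,
                                     unsigned int booted_cores,
                                     unsigned int smt_threads)
    {
            unsigned int ncpus = booted_cores * smt_threads;

            /* e.g. 128 possible CPUs / (16 cores * 2 threads) = 4 packages */
            return DIV_ROUND_UP(nr_cpu_ids, ncpus);
    }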
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 446c9ef8cfc3..3d9b2308e7fa 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr)
break;
case BUG_TRAP_TYPE_WARN:
- regs->ip += LEN_UD0;
+ regs->ip += LEN_UD2;
return 1;
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8eca1d04aeb8..46ff304140c7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5080,7 +5080,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
/* The caller should hold mmu-lock before calling this function. */
-static bool
+static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
@@ -5110,7 +5110,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
return flush;
}
-static bool
+static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
bool lock_flush_tlb)
@@ -5121,7 +5121,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
lock_flush_tlb);
}
-static bool
+static __always_inline bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb)
{
@@ -5129,7 +5129,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}
-static bool
+static __always_inline bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb)
{
@@ -5137,7 +5137,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}
-static bool
+static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, bool lock_flush_tlb)
{
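Forcing the slot_handle_*() wrappers inline makes the `fn` callback a compile-time constant at every call site, so the compiler can turn the indirect call into a direct (or fully inlined) one; indirect calls are especially costly once retpolines are in play. A generic sketch of the pattern, outside KVM:

    typedef int (*item_handler)(int item);

    /* Force-inlined walker: after inlining, `fn` is a constant and the
     * indirect call can be devirtualized or inlined away entirely.
     */
    static __attribute__((always_inline)) inline int
    walk_items(const int *items, int n, item_handler fn)
    {
            int i, ret = 0;

            for (i = 0; i < n; i++)
                    ret |= fn(items[i]);
            return ret;
    }

    static int is_nonzero(int item)
    {
            return item != 0;
    }

    int any_nonzero(const int *items, int n)
    {
            return walk_items(items, n, is_nonzero);
    }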
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f427723dc7db..3dec126aa302 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10136,7 +10136,10 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
(unsigned long)(vmcs12->posted_intr_desc_addr &
(PAGE_SIZE - 1)));
}
- if (!nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
+ if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
+ vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_USE_MSR_BITMAPS);
+ else
vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
CPU_BASED_USE_MSR_BITMAPS);
}
@@ -10224,8 +10227,8 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
* updated to reflect this when L1 (or its L2s) actually write to
* the MSR.
*/
- bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
- bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
+ bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
+ bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
/* Nothing to do if the MSR bitmap is not in use. */
if (!cpu_has_vmx_msr_bitmap() ||
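Both halves of this vmx.c fix are sign conventions. msr_write_intercepted_l01() reports whether L0 intercepts the write, so "may be passed through" is its negation, hence the added `!`. And the caller above can no longer assume CPU_BASED_USE_MSR_BITMAPS is already set: the control bit now has to be driven explicitly in both directions. A minimal sketch of that set-or-clear idiom (an analogue of vmcs_set_bits()/vmcs_clear_bits(), not the real API):

    #include <stdbool.h>
    #include <stdint.h>

    /* A stale value may be present in the control word, so the bit
     * must be written explicitly whether enabling or disabling.
     */
    static void update_ctrl(uint32_t *ctrl, uint32_t bit, bool enable)
    {
            if (enable)
                    *ctrl |= bit;
            else
                    *ctrl &= ~bit;
    }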
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index d6f848d1211d..2dd1fe13a37b 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig)
{
unsigned int fam, model;
- fam = x86_family(sig);
+ fam = x86_family(sig);
model = (sig >> 4) & 0xf;
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
index 7b881d03d0dd..3cdf06128d13 100644
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -7,6 +7,7 @@ asmlinkage void just_return_func(void);
asm(
".type just_return_func, @function\n"
+ ".globl just_return_func\n"
"just_return_func:\n"
" ret\n"
".size just_return_func, .-just_return_func\n"
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 1ab42c852069..8b72923f1d35 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
- __flush_tlb_one(vaddr);
+ __flush_tlb_one_kernel(vaddr);
}
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
@@ -1193,8 +1193,8 @@ void __init mem_init(void)
register_page_bootmem_info();
/* Register memory areas for /proc/kcore */
- kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
- PAGE_SIZE, KCORE_OTHER);
+ if (get_gate_vma(&init_mm))
+ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
mem_init_print_info(NULL);
}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index c45b6ec5357b..e2db83bebc3b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -820,5 +820,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
else
pte_clear(&init_mm, addr, pte);
- __flush_tlb_one(addr);
+ __flush_tlb_one_kernel(addr);
}
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 58477ec3d66d..7c8686709636 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
return -1;
}
- __flush_tlb_one(f->addr);
+ __flush_tlb_one_kernel(f->addr);
return 0;
}
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index c3c5274410a9..9bb7f0ab9fe6 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
- __flush_tlb_one(vaddr);
+ __flush_tlb_one_kernel(vaddr);
}
unsigned long __FIXADDR_TOP = 0xfffff000;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 8dcc0607f805..7f1a51399674 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -498,7 +498,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* flush that changes context.tlb_gen from 2 to 3. If they get
* processed on this CPU in reverse order, we'll see
* local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
- * If we were to use __flush_tlb_single() and set local_tlb_gen to
+ * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
* 3, we'd break the invariant: we'd update local_tlb_gen above
* 1 without the full flush that's needed for tlb_gen 2.
*
@@ -519,7 +519,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
addr = f->start;
while (addr < f->end) {
- __flush_tlb_single(addr);
+ __flush_tlb_one_user(addr);
addr += PAGE_SIZE;
}
if (local)
@@ -666,7 +666,7 @@ static void do_kernel_range_flush(void *info)
/* flush the range one page at a time with 'invlpg' */
for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
- __flush_tlb_one(addr);
+ __flush_tlb_one_kernel(addr);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index c2e9285d1bf1..db77e087adaf 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
local_flush_tlb();
stat->d_alltlb++;
} else {
- __flush_tlb_single(msg->address);
+ __flush_tlb_one_user(msg->address);
stat->d_onetlb++;
}
stat->d_requestee++;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index d85076223a69..aae88fec9941 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void)
preempt_enable();
}
-static void xen_flush_tlb_single(unsigned long addr)
+static void xen_flush_tlb_one_user(unsigned long addr)
{
struct mmuext_op *op;
struct multicall_space mcs;
- trace_xen_mmu_flush_tlb_single(addr);
+ trace_xen_mmu_flush_tlb_one_user(addr);
preempt_disable();
@@ -2370,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.flush_tlb_user = xen_flush_tlb,
.flush_tlb_kernel = xen_flush_tlb,
- .flush_tlb_single = xen_flush_tlb_single,
+ .flush_tlb_one_user = xen_flush_tlb_one_user,
.flush_tlb_others = xen_flush_tlb_others,
.pgd_alloc = xen_pgd_alloc,
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 77c959cf81e7..7a43b2ae19f1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -122,6 +122,8 @@ void __init xen_smp_cpus_done(unsigned int max_cpus)
if (xen_hvm_domain())
native_smp_cpus_done(max_cpus);
+ else
+ calculate_max_logical_packages();
if (xen_have_vcpu_info_placement)
return;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index df93102e2149..357492712b0e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3164,6 +3164,7 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
cpu_relax();
}
+ __set_current_state(TASK_RUNNING);
return false;
}
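blk_poll() runs with the caller having already put the task into a sleeping state, and the spin loop above can exit without anything restoring it; returning like that can confuse the caller's own sleep/wake logic. The added __set_current_state(TASK_RUNNING) reinstates the canonical wait-loop discipline, sketched here in kernel style (`condition` stands in for the real completion test):

    for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (condition)
                    break;
            io_schedule();
    }
    __set_current_state(TASK_RUNNING);  /* every exit path must restore this */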
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index a965b9d80559..ded148783303 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -20,6 +20,20 @@
#include <crypto/sha3.h>
#include <asm/unaligned.h>
+/*
+ * On some 32-bit architectures (mn10300 and h8300), GCC ends up using
+ * over 1 KB of stack if we inline the round calculation into the loop
+ * in keccakf(). On the other hand, on 64-bit architectures with plenty
+ * of [64-bit wide] general purpose registers, not inlining it severely
+ * hurts performance. So let's use 64-bitness as a heuristic to decide
+ * whether to inline or not.
+ */
+#ifdef CONFIG_64BIT
+#define SHA3_INLINE inline
+#else
+#define SHA3_INLINE noinline
+#endif
+
#define KECCAK_ROUNDS 24
static const u64 keccakf_rndc[24] = {
@@ -35,111 +49,115 @@ static const u64 keccakf_rndc[24] = {
/* update the state with given number of rounds */
-static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25])
+static SHA3_INLINE void keccakf_round(u64 st[25])
{
u64 t[5], tt, bc[5];
- int round;
- for (round = 0; round < KECCAK_ROUNDS; round++) {
+ /* Theta */
+ bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
+ bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
+ bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
+ bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
+ bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
+
+ t[0] = bc[4] ^ rol64(bc[1], 1);
+ t[1] = bc[0] ^ rol64(bc[2], 1);
+ t[2] = bc[1] ^ rol64(bc[3], 1);
+ t[3] = bc[2] ^ rol64(bc[4], 1);
+ t[4] = bc[3] ^ rol64(bc[0], 1);
+
+ st[0] ^= t[0];
+
+ /* Rho Pi */
+ tt = st[1];
+ st[ 1] = rol64(st[ 6] ^ t[1], 44);
+ st[ 6] = rol64(st[ 9] ^ t[4], 20);
+ st[ 9] = rol64(st[22] ^ t[2], 61);
+ st[22] = rol64(st[14] ^ t[4], 39);
+ st[14] = rol64(st[20] ^ t[0], 18);
+ st[20] = rol64(st[ 2] ^ t[2], 62);
+ st[ 2] = rol64(st[12] ^ t[2], 43);
+ st[12] = rol64(st[13] ^ t[3], 25);
+ st[13] = rol64(st[19] ^ t[4], 8);
+ st[19] = rol64(st[23] ^ t[3], 56);
+ st[23] = rol64(st[15] ^ t[0], 41);
+ st[15] = rol64(st[ 4] ^ t[4], 27);
+ st[ 4] = rol64(st[24] ^ t[4], 14);
+ st[24] = rol64(st[21] ^ t[1], 2);
+ st[21] = rol64(st[ 8] ^ t[3], 55);
+ st[ 8] = rol64(st[16] ^ t[1], 45);
+ st[16] = rol64(st[ 5] ^ t[0], 36);
+ st[ 5] = rol64(st[ 3] ^ t[3], 28);
+ st[ 3] = rol64(st[18] ^ t[3], 21);
+ st[18] = rol64(st[17] ^ t[2], 15);
+ st[17] = rol64(st[11] ^ t[1], 10);
+ st[11] = rol64(st[ 7] ^ t[2], 6);
+ st[ 7] = rol64(st[10] ^ t[0], 3);
+ st[10] = rol64( tt ^ t[1], 1);
+
+ /* Chi */
+ bc[ 0] = ~st[ 1] & st[ 2];
+ bc[ 1] = ~st[ 2] & st[ 3];
+ bc[ 2] = ~st[ 3] & st[ 4];
+ bc[ 3] = ~st[ 4] & st[ 0];
+ bc[ 4] = ~st[ 0] & st[ 1];
+ st[ 0] ^= bc[ 0];
+ st[ 1] ^= bc[ 1];
+ st[ 2] ^= bc[ 2];
+ st[ 3] ^= bc[ 3];
+ st[ 4] ^= bc[ 4];
+
+ bc[ 0] = ~st[ 6] & st[ 7];
+ bc[ 1] = ~st[ 7] & st[ 8];
+ bc[ 2] = ~st[ 8] & st[ 9];
+ bc[ 3] = ~st[ 9] & st[ 5];
+ bc[ 4] = ~st[ 5] & st[ 6];
+ st[ 5] ^= bc[ 0];
+ st[ 6] ^= bc[ 1];
+ st[ 7] ^= bc[ 2];
+ st[ 8] ^= bc[ 3];
+ st[ 9] ^= bc[ 4];
+
+ bc[ 0] = ~st[11] & st[12];
+ bc[ 1] = ~st[12] & st[13];
+ bc[ 2] = ~st[13] & st[14];
+ bc[ 3] = ~st[14] & st[10];
+ bc[ 4] = ~st[10] & st[11];
+ st[10] ^= bc[ 0];
+ st[11] ^= bc[ 1];
+ st[12] ^= bc[ 2];
+ st[13] ^= bc[ 3];
+ st[14] ^= bc[ 4];
+
+ bc[ 0] = ~st[16] & st[17];
+ bc[ 1] = ~st[17] & st[18];
+ bc[ 2] = ~st[18] & st[19];
+ bc[ 3] = ~st[19] & st[15];
+ bc[ 4] = ~st[15] & st[16];
+ st[15] ^= bc[ 0];
+ st[16] ^= bc[ 1];
+ st[17] ^= bc[ 2];
+ st[18] ^= bc[ 3];
+ st[19] ^= bc[ 4];
+
+ bc[ 0] = ~st[21] & st[22];
+ bc[ 1] = ~st[22] & st[23];
+ bc[ 2] = ~st[23] & st[24];
+ bc[ 3] = ~st[24] & st[20];
+ bc[ 4] = ~st[20] & st[21];
+ st[20] ^= bc[ 0];
+ st[21] ^= bc[ 1];
+ st[22] ^= bc[ 2];
+ st[23] ^= bc[ 3];
+ st[24] ^= bc[ 4];
+}
- /* Theta */
- bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
- bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
- bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
- bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
- bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
-
- t[0] = bc[4] ^ rol64(bc[1], 1);
- t[1] = bc[0] ^ rol64(bc[2], 1);
- t[2] = bc[1] ^ rol64(bc[3], 1);
- t[3] = bc[2] ^ rol64(bc[4], 1);
- t[4] = bc[3] ^ rol64(bc[0], 1);
-
- st[0] ^= t[0];
-
- /* Rho Pi */
- tt = st[1];
- st[ 1] = rol64(st[ 6] ^ t[1], 44);
- st[ 6] = rol64(st[ 9] ^ t[4], 20);
- st[ 9] = rol64(st[22] ^ t[2], 61);
- st[22] = rol64(st[14] ^ t[4], 39);
- st[14] = rol64(st[20] ^ t[0], 18);
- st[20] = rol64(st[ 2] ^ t[2], 62);
- st[ 2] = rol64(st[12] ^ t[2], 43);
- st[12] = rol64(st[13] ^ t[3], 25);
- st[13] = rol64(st[19] ^ t[4], 8);
- st[19] = rol64(st[23] ^ t[3], 56);
- st[23] = rol64(st[15] ^ t[0], 41);
- st[15] = rol64(st[ 4] ^ t[4], 27);
- st[ 4] = rol64(st[24] ^ t[4], 14);
- st[24] = rol64(st[21] ^ t[1], 2);
- st[21] = rol64(st[ 8] ^ t[3], 55);
- st[ 8] = rol64(st[16] ^ t[1], 45);
- st[16] = rol64(st[ 5] ^ t[0], 36);
- st[ 5] = rol64(st[ 3] ^ t[3], 28);
- st[ 3] = rol64(st[18] ^ t[3], 21);
- st[18] = rol64(st[17] ^ t[2], 15);
- st[17] = rol64(st[11] ^ t[1], 10);
- st[11] = rol64(st[ 7] ^ t[2], 6);
- st[ 7] = rol64(st[10] ^ t[0], 3);
- st[10] = rol64( tt ^ t[1], 1);
-
- /* Chi */
- bc[ 0] = ~st[ 1] & st[ 2];
- bc[ 1] = ~st[ 2] & st[ 3];
- bc[ 2] = ~st[ 3] & st[ 4];
- bc[ 3] = ~st[ 4] & st[ 0];
- bc[ 4] = ~st[ 0] & st[ 1];
- st[ 0] ^= bc[ 0];
- st[ 1] ^= bc[ 1];
- st[ 2] ^= bc[ 2];
- st[ 3] ^= bc[ 3];
- st[ 4] ^= bc[ 4];
-
- bc[ 0] = ~st[ 6] & st[ 7];
- bc[ 1] = ~st[ 7] & st[ 8];
- bc[ 2] = ~st[ 8] & st[ 9];
- bc[ 3] = ~st[ 9] & st[ 5];
- bc[ 4] = ~st[ 5] & st[ 6];
- st[ 5] ^= bc[ 0];
- st[ 6] ^= bc[ 1];
- st[ 7] ^= bc[ 2];
- st[ 8] ^= bc[ 3];
- st[ 9] ^= bc[ 4];
-
- bc[ 0] = ~st[11] & st[12];
- bc[ 1] = ~st[12] & st[13];
- bc[ 2] = ~st[13] & st[14];
- bc[ 3] = ~st[14] & st[10];
- bc[ 4] = ~st[10] & st[11];
- st[10] ^= bc[ 0];
- st[11] ^= bc[ 1];
- st[12] ^= bc[ 2];
- st[13] ^= bc[ 3];
- st[14] ^= bc[ 4];
-
- bc[ 0] = ~st[16] & st[17];
- bc[ 1] = ~st[17] & st[18];
- bc[ 2] = ~st[18] & st[19];
- bc[ 3] = ~st[19] & st[15];
- bc[ 4] = ~st[15] & st[16];
- st[15] ^= bc[ 0];
- st[16] ^= bc[ 1];
- st[17] ^= bc[ 2];
- st[18] ^= bc[ 3];
- st[19] ^= bc[ 4];
-
- bc[ 0] = ~st[21] & st[22];
- bc[ 1] = ~st[22] & st[23];
- bc[ 2] = ~st[23] & st[24];
- bc[ 3] = ~st[24] & st[20];
- bc[ 4] = ~st[20] & st[21];
- st[20] ^= bc[ 0];
- st[21] ^= bc[ 1];
- st[22] ^= bc[ 2];
- st[23] ^= bc[ 3];
- st[24] ^= bc[ 4];
+static void __optimize("O3") keccakf(u64 st[25])
+{
+ int round;
+ for (round = 0; round < KECCAK_ROUNDS; round++) {
+ keccakf_round(st);
/* Iota */
st[0] ^= keccakf_rndc[round];
}
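As the new comment at the top of this file explains, the rewrite trades one fully unrolled keccakf() for a keccakf_round() helper whose inlining is keyed to word size: 32-bit targets pay the round's many u64 temporaries as stack spills once instead of 24 times, while 64-bit targets keep the speed of inlining. `__optimize("O3")` on the loop is a compiler.h shorthand for the `__attribute__((__optimize__("O3")))` the old code spelled out by hand. A stripped-down sketch of the same heuristic:

    #ifdef CONFIG_64BIT
    #define WIDE_INLINE inline      /* plenty of 64-bit GPRs, keep it fast */
    #else
    #define WIDE_INLINE noinline    /* bound the stack to one round's spills */
    #endif

    static WIDE_INLINE void round_fn(unsigned long long st[25])
    {
            /* round body with many 64-bit temporaries */
    }

    static void permute(unsigned long long st[25])
    {
            int i;

            for (i = 0; i < 24; i++)
                    round_fn(st);
    }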
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 676c9788e1c8..0dad0bd9327b 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -660,13 +660,15 @@ struct acpi_device *acpi_companion_match(const struct device *dev)
* acpi_of_match_device - Match device object using the "compatible" property.
* @adev: ACPI device object to match.
* @of_match_table: List of device IDs to match against.
+ * @of_id: OF ID if matched
*
* If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
* identifiers and a _DSD object with the "compatible" property, use that
* property to match against the given list of identifiers.
*/
static bool acpi_of_match_device(struct acpi_device *adev,
- const struct of_device_id *of_match_table)
+ const struct of_device_id *of_match_table,
+ const struct of_device_id **of_id)
{
const union acpi_object *of_compatible, *obj;
int i, nval;
@@ -690,8 +692,11 @@ static bool acpi_of_match_device(struct acpi_device *adev,
const struct of_device_id *id;
for (id = of_match_table; id->compatible[0]; id++)
- if (!strcasecmp(obj->string.pointer, id->compatible))
+ if (!strcasecmp(obj->string.pointer, id->compatible)) {
+ if (of_id)
+ *of_id = id;
return true;
+ }
}
return false;
@@ -762,10 +767,11 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id,
return true;
}
-static const struct acpi_device_id *__acpi_match_device(
- struct acpi_device *device,
- const struct acpi_device_id *ids,
- const struct of_device_id *of_ids)
+static bool __acpi_match_device(struct acpi_device *device,
+ const struct acpi_device_id *acpi_ids,
+ const struct of_device_id *of_ids,
+ const struct acpi_device_id **acpi_id,
+ const struct of_device_id **of_id)
{
const struct acpi_device_id *id;
struct acpi_hardware_id *hwid;
@@ -775,30 +781,32 @@ static const struct acpi_device_id *__acpi_match_device(
* driver for it.
*/
if (!device || !device->status.present)
- return NULL;
+ return false;
list_for_each_entry(hwid, &device->pnp.ids, list) {
/* First, check the ACPI/PNP IDs provided by the caller. */
- for (id = ids; id->id[0] || id->cls; id++) {
- if (id->id[0] && !strcmp((char *) id->id, hwid->id))
- return id;
- else if (id->cls && __acpi_match_device_cls(id, hwid))
- return id;
+ if (acpi_ids) {
+ for (id = acpi_ids; id->id[0] || id->cls; id++) {
+ if (id->id[0] && !strcmp((char *)id->id, hwid->id))
+ goto out_acpi_match;
+ if (id->cls && __acpi_match_device_cls(id, hwid))
+ goto out_acpi_match;
+ }
}
/*
* Next, check ACPI_DT_NAMESPACE_HID and try to match the
* "compatible" property if found.
- *
- * The id returned by the below is not valid, but the only
- * caller passing non-NULL of_ids here is only interested in
- * whether or not the return value is NULL.
*/
- if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
- && acpi_of_match_device(device, of_ids))
- return id;
+ if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id))
+ return acpi_of_match_device(device, of_ids, of_id);
}
- return NULL;
+ return false;
+
+out_acpi_match:
+ if (acpi_id)
+ *acpi_id = id;
+ return true;
}
/**
@@ -815,32 +823,29 @@ static const struct acpi_device_id *__acpi_match_device(
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev)
{
- return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
+ const struct acpi_device_id *id = NULL;
+
+ __acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL);
+ return id;
}
EXPORT_SYMBOL_GPL(acpi_match_device);
-void *acpi_get_match_data(const struct device *dev)
+const void *acpi_device_get_match_data(const struct device *dev)
{
const struct acpi_device_id *match;
- if (!dev->driver)
- return NULL;
-
- if (!dev->driver->acpi_match_table)
- return NULL;
-
match = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!match)
return NULL;
- return (void *)match->driver_data;
+ return (const void *)match->driver_data;
}
-EXPORT_SYMBOL_GPL(acpi_get_match_data);
+EXPORT_SYMBOL_GPL(acpi_device_get_match_data);
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
- return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
+ return __acpi_match_device(device, ids, NULL, NULL, NULL) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
@@ -849,10 +854,12 @@ bool acpi_driver_match_device(struct device *dev,
{
if (!drv->acpi_match_table)
return acpi_of_match_device(ACPI_COMPANION(dev),
- drv->of_match_table);
+ drv->of_match_table,
+ NULL);
- return !!__acpi_match_device(acpi_companion_match(dev),
- drv->acpi_match_table, drv->of_match_table);
+ return __acpi_match_device(acpi_companion_match(dev),
+ drv->acpi_match_table, drv->of_match_table,
+ NULL, NULL);
}
EXPORT_SYMBOL_GPL(acpi_driver_match_device);
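The old __acpi_match_device() returned a `struct acpi_device_id *` even on the DT-namespace path, where, as the deleted comment conceded, the pointer was not a valid table entry at all. Returning a plain bool and handing matches back through optional out-pointers means no caller can dereference a bogus id. A generic, self-contained sketch of that contract:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct id { const char *name; };

    /* The match is only written through `out` when the caller asked for
     * it, and only ever points at a real table entry, never a sentinel.
     */
    static bool match_id(const struct id *table, const char *what,
                         const struct id **out)
    {
            const struct id *id;

            for (id = table; id->name; id++) {
                    if (!strcmp(id->name, what)) {
                            if (out)
                                    *out = id;
                            return true;
                    }
            }
            return false;
    }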
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d9f38c645e4a..30a572956557 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1927,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev)
ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+ if (acpi_sleep_no_ec_events())
+ acpi_ec_enter_noirq(ec);
+
return 0;
}
@@ -1934,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev)
{
struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+ if (acpi_sleep_no_ec_events())
+ acpi_ec_leave_noirq(ec);
+
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 466d1503aba0..5815356ea6ad 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1271,11 +1271,11 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
-static void *
+static const void *
acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
const struct device *dev)
{
- return acpi_get_match_data(dev);
+ return acpi_device_get_match_data(dev);
}
#define DECLARE_ACPI_FWNODE_OPS(ops) \
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 89e97d21a89c..9d52743080a4 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -115,6 +115,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
table->serial_port.access_width))) {
default:
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
+ /* fall through */
case 8:
iotype = "mmio";
break;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b2261f92f2f1..5847364f25d9 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -310,6 +310,9 @@ static void __device_link_del(struct device_link *link)
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_drop_link(link->consumer);
+
list_del(&link->s_node);
list_del(&link->c_node);
device_link_free(link);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index a8ac86e4d79e..6637fc319269 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -321,7 +321,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
return;
if (device_may_wakeup(wirq->dev)) {
- if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+ !pm_runtime_status_suspended(wirq->dev))
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
@@ -343,7 +344,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
if (device_may_wakeup(wirq->dev)) {
disable_irq_wake(wirq->irq);
- if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+ !pm_runtime_status_suspended(wirq->dev))
disable_irq_nosync(wirq->irq);
}
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 302236281d83..8f205f6461ed 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1410,9 +1410,8 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
-void *device_get_match_data(struct device *dev)
+const void *device_get_match_data(struct device *dev)
{
- return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data,
- dev);
+ return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
}
EXPORT_SYMBOL_GPL(device_get_match_data);
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index d1f5bb534e0e..6e9df558325b 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng)
/* Enable secondary noise source on CPUs where it is present. */
/* Nehemiah stepping 8 and higher */
- if ((c->x86_model == 9) && (c->x86_mask > 7))
+ if ((c->x86_model == 9) && (c->x86_stepping > 7))
lo |= VIA_NOISESRC2;
/* Esther */
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 3a2ca0f79daf..d0c34df0529c 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
if (c->x86_vendor == X86_VENDOR_INTEL) {
if ((c->x86 == 15) &&
(c->x86_model == 6) &&
- (c->x86_mask == 8)) {
+ (c->x86_stepping == 8)) {
pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 942632a27b50..f730b6528c18 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
break;
case 7:
- switch (c->x86_mask) {
+ switch (c->x86_stepping) {
case 0:
longhaul_version = TYPE_LONGHAUL_V1;
cpu_model = CPU_SAMUEL2;
@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
break;
case 1 ... 15:
longhaul_version = TYPE_LONGHAUL_V2;
- if (c->x86_mask < 8) {
+ if (c->x86_stepping < 8) {
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
} else {
@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
numscales = 32;
memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
- switch (c->x86_mask) {
+ switch (c->x86_stepping) {
case 0 ... 1:
cpu_model = CPU_NEHEMIAH;
cpuname = "C3 'Nehemiah A' [C5XLOE]";
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index fd77812313f3..a25741b1281b 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
#endif
/* Errata workaround */
- cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
+ cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
switch (cpuid) {
case 0x0f07:
case 0x0f0a:
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 80ac313e6c59..302e9ce793a0 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -131,7 +131,7 @@ static int check_powernow(void)
return 0;
}
- if ((c->x86_model == 6) && (c->x86_mask == 0)) {
+ if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
have_a0 = 1;
}
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 41bc5397f4bb..4fa5adf16c70 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -37,7 +37,7 @@ struct cpu_id
{
__u8 x86; /* CPU family */
__u8 x86_model; /* model */
- __u8 x86_mask; /* stepping */
+ __u8 x86_stepping; /* stepping */
};
enum {
@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
{
if ((c->x86 == x->x86) &&
(c->x86_model == x->x86_model) &&
- (c->x86_mask == x->x86_mask))
+ (c->x86_stepping == x->x86_stepping))
return 1;
return 0;
}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 8085ec9000d1..e3a9962ee410 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)
ebx = cpuid_ebx(0x00000001);
ebx &= 0x000000FF;
- pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
+ pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
- switch (c->x86_mask) {
+ switch (c->x86_stepping) {
case 4:
/*
* B-stepping [M-P4-M]
@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)
msr_lo, msr_hi);
if ((msr_hi & (1<<18)) &&
(relaxed_check ? 1 : (msr_hi & (3<<24)))) {
- if (c->x86_mask == 0x01) {
+ if (c->x86_stepping == 0x01) {
pr_debug("early PIII version\n");
return SPEEDSTEP_CPU_PIII_C_EARLY;
} else
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 75d280cb2dc0..e843cf410373 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* without any error (HW optimizations for later
* CAAM eras), then try again.
*/
+ if (ret)
+ break;
+
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
- !(rdsta_val & (1 << sh_idx)))
+ !(rdsta_val & (1 << sh_idx))) {
ret = -EAGAIN;
- if (ret)
break;
+ }
+
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 4b6642a25df5..1c6cbda56afe 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -512,7 +512,7 @@ static int __init padlock_init(void)
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
- if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+ if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
index 0d01d1624252..63d636424161 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
@@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
ss = algt->ss;
- spin_lock(&ss->slock);
+ spin_lock_bh(&ss->slock);
writel(mode, ss->base + SS_CTL);
@@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
}
writel(0, ss->base + SS_CTL);
- spin_unlock(&ss->slock);
- return dlen;
+ spin_unlock_bh(&ss->slock);
+ return 0;
}
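Two contract fixes share this hunk. Taking ss->slock with spin_lock_bh() keeps a bottom half on the same CPU from interrupting the holder and deadlocking on the lock (presumably because the driver also takes it from softirq context elsewhere). And a crypto_rng generate() implementation must return 0 on success, with the output length implied by dlen, so returning dlen looked like an error to callers. A kernel-style sketch of that callback shape:

    static int my_rng_generate(struct crypto_rng *tfm,
                               const u8 *src, unsigned int slen,
                               u8 *dst, unsigned int dlen)
    {
            /* ... fill dst[0..dlen) with random bytes ... */
            return 0;  /* 0 on success, -errno on failure; never a length */
    }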
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 9c80e0cb1664..6882fa2f8bad 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1138,6 +1138,10 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ if (!src) {
+ to_talitos_ptr(ptr, 0, 0, is_sec1);
+ return 1;
+ }
if (sg_count == 1) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
return sg_count;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8b16ec595fa7..329cb96f886f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
struct amd64_family_type *fam_type = NULL;
pvt->ext_model = boot_cpu_data.x86_model >> 4;
- pvt->stepping = boot_cpu_data.x86_mask;
+ pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
pvt->fam = boot_cpu_data.x86;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index e2c3c5ec42d1..c53095b3b0fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
/* HG _PR3 doesn't seem to work on this A+A weston board */
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 909499b73d03..021f722e2481 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -733,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
return ret == 0 ? count : ret;
}
+static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ struct intel_gvt *gvt = vgpu->gvt;
+ int offset;
+
+ /* Only allow MMIO GGTT entry access */
+ if (index != PCI_BASE_ADDRESS_0)
+ return false;
+
+ offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
+ intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
+
+ return offset >= gvt->device_info.gtt_start_offset &&
+        offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt);
+}
+
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -742,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
while (count) {
size_t filled;
- if (count >= 4 && !(*ppos % 4)) {
+ /* Only support aligned 8-byte GGTT entry reads */
+ if (count >= 8 && !(*ppos % 8) &&
+ gtt_entry(mdev, ppos)) {
+ u64 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 8;
+ } else if (count >= 4 && !(*ppos % 4)) {
u32 val;
ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
@@ -802,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
while (count) {
size_t filled;
- if (count >= 4 && !(*ppos % 4)) {
+ /* Only support aligned 8-byte GGTT entry writes */
+ if (count >= 8 && !(*ppos % 8) &&
+ gtt_entry(mdev, ppos)) {
+ u64 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 8;
+ } else if (count >= 4 && !(*ppos % 4)) {
u32 val;
if (copy_from_user(&val, buf, sizeof(val)))
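The point of the paired read/write branches above: on gen8+ each GGTT entry is a 64-bit PTE, and splitting a guest access into two 4-byte MMIO transactions would let the vGPU see or apply a torn entry. gtt_entry() therefore gates the new path to naturally aligned 8-byte accesses that land inside the GGTT window of BAR0. That gate, reduced to its arithmetic (a sketch with hypothetical bounds):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool qword_in_window(uint64_t pos, size_t count,
                                uint64_t win_start, uint64_t win_size)
    {
            return count >= 8 && !(pos % 8) &&
                   pos >= win_start && pos + 8 <= win_start + win_size;
    }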
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 73ad6e90e49d..256f1bb522b7 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 7a2511538f34..736bd2bc5127 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio,
TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
unsigned int old_val, unsigned int new_val),
- TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
+ TP_ARGS(old_id, new_id, action, reg, old_val, new_val),
TP_STRUCT__entry(
__field(int, old_id)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 173d0095e3b2..2f5209de0391 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev)
intel_modeset_cleanup(dev);
- /*
- * free the memory space allocated for the child device
- * config parsed from VBT
- */
- if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
- }
- kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
- dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
- kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
- dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+ intel_bios_cleanup(dev_priv);
vga_switcheroo_unregister_client(pdev);
vga_client_register(pdev, NULL, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a42deebedb0f..d307429a5ae0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1349,6 +1349,7 @@ struct intel_vbt_data {
u32 size;
u8 *data;
const u8 *sequence[MIPI_SEQ_MAX];
+ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
} dsi;
int crt_ddc_pin;
@@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
/* intel_bios.c */
void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 648e7536ff51..0c963fcf31ff 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_PRIORITY:
{
- int priority = args->value;
+ s64 priority = args->value;
if (args->size)
ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 42ff06fe54a3..792facdb6702 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"577e8e2c-3fa0-4875-8743-3538d585e3b0",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ff0ac3627cc4..ba9140c87cc0 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"db41edd4-d8e7-4730-ad11-b9a2d6833503",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 55a8a1e29424..0e9b98c32b62 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915)
return sum;
}
-static void i915_pmu_event_destroy(struct perf_event *event)
+static void engine_event_destroy(struct perf_event *event)
{
- WARN_ON(event->parent);
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+ if (WARN_ON_ONCE(!engine))
+ return;
+
+ if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
+ intel_engine_supports_stats(engine))
+ intel_disable_engine_stats(engine);
}
-static int engine_event_init(struct perf_event *event)
+static void i915_pmu_event_destroy(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ WARN_ON(event->parent);
- if (!intel_engine_lookup_user(i915, engine_event_class(event),
- engine_event_instance(event)))
- return -ENODEV;
+ if (is_engine_event(event))
+ engine_event_destroy(event);
+}
- switch (engine_event_sample(event)) {
+static int
+engine_event_status(struct intel_engine_cs *engine,
+ enum drm_i915_pmu_engine_sample sample)
+{
+ switch (sample) {
case I915_SAMPLE_BUSY:
case I915_SAMPLE_WAIT:
break;
case I915_SAMPLE_SEMA:
- if (INTEL_GEN(i915) < 6)
+ if (INTEL_GEN(engine->i915) < 6)
return -ENODEV;
break;
default:
@@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event)
return 0;
}
+static int engine_event_init(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ struct intel_engine_cs *engine;
+ u8 sample;
+ int ret;
+
+ engine = intel_engine_lookup_user(i915, engine_event_class(event),
+ engine_event_instance(event));
+ if (!engine)
+ return -ENODEV;
+
+ sample = engine_event_sample(event);
+ ret = engine_event_status(engine, sample);
+ if (ret)
+ return ret;
+
+ if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
+ ret = intel_enable_engine_stats(engine);
+
+ return ret;
+}
+
static int i915_pmu_event_init(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event)
return 0;
}
-static u64 __i915_pmu_event_read(struct perf_event *event)
+static u64 __get_rc6(struct drm_i915_private *i915)
+{
+ u64 val;
+
+ val = intel_rc6_residency_ns(i915,
+ IS_VALLEYVIEW(i915) ?
+ VLV_GT_RENDER_RC6 :
+ GEN6_GT_GFX_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+
+ return val;
+}
+
+static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+{
+#if IS_ENABLED(CONFIG_PM)
+ unsigned long flags;
+ u64 val;
+
+ if (intel_runtime_pm_get_if_in_use(i915)) {
+ val = __get_rc6(i915);
+ intel_runtime_pm_put(i915);
+
+ /*
+ * If we are coming back from being runtime suspended we must
+ * be careful not to report a smaller value than we returned
+ * previously, so the counter never appears to go backwards.
+ */
+
+ if (!locked)
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+ i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ }
+
+ if (!locked)
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ } else {
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct device *kdev = &pdev->dev;
+ unsigned long flags2;
+
+ /*
+ * We are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
+ */
+ if (!locked)
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ spin_lock_irqsave(&kdev->power.lock, flags2);
+
+ if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ i915->pmu.suspended_jiffies_last =
+ kdev->power.suspended_jiffies;
+
+ val = kdev->power.suspended_jiffies -
+ i915->pmu.suspended_jiffies_last;
+ val += jiffies - kdev->power.accounting_timestamp;
+
+ spin_unlock_irqrestore(&kdev->power.lock, flags2);
+
+ val = jiffies_to_nsecs(val);
+ val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+ if (!locked)
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ }
+
+ return val;
+#else
+ return __get_rc6(i915);
+#endif
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
@@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
if (WARN_ON_ONCE(!engine)) {
/* Do nothing */
} else if (sample == I915_SAMPLE_BUSY &&
- engine->pmu.busy_stats) {
+ intel_engine_supports_stats(engine)) {
val = ktime_to_ns(intel_engine_get_busy_time(engine));
} else {
val = engine->pmu.sample[sample].cur;
@@ -408,18 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
val = count_interrupts(i915);
break;
case I915_PMU_RC6_RESIDENCY:
- intel_runtime_pm_get(i915);
- val = intel_rc6_residency_ns(i915,
- IS_VALLEYVIEW(i915) ?
- VLV_GT_RENDER_RC6 :
- GEN6_GT_GFX_RC6);
- if (HAS_RC6p(i915))
- val += intel_rc6_residency_ns(i915,
- GEN6_GT_GFX_RC6p);
- if (HAS_RC6pp(i915))
- val += intel_rc6_residency_ns(i915,
- GEN6_GT_GFX_RC6pp);
- intel_runtime_pm_put(i915);
+ val = get_rc6(i915, locked);
break;
}
}
@@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)
again:
prev = local64_read(&hwc->prev_count);
- new = __i915_pmu_event_read(event);
+ new = __i915_pmu_event_read(event, false);
if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
goto again;
@@ -442,12 +557,6 @@ again:
local64_add(new - prev, &event->count);
}
-static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
-{
- return intel_engine_supports_stats(engine) &&
- (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
-}
-
static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event)
GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
- if (engine->pmu.enable_count[sample]++ == 0) {
- /*
- * Enable engine busy stats tracking if needed or
- * alternatively cancel the scheduled disable.
- *
- * If the delayed disable was pending, cancel it and
- * in this case do not enable since it already is.
- */
- if (engine_needs_busy_stats(engine) &&
- !engine->pmu.busy_stats) {
- engine->pmu.busy_stats = true;
- if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
- intel_enable_engine_stats(engine);
- }
- }
+ engine->pmu.enable_count[sample]++;
}
/*
@@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event)
* for all listeners. Even when the event was already enabled and has
* an existing non-zero value.
*/
- local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
-static void __disable_busy_stats(struct work_struct *work)
-{
- struct intel_engine_cs *engine =
- container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
-
- intel_disable_engine_stats(engine);
-}
-
static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event)
* Decrement the reference count and clear the enabled
* bitmask when the last listener on an event goes away.
*/
- if (--engine->pmu.enable_count[sample] == 0) {
+ if (--engine->pmu.enable_count[sample] == 0)
engine->pmu.enable &= ~BIT(sample);
- if (!engine_needs_busy_stats(engine) &&
- engine->pmu.busy_stats) {
- engine->pmu.busy_stats = false;
- /*
- * We request a delayed disable to handle the
- * rapid on/off cycles on events, which can
- * happen when tools like perf stat start, in a
- * nicer way.
- *
- * In addition, this also helps with busy stats
- * accuracy with background CPU offline/online
- * migration events.
- */
- queue_delayed_work(system_wq,
- &engine->pmu.disable_busy_stats,
- round_jiffies_up_relative(HZ));
- }
- }
}
GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
@@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
void i915_pmu_register(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
int ret;
if (INTEL_GEN(i915) <= 2) {
@@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
i915->pmu.timer.function = i915_sample;
- for_each_engine(engine, i915, id)
- INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
- __disable_busy_stats);
-
ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
if (ret)
goto err;
@@ -843,9 +906,6 @@ err:
void i915_pmu_unregister(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
if (!i915->pmu.base.event_init)
return;
@@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
hrtimer_cancel(&i915->pmu.timer);
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(engine->pmu.busy_stats);
- flush_delayed_work(&engine->pmu.disable_busy_stats);
- }
-
i915_pmu_unregister_cpuhp_state(i915);
perf_pmu_unregister(&i915->pmu.base);
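The get_rc6() helper added above deals with an awkward constraint: reading the real RC6 counter would require waking the device, defeating the point of RC6. While runtime-suspended the device is in RC6 by definition, so the estimate is the last real reading plus the time spent suspended, and once real readings resume they are only reported if they do not run backwards relative to the estimate already handed out. The extrapolation, reduced to arithmetic (a userspace-style sketch):

    #include <stdint.h>

    /* estimated = last real residency + time spent runtime-suspended */
    static uint64_t estimate_rc6_ns(uint64_t last_real_ns,
                                    uint64_t suspended_since_ns,
                                    uint64_t now_ns)
    {
            return last_real_ns + (now_ns - suspended_since_ns);
    }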
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 40c154d13565..bb62df15afa4 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -27,6 +27,8 @@
enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
+ __I915_SAMPLE_RC6,
+ __I915_SAMPLE_RC6_ESTIMATED,
__I915_NUM_PMU_SAMPLERS
};
@@ -94,6 +96,10 @@ struct i915_pmu {
* struct intel_engine_cs.
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+ /**
+ * @suspended_jiffies_last: Cached suspend time from PM core.
+ */
+ unsigned long suspended_jiffies_last;
};
#ifdef CONFIG_PERF_EVENTS
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f7f771749e48..b49a2df44430 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
return 0;
}
+/*
+ * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
+ * skip all delay + gpio operands and stop at the first DSI packet op.
+ */
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+{
+ const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ int index, len;
+
+ if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+ return 0;
+
+ /* index = 1 to skip sequence byte */
+ for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+ switch (data[index]) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ return index == 1 ? 0 : index;
+ case MIPI_SEQ_ELEM_DELAY:
+ len = 5; /* 1 byte for operand + uint32 */
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+{
+ u8 *init_otp;
+ int len;
+
+ /* Limit this to VLV for now. */
+ if (!IS_VALLEYVIEW(dev_priv))
+ return;
+
+ /* Limit this to v1 vid-mode sequences */
+ if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+ dev_priv->vbt.dsi.seq_version != 1)
+ return;
+
+ /* Only do this if there are otp and assert seqs and no deassert seq */
+ if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+ !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+ return;
+
+ /* The deassert-sequence ends at the first DSI packet */
+ len = get_init_otp_deassert_fragment_len(dev_priv);
+ if (!len)
+ return;
+
+ DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+ /* Copy the fragment, update seq byte and terminate it */
+ init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.deassert_seq)
+ return;
+ dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+ dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+ /* Use the copy for deassert */
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+ dev_priv->vbt.dsi.deassert_seq;
+ /* Replace the last byte of the fragment with init OTP seq byte */
+ init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+ /* And make MIPI_SEQ_INIT_OTP point to it */
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
dev_priv->vbt.dsi.size = seq_size;
dev_priv->vbt.dsi.seq_version = sequence->version;
+ fixup_mipi_sequences(dev_priv);
+
DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
return;
@@ -1589,6 +1671,29 @@ out:
}
/**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
+ kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+ dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+ dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.dsi.data);
+ dev_priv->vbt.dsi.data = NULL;
+ kfree(dev_priv->vbt.dsi.pps);
+ dev_priv->vbt.dsi.pps = NULL;
+ kfree(dev_priv->vbt.dsi.config);
+ dev_priv->vbt.dsi.config = NULL;
+ kfree(dev_priv->vbt.dsi.deassert_seq);
+ dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
+/**
* intel_bios_is_tv_present - is integrated TV present in VBT
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index bd40fea16b4f..f54ddda9fdad 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -594,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
spin_unlock_irq(&b->rb_lock);
}
-static bool signal_valid(const struct drm_i915_gem_request *request)
-{
- return intel_wait_check_request(&request->signaling.wait, request);
-}
-
static bool signal_complete(const struct drm_i915_gem_request *request)
{
if (!request)
return false;
- /* If another process served as the bottom-half it may have already
- * signalled that this wait is already completed.
- */
- if (intel_wait_complete(&request->signaling.wait))
- return signal_valid(request);
-
- /* Carefully check if the request is complete, giving time for the
+ /*
+ * Carefully check if the request is complete, giving time for the
* seqno to be visible or if the GPU hung.
*/
- if (__i915_request_irq_complete(request))
- return true;
-
- return false;
+ return __i915_request_irq_complete(request);
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
@@ -659,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg)
request = i915_gem_request_get_rcu(request);
rcu_read_unlock();
if (signal_complete(request)) {
- local_bh_disable();
- dma_fence_signal(&request->fence);
- local_bh_enable(); /* kick start the tasklets */
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &request->fence.flags)) {
+ local_bh_disable();
+ dma_fence_signal(&request->fence);
+ GEM_BUG_ON(!i915_gem_request_completed(request));
+ local_bh_enable(); /* kick start the tasklets */
+ }
spin_lock_irq(&b->rb_lock);
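The added test_bit() check skips the locked signaling path when the fence is already signaled. A rough C11-atomics model of that signal-at-most-once shape (the real dma_fence code is more involved):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag signaled = ATOMIC_FLAG_INIT;

/* Signal at most once: the flag test makes repeat calls cheap and
 * keeps the expensive work off the already-signaled fast path. */
static void fence_signal(void)
{
    if (atomic_flag_test_and_set(&signaled))
        return; /* already signaled, nothing to do */
    printf("signaling fence (runs once)\n");
}

int main(void)
{
    fence_signal();
    fence_signal();
    return 0;
}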
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 5dc118f26b51..1704c8897afd 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1952,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
+ /*
+ * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+ * than 320000 kHz.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+ IS_VALLEYVIEW(dev_priv))
+ min_cdclk = max(320000, min_cdclk);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index d790bdc227ff..fa960cfd2764 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
bool idle = true;
- intel_runtime_pm_get(dev_priv);
+ /* If the whole device is asleep, the engine must be idle */
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return true;
/* First check that no commands are left in the ring */
if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
@@ -1943,16 +1945,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
*/
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists *execlists = &engine->execlists;
unsigned long flags;
+ int err = 0;
if (!intel_engine_supports_stats(engine))
return -ENODEV;
+ tasklet_disable(&execlists->tasklet);
spin_lock_irqsave(&engine->stats.lock, flags);
- if (engine->stats.enabled == ~0)
- goto busy;
+
+ if (unlikely(engine->stats.enabled == ~0)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
if (engine->stats.enabled++ == 0) {
- struct intel_engine_execlists *execlists = &engine->execlists;
const struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
@@ -1967,14 +1975,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (engine->stats.active)
engine->stats.start = engine->stats.enabled_at;
}
- spin_unlock_irqrestore(&engine->stats.lock, flags);
-
- return 0;
-busy:
+unlock:
spin_unlock_irqrestore(&engine->stats.lock, flags);
+ tasklet_enable(&execlists->tasklet);
- return -EBUSY;
+ return err;
}
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
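The rework above funnels both exits through one unlock label so the tasklet_disable()/tasklet_enable() and lock/unlock pairs stay balanced on every path. A compact userspace rendering of that structure, with a pthread mutex standing in for the spinlock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int enabled;

static int enable_stats(void)
{
    int err = 0;

    pthread_mutex_lock(&lock);
    if (enabled == ~0u) {        /* refcount saturated */
        err = -EBUSY;
        goto unlock;
    }
    if (enabled++ == 0)
        printf("first enable: take start snapshot\n");
unlock:
    pthread_mutex_unlock(&lock); /* single exit: always released */
    return err;
}

int main(void)
{
    printf("enable -> %d\n", enable_stats());
    printf("enable -> %d\n", enable_stats());
    return 0;
}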
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c5ff203e42d6..a0e7a6c2a57c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -366,20 +366,6 @@ struct intel_engine_cs {
*/
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
- /**
- * @busy_stats: Has enablement of engine stats tracking been
- * requested.
- */
- bool busy_stats;
- /**
- * @disable_busy_stats: Work item for busy stats disabling.
- *
- * Same as with @enable_busy_stats action, with the difference
- * that we delay it in case there are rapid enable-disable
- * actions, which can happen during tool startup (like perf
- * stat).
- */
- struct delayed_work disable_busy_stats;
} pmu;
/*
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index bf62303571b3..3695cde669f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
void
nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
{
- if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
+ if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
return;
nvkm_debug(&therm->subdev,
@@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
void
nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
{
- if (!therm->func->clkgate_fini || !therm->clkgating_enabled)
+ if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)
return;
nvkm_debug(&therm->subdev,
@@ -395,7 +395,7 @@ void
nvkm_therm_clkgate_init(struct nvkm_therm *therm,
const struct nvkm_therm_clkgate_pack *p)
{
- if (!therm->func->clkgate_init || !therm->clkgating_enabled)
+ if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled)
return;
therm->func->clkgate_init(therm, p);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 4bdbf77f7197..72c338eb5fae 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
const struct tjmax_model *tm = &tjmax_model_table[i];
if (c->x86_model == tm->model &&
- (tm->mask == ANY || c->x86_mask == tm->mask))
+ (tm->mask == ANY || c->x86_stepping == tm->mask))
return tm->tjmax;
}
/* Early chips have no MSR for TjMax */
- if (c->x86_model == 0xf && c->x86_mask < 4)
+ if (c->x86_model == 0xf && c->x86_stepping < 4)
usemsr_ee = 0;
if (c->x86_model > 0xe && usemsr_ee) {
@@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu)
* Readings might stop updating when the processor has entered a deep
* sleep state; fixed for stepping D0 (6EC).
*/
- if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
+ if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index ef91b8a67549..84e91286fc4f 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
if (c->x86 < 6) /* Any CPU with family lower than 6 */
return 0; /* doesn't have VID */
- vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
+ vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
if (vrm_ret == 134)
vrm_ret = get_via_model_d_vrm();
if (vrm_ret == 0)
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 06b4e1c78bd8..051a72eecb24 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -129,7 +129,10 @@ static ssize_t temp1_input_show(struct device *dev,
data->read_tempreg(data->pdev, &regval);
temp = (regval >> 21) * 125;
- temp -= data->temp_offset;
+ if (temp > data->temp_offset)
+ temp -= data->temp_offset;
+ else
+ temp = 0;
return sprintf(buf, "%u\n", temp);
}
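The new comparison matters because temp is unsigned: subtracting a larger offset would wrap around to a huge bogus reading instead of going negative. In miniature:

#include <stdio.h>

int main(void)
{
    unsigned int temp = 1000, offset = 1500;

    /* Without the check, temp - offset would wrap to ~4294966796. */
    temp = (temp > offset) ? temp - offset : 0;
    printf("clamped temp = %u\n", temp); /* 0 */
    return 0;
}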
@@ -227,7 +230,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
* and AM3 formats, but that's the best we can do.
*/
return boot_cpu_data.x86_model < 4 ||
- (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
+ (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}
static int k10temp_probe(struct pci_dev *pdev,
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 5a632bcf869b..e59f9113fb93 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
return -ENOMEM;
model = boot_cpu_data.x86_model;
- stepping = boot_cpu_data.x86_mask;
+ stepping = boot_cpu_data.x86_stepping;
/* feature available since SH-C0, exclude older revisions */
if ((model == 4 && stepping == 0) ||
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 62f541f968f6..07074820a167 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -375,6 +375,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dev->ofdev.dev.of_node = np;
dev->ofdev.archdata.dma_mask = 0xffffffffUL;
dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
+ dev->ofdev.dev.coherent_dma_mask = dev->ofdev.archdata.dma_mask;
dev->ofdev.dev.parent = parent;
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d6de00f367ef..68136806d365 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -903,7 +903,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
- bio->bi_status = io_error;
+ if (io_error)
+ bio->bi_status = io_error;
bio_endio(bio);
}
}
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index d9aa407db06a..2dd2db9bc1c9 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -277,7 +277,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
struct ocxl_context *ctx = file->private_data;
struct ocxl_kernel_event_header header;
ssize_t rc;
- size_t used = 0;
+ ssize_t used = 0;
DEFINE_WAIT(event_wait);
memset(&header, 0, sizeof(header));
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 229dc18f0581..768972af8b85 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host)
char pio_limit_string[20];
int ret;
- mmc->f_max = host->max_clk;
+ if (!mmc->f_max || mmc->f_max > host->max_clk)
+ mmc->f_max = host->max_clk;
mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 22438ebfe4e6..4f972b879fe6 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -717,22 +717,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct meson_host *host = mmc_priv(mmc);
- int ret;
-
- /*
- * If this is the initial tuning, try to get a sane Rx starting
- * phase before doing the actual tuning.
- */
- if (!mmc->doing_retune) {
- ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
-
- if (ret)
- return ret;
- }
-
- ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
- if (ret)
- return ret;
return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}
@@ -763,9 +747,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
- /* Reset phases */
+ /* Reset rx phase */
clk_set_phase(host->rx_clk, 0);
- clk_set_phase(host->tx_clk, 270);
break;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index e6b8c59f2c0d..736ac887303c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -328,7 +328,7 @@ config MTD_NAND_MARVELL
tristate "NAND controller support on Marvell boards"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
COMPILE_TEST
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
help
This enables the NAND flash controller driver for Marvell boards,
including:
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 80d31a58e558..f367144f3c6f 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
if (mtd->oobsize > 64)
mtd->oobsize = 64;
- /*
- * mtd->ecclayout is not specified here because we're using the
- * default large page ECC layout defined in NAND core.
- */
+ /* Use default large page ECC layout defined in NAND core */
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
if (chip->ecc.strength == 32) {
nfc->ecc_mode = ECC_60_BYTE;
chip->ecc.bytes = 60;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f431c32774f3..0fe7ea35c221 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -120,8 +120,12 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
int ret;
ret = nvme_reset_ctrl(ctrl);
- if (!ret)
+ if (!ret) {
flush_work(&ctrl->reset_work);
+ if (ctrl->state != NVME_CTRL_LIVE)
+ ret = -ENETRESET;
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
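Flushing the reset work only proves it ran, not that the controller came back, hence the added state check. A sketch of the wrapper shape, with a direct call standing in for the queued work item and hypothetical names throughout:

#include <errno.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_DEAD };

static enum ctrl_state state;

static void reset_work(void) { state = CTRL_DEAD; /* pretend reset failed */ }

static int reset_ctrl_sync(void)
{
    reset_work();              /* kernel: queue then flush_work() */
    if (state != CTRL_LIVE)    /* reset completed but didn't recover */
        return -ENETRESET;
    return 0;
}

int main(void)
{
    printf("reset -> %d\n", reset_ctrl_sync());
    return 0;
}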
@@ -265,7 +269,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (new_state) {
case NVME_CTRL_ADMIN_ONLY:
switch (old_state) {
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -276,7 +280,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -294,9 +298,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
break;
}
break;
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
switch (old_state) {
- case NVME_CTRL_LIVE:
+ case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
changed = true;
/* FALLTHRU */
@@ -309,7 +313,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_LIVE:
case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -518,9 +522,11 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
- range[n].cattr = cpu_to_le32(0);
- range[n].nlb = cpu_to_le32(nlb);
- range[n].slba = cpu_to_le64(slba);
+ if (n < segments) {
+ range[n].cattr = cpu_to_le32(0);
+ range[n].nlb = cpu_to_le32(nlb);
+ range[n].slba = cpu_to_le64(slba);
+ }
n++;
}
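The `n < segments` guard lets the loop keep counting ranges past the allocated array, so a count mismatch remains detectable afterwards, while never writing out of bounds. In miniature:

#include <stdio.h>

int main(void)
{
    int range[2], segments = 2, n = 0;

    for (int i = 0; i < 4; i++) {   /* more inputs than slots */
        if (n < segments)
            range[n] = i;           /* write only while in bounds */
        n++;                        /* but count everything */
    }
    if (n != segments)
        printf("counted %d ranges but only %d fit\n", n, segments);
    printf("stored: %d %d\n", range[0], range[1]);
    return 0;
}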
@@ -794,13 +800,9 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
- struct nvme_command c;
struct request *rq;
- memset(&c, 0, sizeof(c));
- c.common.opcode = nvme_admin_keep_alive;
-
- rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+ rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
NVME_QID_ANY);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -832,6 +834,8 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
return;
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+ ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
@@ -1117,14 +1121,19 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
- struct nvme_ns *ns;
+ struct nvme_ns *ns, *next;
+ LIST_HEAD(rm_list);
mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->disk && nvme_revalidate_disk(ns->disk))
- nvme_ns_remove(ns);
+ if (ns->disk && nvme_revalidate_disk(ns->disk)) {
+ list_move_tail(&ns->list, &rm_list);
+ }
}
mutex_unlock(&ctrl->namespaces_mutex);
+
+ list_for_each_entry_safe(ns, next, &rm_list, list)
+ nvme_ns_remove(ns);
}
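Moving doomed namespaces onto a private list under the mutex and removing them only after the unlock avoids tearing entries down while holding the lock, and avoids mutating the list being iterated. The same pattern with a minimal singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; int bad; struct node *next; };

int main(void)
{
    /* Build a small list: ids 0..3, odd ones marked for removal. */
    struct node *head = NULL, *rm = NULL;
    for (int i = 3; i >= 0; i--) {
        struct node *n = malloc(sizeof(*n));
        n->id = i; n->bad = i & 1; n->next = head; head = n;
    }

    /* Phase 1 (under the lock in the kernel): unlink doomed nodes
     * and park them on a private removal list. */
    for (struct node **pp = &head; *pp; ) {
        struct node *n = *pp;
        if (n->bad) { *pp = n->next; n->next = rm; rm = n; }
        else pp = &n->next;
    }

    /* Phase 2 (after unlock): tear down at leisure. */
    while (rm) {
        struct node *n = rm; rm = rm->next;
        printf("removing %d\n", n->id);
        free(n);
    }
    while (head) { struct node *n = head; head = head->next; free(n); }
    return 0;
}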
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -2687,7 +2696,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
[NVME_CTRL_LIVE] = "live",
[NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
- [NVME_CTRL_RECONNECTING]= "reconnecting",
+ [NVME_CTRL_CONNECTING] = "connecting",
[NVME_CTRL_DELETING] = "deleting",
[NVME_CTRL_DEAD] = "dead",
};
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 25b19f722f5b..a3145d90c1d2 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -171,13 +171,14 @@ static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
cmd->common.opcode != nvme_fabrics_command ||
cmd->fabrics.fctype != nvme_fabrics_type_connect) {
/*
- * Reconnecting state means transport disruption, which can take
- * a long time and even might fail permanently, fail fast to
- * give upper layers a chance to failover.
+ * Connecting state means transport disruption or initial
+ * establishment, which can take a long time and might even
+ * fail permanently; fail fast to give upper layers a chance
+ * to fail over.
* Deleting state means that the ctrl will never accept commands
* again, fail it permanently.
*/
- if (ctrl->state == NVME_CTRL_RECONNECTING ||
+ if (ctrl->state == NVME_CTRL_CONNECTING ||
ctrl->state == NVME_CTRL_DELETING) {
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
return BLK_STS_IOERR;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b856d7c919d2..7f51f8414b97 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -55,9 +55,7 @@ struct nvme_fc_queue {
enum nvme_fcop_flags {
FCOP_FLAGS_TERMIO = (1 << 0),
- FCOP_FLAGS_RELEASED = (1 << 1),
- FCOP_FLAGS_COMPLETE = (1 << 2),
- FCOP_FLAGS_AEN = (1 << 3),
+ FCOP_FLAGS_AEN = (1 << 1),
};
struct nvmefc_ls_req_op {
@@ -532,7 +530,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
switch (ctrl->ctrl.state) {
case NVME_CTRL_NEW:
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
/*
* As all reconnects were suppressed, schedule a
* connect.
@@ -777,7 +775,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
}
break;
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
/*
* The association has already been terminated and the
* controller is attempting reconnects. No need to do anything
@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
/* *********************** NVME Ctrl Routines **************************** */
-static void __nvme_fc_final_op_cleanup(struct request *rq);
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static int
@@ -1512,13 +1509,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
- int state;
+ unsigned long flags;
+ int opstate;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+ if (opstate != FCPOP_STATE_ACTIVE)
+ atomic_set(&op->state, opstate);
+ else if (ctrl->flags & FCCTRL_TERMIO)
+ ctrl->iocnt++;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
- state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
- if (state != FCPOP_STATE_ACTIVE) {
- atomic_set(&op->state, state);
+ if (opstate != FCPOP_STATE_ACTIVE)
return -ECANCELED;
- }
ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
&ctrl->rport->remoteport,
@@ -1532,60 +1535,26 @@ static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
- unsigned long flags;
- int i, ret;
-
- for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
- if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
- continue;
-
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO) {
- ctrl->iocnt++;
- aen_op->flags |= FCOP_FLAGS_TERMIO;
- }
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- ret = __nvme_fc_abort_op(ctrl, aen_op);
- if (ret) {
- /*
- * if __nvme_fc_abort_op failed the io wasn't
- * active. Thus this call path is running in
- * parallel to the io complete. Treat as non-error.
- */
+ int i;
- /* back out the flags/counters */
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO)
- ctrl->iocnt--;
- aen_op->flags &= ~FCOP_FLAGS_TERMIO;
- spin_unlock_irqrestore(&ctrl->lock, flags);
- return;
- }
- }
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+ __nvme_fc_abort_op(ctrl, aen_op);
}
-static inline int
+static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
- struct nvme_fc_fcp_op *op)
+ struct nvme_fc_fcp_op *op, int opstate)
{
unsigned long flags;
- bool complete_rq = false;
- spin_lock_irqsave(&ctrl->lock, flags);
- if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+ if (opstate == FCPOP_STATE_ABORTED) {
+ spin_lock_irqsave(&ctrl->lock, flags);
if (ctrl->flags & FCCTRL_TERMIO) {
if (!--ctrl->iocnt)
wake_up(&ctrl->ioabort_wait);
}
+ spin_unlock_irqrestore(&ctrl->lock, flags);
}
- if (op->flags & FCOP_FLAGS_RELEASED)
- complete_rq = true;
- else
- op->flags |= FCOP_FLAGS_COMPLETE;
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- return complete_rq;
}
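The atomic_xchg() in the reworked __nvme_fc_abort_op() is the crux: exactly one caller can observe FCPOP_STATE_ACTIVE and thereby own the abort; any other caller restores the state it found. A C11-atomics model:

#include <stdatomic.h>
#include <stdio.h>

enum { OP_IDLE, OP_ACTIVE, OP_ABORTED, OP_COMPLETE };

static _Atomic int op_state = OP_ACTIVE;

/* Returns 1 if this caller won ownership of the abort. */
static int try_abort(void)
{
    int old = atomic_exchange(&op_state, OP_ABORTED);
    if (old != OP_ACTIVE) {
        atomic_store(&op_state, old); /* not ours: put it back */
        return 0;
    }
    return 1;
}

int main(void)
{
    printf("first abort wins: %d\n", try_abort());
    printf("second abort loses: %d\n", try_abort());
    return 0;
}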
static void
@@ -1601,6 +1570,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
union nvme_result result;
bool terminate_assoc = true;
+ int opstate;
/*
* WARNING:
@@ -1639,11 +1609,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
* association to be terminated.
*/
+ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
sizeof(op->rsp_iu), DMA_FROM_DEVICE);
- if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
- op->flags & FCOP_FLAGS_TERMIO)
+ if (opstate == FCPOP_STATE_ABORTED)
status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
else if (freq->status)
status = cpu_to_le16(NVME_SC_INTERNAL << 1);
@@ -1708,7 +1679,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
done:
if (op->flags & FCOP_FLAGS_AEN) {
nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
- __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
atomic_set(&op->state, FCPOP_STATE_IDLE);
op->flags = FCOP_FLAGS_AEN; /* clear other flags */
nvme_fc_ctrl_put(ctrl);
@@ -1722,13 +1693,11 @@ done:
if (status &&
(blk_queue_dying(rq->q) ||
ctrl->ctrl.state == NVME_CTRL_NEW ||
- ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+ ctrl->ctrl.state == NVME_CTRL_CONNECTING))
status |= cpu_to_le16(NVME_SC_DNR << 1);
- if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
- __nvme_fc_final_op_cleanup(rq);
- else
- nvme_end_request(rq, status, result);
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+ nvme_end_request(rq, status, result);
check_error:
if (terminate_assoc)
@@ -2415,46 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
}
static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
atomic_set(&op->state, FCPOP_STATE_IDLE);
- op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
- FCOP_FLAGS_COMPLETE);
nvme_fc_unmap_data(ctrl, rq, op);
nvme_complete_rq(rq);
nvme_fc_ctrl_put(ctrl);
-
-}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- struct nvme_fc_ctrl *ctrl = op->ctrl;
- unsigned long flags;
- bool completed = false;
-
- /*
- * the core layer, on controller resets after calling
- * nvme_shutdown_ctrl(), calls complete_rq without our
- * calling blk_mq_complete_request(), thus there may still
- * be live i/o outstanding with the LLDD. Means transport has
- * to track complete calls vs fcpio_done calls to know what
- * path to take on completes and dones.
- */
- spin_lock_irqsave(&ctrl->lock, flags);
- if (op->flags & FCOP_FLAGS_COMPLETE)
- completed = true;
- else
- op->flags |= FCOP_FLAGS_RELEASED;
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- if (completed)
- __nvme_fc_final_op_cleanup(rq);
}
/*
@@ -2476,35 +2415,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
struct nvme_ctrl *nctrl = data;
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
- unsigned long flags;
- int status;
if (!blk_mq_request_started(req))
return;
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO) {
- ctrl->iocnt++;
- op->flags |= FCOP_FLAGS_TERMIO;
- }
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- status = __nvme_fc_abort_op(ctrl, op);
- if (status) {
- /*
- * if __nvme_fc_abort_op failed the io wasn't
- * active. Thus this call path is running in
- * parallel to the io complete. Treat as non-error.
- */
-
- /* back out the flags/counters */
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO)
- ctrl->iocnt--;
- op->flags &= ~FCOP_FLAGS_TERMIO;
- spin_unlock_irqrestore(&ctrl->lock, flags);
- return;
- }
+ __nvme_fc_abort_op(ctrl, op);
}
@@ -2943,7 +2858,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
bool recon = true;
- if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
return;
if (portptr->port_state == FC_OBJSTATE_ONLINE)
@@ -2991,10 +2906,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
/* will block while waiting for io to terminate */
nvme_fc_delete_association(ctrl);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: error_recovery: Couldn't change state "
- "to RECONNECTING\n", ctrl->cnum);
+ "to CONNECTING\n", ctrl->cnum);
return;
}
@@ -3195,7 +3110,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
* transport errors (frame drop, LS failure) inherently must kill
* the association. The transport is coded so that any command used
* to create the association (prior to a LIVE state transition
- * while NEW or RECONNECTING) will fail if it completes in error or
+ * while NEW or CONNECTING) will fail if it completes in error or
* times out.
*
* As such: as the connect request was most likely due to a
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 8e4550fa08f8..0521e4707d1c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -123,7 +123,7 @@ enum nvme_ctrl_state {
NVME_CTRL_LIVE,
NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
- NVME_CTRL_RECONNECTING,
+ NVME_CTRL_CONNECTING,
NVME_CTRL_DELETING,
NVME_CTRL_DEAD,
};
@@ -183,6 +183,7 @@ struct nvme_ctrl {
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
+ struct nvme_command ka_cmd;
struct work_struct fw_act_work;
/* Power saving configuration */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6fe7af00a1f4..73036d2fbbd5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1141,7 +1141,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
/* If there is a reset/reinit ongoing, we shouldn't reset again. */
switch (dev->ctrl.state) {
case NVME_CTRL_RESETTING:
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
return false;
default:
break;
@@ -1215,13 +1215,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* cancellation error. All outstanding requests are completed on
* shutdown, so we return BLK_EH_HANDLED.
*/
- if (dev->ctrl.state == NVME_CTRL_RESETTING) {
+ switch (dev->ctrl.state) {
+ case NVME_CTRL_CONNECTING:
+ case NVME_CTRL_RESETTING:
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
return BLK_EH_HANDLED;
+ default:
+ break;
}
/*
@@ -1364,18 +1368,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
- unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
- dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
- nvmeq->sq_cmds_io = dev->cmb + offset;
- } else {
- nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
- &nvmeq->sq_dma_addr, GFP_KERNEL);
- if (!nvmeq->sq_cmds)
- return -ENOMEM;
- }
+ /* CMB SQEs will be mapped before creation */
+ if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+ return 0;
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->sq_cmds)
+ return -ENOMEM;
return 0;
}
@@ -1449,6 +1449,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
struct nvme_dev *dev = nvmeq->dev;
int result;
+ if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+ dev->ctrl.page_size);
+ nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+ nvmeq->sq_cmds_io = dev->cmb + offset;
+ }
+
nvmeq->cq_vector = qid - 1;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
@@ -2288,12 +2295,12 @@ static void nvme_reset_work(struct work_struct *work)
nvme_dev_disable(dev, false);
/*
- * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the
+ * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
* initializing procedure here.
*/
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
dev_warn(dev->ctrl.device,
- "failed to mark controller RECONNECTING\n");
+ "failed to mark controller CONNECTING\n");
goto out;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2bc059f7d73c..3a51ed50eff2 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -887,7 +887,7 @@ free_ctrl:
static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
{
/* If we are resetting/deleting then do nothing */
- if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
ctrl->ctrl.state == NVME_CTRL_LIVE);
return;
@@ -973,7 +973,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_start_queues(&ctrl->ctrl);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure should never happen */
WARN_ON_ONCE(1);
return;
@@ -1756,7 +1756,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
nvme_stop_ctrl(&ctrl->ctrl);
nvme_rdma_shutdown_ctrl(ctrl, false);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure should never happen */
WARN_ON_ONCE(1);
return;
@@ -1784,11 +1784,8 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
return;
out_fail:
- dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
- nvme_remove_namespaces(&ctrl->ctrl);
- nvme_rdma_shutdown_ctrl(ctrl, true);
- nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
+ ++ctrl->ctrl.nr_reconnects;
+ nvme_rdma_reconnect_or_remove(ctrl);
}
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -1942,6 +1939,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
if (!ctrl->queues)
goto out_uninit_ctrl;
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
+ WARN_ON_ONCE(!changed);
+
ret = nvme_rdma_configure_admin_queue(ctrl, true);
if (ret)
goto out_kfree_queues;
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 0a4372a016f2..28bbdff4a88b 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -105,10 +105,13 @@ static void nvmet_execute_flush(struct nvmet_req *req)
static u16 nvmet_discard_range(struct nvmet_ns *ns,
struct nvme_dsm_range *range, struct bio **bio)
{
- if (__blkdev_issue_discard(ns->bdev,
+ int ret;
+
+ ret = __blkdev_issue_discard(ns->bdev,
le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
- GFP_KERNEL, 0, bio))
+ GFP_KERNEL, 0, bio);
+ if (ret && ret != -EOPNOTSUPP)
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
}
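Discard is an advisory operation, so a device that simply lacks support should not turn the command into an I/O error; only real failures are escalated. The filtering shape:

#include <errno.h>
#include <stdio.h>

/* Pretend backend: this device has no discard support. */
static int issue_discard(void) { return -EOPNOTSUPP; }

static int discard_range(void)
{
    int ret = issue_discard();

    /* "Not supported" is fine for an optional hint; anything else
     * is a genuine failure worth reporting. */
    if (ret && ret != -EOPNOTSUPP)
        return -EIO;
    return 0;
}

int main(void)
{
    printf("discard_range -> %d\n", discard_range());
    return 0;
}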
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 36ed84e26d9c..f46828e3b082 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -977,11 +977,11 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
-static void *
+static const void *
of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
const struct device *dev)
{
- return (void *)of_device_get_match_data(dev);
+ return of_device_get_match_data(dev);
}
const struct fwnode_operations of_fwnode_ops = {
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 2d87bc1adf38..0c0910709435 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -55,7 +55,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
if (max_opps <= 0)
return max_opps ? max_opps : -ENODATA;
- freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
if (!freq_table)
return -ENOMEM;
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2a68f59d2228..c52c6723374b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -127,24 +127,6 @@ static const struct dmi_system_id dell_device_table[] __initconst = {
},
},
{
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/
- },
- },
- {
.ident = "Dell Computer Corporation",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
@@ -1279,7 +1261,7 @@ static int kbd_get_state(struct kbd_state *state)
struct calling_interface_buffer buffer;
int ret;
- dell_fill_request(&buffer, 0, 0, 0, 0);
+ dell_fill_request(&buffer, 0x1, 0, 0, 0);
ret = dell_send_request(&buffer,
CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
if (ret)
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 5b6f18b18801..535199c9e6bc 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
/*
* ACPI Helpers
*/
-#define IDEAPAD_EC_TIMEOUT (100) /* in ms */
+#define IDEAPAD_EC_TIMEOUT (200) /* in ms */
static int read_method_int(acpi_handle handle, const char *method, int *val)
{
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index daa68acbc900..c0c8945603cb 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev)
goto probe_failure;
}
- buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL);
+ buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto probe_string_failure;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index ba2e0856d22c..8f5c1d7f751a 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -1297,6 +1297,9 @@ static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
vcdev->device_lost = true;
rc = NOTIFY_DONE;
break;
+ case CIO_OPER:
+ rc = NOTIFY_OK;
+ break;
default:
rc = NOTIFY_DONE;
break;
@@ -1309,6 +1312,27 @@ static struct ccw_device_id virtio_ids[] = {
{},
};
+#ifdef CONFIG_PM_SLEEP
+static int virtio_ccw_freeze(struct ccw_device *cdev)
+{
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+ return virtio_device_freeze(&vcdev->vdev);
+}
+
+static int virtio_ccw_restore(struct ccw_device *cdev)
+{
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+ int ret;
+
+ ret = virtio_ccw_set_transport_rev(vcdev);
+ if (ret)
+ return ret;
+
+ return virtio_device_restore(&vcdev->vdev);
+}
+#endif
+
static struct ccw_driver virtio_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
@@ -1321,6 +1345,11 @@ static struct ccw_driver virtio_ccw_driver = {
.set_online = virtio_ccw_online,
.notify = virtio_ccw_cio_notify,
.int_class = IRQIO_VIR,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtio_ccw_freeze,
+ .thaw = virtio_ccw_restore,
+ .restore = virtio_ccw_restore,
+#endif
};
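Pointing both .thaw and .restore at the same handler works because either resume path must renegotiate the transport revision before restoring the virtio device. The general shape of optional PM hooks behind a config switch, as a userspace sketch with hypothetical names:

#include <stdio.h>

#define CONFIG_PM_SLEEP 1

struct drv_ops {
    int (*freeze)(void);
    int (*restore)(void);
};

#ifdef CONFIG_PM_SLEEP
static int my_freeze(void)  { printf("freeze\n"); return 0; }
static int my_restore(void) { printf("renegotiate, then restore\n"); return 0; }
#endif

static const struct drv_ops ops = {
#ifdef CONFIG_PM_SLEEP
    .freeze  = my_freeze,
    .restore = my_restore,
#endif
};

int main(void)
{
    if (ops.freeze)
        ops.freeze();   /* core calls hooks only when present */
    if (ops.restore)
        ops.restore();
    return 0;
}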
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index f699abab1787..148f3ee70286 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -19,6 +19,12 @@ config USB_EHCI_BIG_ENDIAN_MMIO
config USB_EHCI_BIG_ENDIAN_DESC
bool
+config USB_UHCI_BIG_ENDIAN_MMIO
+ bool
+
+config USB_UHCI_BIG_ENDIAN_DESC
+ bool
+
menuconfig USB_SUPPORT
bool "USB support"
depends on HAS_IOMEM
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 6150bed7cfa8..4fcfb3084b36 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -633,14 +633,6 @@ config USB_UHCI_ASPEED
bool
default y if ARCH_ASPEED
-config USB_UHCI_BIG_ENDIAN_MMIO
- bool
- default y if SPARC_LEON
-
-config USB_UHCI_BIG_ENDIAN_DESC
- bool
- default y if SPARC_LEON
-
config USB_FHCI_HCD
tristate "Freescale QE USB Host Controller support"
depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
index 6082f653c68a..67773e8bbb95 100644
--- a/drivers/video/fbdev/geode/video_gx.c
+++ b/drivers/video/fbdev/geode/video_gx.c
@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
int timeout = 1000;
/* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
- if (cpu_data(0).x86_mask == 1) {
+ if (cpu_data(0).x86_stepping == 1) {
pll_table = gx_pll_table_14MHz;
pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
} else {
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 753d9cb437d0..aedbee3b2838 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -60,6 +60,7 @@ struct sock_mapping {
bool active_socket;
struct list_head list;
struct socket *sock;
+ atomic_t refcount;
union {
struct {
int irq;
@@ -93,6 +94,32 @@ struct sock_mapping {
};
};
+static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock)
+{
+ struct sock_mapping *map;
+
+ if (!pvcalls_front_dev ||
+ dev_get_drvdata(&pvcalls_front_dev->dev) == NULL)
+ return ERR_PTR(-ENOTCONN);
+
+ map = (struct sock_mapping *)sock->sk->sk_send_head;
+ if (map == NULL)
+ return ERR_PTR(-ENOTSOCK);
+
+ pvcalls_enter();
+ atomic_inc(&map->refcount);
+ return map;
+}
+
+static inline void pvcalls_exit_sock(struct socket *sock)
+{
+ struct sock_mapping *map;
+
+ map = (struct sock_mapping *)sock->sk->sk_send_head;
+ atomic_dec(&map->refcount);
+ pvcalls_exit();
+}
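Counting users per socket, rather than relying only on the global pvcalls_refcount, is what later allows release() to spin until it is the sole holder of this particular map. A C11-atomics model of the enter/exit pair:

#include <stdatomic.h>
#include <stdio.h>

struct sock_map { _Atomic int refcount; };

static struct sock_map *enter_sock(struct sock_map *m)
{
    atomic_fetch_add(&m->refcount, 1);  /* pin the mapping */
    return m;
}

static void exit_sock(struct sock_map *m)
{
    atomic_fetch_sub(&m->refcount, 1);
}

int main(void)
{
    struct sock_map m = { 1 }; /* release() itself holds one ref */

    struct sock_map *h = enter_sock(&m);
    /* ... sendmsg/recvmsg-style work ... */
    exit_sock(h);

    /* release(): wait until we are the only user left */
    while (atomic_load(&m.refcount) > 1)
        ; /* cpu_relax() in the kernel */
    printf("safe to free map\n");
    return 0;
}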
+
static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
{
*req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
@@ -369,31 +396,23 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
return -EOPNOTSUPP;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *)sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
ret = create_active(map, &evtchn);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -423,7 +442,7 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
smp_rmb();
ret = bedata->rsp[req_id].ret;
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -488,23 +507,15 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
return -EOPNOTSUPP;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
mutex_lock(&map->active.out_mutex);
if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
mutex_unlock(&map->active.out_mutex);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EAGAIN;
}
if (len > INT_MAX)
@@ -526,7 +537,7 @@ again:
tot_sent = sent;
mutex_unlock(&map->active.out_mutex);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return tot_sent;
}
@@ -591,19 +602,11 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))
return -EOPNOTSUPP;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
mutex_lock(&map->active.in_mutex);
if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
@@ -623,7 +626,7 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ret = 0;
mutex_unlock(&map->active.in_mutex);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -637,24 +640,16 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
return -EOPNOTSUPP;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (map == NULL) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -684,7 +679,7 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.status = PVCALLS_STATUS_BIND;
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return 0;
}
@@ -695,21 +690,13 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
struct xen_pvcalls_request *req;
int notify, req_id, ret;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
if (map->passive.status != PVCALLS_STATUS_BIND) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EOPNOTSUPP;
}
@@ -717,7 +704,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -741,7 +728,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.status = PVCALLS_STATUS_LISTEN;
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -753,21 +740,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
struct xen_pvcalls_request *req;
int notify, req_id, ret, evtchn, nonblock;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
if (map->passive.status != PVCALLS_STATUS_LISTEN) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EINVAL;
}
@@ -785,13 +764,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
goto received;
}
if (nonblock) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EAGAIN;
}
if (wait_event_interruptible(map->passive.inflight_accept_req,
!test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags))) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EINTR;
}
}
@@ -802,7 +781,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
@@ -810,7 +789,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -ENOMEM;
}
ret = create_active(map2, &evtchn);
@@ -819,7 +798,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
list_add_tail(&map2->list, &bedata->socket_mappings);
@@ -841,13 +820,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
/* We could check if we have received a response before returning. */
if (nonblock) {
WRITE_ONCE(map->passive.inflight_req_id, req_id);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EAGAIN;
}
if (wait_event_interruptible(bedata->inflight_req,
READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EINTR;
}
/* read req_id, then the content */
@@ -862,7 +841,7 @@ received:
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
pvcalls_front_free_map(bedata, map2);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -ENOMEM;
}
newsock->sk->sk_send_head = (void *)map2;
@@ -874,7 +853,7 @@ received:
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
wake_up(&map->passive.inflight_accept_req);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -965,23 +944,16 @@ __poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
struct sock_mapping *map;
__poll_t ret;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
return EPOLLNVAL;
- }
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return EPOLLNVAL;
- }
if (map->active_socket)
ret = pvcalls_front_poll_active(file, bedata, map, wait);
else
ret = pvcalls_front_poll_passive(file, bedata, map, wait);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -995,25 +967,20 @@ int pvcalls_front_release(struct socket *sock)
if (sock->sk == NULL)
return 0;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -EIO;
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map)) {
+ if (PTR_ERR(map) == -ENOTCONN)
+ return -EIO;
+ else
+ return 0;
}
-
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (map == NULL) {
- pvcalls_exit();
- return 0;
- }
-
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
sock->sk->sk_send_head = NULL;
@@ -1043,14 +1010,20 @@ int pvcalls_front_release(struct socket *sock)
/*
* We need to make sure that sendmsg/recvmsg on this socket have
* not started before we've cleared sk_send_head here. The
- * easiest (though not optimal) way to guarantee this is to see
- * that no pvcall (other than us) is in progress.
+ * easiest way to guarantee this is to see that no pvcall
+ * (other than ours) is in progress on this socket.
*/
- while (atomic_read(&pvcalls_refcount) > 1)
+ while (atomic_read(&map->refcount) > 1)
cpu_relax();
pvcalls_front_free_map(bedata, map);
} else {
+ wake_up(&bedata->inflight_req);
+ wake_up(&map->passive.inflight_accept_req);
+
+ while (atomic_read(&map->refcount) > 1)
+ cpu_relax();
+
spin_lock(&bedata->socket_lock);
list_del(&map->list);
spin_unlock(&bedata->socket_lock);
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 149c5e7efc89..092981171df1 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -76,6 +76,7 @@ struct xb_req_data {
struct list_head list;
wait_queue_head_t wq;
struct xsd_sockmsg msg;
+ uint32_t caller_req_id;
enum xsd_sockmsg_type type;
char *body;
const struct kvec *vec;
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 5b081a01779d..d239fc3c5e3d 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -309,6 +309,7 @@ static int process_msg(void)
goto out;
if (req->state == xb_req_state_wait_reply) {
+ req->msg.req_id = req->caller_req_id;
req->msg.type = state.msg.type;
req->msg.len = state.msg.len;
req->body = state.body;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 3e59590c7254..3f3b29398ab8 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
req->state = xb_req_state_queued;
init_waitqueue_head(&req->wq);
+ /* Save the caller req_id and restore it later in the reply */
+ req->caller_req_id = req->msg.req_id;
req->msg.req_id = xs_request_enter(req);
mutex_lock(&xb_write_mutex);
@@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t,
req->num_vecs = num_vecs;
req->cb = xs_wake_up;
+ msg.req_id = 0;
msg.tx_id = t.id;
msg.type = type;
msg.len = 0;
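Saving the caller's req_id before the transport overwrites it on the wire, and restoring it when the reply is matched, keeps the rewriting invisible to callers that compare the reply's id with the one they sent. The save/restore shape:

#include <stdio.h>

struct msg { unsigned int req_id; };
struct req { struct msg msg; unsigned int caller_req_id; };

static unsigned int next_transport_id = 100;

static void send_req(struct req *r)
{
    r->caller_req_id = r->msg.req_id;     /* save the caller's id */
    r->msg.req_id = next_transport_id++;  /* transport's own id on the wire */
}

static void handle_reply(struct req *r)
{
    r->msg.req_id = r->caller_req_id;     /* restore before handing back */
}

int main(void)
{
    struct req r = { .msg = { .req_id = 7 } };
    send_req(&r);
    handle_reply(&r);
    printf("caller sees req_id %u again\n", r.msg.req_id); /* 7 */
    return 0;
}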
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index e4054e533f6d..f94b2d8c744a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1264,7 +1264,16 @@ again:
while (node) {
ref = rb_entry(node, struct prelim_ref, rbnode);
node = rb_next(&ref->rbnode);
- WARN_ON(ref->count < 0);
+ /*
+ * ref->count < 0 can happen here if there are delayed
+ * refs with a node->action of BTRFS_DROP_DELAYED_REF.
+ * prelim_ref_insert() relies on this when merging
+ * identical refs to keep the overall count correct.
+ * prelim_ref_insert() will merge only those refs
+ * which compare identically. Any refs having
+ * e.g. different offsets would not be merged,
+ * and would retain their original ref->count < 0.
+ */
if (roots && ref->count && ref->root_id && ref->parent == 0) {
if (sc && sc->root_objectid &&
ref->root_id != sc->root_objectid) {
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index a1a40cf382e3..7ab5e0128f0c 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -821,7 +821,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
spin_unlock(&delayed_refs->lock);
if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(fs_info, record);
+ btrfs_qgroup_trace_extent_post(fs_info, record);
+
return 0;
free_head_ref:
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 05751a677da4..c1618ab9fecf 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2147,6 +2147,10 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 bytes;
struct request_queue *req_q;
+ if (!stripe->dev->bdev) {
+ ASSERT(btrfs_test_opt(fs_info, DEGRADED));
+ continue;
+ }
req_q = bdev_get_queue(stripe->dev->bdev);
if (!blk_queue_discard(req_q))
continue;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 53ca025655fc..a79299a89b7d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1335,8 +1335,11 @@ next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
- if (ret < 0)
+ if (ret < 0) {
+ if (cow_start != (u64)-1)
+ cur_offset = cow_start;
goto error;
+ }
if (ret > 0)
break;
leaf = path->nodes[0];
@@ -3385,6 +3388,11 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
ret = btrfs_orphan_reserve_metadata(trans, inode);
ASSERT(!ret);
if (ret) {
+ /*
+ * dec doesn't need spin_lock as ->orphan_block_rsv
+ * would be released only if ->orphan_inodes is
+ * zero.
+ */
atomic_dec(&root->orphan_inodes);
clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
&inode->runtime_flags);
@@ -3399,12 +3407,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans,
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
if (ret) {
- atomic_dec(&root->orphan_inodes);
if (reserve) {
clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
&inode->runtime_flags);
btrfs_orphan_release_metadata(inode);
}
+ /*
+ * btrfs_orphan_commit_root may race with us and set
+ * ->orphan_block_rsv to zero, in order to avoid that,
+ * decrease ->orphan_inodes after everything is done.
+ */
+ atomic_dec(&root->orphan_inodes);
if (ret != -EEXIST) {
clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&inode->runtime_flags);
@@ -3436,28 +3449,26 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
{
struct btrfs_root *root = inode->root;
int delete_item = 0;
- int release_rsv = 0;
int ret = 0;
- spin_lock(&root->orphan_lock);
if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&inode->runtime_flags))
delete_item = 1;
+ if (delete_item && trans)
+ ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
+
if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
&inode->runtime_flags))
- release_rsv = 1;
- spin_unlock(&root->orphan_lock);
+ btrfs_orphan_release_metadata(inode);
- if (delete_item) {
+ /*
+ * btrfs_orphan_commit_root may race with us and set ->orphan_block_rsv
+ * to zero, in order to avoid that, decrease ->orphan_inodes after
+ * everything is done.
+ */
+ if (delete_item)
atomic_dec(&root->orphan_inodes);
- if (trans)
- ret = btrfs_del_orphan_item(trans, root,
- btrfs_ino(inode));
- }
-
- if (release_rsv)
- btrfs_orphan_release_metadata(inode);
return ret;
}
@@ -5281,7 +5292,7 @@ void btrfs_evict_inode(struct inode *inode)
trace_btrfs_inode_evict(inode);
if (!root) {
- kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
+ clear_inode(inode);
return;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9e61dd624f7b..aa259d6986e1 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1442,8 +1442,13 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
int ret;
ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ btrfs_warn(fs_info,
+"error accounting new delayed refs extent (err code: %d), quota inconsistent",
+ ret);
+ return 0;
+ }
/*
* Here we don't need to get the lock of
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index afadaadab18e..4fd19b4d6675 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -29,6 +29,7 @@
#include "hash.h"
#include "compression.h"
#include "qgroup.h"
+#include "inode-map.h"
/* magic values for the inode_only field in btrfs_log_inode:
*
@@ -2472,6 +2473,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
clean_tree_block(fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
+ } else {
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+ clear_extent_buffer_dirty(next);
}
WARN_ON(root_owner !=
@@ -2552,6 +2556,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
clean_tree_block(fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
+ } else {
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+ clear_extent_buffer_dirty(next);
}
WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
@@ -2630,6 +2637,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
clean_tree_block(fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
+ } else {
+ if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
+ clear_extent_buffer_dirty(next);
}
WARN_ON(log->root_key.objectid !=
@@ -3018,13 +3028,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
- 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
+ 0, &start, &end,
+ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
NULL);
if (ret)
break;
clear_extent_bits(&log->dirty_log_pages, start, end,
- EXTENT_DIRTY | EXTENT_NEW);
+ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
}
/*
@@ -5677,6 +5688,23 @@ again:
path);
}
+ if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
+ struct btrfs_root *root = wc.replay_dest;
+
+ btrfs_release_path(path);
+
+ /*
+ * We have just replayed everything, and the highest
+ * objectid of fs roots probably has changed in case
+ * some inode_item's got replayed.
+ *
+ * root->objectid_mutex is not acquired as log replay
+ * could only happen during mount.
+ */
+ ret = btrfs_find_highest_objectid(root,
+ &root->highest_objectid);
+ }
+
key.offset = found_key.offset - 1;
wc.replay_dest->log_root = NULL;
free_extent_buffer(log->node);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b5036bd69e6a..2ceb924ca0d6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -645,6 +645,7 @@ static void btrfs_free_stale_devices(const char *path,
btrfs_sysfs_remove_fsid(fs_devs);
list_del(&fs_devs->list);
free_fs_devices(fs_devs);
+ break;
} else {
fs_devs->num_devices--;
list_del(&dev->dev_list);
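
The added break ends the scan once free_fs_devices() has torn down the whole btrfs_fs_devices the loop was inspecting; iterating further would touch freed memory. A sketch of the shape of the loop, with a hypothetical should_drop() predicate standing in for the path/device matching:

    struct btrfs_fs_devices *fs_devs, *tmp;

    list_for_each_entry_safe(fs_devs, tmp, &fs_uuids, list) {
        if (should_drop(fs_devs)) {          /* hypothetical predicate */
            list_del(&fs_devs->list);
            free_fs_devices(fs_devs);        /* frees fs_devs itself */
            break;   /* the devices being matched are gone - stop */
        }
    }
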
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 86863792f36a..86d6a4435c87 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -716,7 +716,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
__be64 *ptr;
sector_t lblock;
sector_t lend;
- int ret;
+ int ret = 0;
int eob;
unsigned int len;
struct buffer_head *bh;
@@ -728,12 +728,14 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
goto out;
}
- if ((flags & IOMAP_REPORT) && gfs2_is_stuffed(ip)) {
- gfs2_stuffed_iomap(inode, iomap);
- if (pos >= iomap->length)
- return -ENOENT;
- ret = 0;
- goto out;
+ if (gfs2_is_stuffed(ip)) {
+ if (flags & IOMAP_REPORT) {
+ gfs2_stuffed_iomap(inode, iomap);
+ if (pos >= iomap->length)
+ ret = -ENOENT;
+ goto out;
+ }
+ BUG_ON(!(flags & IOMAP_WRITE));
}
lblock = pos >> inode->i_blkbits;
@@ -744,7 +746,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
iomap->type = IOMAP_HOLE;
iomap->length = (u64)(lend - lblock) << inode->i_blkbits;
iomap->flags = IOMAP_F_MERGED;
- bmap_lock(ip, 0);
+ bmap_lock(ip, flags & IOMAP_WRITE);
/*
* Directory data blocks have a struct gfs2_meta_header header, so the
@@ -787,27 +789,28 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
iomap->flags |= IOMAP_F_BOUNDARY;
iomap->length = (u64)len << inode->i_blkbits;
- ret = 0;
-
out_release:
release_metapath(&mp);
- bmap_unlock(ip, 0);
+ bmap_unlock(ip, flags & IOMAP_WRITE);
out:
trace_gfs2_iomap_end(ip, iomap, ret);
return ret;
do_alloc:
- if (!(flags & IOMAP_WRITE)) {
- if (pos >= i_size_read(inode)) {
+ if (flags & IOMAP_WRITE) {
+ ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+ } else if (flags & IOMAP_REPORT) {
+ loff_t size = i_size_read(inode);
+ if (pos >= size)
ret = -ENOENT;
- goto out_release;
- }
- ret = 0;
- iomap->length = hole_size(inode, lblock, &mp);
- goto out_release;
+ else if (height <= ip->i_height)
+ iomap->length = hole_size(inode, lblock, &mp);
+ else
+ iomap->length = size - pos;
+ } else {
+ if (height <= ip->i_height)
+ iomap->length = hole_size(inode, lblock, &mp);
}
-
- ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
goto out_release;
}
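
The rewritten do_alloc label separates three cases the old code interleaved. A condensed restatement, eliding the i_height checks:

    if (flags & IOMAP_WRITE) {
        /* writes allocate the missing blocks */
        ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
    } else if (flags & IOMAP_REPORT) {
        /* FIEMAP-style queries report holes but stop at EOF */
        if (pos >= i_size_read(inode))
            ret = -ENOENT;
        else
            iomap->length = hole_size(inode, lblock, &mp);
    } else {
        /* plain lookups just report the hole, never allocate */
        iomap->length = hole_size(inode, lblock, &mp);
    }
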
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e8a93bc8285d..d1e82761de81 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -510,6 +510,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
/* we have to zero-fill user buffer even if no read */
if (copy_to_user(buffer, buf, tsz))
return -EFAULT;
+ } else if (m->type == KCORE_USER) {
+ /* User page is handled prior to normal kernel page: */
+ if (copy_to_user(buffer, (char *)start, tsz))
+ return -EFAULT;
} else {
if (kern_addr_valid(start)) {
/*
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index bc397573c43a..67ab280ad134 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -7,7 +7,8 @@
* @nr: Bit to set
* @addr: Address to count from
*
- * This operation is atomic and provides acquire barrier semantics.
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
* It can be used to implement bit locks.
*/
#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
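
The clarified comment matters for bit locks built on this helper: acquire ordering is only guaranteed on the successful path (old bit was 0), so a spin loop must keep retrying until it observes 0. A minimal sketch, assuming a hypothetical flags word with bit 0 as the lock bit:

    static unsigned long my_flags;           /* bit 0 is the lock */

    static void my_bit_lock(void)
    {
        /* acquire semantics apply on the iteration that returns 0 */
        while (test_and_set_bit_lock(0, &my_flags))
            cpu_relax();
    }

    static void my_bit_unlock(void)
    {
        clear_bit_unlock(0, &my_flags);      /* pairs with the acquire above */
    }
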
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 64e10746f282..968173ec2726 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -587,7 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
-void *acpi_get_match_data(const struct device *dev);
+const void *acpi_device_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -766,7 +766,7 @@ static inline const struct acpi_device_id *acpi_match_device(
return NULL;
}
-static inline void *acpi_get_match_data(const struct device *dev)
+static inline const void *acpi_device_get_match_data(const struct device *dev)
{
return NULL;
}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4f3df807cf8f..ed63f3b69c12 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -49,7 +49,7 @@ struct blk_stat_callback;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
-/* Must be consisitent with blk_mq_poll_stats_bkt() */
+/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16
/*
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 631354acfa72..73bc63e0a1c4 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -167,8 +167,6 @@
#if GCC_VERSION >= 40100
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-
-#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
#endif
#if GCC_VERSION >= 40300
@@ -196,6 +194,11 @@
#endif /* __CHECKER__ */
#endif /* GCC_VERSION >= 40300 */
+#if GCC_VERSION >= 40400
+#define __optimize(level) __attribute__((__optimize__(level)))
+#define __nostackprotector __optimize("no-stack-protector")
+#endif /* GCC_VERSION >= 40400 */
+
#if GCC_VERSION >= 40500
#ifndef __CHECKER__
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c2cc57a2f508..e835fc0423ec 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -277,6 +277,10 @@ unsigned long read_word_at_a_time(const void *addr)
#endif /* __ASSEMBLY__ */
+#ifndef __optimize
+# define __optimize(level)
+#endif
+
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
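
With the fallback above, __optimize() can be used unconditionally: on GCC >= 4.4 it expands to the optimize attribute, elsewhere to nothing. A hypothetical use, forcing one function to a specific optimisation level:

    /* sketch only: my_cold_path is not a real kernel function */
    static void __optimize("Os") my_cold_path(void)
    {
        /* rarely executed code, built for size */
    }
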
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 871f9e21810c..0b3fc229086c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -225,7 +225,7 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
}
#endif
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d4a2a7dcd72d..bf53d893ad02 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -170,6 +170,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
#define for_each_cpu_and(cpu, mask, and) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
#else
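
The new UP stub keeps callers of for_each_cpu_wrap() compiling on !SMP builds, where it degenerates to a single iteration over CPU 0. Typical SMP usage starts scanning at a preferred CPU and wraps around the mask; a hedged sketch with hypothetical selection logic:

    int cpu, target = -1;

    /* scan all online CPUs, starting just after @prev and wrapping */
    for_each_cpu_wrap(cpu, cpu_online_mask, prev + 1) {
        if (idle_cpu(cpu)) {
            target = cpu;
            break;
        }
    }
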
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 34fe8463d10e..eb9eab4ecd6d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -578,7 +578,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
/*
* This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
- * don't use this is new code.
+ * don't use this in new code.
*/
#ifndef arch_dma_supported
#define arch_dma_supported(dev, mask) (1)
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 4fa1a489efe4..4fe8f289b3f6 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -73,8 +73,8 @@ struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
bool (*device_is_available)(const struct fwnode_handle *fwnode);
- void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
- const struct device *dev);
+ const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
+ const struct device *dev);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 7ff25a808fef..80db19d3a505 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -10,6 +10,7 @@ enum kcore_type {
KCORE_VMALLOC,
KCORE_RAM,
KCORE_VMEMMAP,
+ KCORE_USER,
KCORE_OTHER,
};
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index c30b32e3c862..10191c28fc04 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -127,10 +127,4 @@ static __always_inline enum lru_list page_lru(struct page *page)
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
-#ifdef arch_unmap_kpfn
-extern void arch_unmap_kpfn(unsigned long pfn);
-#else
-static __always_inline void arch_unmap_kpfn(unsigned long pfn) { }
-#endif
-
#endif
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index b99bced39ac2..fbc98e2c8228 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -20,20 +20,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
unsigned long size)
{
/*
- * Warn developers about inappropriate array_index_nospec() usage.
- *
- * Even if the CPU speculates past the WARN_ONCE branch, the
- * sign bit of @index is taken into account when generating the
- * mask.
- *
- * This warning is compiled out when the compiler can infer that
- * @index and @size are less than LONG_MAX.
- */
- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
- "array_index_nospec() limited to range of [0, LONG_MAX]\n"))
- return 0;
-
- /*
* Always calculate and emit the mask even if the compiler
* thinks the mask is not needed. The compiler does not take
* into account the value of @index under speculation.
@@ -44,6 +30,26 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#endif
/*
+ * Warn developers about inappropriate array_index_nospec() usage.
+ *
+ * Even if the CPU speculates past the WARN_ONCE branch, the
+ * sign bit of @index is taken into account when generating the
+ * mask.
+ *
+ * This warning is compiled out when the compiler can infer that
+ * @index and @size are less than LONG_MAX.
+ */
+#define array_index_mask_nospec_check(index, size) \
+({ \
+ if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \
+ "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \
+ _mask = 0; \
+ else \
+ _mask = array_index_mask_nospec(index, size); \
+ _mask; \
+})
+
+/*
* array_index_nospec - sanitize an array index after a bounds check
*
* For a code sequence like:
@@ -61,7 +67,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
({ \
typeof(index) _i = (index); \
typeof(size) _s = (size); \
- unsigned long _mask = array_index_mask_nospec(_i, _s); \
+ unsigned long _mask = array_index_mask_nospec_check(_i, _s); \
\
BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \
BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \
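
Moving the WARN_ONCE out of array_index_mask_nospec() keeps the arch-overridable mask helper minimal while the range check still runs on every array_index_nospec() invocation. The intended call pattern is unchanged; a sketch with a hypothetical table:

    static const long my_table[16];

    long my_lookup(unsigned long index)
    {
        if (index >= ARRAY_SIZE(my_table))
            return -EINVAL;

        /* clamp @index under speculation before dereferencing */
        index = array_index_nospec(index, ARRAY_SIZE(my_table));
        return my_table[index];
    }
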
diff --git a/include/linux/property.h b/include/linux/property.h
index 769d372c1edf..2eea4b310fc2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -283,7 +283,7 @@ bool device_dma_supported(struct device *dev);
enum dev_dma_attr device_get_dma_attr(struct device *dev);
-void *device_get_match_data(struct device *dev);
+const void *device_get_match_data(struct device *dev);
int device_get_phy_mode(struct device *dev);
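
The const-qualified return type propagates through the whole chain (fwnode op, ACPI helper, device_get_match_data()), since the returned pointer ultimately refers into const match tables. A hypothetical driver fragment showing the corrected usage:

    static int my_probe(struct platform_device *pdev)
    {
        /* must be const now - the data lives in a const match table */
        const struct my_chip_cfg *cfg = device_get_match_data(&pdev->dev);

        if (!cfg)
            return -ENODEV;
        return 0;
    }
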
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index dc368b8ce215..11c86fbfeb98 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -4,7 +4,7 @@
*
* Distributed under the terms of the GNU GPL, version 2
*
- * Please see kernel/semaphore.c for documentation of these functions
+ * Please see kernel/locking/semaphore.c for documentation of these functions
*/
#ifndef __LINUX_SEMAPHORE_H
#define __LINUX_SEMAPHORE_H
diff --git a/include/sound/ac97/regs.h b/include/sound/ac97/regs.h
index 4bb86d379bd5..9a4fa0c3264a 100644
--- a/include/sound/ac97/regs.h
+++ b/include/sound/ac97/regs.h
@@ -31,7 +31,7 @@
#define AC97_HEADPHONE 0x04 /* Headphone Volume (optional) */
#define AC97_MASTER_MONO 0x06 /* Master Volume Mono (optional) */
#define AC97_MASTER_TONE 0x08 /* Master Tone (Bass & Treble) (optional) */
-#define AC97_PC_BEEP 0x0a /* PC Beep Volume (optinal) */
+#define AC97_PC_BEEP 0x0a /* PC Beep Volume (optional) */
#define AC97_PHONE 0x0c /* Phone Volume (optional) */
#define AC97_MIC 0x0e /* MIC Volume */
#define AC97_LINE 0x10 /* Line In Volume */
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index b8adf05c534e..7dd8f34c37df 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -368,7 +368,7 @@ TRACE_EVENT(xen_mmu_flush_tlb,
TP_printk("%s", "")
);
-TRACE_EVENT(xen_mmu_flush_tlb_single,
+TRACE_EVENT(xen_mmu_flush_tlb_one_user,
TP_PROTO(unsigned long addr),
TP_ARGS(addr),
TP_STRUCT__entry(
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 38ece035039e..d880296245c5 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -379,6 +379,14 @@ queue:
tail = encode_tail(smp_processor_id(), idx);
node += idx;
+
+ /*
+ * Ensure that we increment the head node->count before initialising
+ * the actual node. If the compiler is kind enough to reorder these
+ * stores, then an IRQ could overwrite our assignments.
+ */
+ barrier();
+
node->locked = 0;
node->next = NULL;
pv_init_node(node);
@@ -408,14 +416,15 @@ queue:
*/
if (old & _Q_TAIL_MASK) {
prev = decode_tail(old);
+
/*
- * The above xchg_tail() is also a load of @lock which
- * generates, through decode_tail(), a pointer. The address
- * dependency matches the RELEASE of xchg_tail() such that
- * the subsequent access to @prev happens after.
+ * We must ensure that the stores to @node are observed before
+ * the write to prev->next. The address dependency from
+ * xchg_tail is not sufficient to ensure this because the read
+ * component of xchg_tail is unordered with respect to the
+ * initialisation of @node.
*/
-
- WRITE_ONCE(prev->next, node);
+ smp_store_release(&prev->next, node);
pv_wait_node(node, prev);
arch_mcs_spin_lock_contended(&node->locked);
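
Both qspinlock changes address ordering around the per-CPU MCS node: barrier() stops the compiler sinking the node->count increment below the node initialisation (an IRQ taking a nested slot could otherwise clobber it), and smp_store_release() orders that initialisation before the node becomes reachable through prev->next. The shape of the pattern, condensed:

    node->count++;                    /* claim the slot... */
    barrier();                        /* ...strictly before initialising it */
    node->locked = 0;
    node->next = NULL;

    /* publish: all stores to @node must be visible before the link */
    smp_store_release(&prev->next, node);
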
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bf724c1952ea..e7c535eee0a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2601,19 +2601,31 @@ static inline void finish_task(struct task_struct *prev)
#endif
}
-static inline void finish_lock_switch(struct rq *rq)
+static inline void
+prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
{
+ /*
+ * Since the runqueue lock will be released by the next
+ * task (which is an invalid locking op but in the case
+ * of the scheduler it's an obvious special-case), we
+ * do an early lockdep release here:
+ */
+ rq_unpin_lock(rq, rf);
+ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
- rq->lock.owner = current;
+ rq->lock.owner = next;
#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
/*
* If we are tracking spinlock dependencies then we have to
* fix up the runqueue lock - which gets 'carried over' from
* prev into current:
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
raw_spin_unlock_irq(&rq->lock);
}
@@ -2844,14 +2856,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
- /*
- * Since the runqueue lock will be released by the next
- * task (which is an invalid locking op but in the case
- * of the scheduler it's an obvious special-case), so we
- * do an early lockdep release here:
- */
- rq_unpin_lock(rq, rf);
- spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+ prepare_lock_switch(rq, next, rf);
/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index dd062a1c8cf0..7936f548e071 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -19,8 +19,6 @@
#include "sched.h"
-#define SUGOV_KTHREAD_PRIORITY 50
-
struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int rate_limit_us;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9bb0e0c412ec..9df09782025c 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1153,6 +1153,7 @@ static void update_curr_dl(struct rq *rq)
struct sched_dl_entity *dl_se = &curr->dl;
u64 delta_exec, scaled_delta_exec;
int cpu = cpu_of(rq);
+ u64 now;
if (!dl_task(curr) || !on_dl_rq(dl_se))
return;
@@ -1165,7 +1166,8 @@ static void update_curr_dl(struct rq *rq)
* natural solution, but the full ramifications of this
* approach need further study.
*/
- delta_exec = rq_clock_task(rq) - curr->se.exec_start;
+ now = rq_clock_task(rq);
+ delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0)) {
if (unlikely(dl_se->dl_yielded))
goto throttle;
@@ -1178,7 +1180,7 @@ static void update_curr_dl(struct rq *rq)
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
- curr->se.exec_start = rq_clock_task(rq);
+ curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 663b2355a3aa..aad49451584e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -950,12 +950,13 @@ static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_rt_entity *rt_se = &curr->rt;
- u64 now = rq_clock_task(rq);
u64 delta_exec;
+ u64 now;
if (curr->sched_class != &rt_sched_class)
return;
+ now = rq_clock_task(rq);
delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
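
Both update_curr_dl() and update_curr_rt() now sample rq_clock_task() exactly once. Reading it twice allowed the clock to advance between the delta computation and the exec_start update, losing or double-counting runtime across accounting periods; rt.c additionally avoids the read on the early-return path. The pattern:

    u64 now = rq_clock_task(rq);
    u64 delta_exec = now - curr->se.exec_start;
    /* ... account delta_exec ... */
    curr->se.exec_start = now;   /* next period starts where this one ended */
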
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index 40b1f92f2214..c9e8e21cb334 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -84,6 +84,10 @@ again:
return page_address(page);
}
+/*
+ * NOTE: this function must never look at the dma_addr argument, because we want
+ * to be able to use it as a helper for iommu implementations as well.
+ */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
@@ -152,5 +156,6 @@ const struct dma_map_ops dma_direct_ops = {
.map_sg = dma_direct_map_sg,
.dma_supported = dma_direct_supported,
.mapping_error = dma_direct_mapping_error,
+ .is_phys = 1,
};
EXPORT_SYMBOL(dma_direct_ops);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4b80ccee4535..8291b75f42c8 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1139,8 +1139,6 @@ int memory_failure(unsigned long pfn, int flags)
return 0;
}
- arch_unmap_kpfn(pfn);
-
orig_head = hpage = compound_head(p);
num_poisoned_pages_inc();
diff --git a/mm/memory.c b/mm/memory.c
index dd8de96f5547..5fcfc24904d1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -80,7 +80,7 @@
#include "internal.h"
-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index f3a4efcf1456..3aa5a93ad107 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -160,7 +160,8 @@ static void req_done(struct virtqueue *vq)
spin_unlock_irqrestore(&chan->lock, flags);
/* Wakeup if anyone waiting for VirtIO ring space. */
wake_up(chan->vc_wq);
- p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
+ if (len)
+ p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
}
}
diff --git a/sound/ac97/Kconfig b/sound/ac97/Kconfig
index f8a64e15e5bf..baa5f8ef89d2 100644
--- a/sound/ac97/Kconfig
+++ b/sound/ac97/Kconfig
@@ -5,7 +5,6 @@
config AC97_BUS_NEW
tristate
- select AC97
help
This is the new AC97 bus type, successor of AC97_BUS. The ported
drivers which benefit from the AC97 automatic probing should "select"
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 60db32785f62..04d4db44fae5 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1003,7 +1003,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
{
struct snd_seq_client *client = file->private_data;
int written = 0, len;
- int err = -EINVAL;
+ int err;
struct snd_seq_event event;
if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1018,11 +1018,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
/* allocate the pool now if the pool is not allocated yet */
if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
- if (snd_seq_pool_init(client->pool) < 0)
+ mutex_lock(&client->ioctl_mutex);
+ err = snd_seq_pool_init(client->pool);
+ mutex_unlock(&client->ioctl_mutex);
+ if (err < 0)
return -ENOMEM;
}
/* only process whole events */
+ err = -EINVAL;
while (count >= sizeof(struct snd_seq_event)) {
/* Read in the event header from the user */
len = sizeof(event);
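
The write path now takes ioctl_mutex around the lazy pool initialisation, serialising it against concurrent ioctls that also create or resize the pool. A condensed sketch of the guarded lazy-init step, with the pool-specific details elided:

    mutex_lock(&client->ioctl_mutex);
    err = snd_seq_pool_init(client->pool);   /* racing callers serialised */
    mutex_unlock(&client->ioctl_mutex);
    if (err < 0)
        return -ENOMEM;
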
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 23475888192b..ce28f7ce64e6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3465,6 +3465,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
}
+static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
+ unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
+
+ if (cfg_headphone && cfg_headset_mic == 0x411111f0)
+ snd_hda_codec_set_pincfg(codec, 0x19,
+ (cfg_headphone & ~AC_DEFCFG_DEVICE) |
+ (AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
+}
+
static void alc269_fixup_hweq(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -4972,6 +4985,28 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
}
}
+static void alc_fixup_tpt470_dock(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x17, 0x21211010 }, /* dock headphone */
+ { 0x19, 0x21a11010 }, /* dock mic */
+ { }
+ };
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+ /* Enable DOCK device */
+ snd_hda_codec_write(codec, 0x17, 0,
+ AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
+ /* Enable DOCK device */
+ snd_hda_codec_write(codec, 0x19, 0,
+ AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ }
+}
+
static void alc_shutup_dell_xps13(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -5351,6 +5386,7 @@ enum {
ALC269_FIXUP_LIFEBOOK_EXTMIC,
ALC269_FIXUP_LIFEBOOK_HP_PIN,
ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
+ ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
ALC269_FIXUP_AMIC,
ALC269_FIXUP_DMIC,
ALC269VB_FIXUP_AMIC,
@@ -5446,6 +5482,7 @@ enum {
ALC700_FIXUP_INTEL_REFERENCE,
ALC274_FIXUP_DELL_BIND_DACS,
ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ ALC298_FIXUP_TPT470_DOCK,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5556,6 +5593,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
},
+ [ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_pincfg_U7x7_headset_mic,
+ },
[ALC269_FIXUP_AMIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -6271,6 +6312,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC274_FIXUP_DELL_BIND_DACS
},
+ [ALC298_FIXUP_TPT470_DOCK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_tpt470_dock,
+ .chained = true,
+ .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6321,6 +6368,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+ SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6422,6 +6471,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
@@ -6450,8 +6500,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x222d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x222e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
@@ -6472,7 +6530,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
+ SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
@@ -6735,6 +6798,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x14, 0x90170110},
{0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60130},
+ {0x14, 0x90170110},
+ {0x14, 0x01011020},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC256_STANDARD_PINS),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
{0x14, 0x90170110},
@@ -6803,6 +6871,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60120},
{0x14, 0x90170110},
{0x21, 0x0321101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0xb7a60130},
+ {0x14, 0x90170110},
+ {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
ALC290_STANDARD_PINS,
{0x15, 0x04211040},
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 9afb8ab524c7..06b22624ab7a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -347,17 +347,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
int validx, int *value_ret)
{
struct snd_usb_audio *chip = cval->head.mixer->chip;
- unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
+ /* enough space for one range */
+ unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
unsigned char *val;
- int idx = 0, ret, size;
+ int idx = 0, ret, val_size, size;
__u8 bRequest;
+ val_size = uac2_ctl_value_size(cval->val_type);
+
if (request == UAC_GET_CUR) {
bRequest = UAC2_CS_CUR;
- size = uac2_ctl_value_size(cval->val_type);
+ size = val_size;
} else {
bRequest = UAC2_CS_RANGE;
- size = sizeof(buf);
+ size = sizeof(__u16) + 3 * val_size;
}
memset(buf, 0, sizeof(buf));
@@ -390,16 +393,17 @@ error:
val = buf + sizeof(__u16);
break;
case UAC_GET_MAX:
- val = buf + sizeof(__u16) * 2;
+ val = buf + sizeof(__u16) + val_size;
break;
case UAC_GET_RES:
- val = buf + sizeof(__u16) * 3;
+ val = buf + sizeof(__u16) + val_size * 2;
break;
default:
return -EINVAL;
}
- *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
+ *value_ret = convert_signed_value(cval,
+ snd_usb_combine_bytes(val, val_size));
return 0;
}
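
The mixer fix replaces hard-coded __u16-sized offsets with the control's real value size when parsing a UAC2 RANGE response. The response layout is a __le16 sub-range count followed by MIN, MAX and RES fields, each val_size bytes wide, so for the first sub-range:

    /* offsets into buf for the first (and only requested) sub-range */
    val_min = buf + sizeof(__u16);                  /* MIN */
    val_max = buf + sizeof(__u16) + val_size;       /* MAX */
    val_res = buf + sizeof(__u16) + 2 * val_size;   /* RES */
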
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index b9c9a19f9588..3cbfae6604f9 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -357,6 +357,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
alts = &iface->altsetting[1];
goto add_sync_ep;
+ case USB_ID(0x1397, 0x0002):
+ ep = 0x81;
+ iface = usb_ifnum_to_if(dev, 1);
+
+ if (!iface || iface->num_altsetting == 0)
+ return -EINVAL;
+
+ alts = &iface->altsetting[1];
+ goto add_sync_ep;
}
if (attr == USB_ENDPOINT_SYNC_ASYNC &&
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index a66ef5777887..ea8f3de92fa4 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1363,8 +1363,11 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
- /* Amanero Combo384 USB interface with native DSD support */
- case USB_ID(0x16d0, 0x071a):
+ /* Amanero Combo384 USB based DACs with native DSD support */
+ case USB_ID(0x16d0, 0x071a): /* Amanero - Combo384 */
+ case USB_ID(0x2ab6, 0x0004): /* T+A DAC8DSD-V2.0, MP1000E-V2.0, MP2000R-V2.0, MP2500R-V2.0, MP3100HV-V2.0 */
+ case USB_ID(0x2ab6, 0x0005): /* T+A USB HD Audio 1 */
+ case USB_ID(0x2ab6, 0x0006): /* T+A USB HD Audio 2 */
if (fp->altsetting == 2) {
switch (le16_to_cpu(chip->dev->descriptor.bcdDevice)) {
case 0x199:
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index b00b1896547e..a8cb69a26576 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -852,8 +852,14 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
* This is a fairly uncommon pattern which is new for GCC 6. As of this
* writing, there are 11 occurrences of it in the allmodconfig kernel.
*
+ * As of GCC 7 there are quite a few more of these and the 'in between' code
+ * is significant. Especially with KASAN enabled, some of the code between
+ * the mov and jmpq uses .rodata itself, which can confuse things.
+ *
* TODO: Once we have DWARF CFI and smarter instruction decoding logic,
* ensure the same register is used in the mov and jump instructions.
+ *
+ * NOTE: RETPOLINE made it harder still to decode dynamic jumps.
*/
static struct rela *find_switch_table(struct objtool_file *file,
struct symbol *func,
@@ -875,12 +881,25 @@ static struct rela *find_switch_table(struct objtool_file *file,
text_rela->addend + 4);
if (!rodata_rela)
return NULL;
+
file->ignore_unreachables = true;
return rodata_rela;
}
/* case 3 */
- func_for_each_insn_continue_reverse(file, func, insn) {
+ /*
+ * Backward search using the @first_jump_src links; these help avoid
+ * much of the 'in between' code, which could otherwise confuse the
+ * search.
+ */
+ for (insn = list_prev_entry(insn, list);
+
+ &insn->list != &file->insn_list &&
+ insn->sec == func->sec &&
+ insn->offset >= func->offset;
+
+ insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
+
if (insn->type == INSN_JUMP_DYNAMIC)
break;
@@ -910,14 +929,32 @@ static struct rela *find_switch_table(struct objtool_file *file,
return NULL;
}
+
static int add_func_switch_tables(struct objtool_file *file,
struct symbol *func)
{
- struct instruction *insn, *prev_jump = NULL;
+ struct instruction *insn, *last = NULL, *prev_jump = NULL;
struct rela *rela, *prev_rela = NULL;
int ret;
func_for_each_insn(file, func, insn) {
+ if (!last)
+ last = insn;
+
+ /*
+ * Store back-pointers for unconditional forward jumps such
+ * that find_switch_table() can back-track using those and
+ * avoid some potentially confusing code.
+ */
+ if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
+ insn->offset > last->offset &&
+ insn->jump_dest->offset > insn->offset &&
+ !insn->jump_dest->first_jump_src) {
+
+ insn->jump_dest->first_jump_src = insn;
+ last = insn->jump_dest;
+ }
+
if (insn->type != INSN_JUMP_DYNAMIC)
continue;
@@ -1899,13 +1936,19 @@ static bool ignore_unreachable_insn(struct instruction *insn)
if (is_kasan_insn(insn) || is_ubsan_insn(insn))
return true;
- if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest) {
- insn = insn->jump_dest;
- continue;
+ if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+ if (insn->jump_dest &&
+ insn->jump_dest->func == insn->func) {
+ insn = insn->jump_dest;
+ continue;
+ }
+
+ break;
}
if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
break;
+
insn = list_next_entry(insn, list);
}
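
The objtool change has two cooperating halves: a forward pass records, for each jump destination, the first unconditional jump that reaches it (first_jump_src), and the backward search in find_switch_table() then follows those links to hop over the 'in between' code that GCC 7 (and KASAN) generate. Condensed:

    /* forward pass: remember who first jumps forward to @jump_dest */
    if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
        !insn->jump_dest->first_jump_src)
        insn->jump_dest->first_jump_src = insn;

    /* backward walk: prefer the recorded source over the previous insn */
    insn = insn->first_jump_src ?: list_prev_entry(insn, list);
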
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index dbadb304a410..23a1d065cae1 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -47,6 +47,7 @@ struct instruction {
bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
struct symbol *call_dest;
struct instruction *jump_dest;
+ struct instruction *first_jump_src;
struct list_head alts;
struct symbol *func;
struct stack_op stack_op;
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
index 39fd362415cf..0f2698f9fd6d 100644
--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -57,7 +57,7 @@ volatile int gotsig;
void sighandler(int sig, siginfo_t *info, void *ctx)
{
- struct ucontext *ucp = ctx;
+ ucontext_t *ucp = ctx;
if (!testing) {
signal(sig, SIG_DFL);
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 10ca46df1449..d744991c0f4f 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -5,16 +5,26 @@ include ../lib.mk
.PHONY: all all_32 all_64 warn_32bit_failure clean
-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
- check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \
+UNAME_M := $(shell uname -m)
+CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
+CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
+
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
+ check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \
protection_keys test_vdso test_vsyscall
TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
+# Some selftests require 32bit support enabled also on 64bit systems
+TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall
-TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
+TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) $(TARGETS_C_32BIT_NEEDED)
TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
+ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),11)
+TARGETS_C_64BIT_ALL += $(TARGETS_C_32BIT_NEEDED)
+endif
+
BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
@@ -23,10 +33,6 @@ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
-UNAME_M := $(shell uname -m)
-CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
-CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
-
define gen-target-rule-32
$(1) $(1)_32: $(OUTPUT)/$(1)_32
.PHONY: $(1) $(1)_32
@@ -40,12 +46,14 @@ endef
ifeq ($(CAN_BUILD_I386),1)
all: all_32
TEST_PROGS += $(BINARIES_32)
+EXTRA_CFLAGS += -DCAN_BUILD_32
$(foreach t,$(TARGETS_C_32BIT_ALL),$(eval $(call gen-target-rule-32,$(t))))
endif
ifeq ($(CAN_BUILD_X86_64),1)
all: all_64
TEST_PROGS += $(BINARIES_64)
+EXTRA_CFLAGS += -DCAN_BUILD_64
$(foreach t,$(TARGETS_C_64BIT_ALL),$(eval $(call gen-target-rule-64,$(t))))
endif
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
index ec0f6b45ce8b..9c0325e1ea68 100644
--- a/tools/testing/selftests/x86/mpx-mini-test.c
+++ b/tools/testing/selftests/x86/mpx-mini-test.c
@@ -315,11 +315,39 @@ static inline void *__si_bounds_upper(siginfo_t *si)
return si->si_upper;
}
#else
+
+/*
+ * This deals with the old version of _sigfault in some distros:
+ *
+ * old _sigfault:
+ *     struct {
+ *         void *si_addr;
+ *     } _sigfault;
+ *
+ * new _sigfault:
+ *     struct {
+ *         void __user *_addr;
+ *         int _trapno;
+ *         short _addr_lsb;
+ *         union {
+ *             struct {
+ *                 void __user *_lower;
+ *                 void __user *_upper;
+ *             } _addr_bnd;
+ *             __u32 _pkey;
+ *         };
+ *     } _sigfault;
+ */
+
static inline void **__si_bounds_hack(siginfo_t *si)
{
void *sigfault = &si->_sifields._sigfault;
void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
- void **__si_lower = end_sigfault;
+ int *trapno = (int*)end_sigfault;
+ /* skip _trapno and _addr_lsb */
+ void **__si_lower = (void**)(trapno + 2);
return __si_lower;
}
@@ -331,7 +359,7 @@ static inline void *__si_bounds_lower(siginfo_t *si)
static inline void *__si_bounds_upper(siginfo_t *si)
{
- return (*__si_bounds_hack(si)) + sizeof(void *);
+ return *(__si_bounds_hack(si) + 1);
}
#endif
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index bc1b0735bb50..f15aa5a76fe3 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -393,34 +393,6 @@ pid_t fork_lazy_child(void)
return forkret;
}
-void davecmp(void *_a, void *_b, int len)
-{
- int i;
- unsigned long *a = _a;
- unsigned long *b = _b;
-
- for (i = 0; i < len / sizeof(*a); i++) {
- if (a[i] == b[i])
- continue;
-
- dprintf3("[%3d]: a: %016lx b: %016lx\n", i, a[i], b[i]);
- }
-}
-
-void dumpit(char *f)
-{
- int fd = open(f, O_RDONLY);
- char buf[100];
- int nr_read;
-
- dprintf2("maps fd: %d\n", fd);
- do {
- nr_read = read(fd, &buf[0], sizeof(buf));
- write(1, buf, nr_read);
- } while (nr_read > 0);
- close(fd);
-}
-
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index a48da95c18fd..ddfdd635de16 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -119,7 +119,9 @@ static void check_result(void)
int main()
{
+#ifdef CAN_BUILD_32
int tmp;
+#endif
sethandler(SIGTRAP, sigtrap, 0);
@@ -139,12 +141,13 @@ int main()
: : "c" (post_nop) : "r11");
check_result();
#endif
-
+#ifdef CAN_BUILD_32
printf("[RUN]\tSet TF and check int80\n");
set_eflags(get_eflags() | X86_EFLAGS_TF);
asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
: INT80_CLOBBERS);
check_result();
+#endif
/*
* This test is particularly interesting if fast syscalls use
diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
index bf0d687c7db7..64f11c8d9b76 100644
--- a/tools/testing/selftests/x86/test_mremap_vdso.c
+++ b/tools/testing/selftests/x86/test_mremap_vdso.c
@@ -90,8 +90,12 @@ int main(int argc, char **argv, char **envp)
vdso_size += PAGE_SIZE;
}
+#ifdef __i386__
/* Glibc is likely to explode now - exit with raw syscall */
asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
+#else /* __x86_64__ */
+ syscall(SYS_exit, ret);
+#endif
} else {
int status;
diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
index 29973cde06d3..235259011704 100644
--- a/tools/testing/selftests/x86/test_vdso.c
+++ b/tools/testing/selftests/x86/test_vdso.c
@@ -26,20 +26,59 @@
# endif
#endif
+/* max length of lines in /proc/self/maps - anything longer is skipped here */
+#define MAPS_LINE_LEN 128
+
int nerrs = 0;
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+
+getcpu_t vgetcpu;
+getcpu_t vdso_getcpu;
+
+static void *vsyscall_getcpu(void)
+{
#ifdef __x86_64__
-# define VSYS(x) (x)
+ FILE *maps;
+ char line[MAPS_LINE_LEN];
+ bool found = false;
+
+ maps = fopen("/proc/self/maps", "r");
+ if (!maps) /* might still be present, but ignore it here, as we test vDSO not vsyscall */
+ return NULL;
+
+ while (fgets(line, MAPS_LINE_LEN, maps)) {
+ char r, x;
+ void *start, *end;
+ char name[MAPS_LINE_LEN];
+
+ /* sscanf() is safe here as sizeof(name) >= sizeof(line) */
+ if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
+ &start, &end, &r, &x, name) != 5)
+ continue;
+
+ if (strcmp(name, "[vsyscall]"))
+ continue;
+
+ /* assume entries are OK, as we test vDSO here not vsyscall */
+ found = true;
+ break;
+ }
+
+ fclose(maps);
+
+ if (!found) {
+ printf("Warning: failed to find vsyscall getcpu\n");
+ return NULL;
+ }
+ return (void *) (0xffffffffff600800);
#else
-# define VSYS(x) 0
+ return NULL;
#endif
+}
-typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
-
-const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
-getcpu_t vdso_getcpu;
-void fill_function_pointers()
+static void fill_function_pointers()
{
void *vdso = dlopen("linux-vdso.so.1",
RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
@@ -54,6 +93,8 @@ void fill_function_pointers()
vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
if (!vdso_getcpu)
printf("Warning: failed to find getcpu in vDSO\n");
+
+ vgetcpu = (getcpu_t) vsyscall_getcpu();
}
static long sys_getcpu(unsigned * cpu, unsigned * node,
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index 7a744fa7b786..be81621446f0 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -33,6 +33,9 @@
# endif
#endif
+/* max length of lines in /proc/self/maps - anything longer is skipped here */
+#define MAPS_LINE_LEN 128
+
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
@@ -98,7 +101,7 @@ static int init_vsys(void)
#ifdef __x86_64__
int nerrs = 0;
FILE *maps;
- char line[128];
+ char line[MAPS_LINE_LEN];
bool found = false;
maps = fopen("/proc/self/maps", "r");
@@ -108,10 +111,12 @@ static int init_vsys(void)
return 0;
}
- while (fgets(line, sizeof(line), maps)) {
+ while (fgets(line, MAPS_LINE_LEN, maps)) {
char r, x;
void *start, *end;
- char name[128];
+ char name[MAPS_LINE_LEN];
+
+ /* sscanf() is safe here as sizeof(name) >= sizeof(line) */
if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
&start, &end, &r, &x, name) != 5)
continue;