-rw-r--r--Documentation/admin-guide/kernel-parameters.txt3
-rw-r--r--Documentation/filesystems/path-lookup.md6
-rw-r--r--Documentation/memory-barriers.txt12
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt12
-rw-r--r--arch/alpha/include/asm/atomic.h13
-rw-r--r--arch/alpha/include/asm/rwsem.h21
-rw-r--r--arch/alpha/include/asm/spinlock.h14
-rw-r--r--arch/arc/include/asm/spinlock.h11
-rw-r--r--arch/arc/kernel/smp.c2
-rw-r--r--arch/arm/include/asm/ptrace.h3
-rw-r--r--arch/arm/include/asm/spinlock.h17
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra20.c2
-rw-r--r--arch/arm/vdso/vgettimeofday.c2
-rw-r--r--arch/arm64/Kconfig17
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/spinlock.h173
-rw-r--r--arch/arm64/include/asm/spinlock_types.h6
-rw-r--r--arch/blackfin/include/asm/spinlock.h20
-rw-r--r--arch/hexagon/include/asm/spinlock.h15
-rw-r--r--arch/ia64/include/asm/rwsem.h25
-rw-r--r--arch/ia64/include/asm/spinlock.h20
-rw-r--r--arch/m32r/include/asm/spinlock.h20
-rw-r--r--arch/metag/include/asm/spinlock.h9
-rw-r--r--arch/metag/include/asm/spinlock_lnkget.h37
-rw-r--r--arch/metag/include/asm/spinlock_lock1.h20
-rw-r--r--arch/mips/include/asm/spinlock.h7
-rw-r--r--arch/mips/include/asm/vdso.h2
-rw-r--r--arch/mips/kernel/pm-cps.c2
-rw-r--r--arch/mn10300/include/asm/spinlock.h16
-rw-r--r--arch/mn10300/kernel/mn10300-serial.c4
-rw-r--r--arch/parisc/include/asm/atomic.h2
-rw-r--r--arch/parisc/include/asm/spinlock.h22
-rw-r--r--arch/powerpc/include/asm/spinlock.h7
-rw-r--r--arch/powerpc/kernel/rtas.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-msglog.c2
-rw-r--r--arch/s390/include/asm/spinlock.h16
-rw-r--r--arch/s390/lib/spinlock.c2
-rw-r--r--arch/sh/include/asm/spinlock-cas.h20
-rw-r--r--arch/sh/include/asm/spinlock-llsc.h20
-rw-r--r--arch/sparc/include/asm/atomic_32.h2
-rw-r--r--arch/sparc/include/asm/ptrace.h1
-rw-r--r--arch/sparc/include/asm/spinlock_32.h11
-rw-r--r--arch/sparc/include/asm/spinlock_64.h7
-rw-r--r--arch/tile/gxio/dma_queue.c4
-rw-r--r--arch/tile/include/asm/spinlock_32.h22
-rw-r--r--arch/tile/include/asm/spinlock_64.h24
-rw-r--r--arch/tile/include/gxio/dma_queue.h2
-rw-r--r--arch/tile/kernel/ptrace.c2
-rw-r--r--arch/um/include/shared/init.h2
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/entry/common.c6
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c2
-rw-r--r--arch/x86/events/core.c4
-rw-r--r--arch/x86/include/asm/barrier.h12
-rw-r--r--arch/x86/include/asm/mmu_context.h4
-rw-r--r--arch/x86/include/asm/qspinlock.h11
-rw-r--r--arch/x86/include/asm/refcount.h2
-rw-r--r--arch/x86/include/asm/rwsem.h84
-rw-r--r--arch/x86/include/asm/spinlock.h7
-rw-r--r--arch/x86/include/asm/vgtod.h2
-rw-r--r--arch/x86/kernel/espfix_64.c6
-rw-r--r--arch/x86/kernel/ldt.c2
-rw-r--r--arch/x86/kernel/nmi.c2
-rw-r--r--arch/x86/kernel/paravirt.c14
-rw-r--r--arch/x86/kernel/smpboot.c5
-rw-r--r--arch/x86/kvm/mmu.c4
-rw-r--r--arch/x86/kvm/page_track.c2
-rw-r--r--arch/x86/lib/rwsem.S12
-rw-r--r--arch/x86/mm/extable.c7
-rw-r--r--arch/x86/xen/p2m.c2
-rw-r--r--arch/x86/xen/spinlock.c6
-rw-r--r--arch/xtensa/include/asm/spinlock.h7
-rw-r--r--arch/xtensa/platforms/xtfpga/lcd.c14
-rw-r--r--block/bio.c19
-rw-r--r--block/blk-wbt.c2
-rw-r--r--block/genhd.c10
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/power/runtime.c4
-rw-r--r--drivers/char/random.c4
-rw-r--r--drivers/clocksource/bcm2835_timer.c2
-rw-r--r--drivers/crypto/caam/jr.c4
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c2
-rw-r--r--drivers/edac/altera_edac.c10
-rw-r--r--drivers/firewire/ohci.c10
-rw-r--r--drivers/firmware/tegra/ivc.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c4
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c6
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c8
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h2
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c4
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c6
-rw-r--r--drivers/input/misc/regulator-haptic.c2
-rw-r--r--drivers/md/dm-bufio.c10
-rw-r--r--drivers/md/dm-integrity.c15
-rw-r--r--drivers/md/dm-kcopyd.c4
-rw-r--r--drivers/md/dm-mpath.c20
-rw-r--r--drivers/md/dm-stats.c36
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c8
-rw-r--r--drivers/misc/mic/scif/scif_rb.c8
-rw-r--r--drivers/misc/mic/scif/scif_rma_list.c2
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c12
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c10
-rw-r--r--drivers/net/ethernet/sfc/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h6
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c6
-rw-r--r--drivers/net/ethernet/sfc/farch.c8
-rw-r--r--drivers/net/ethernet/sfc/nic.h6
-rw-r--r--drivers/net/ethernet/sfc/ptp.c10
-rw-r--r--drivers/net/ethernet/sfc/tx.c6
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/tap.c2
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/core/devio.c2
-rw-r--r--drivers/usb/core/sysfs.c4
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c4
-rw-r--r--drivers/usb/host/ohci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-hcd.h4
-rw-r--r--drivers/vfio/vfio.c2
-rw-r--r--drivers/vhost/scsi.c2
-rw-r--r--fs/aio.c2
-rw-r--r--fs/buffer.c3
-rw-r--r--fs/crypto/keyinfo.c2
-rw-r--r--fs/dcache.c22
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/exec.c2
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/fs_pin.c4
-rw-r--r--fs/fuse/dev.c2
-rw-r--r--fs/inode.c2
-rw-r--r--fs/namei.c4
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/ncpfs/dir.c9
-rw-r--r--fs/nfs/dir.c8
-rw-r--r--fs/overlayfs/ovl_entry.h2
-rw-r--r--fs/overlayfs/readdir.c2
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc_namespace.c2
-rw-r--r--fs/readdir.c11
-rw-r--r--fs/splice.c2
-rw-r--r--fs/userfaultfd.c8
-rw-r--r--fs/xfs/xfs_log_priv.h4
-rw-r--r--include/asm-generic/atomic-long.h3
-rw-r--r--include/asm-generic/qrwlock.h57
-rw-r--r--include/asm-generic/qrwlock_types.h15
-rw-r--r--include/asm-generic/qspinlock.h1
-rw-r--r--include/asm-generic/rwsem.h10
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/linux/atomic.h4
-rw-r--r--include/linux/average.h10
-rw-r--r--include/linux/bitops.h4
-rw-r--r--include/linux/compiler-clang.h2
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/compiler-intel.h2
-rw-r--r--include/linux/compiler.h286
-rw-r--r--include/linux/compiler_types.h274
-rw-r--r--include/linux/completion.h18
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/dynamic_queue_limits.h2
-rw-r--r--include/linux/genetlink.h2
-rw-r--r--include/linux/genhd.h22
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/if_team.h2
-rw-r--r--include/linux/jump_label.h14
-rw-r--r--include/linux/jump_label_ratelimit.h6
-rw-r--r--include/linux/linkage.h2
-rw-r--r--include/linux/llist.h2
-rw-r--r--include/linux/lockdep.h20
-rw-r--r--include/linux/netfilter/nfnetlink.h2
-rw-r--r--include/linux/pm_runtime.h2
-rw-r--r--include/linux/rculist.h4
-rw-r--r--include/linux/rcupdate.h4
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/rwlock.h12
-rw-r--r--include/linux/rwlock_api_smp.h2
-rw-r--r--include/linux/rwsem.h1
-rw-r--r--include/linux/spinlock.h15
-rw-r--r--include/linux/spinlock_up.h11
-rw-r--r--include/linux/workqueue.h4
-rw-r--r--include/net/ip_vs.h6
-rw-r--r--include/net/netfilter/nf_tables.h4
-rw-r--r--include/uapi/linux/stddef.h2
-rw-r--r--kernel/acct.c4
-rw-r--r--kernel/events/core.c22
-rw-r--r--kernel/events/ring_buffer.c2
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/irq/timings.c2
-rw-r--r--kernel/irq_work.c2
-rw-r--r--kernel/jump_label.c14
-rw-r--r--kernel/locking/lockdep.c23
-rw-r--r--kernel/locking/qrwlock.c86
-rw-r--r--kernel/locking/qspinlock_paravirt.h47
-rw-r--r--kernel/locking/rwsem.c16
-rw-r--r--kernel/locking/spinlock.c9
-rw-r--r--kernel/rcu/tree.c16
-rw-r--r--kernel/rcu/tree_plugin.h10
-rw-r--r--kernel/sched/clock.c2
-rw-r--r--kernel/sched/cputime.c3
-rw-r--r--kernel/seccomp.c2
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/softirq.c10
-rw-r--r--kernel/task_work.c2
-rw-r--r--kernel/time/hrtimer.c4
-rw-r--r--kernel/time/posix-cpu-timers.c6
-rw-r--r--kernel/time/tick-sched.c5
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/trace.h2
-rw-r--r--kernel/trace/trace_stack.c2
-rw-r--r--kernel/user_namespace.c2
-rw-r--r--kernel/workqueue.c25
-rw-r--r--lib/Kconfig.debug19
-rw-r--r--lib/assoc_array.c20
-rw-r--r--lib/dynamic_queue_limits.c2
-rw-r--r--lib/llist.c2
-rw-r--r--lib/vsprintf.c4
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/memory.c6
-rw-r--r--mm/slab.h2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/llc/llc_input.c4
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c4
-rw-r--r--net/netlabel/netlabel_calipso.c2
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--samples/mic/mpssd/mpssd.c6
-rwxr-xr-xscripts/headers_install.sh2
-rw-r--r--security/apparmor/include/lib.h11
-rw-r--r--security/apparmor/label.c8
-rw-r--r--sound/firewire/amdtp-am824.c6
-rw-r--r--sound/firewire/amdtp-stream.c23
-rw-r--r--sound/firewire/amdtp-stream.h2
-rw-r--r--sound/firewire/digi00x/amdtp-dot.c6
-rw-r--r--sound/firewire/fireface/amdtp-ff.c4
-rw-r--r--sound/firewire/fireface/ff-midi.c10
-rw-r--r--sound/firewire/fireface/ff-transaction.c8
-rw-r--r--sound/firewire/isight.c18
-rw-r--r--sound/firewire/motu/amdtp-motu.c4
-rw-r--r--sound/firewire/oxfw/oxfw-scs1x.c12
-rw-r--r--sound/firewire/tascam/amdtp-tascam.c4
-rw-r--r--sound/firewire/tascam/tascam-transaction.c6
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c6
-rw-r--r--sound/usb/bcd2000/bcd2000.c4
-rw-r--r--tools/arch/x86/include/asm/atomic.h2
-rw-r--r--tools/include/asm-generic/atomic-gcc.h2
-rw-r--r--tools/perf/util/auxtrace.h4
-rw-r--r--tools/perf/util/session.h2
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr.h2
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_default_test.c2
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h5
-rw-r--r--tools/virtio/ringtest/main.h4
-rw-r--r--virt/kvm/kvm_main.c2
307 files changed, 1252 insertions, 1672 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 4af4dd46cbd9..116e798b61e6 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -709,6 +709,9 @@
It will be ignored when crashkernel=X,high is not used
or memory reserved is below 4G.
+ crossrelease_fullstack
+ [KNL] Allow to record full stack trace in cross-release
+
cryptomgr.notests
[KNL] Disable crypto self-tests
diff --git a/Documentation/filesystems/path-lookup.md b/Documentation/filesystems/path-lookup.md
index 1b39e084a2b2..1933ef734e63 100644
--- a/Documentation/filesystems/path-lookup.md
+++ b/Documentation/filesystems/path-lookup.md
@@ -826,9 +826,9 @@ If the filesystem may need to revalidate dcache entries, then
*is* passed the dentry but does not have access to the `inode` or the
`seq` number from the `nameidata`, so it needs to be extra careful
when accessing fields in the dentry. This "extra care" typically
-involves using `ACCESS_ONCE()` or the newer [`READ_ONCE()`] to access
-fields, and verifying the result is not NULL before using it. This
-pattern can be see in `nfs_lookup_revalidate()`.
+involves using [`READ_ONCE()`] to access fields, and verifying the
+result is not NULL before using it. This pattern can be seen in
+`nfs_lookup_revalidate()`.
A pair of patterns
------------------
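The pattern the new text describes looks roughly like the sketch below when spelled out in a d_revalidate implementation. This is an illustrative sketch only, not code from this patch; a real implementation such as nfs_lookup_revalidate() performs considerably more checking.

	static int example_d_revalidate(struct dentry *dentry, unsigned int flags)
	{
		struct inode *inode;

		if (flags & LOOKUP_RCU) {
			/*
			 * In RCU-walk mode the dentry can be concurrently torn
			 * down, so snapshot the field once with READ_ONCE() and
			 * verify it is not NULL before dereferencing it.
			 */
			inode = READ_ONCE(dentry->d_inode);
			if (!inode)
				return -ECHILD;	/* retry in REF-walk mode */
		} else {
			inode = d_inode(dentry);
		}

		/* ... validate the dentry against 'inode' ... */
		return 1;
	}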
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 519940ec767f..479ecec80593 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1880,18 +1880,6 @@ There are some more advanced barrier functions:
See Documentation/atomic_{t,bitops}.txt for more information.
- (*) lockless_dereference();
-
- This can be thought of as a pointer-fetch wrapper around the
- smp_read_barrier_depends() data-dependency barrier.
-
- This is also similar to rcu_dereference(), but in cases where
- object lifetime is handled by some mechanism other than RCU, for
- example, when the objects removed only when the system goes down.
- In addition, lockless_dereference() is used in some data structures
- that can be used both with and without RCU.
-
-
(*) dma_wmb();
(*) dma_rmb();
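For context, the helper whose description is deleted above was, from memory, roughly the following wrapper in include/linux/compiler.h (reproduced here only as background; it is not part of this hunk):

	#define lockless_dereference(p) \
	({ \
		typeof(p) _________p1 = READ_ONCE(p); \
		smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
		(_________p1); \
	})

With READ_ONCE() now supplying the dependency ordering itself on the architectures that need it (see the compiler.h and Alpha changes elsewhere in this diff), callers can use READ_ONCE() directly, and the separate helper along with this documentation becomes redundant.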
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index a7a813258013..ec3b46e27b7a 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -1858,18 +1858,6 @@ Mandatory 배리어들은 SMP 시스템에서도 UP 시스템에서도 SMP 효과
참고하세요.
- (*) lockless_dereference();
-
- 이 함수는 smp_read_barrier_depends() 데이터 의존성 배리어를 사용하는
- 포인터 읽어오기 래퍼(wrapper) 함수로 생각될 수 있습니다.
-
- 객체의 라이프타임이 RCU 외의 메커니즘으로 관리된다는 점을 제외하면
- rcu_dereference() 와도 유사한데, 예를 들면 객체가 시스템이 꺼질 때에만
- 제거되는 경우 등입니다. 또한, lockless_dereference() 은 RCU 와 함께
- 사용될 수도, RCU 없이 사용될 수도 있는 일부 데이터 구조에 사용되고
- 있습니다.
-
-
(*) dma_wmb();
(*) dma_rmb();
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 85867d3cea64..767bfdd42992 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,6 +14,15 @@
* than regular operations.
*/
+/*
+ * To ensure dependency ordering is preserved for the _relaxed and
+ * _release atomics, an smp_read_barrier_depends() is unconditionally
+ * inserted into the _relaxed variants, which are used to build the
+ * barriered versions. To avoid redundant back-to-back fences, we can
+ * define the _acquire and _fence versions explicitly.
+ */
+#define __atomic_op_acquire(op, args...) op##_relaxed(args)
+#define __atomic_op_fence __atomic_op_release
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -61,6 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -78,6 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -112,6 +123,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -129,6 +141,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
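The comment added at the top of this file leans on the way include/linux/atomic.h builds the ordered atomics out of the _relaxed ones. From memory, the generic wrappers look roughly like this (a sketch, not part of this patch):

	#define __atomic_op_acquire(op, args...)				\
	({									\
		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
		smp_mb__after_atomic();						\
		__ret;								\
	})

	#define __atomic_op_release(op, args...)				\
	({									\
		smp_mb__before_atomic();					\
		op##_relaxed(args);						\
	})

Because the Alpha _relaxed variants now end with smp_read_barrier_depends(), which expands to a full "mb" on Alpha, acquire ordering is already provided by the relaxed op itself; overriding __atomic_op_acquire() to be just op##_relaxed() avoids stacking a second fence on top of it, and the fully-ordered variant likewise only needs the leading barrier that the release wrapper already supplies.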
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 3925f06afd6b..cf8fc8f9a2ed 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -22,7 +22,7 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int ___down_read(struct rw_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
@@ -42,10 +42,24 @@ static inline void __down_read(struct rw_semaphore *sem)
:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
- if (unlikely(oldcount < 0))
+ return (oldcount < 0);
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(___down_read(sem)))
rwsem_down_read_failed(sem);
}
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (unlikely(___down_read(sem)))
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+
+ return 0;
+}
+
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
@@ -95,9 +109,10 @@ static inline void __down_write(struct rw_semaphore *sem)
static inline int __down_write_killable(struct rw_semaphore *sem)
{
- if (unlikely(___down_write(sem)))
+ if (unlikely(___down_write(sem))) {
if (IS_ERR(rwsem_down_write_failed_killable(sem)))
return -EINTR;
+ }
return 0;
}
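At the API level, splitting out ___down_read() is what lets the generic rwsem code offer down_read_killable() on this architecture. A caller uses it along these lines (illustrative only; mmap_sem is just a familiar example of an rw_semaphore):

	if (down_read_killable(&mm->mmap_sem))
		return -EINTR;

	/* ... read-side critical section ... */

	up_read(&mm->mmap_sem);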
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index aa4304afbea6..1221cbb86a6f 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -14,7 +14,6 @@
* We make no fairness assumptions. They have a cost.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -55,16 +54,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
/***********************************************************/
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
- return (lock->lock & 1) == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
- return lock->lock == 0;
-}
-
static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -171,7 +160,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
lock->lock = 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _ALPHA_SPINLOCK_H */
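The arch_*_lock_flags() and arch_*_relax() macros can be dropped from the per-architecture headers because the generic locking headers in this series grow fallbacks that apply whenever an architecture does not define its own. A sketch of the include/linux/spinlock.h and include/linux/rwlock.h additions (not a verbatim quote):

	#ifndef arch_spin_lock_flags
	#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
	#endif

	#ifndef arch_read_lock_flags
	#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
	#endif

	#ifndef arch_write_lock_flags
	#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
	#endif

Architectures that keep a genuinely IRQ-aware variant (ia64, mn10300, parisc, powerpc, s390 further down in this diff) now announce it with "#define arch_spin_lock_flags arch_spin_lock_flags" so the fallback stays out of their way, while the *_can_lock() helpers go away because nothing calls them once the generic read_can_lock()/write_can_lock() wrappers are removed.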
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 47efc8451b70..2ba04a7db621 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -14,7 +14,6 @@
#include <asm/barrier.h>
#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#ifdef CONFIG_ARC_HAS_LLSC
@@ -410,14 +409,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#endif
-#define arch_read_can_lock(x) ((x)->counter > 0)
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 6df9d94a9537..efe8b4200a67 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -250,7 +250,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
* and read back old value
*/
do {
- new = old = ACCESS_ONCE(*ipi_data_ptr);
+ new = old = READ_ONCE(*ipi_data_ptr);
new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);
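The change above is one instance of the conversion applied throughout this diff: ACCESS_ONCE() is retired in favour of READ_ONCE()/WRITE_ONCE(), which make the direction of the access explicit and also work on non-scalar types. The mechanical rule, shown here with a made-up "shared" structure:

	/* Before: ACCESS_ONCE() used for both the load and the store. */
	val = ACCESS_ONCE(shared->counter);
	ACCESS_ONCE(shared->flag) = 1;

	/* After: */
	val = READ_ONCE(shared->counter);
	WRITE_ONCE(shared->flag, 1);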
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index e9c9a117bd25..c7cdbb43ae7c 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -126,8 +126,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
/*
* kprobe-based event tracer support
*/
-#include <linux/stddef.h>
-#include <linux/types.h>
+#include <linux/compiler.h>
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))
extern int regs_query_register_offset(const char *name);
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 25cb465c8538..099c78fcf62d 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -53,8 +53,6 @@ static inline void dsb_sev(void)
* memory.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -74,7 +72,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
while (lockval.tickets.next != lockval.tickets.owner) {
wfe();
- lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+ lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
}
smp_mb();
@@ -194,9 +192,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
dsb_sev();
}
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
-
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -274,14 +269,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
}
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 76e4c83cd5c8..3f24addd7972 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
bool entered_lp2 = false;
if (tegra_pending_sgi())
- ACCESS_ONCE(abort_flag) = true;
+ WRITE_ONCE(abort_flag, true);
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index 79214d5ff097..a9dd619c6c29 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -35,7 +35,7 @@ static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
{
u32 seq;
repeat:
- seq = ACCESS_ONCE(vdata->seq_count);
+ seq = READ_ONCE(vdata->seq_count);
if (seq & 1) {
cpu_relax();
goto repeat;
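The retry loop above is the reader half of a seqcount-style protocol: an odd sequence count means the kernel is mid-update, and the reader re-checks the count again after consuming the data. The writer side is not part of this hunk; sketched here only for context:

	/* Writer, in the kernel (illustrative sketch): */
	vdata->seq_count++;		/* now odd: readers spin or retry */
	smp_wmb();
	/* ... update the timekeeping fields ... */
	smp_wmb();
	vdata->seq_count++;		/* even again: readers may proceed */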
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0df64a6a56d4..df02ad932020 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -22,7 +22,24 @@ config ARM64
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA
+ select ARCH_INLINE_READ_LOCK if !PREEMPT
+ select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
+ select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_QUEUED_RWLOCKS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 2326e39d5892..e63d0a8312de 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += preempt.h
+generic-y += qrwlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += serial.h
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 95ad7102b63c..fdb827c7832f 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -27,8 +27,6 @@
* instructions.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
@@ -139,176 +137,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
}
#define arch_spin_is_contended arch_spin_is_contended
-/*
- * Write lock implementation.
- *
- * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
- * exclusively held.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " sevl\n"
- "1: wfe\n"
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1b\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- __nops(1),
- /* LSE atomics */
- "1: mov %w0, wzr\n"
- "2: casa %w0, %w2, %1\n"
- " cbz %w0, 3f\n"
- " ldxr %w0, %1\n"
- " cbz %w0, 2b\n"
- " wfe\n"
- " b 1b\n"
- "3:")
- : "=&r" (tmp), "+Q" (rw->lock)
- : "r" (0x80000000)
- : "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- "1: ldaxr %w0, %1\n"
- " cbnz %w0, 2f\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 1b\n"
- "2:",
- /* LSE atomics */
- " mov %w0, wzr\n"
- " casa %w0, %w2, %1\n"
- __nops(2))
- : "=&r" (tmp), "+Q" (rw->lock)
- : "r" (0x80000000)
- : "memory");
-
- return !tmp;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- " stlr wzr, %0",
- " swpl wzr, wzr, %0")
- : "=Q" (rw->lock) :: "memory");
-}
-
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
-/*
- * Read lock implementation.
- *
- * It exclusively loads the lock value, increments it and stores the new value
- * back if positive and the CPU still exclusively owns the location. If the
- * value is negative, the lock is already held.
- *
- * During unlocking there may be multiple active read locks but no write lock.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- *
- * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
- * and LSE implementations may exhibit different behaviour (although this
- * will have no effect on lockdep).
- */
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned int tmp, tmp2;
-
- asm volatile(
- " sevl\n"
- ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- "1: wfe\n"
- "2: ldaxr %w0, %2\n"
- " add %w0, %w0, #1\n"
- " tbnz %w0, #31, 1b\n"
- " stxr %w1, %w0, %2\n"
- " cbnz %w1, 2b\n"
- __nops(1),
- /* LSE atomics */
- "1: wfe\n"
- "2: ldxr %w0, %2\n"
- " adds %w1, %w0, #1\n"
- " tbnz %w1, #31, 1b\n"
- " casa %w0, %w1, %2\n"
- " sbc %w0, %w1, %w0\n"
- " cbnz %w0, 2b")
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
- :
- : "cc", "memory");
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned int tmp, tmp2;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- "1: ldxr %w0, %2\n"
- " sub %w0, %w0, #1\n"
- " stlxr %w1, %w0, %2\n"
- " cbnz %w1, 1b",
- /* LSE atomics */
- " movn %w0, #0\n"
- " staddl %w0, %2\n"
- __nops(2))
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
- :
- : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned int tmp, tmp2;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " mov %w1, #1\n"
- "1: ldaxr %w0, %2\n"
- " add %w0, %w0, #1\n"
- " tbnz %w0, #31, 2f\n"
- " stxr %w1, %w0, %2\n"
- " cbnz %w1, 1b\n"
- "2:",
- /* LSE atomics */
- " ldr %w0, %2\n"
- " adds %w1, %w0, #1\n"
- " tbnz %w1, #31, 1f\n"
- " casa %w0, %w1, %2\n"
- " sbc %w1, %w1, %w0\n"
- __nops(1)
- "1:")
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
- :
- : "cc", "memory");
-
- return !tmp2;
-}
-
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
+#include <asm/qrwlock.h>
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
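With the hand-rolled rwlock assembly gone, arm64 falls back on the generic queued rwlock selected via ARCH_USE_QUEUED_RWLOCKS above. For reference, the read-lock fast path in include/asm-generic/qrwlock.h looks roughly like this (sketched from memory; constant and field names may differ slightly):

	static inline void queued_read_lock(struct qrwlock *lock)
	{
		u32 cnts;

		/* Optimistically add a reader reference. */
		cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return;

		/* A writer is pending or active: take the slow path. */
		queued_read_lock_slowpath(lock);
	}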
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 55be59a35e3f..6b856012c51b 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -36,10 +36,6 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
-typedef struct {
- volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+#include <asm-generic/qrwlock_types.h>
#endif
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index f6431439d15d..839d1441af3a 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -36,8 +36,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__raw_spin_lock_asm(&lock->lock);
}
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
@@ -48,23 +46,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__raw_spin_unlock_asm(&lock->lock);
}
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
- return __raw_uncached_fetch_asm(&rw->lock) > 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
- return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
-}
-
static inline void arch_read_lock(arch_rwlock_t *rw)
{
__raw_read_lock_asm(&rw->lock);
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
return __raw_read_trylock_asm(&rw->lock);
@@ -80,8 +66,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
__raw_write_lock_asm(&rw->lock);
}
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
return __raw_write_trylock_asm(&rw->lock);
@@ -92,10 +76,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
__raw_write_unlock_asm(&rw->lock);
}
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif
#endif /* !__BFIN_SPINLOCK_H */
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index 53a8d5885887..48020863f53a 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -86,16 +86,6 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
return temp;
}
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
- return rwlock->lock == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
- return rwlock->lock == 0;
-}
-
/* Stuffs a -1 in the lock value? */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
@@ -177,11 +167,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
/*
* SMP spinlocks are intended to allow only a single CPU at the lock
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
#define arch_spin_is_locked(x) ((x)->lock != 0)
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 7d6fceb3d567..917910607e0e 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -38,15 +38,31 @@
/*
* lock for reading
*/
-static inline void
-__down_read (struct rw_semaphore *sem)
+static inline int
+___down_read (struct rw_semaphore *sem)
{
long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
- if (result < 0)
+ return (result < 0);
+}
+
+static inline void
+__down_read (struct rw_semaphore *sem)
+{
+ if (___down_read(sem))
rwsem_down_read_failed(sem);
}
+static inline int
+__down_read_killable (struct rw_semaphore *sem)
+{
+ if (___down_read(sem))
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+
+ return 0;
+}
+
/*
* lock for writing
*/
@@ -73,9 +89,10 @@ __down_write (struct rw_semaphore *sem)
static inline int
__down_write_killable (struct rw_semaphore *sem)
{
- if (___down_write(sem))
+ if (___down_write(sem)) {
if (IS_ERR(rwsem_down_write_failed_killable(sem)))
return -EINTR;
+ }
return 0;
}
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index aa057abd948e..afd0b3121b4c 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -62,7 +62,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
- int tmp = ACCESS_ONCE(lock->lock);
+ int tmp = READ_ONCE(lock->lock);
if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
@@ -74,19 +74,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+ WRITE_ONCE(*p, (tmp + 2) & ~1);
}
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock);
+ long tmp = READ_ONCE(lock->lock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock);
+ long tmp = READ_ONCE(lock->lock);
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
@@ -127,9 +127,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
{
arch_spin_lock(lock);
}
-
-#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define arch_spin_lock_flags arch_spin_lock_flags
#ifdef ASM_SUPPORTED
@@ -157,6 +155,7 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
: "p6", "p7", "r2", "memory");
}
+#define arch_read_lock_flags arch_read_lock_flags
#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
@@ -209,6 +208,7 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
+#define arch_write_lock_flags arch_write_lock_flags
#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
#define arch_write_trylock(rw) \
@@ -232,8 +232,6 @@ static inline void arch_write_unlock(arch_rwlock_t *x)
#else /* !ASM_SUPPORTED */
-#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-
#define arch_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
@@ -273,8 +271,4 @@ static inline int arch_read_trylock(arch_rwlock_t *x)
return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index 604af84427ff..0189f410f8f5 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -29,7 +29,6 @@
*/
#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/**
* arch_spin_trylock - Try spin lock and return a result
@@ -138,18 +137,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
* semaphore.h for details. -ben
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -318,11 +305,4 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
return 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* _ASM_M32R_SPINLOCK_H */
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
index 349938c35f2d..4497c232d9c1 100644
--- a/arch/metag/include/asm/spinlock.h
+++ b/arch/metag/include/asm/spinlock.h
@@ -16,13 +16,4 @@
* locked.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/metag/include/asm/spinlock_lnkget.h b/arch/metag/include/asm/spinlock_lnkget.h
index 029935560b7f..dfd780eab350 100644
--- a/arch/metag/include/asm/spinlock_lnkget.h
+++ b/arch/metag/include/asm/spinlock_lnkget.h
@@ -137,21 +137,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
: "memory");
}
-/* write_can_lock - would write_trylock() succeed? */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
- int ret;
-
- asm volatile ("LNKGETD %0, [%1]\n"
- "CMP %0, #0\n"
- "MOV %0, #1\n"
- "XORNZ %0, %0, %0\n"
- : "=&d" (ret)
- : "da" (&rw->lock)
- : "cc");
- return ret;
-}
-
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -225,26 +210,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
return tmp;
}
-/* read_can_lock - would read_trylock() succeed? */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
- int tmp;
-
- asm volatile ("LNKGETD %0, [%1]\n"
- "CMP %0, %2\n"
- "MOV %0, #1\n"
- "XORZ %0, %0, %0\n"
- : "=&d" (tmp)
- : "da" (&rw->lock), "bd" (0x80000000)
- : "cc");
- return tmp;
-}
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_LNKGET_H */
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h
index 12de9862d190..c0bd81bbe18c 100644
--- a/arch/metag/include/asm/spinlock_lock1.h
+++ b/arch/metag/include/asm/spinlock_lock1.h
@@ -105,16 +105,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
rw->lock = 0;
}
-/* write_can_lock - would write_trylock() succeed? */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
- unsigned int ret;
-
- barrier();
- ret = rw->lock;
- return (ret == 0);
-}
-
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -172,14 +162,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
return (ret < 0x80000000);
}
-/* read_can_lock - would read_trylock() succeed? */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
- unsigned int ret;
-
- barrier();
- ret = rw->lock;
- return (ret < 0x80000000);
-}
-
#endif /* __ASM_SPINLOCK_LOCK1_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index a7d21da16b6a..ee81297d9117 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -13,11 +13,4 @@
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* _ASM_SPINLOCK_H */
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
index b7cd6cf77b83..91bf0c2c265c 100644
--- a/arch/mips/include/asm/vdso.h
+++ b/arch/mips/include/asm/vdso.h
@@ -99,7 +99,7 @@ static inline u32 vdso_data_read_begin(const union mips_vdso_data *data)
u32 seq;
while (true) {
- seq = ACCESS_ONCE(data->seq_count);
+ seq = READ_ONCE(data->seq_count);
if (likely(!(seq & 1))) {
/* Paired with smp_wmb() in vdso_data_write_*(). */
smp_rmb();
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 9dd624c2fe56..421e06dfee72 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -166,7 +166,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
nc_core_ready_count = nc_addr;
/* Ensure ready_count is zero-initialised before the assembly runs */
- ACCESS_ONCE(*nc_core_ready_count) = 0;
+ WRITE_ONCE(*nc_core_ready_count, 0);
coupled_barrier(&per_cpu(pm_barrier, core), online);
/* Run the generated entry code */
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index fe413b41df6c..879cd0df53ba 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -84,6 +84,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
: "memory", "cc");
}
+#define arch_spin_lock_flags arch_spin_lock_flags
#ifdef __KERNEL__
@@ -98,18 +99,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
* read-locks.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
/*
* On mn10300, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.
@@ -183,9 +172,6 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
return 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 7ecf69879e2d..d7ef1232a82a 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -543,7 +543,7 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)
try_again:
/* pull chars out of the hat */
- ix = ACCESS_ONCE(port->rx_outp);
+ ix = READ_ONCE(port->rx_outp);
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) {
if (push && !tport->low_latency)
tty_flip_buffer_push(tport);
@@ -1724,7 +1724,7 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port)
if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) {
do {
/* pull chars out of the hat */
- ix = ACCESS_ONCE(port->rx_outp);
+ ix = READ_ONCE(port->rx_outp);
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
return NO_POLL_CHAR;
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index bc54addd589f..88bae6676c9b 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -261,7 +261,7 @@ atomic64_set(atomic64_t *v, s64 i)
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
#define atomic64_inc(v) (atomic64_add( 1,(v)))
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index af03359e6ac5..6f84b6acc86e 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -32,6 +32,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
cpu_relax();
mb();
}
+#define arch_spin_lock_flags arch_spin_lock_flags
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
@@ -169,25 +170,4 @@ static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
return result;
}
-/*
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
-{
- return rw->counter >= 0;
-}
-
-/*
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
-{
- return !rw->counter;
-}
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index edbe571bcc54..b9ebc3085fb7 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -161,6 +161,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
local_irq_restore(flags_dis);
}
}
+#define arch_spin_lock_flags arch_spin_lock_flags
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
@@ -181,9 +182,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
* read-locks.
*/
-#define arch_read_can_lock(rw) ((rw)->lock >= 0)
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND "extsw %0,%0\n"
#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */
@@ -302,9 +300,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
rw->lock = 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#define arch_spin_relax(lock) __spin_yield(lock)
#define arch_read_relax(lock) __rw_yield(lock)
#define arch_write_relax(lock) __rw_yield(lock)
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1643e9e53655..3f1c4fcbe0aa 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -78,7 +78,7 @@ static unsigned long lock_rtas(void)
local_irq_save(flags);
preempt_disable();
- arch_spin_lock_flags(&rtas.lock, flags);
+ arch_spin_lock(&rtas.lock);
return flags;
}
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index 7a9cde0cfbd1..acd3206dfae3 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -43,7 +43,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
if (!opal_memcons)
return -ENODEV;
- out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos));
+ out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos));
/* Now we've read out_pos, put a barrier in before reading the new
* data it points to in conbuf. */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 31f95a636aed..0a29588aa00b 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -38,6 +38,7 @@ bool arch_vcpu_is_preempted(int cpu);
*/
void arch_spin_relax(arch_spinlock_t *lock);
+#define arch_spin_relax arch_spin_relax
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
@@ -76,6 +77,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
if (!arch_spin_trylock_once(lp))
arch_spin_lock_wait(lp);
}
+#define arch_spin_lock_flags arch_spin_lock_flags
static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
@@ -105,20 +107,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
* read-locks.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) (((x)->cnts & 0xffff0000) == 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->cnts == 0)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 2a781cb6515c..84c0faeaf7ea 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -215,7 +215,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
/* Pass the virtual CPU to the lock holder if it is not running */
- owner = arch_spin_yield_target(ACCESS_ONCE(lp->lock), NULL);
+ owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
if (owner && arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
diff --git a/arch/sh/include/asm/spinlock-cas.h b/arch/sh/include/asm/spinlock-cas.h
index 5ed7dbbd94ff..270ee4d3e25b 100644
--- a/arch/sh/include/asm/spinlock-cas.h
+++ b/arch/sh/include/asm/spinlock-cas.h
@@ -27,7 +27,6 @@ static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new
*/
#define arch_spin_is_locked(x) ((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
@@ -53,18 +52,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* read-locks.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned old;
@@ -102,11 +89,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SH_SPINLOCK_CAS_H */
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
index f77263aae760..715595de286a 100644
--- a/arch/sh/include/asm/spinlock-llsc.h
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -19,7 +19,6 @@
*/
#define arch_spin_is_locked(x) ((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -89,18 +88,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* read-locks.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -209,11 +196,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
return (oldval > (RW_LOCK_BIAS - 1));
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SH_SPINLOCK_LLSC_H */
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 0c3b3b4a9963..d13ce517f4b9 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -32,7 +32,7 @@ void atomic_set(atomic_t *, int);
#define atomic_set_release(v, i) atomic_set((v), (i))
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index 6a339a78f4f4..71dd82b43cc5 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -7,6 +7,7 @@
#if defined(__sparc__) && defined(__arch64__)
#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
#include <linux/threads.h>
#include <asm/switch_to.h>
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 26f00ac2b470..bc5aa6f61676 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -183,17 +183,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw)
res; \
})
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
-#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
#endif /* !(__ASSEMBLY__) */
#endif /* __SPARC_SPINLOCK_H */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 4822a7e94a30..7fc82a233f49 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -14,13 +14,6 @@
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
-#define arch_read_lock_flags(p, f) arch_read_lock(p)
-#define arch_write_lock_flags(p, f) arch_write_lock(p)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_SPINLOCK_H) */
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c
index baa60357f8ba..b7ba577d82ca 100644
--- a/arch/tile/gxio/dma_queue.c
+++ b/arch/tile/gxio/dma_queue.c
@@ -163,14 +163,14 @@ int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
int64_t completion_slot, int update)
{
if (update) {
- if (ACCESS_ONCE(dma_queue->hw_complete_count) >
+ if (READ_ONCE(dma_queue->hw_complete_count) >
completion_slot)
return 1;
__gxio_dma_queue_update_credits(dma_queue);
}
- return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
+ return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
}
EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index cba8ba9b8da6..fb5313d77315 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -51,9 +51,6 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
void arch_spin_lock(arch_spinlock_t *lock);
-/* We cannot take an interrupt after getting a ticket, so don't enable them. */
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
int arch_spin_trylock(arch_spinlock_t *lock);
static inline void arch_spin_unlock(arch_spinlock_t *lock)
@@ -80,22 +77,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define _RD_COUNT_WIDTH 8
/**
- * arch_read_can_lock() - would read_trylock() succeed?
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
- return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
-}
-
-/**
- * arch_write_can_lock() - would write_trylock() succeed?
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
- return rwlock->lock == 0;
-}
-
-/**
* arch_read_lock() - acquire a read lock.
*/
void arch_read_lock(arch_rwlock_t *rwlock);
@@ -125,7 +106,4 @@ void arch_read_unlock(arch_rwlock_t *rwlock);
*/
void arch_write_unlock(arch_rwlock_t *rwlock);
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _ASM_TILE_SPINLOCK_32_H */
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 9a2c2d605752..5b616ef642a8 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -75,9 +75,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);
-/* We cannot take an interrupt after getting a ticket, so don't enable them. */
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
@@ -93,24 +90,6 @@ static inline int arch_write_val_locked(int val)
return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
}
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
- return !arch_write_val_locked(rw->lock);
-}
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
- return rw->lock == 0;
-}
-
extern void __read_lock_failed(arch_rwlock_t *rw);
static inline void arch_read_lock(arch_rwlock_t *rw)
@@ -156,7 +135,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
return 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _ASM_TILE_SPINLOCK_64_H */
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h
index b9e45e37649e..c8fd47edba30 100644
--- a/arch/tile/include/gxio/dma_queue.h
+++ b/arch/tile/include/gxio/dma_queue.h
@@ -121,7 +121,7 @@ static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
* if the result is LESS than "hw_complete_count".
*/
uint64_t complete;
- complete = ACCESS_ONCE(dma_queue->hw_complete_count);
+ complete = READ_ONCE(dma_queue->hw_complete_count);
slot |= (complete & 0xffffffffff000000);
if (slot < complete)
slot += 0x1000000;
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index e1a078e6828e..d516d61751c2 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -255,7 +255,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
int do_syscall_trace_enter(struct pt_regs *regs)
{
- u32 work = ACCESS_ONCE(current_thread_info()->flags);
+ u32 work = READ_ONCE(current_thread_info()->flags);
if ((work & _TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)) {
diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h
index 390572daa40d..b3f5865a92c9 100644
--- a/arch/um/include/shared/init.h
+++ b/arch/um/include/shared/init.h
@@ -41,7 +41,7 @@
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
-#include <linux/compiler.h>
+#include <linux/compiler_types.h>
/* These are for everybody (although not all archs will actually
discard it in modules) */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2fdb23313dd5..9bceea6a5852 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -56,7 +56,7 @@ config X86
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_PMEM_API if X86_64
# Causing hangs/crashes, see the commit that added this change for details.
- select ARCH_HAS_REFCOUNT if BROKEN
+ select ARCH_HAS_REFCOUNT
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 03505ffbe1b6..d7d3cc24baf4 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -75,7 +75,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
BUG_ON(regs != task_pt_regs(current));
- work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
+ work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
if (unlikely(work & _TIF_SYSCALL_EMU))
emulated = true;
@@ -186,9 +186,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
addr_limit_user_check();
- if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
- local_irq_disable();
-
+ lockdep_assert_irqs_disabled();
lockdep_sys_exit();
cached_flags = READ_ONCE(ti->flags);
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index fa8dbfcf7ed3..11b13c4b43d5 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -318,7 +318,7 @@ int gettimeofday(struct timeval *, struct timezone *)
notrace time_t __vdso_time(time_t *t)
{
/* This is atomic on x86 so we don't need any locks. */
- time_t result = ACCESS_ONCE(gtod->wall_time_sec);
+ time_t result = READ_ONCE(gtod->wall_time_sec);
if (t)
*t = result;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 80534d3c2480..140d33288e78 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2118,7 +2118,7 @@ static int x86_pmu_event_init(struct perf_event *event)
event->destroy(event);
}
- if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+ if (READ_ONCE(x86_pmu.attr_rdpmc))
event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
return err;
@@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment)
struct ldt_struct *ldt;
/* IRQs are off, so this synchronizes with smp_store_release */
- ldt = lockless_dereference(current->active_mm->context.ldt);
+ ldt = READ_ONCE(current->active_mm->context.ldt);
if (!ldt || idx >= ldt->nr_entries)
return 0;
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 01727dbc294a..7fb336210e1b 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -12,11 +12,11 @@
*/
#ifdef CONFIG_X86_32
-#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
+#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
X86_FEATURE_XMM2) ::: "memory", "cc")
-#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
+#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
X86_FEATURE_XMM2) ::: "memory", "cc")
-#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
+#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb() asm volatile("mfence":::"memory")
@@ -31,7 +31,11 @@
#endif
#define dma_wmb() barrier()
-#define __smp_mb() mb()
+#ifdef CONFIG_X86_32
+#define __smp_mb() asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
+#else
+#define __smp_mb() asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
+#endif
#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
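The 32-bit mb()/rmb()/wmb() fallbacks and the new __smp_mb() use a locked add to a dead stack slot rather than MFENCE, which is cheaper on common x86 parts. A hedged stand-alone sketch of the same idiom, assuming x86-64 and GCC/Clang inline asm (not the kernel macro itself):

static inline void full_barrier(void)
{
	/* Locked RMW to the slot just below the stack pointer: orders all
	 * prior loads/stores against all later ones, much like MFENCE. */
	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc");
}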
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 6699fc441644..6d16d15d09a0 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -73,8 +73,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
- /* lockless_dereference synchronizes with smp_store_release */
- ldt = lockless_dereference(mm->context.ldt);
+ /* READ_ONCE synchronizes with smp_store_release */
+ ldt = READ_ONCE(mm->context.ldt);
/*
* Any change to mm->context.ldt is followed by an IPI to all
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9982dd96f093..5e16b5d40d32 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -2,6 +2,7 @@
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H
+#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
@@ -47,10 +48,14 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
#endif
#ifdef CONFIG_PARAVIRT
+DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void native_pv_lock_init(void) __init;
+
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
- if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+ if (!static_branch_likely(&virt_spin_lock_key))
return false;
/*
@@ -66,6 +71,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
return true;
}
+#else
+static inline void native_pv_lock_init(void)
+{
+}
#endif /* CONFIG_PARAVIRT */
#include <asm-generic/qspinlock.h>
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index ff871210b9f2..4e44250e7d0d 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -15,7 +15,7 @@
* back to the regular execution flow in .text.
*/
#define _REFCOUNT_EXCEPTION \
- ".pushsection .text.unlikely\n" \
+ ".pushsection .text..refcount\n" \
"111:\tlea %[counter], %%" _ASM_CX "\n" \
"112:\t" ASM_UD0 "\n" \
ASM_UNREACHABLE \
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 4d38d85a16ad..4c25cf6caefa 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -61,18 +61,33 @@
/*
* lock for reading
*/
+#define ____down_read(sem, slow_path) \
+({ \
+ struct rw_semaphore* ret; \
+ asm volatile("# beginning down_read\n\t" \
+ LOCK_PREFIX _ASM_INC "(%[sem])\n\t" \
+ /* adds 0x00000001 */ \
+ " jns 1f\n" \
+ " call " slow_path "\n" \
+ "1:\n\t" \
+ "# ending down_read\n\t" \
+ : "+m" (sem->count), "=a" (ret), \
+ ASM_CALL_CONSTRAINT \
+ : [sem] "a" (sem) \
+ : "memory", "cc"); \
+ ret; \
+})
+
static inline void __down_read(struct rw_semaphore *sem)
{
- asm volatile("# beginning down_read\n\t"
- LOCK_PREFIX _ASM_INC "(%1)\n\t"
- /* adds 0x00000001 */
- " jns 1f\n"
- " call call_rwsem_down_read_failed\n"
- "1:\n\t"
- "# ending down_read\n\t"
- : "+m" (sem->count)
- : "a" (sem)
- : "memory", "cc");
+ ____down_read(sem, "call_rwsem_down_read_failed");
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable")))
+ return -EINTR;
+ return 0;
}
/*
@@ -82,17 +97,18 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
{
long result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
- " mov %0,%1\n\t"
+ " mov %[count],%[result]\n\t"
"1:\n\t"
- " mov %1,%2\n\t"
- " add %3,%2\n\t"
+ " mov %[result],%[tmp]\n\t"
+ " add %[inc],%[tmp]\n\t"
" jle 2f\n\t"
- LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ LOCK_PREFIX " cmpxchg %[tmp],%[count]\n\t"
" jnz 1b\n\t"
"2:\n\t"
"# ending __down_read_trylock\n\t"
- : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
- : "i" (RWSEM_ACTIVE_READ_BIAS)
+ : [count] "+m" (sem->count), [result] "=&a" (result),
+ [tmp] "=&r" (tmp)
+ : [inc] "i" (RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
return result >= 0;
}
@@ -106,7 +122,7 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
struct rw_semaphore* ret; \
\
asm volatile("# beginning down_write\n\t" \
- LOCK_PREFIX " xadd %1,(%4)\n\t" \
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" \
/* adds 0xffff0001, returns the old value */ \
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
/* was the active mask 0 before? */\
@@ -114,9 +130,9 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
" call " slow_path "\n" \
"1:\n" \
"# ending down_write" \
- : "+m" (sem->count), "=d" (tmp), \
+ : "+m" (sem->count), [tmp] "=d" (tmp), \
"=a" (ret), ASM_CALL_CONSTRAINT \
- : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
+ : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \
: "memory", "cc"); \
ret; \
})
@@ -142,21 +158,21 @@ static inline bool __down_write_trylock(struct rw_semaphore *sem)
bool result;
long tmp0, tmp1;
asm volatile("# beginning __down_write_trylock\n\t"
- " mov %0,%1\n\t"
+ " mov %[count],%[tmp0]\n\t"
"1:\n\t"
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
/* was the active mask 0 before? */
" jnz 2f\n\t"
- " mov %1,%2\n\t"
- " add %4,%2\n\t"
- LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " mov %[tmp0],%[tmp1]\n\t"
+ " add %[inc],%[tmp1]\n\t"
+ LOCK_PREFIX " cmpxchg %[tmp1],%[count]\n\t"
" jnz 1b\n\t"
"2:\n\t"
CC_SET(e)
"# ending __down_write_trylock\n\t"
- : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
- CC_OUT(e) (result)
- : "er" (RWSEM_ACTIVE_WRITE_BIAS)
+ : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
+ [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
+ : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS)
: "memory");
return result;
}
@@ -168,14 +184,14 @@ static inline void __up_read(struct rw_semaphore *sem)
{
long tmp;
asm volatile("# beginning __up_read\n\t"
- LOCK_PREFIX " xadd %1,(%2)\n\t"
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
/* subtracts 1, returns the old value */
" jns 1f\n\t"
" call call_rwsem_wake\n" /* expects old value in %edx */
"1:\n"
"# ending __up_read\n"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
+ : "+m" (sem->count), [tmp] "=d" (tmp)
+ : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
}
@@ -186,14 +202,14 @@ static inline void __up_write(struct rw_semaphore *sem)
{
long tmp;
asm volatile("# beginning __up_write\n\t"
- LOCK_PREFIX " xadd %1,(%2)\n\t"
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
/* subtracts 0xffff0001, returns the old value */
" jns 1f\n\t"
" call call_rwsem_wake\n" /* expects old value in %edx */
"1:\n\t"
"# ending __up_write\n"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+ : "+m" (sem->count), [tmp] "=d" (tmp)
+ : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS)
: "memory", "cc");
}
@@ -203,7 +219,7 @@ static inline void __up_write(struct rw_semaphore *sem)
static inline void __downgrade_write(struct rw_semaphore *sem)
{
asm volatile("# beginning __downgrade_write\n\t"
- LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+ LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t"
/*
* transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
* 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
@@ -213,7 +229,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
"1:\n\t"
"# ending __downgrade_write\n"
: "+m" (sem->count)
- : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
+ : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS)
: "memory", "cc");
}
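The rwsem fast paths above move from positional ("%0", "%1") to named ("[count]", "[sem]") asm operands. A small sketch of the named-operand form, assuming GCC/Clang extended asm on x86-64 (illustrative only, not the kernel's rwsem code):

static inline void locked_add(long *count, long inc)
{
	/* Symbolic operand names make the constraint list self-describing. */
	asm volatile("lock; addq %[inc], %[count]"
		     : [count] "+m" (*count)
		     : [inc] "er" (inc)
		     : "memory", "cc");
}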
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index b34625796eb2..5b6bc7016c22 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -42,11 +42,4 @@
#include <asm/qrwlock.h>
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* _ASM_X86_SPINLOCK_H */
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 52250681f68c..fb856c9f0449 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -49,7 +49,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
unsigned ret;
repeat:
- ret = ACCESS_ONCE(s->seq);
+ ret = READ_ONCE(s->seq);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
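gtod_read_begin() is a sequence-counter read-side loop: spin while the sequence is odd, meaning a writer is mid-update. A rough user-space sketch of that loop, with volatile standing in for READ_ONCE() and an x86 pause standing in for cpu_relax() (both assumptions; a full seqcount reader also re-checks the sequence after copying the protected data):

static volatile unsigned int gtod_seq;

static unsigned int read_begin(void)
{
	unsigned int seq;

	while ((seq = gtod_seq) & 1)	/* odd: update in flight, retry */
		__builtin_ia32_pause();	/* cpu_relax() analogue on x86 */

	return seq;
}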
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 9c4e7ba6870c..7d7715dde901 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -155,14 +155,14 @@ void init_espfix_ap(int cpu)
page = cpu/ESPFIX_STACKS_PER_PAGE;
/* Did another CPU already set this up? */
- stack_page = ACCESS_ONCE(espfix_pages[page]);
+ stack_page = READ_ONCE(espfix_pages[page]);
if (likely(stack_page))
goto done;
mutex_lock(&espfix_init_mutex);
/* Did we race on the lock? */
- stack_page = ACCESS_ONCE(espfix_pages[page]);
+ stack_page = READ_ONCE(espfix_pages[page]);
if (stack_page)
goto unlock_done;
@@ -200,7 +200,7 @@ void init_espfix_ap(int cpu)
set_pte(&pte_p[n*PTE_STRIDE], pte);
/* Job is done for this CPU and any CPU which shares this page */
- ACCESS_ONCE(espfix_pages[page]) = stack_page;
+ WRITE_ONCE(espfix_pages[page], stack_page);
unlock_done:
mutex_unlock(&espfix_init_mutex);
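init_espfix_ap() follows a check, lock, re-check, publish pattern: a lockless READ_ONCE() fast path, a mutex-protected slow path that re-reads before initializing, and a WRITE_ONCE() publish. A hedged pthread sketch of the same shape (volatile casts stand in for READ_ONCE/WRITE_ONCE; real code must also order the object's setup before the publishing store):

#include <pthread.h>
#include <stdlib.h>

static void *pages[64];
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static void *get_page(unsigned int idx)
{
	void *p = *(void * volatile *)&pages[idx];	/* lockless fast path */

	if (p)
		return p;

	pthread_mutex_lock(&init_lock);
	p = *(void * volatile *)&pages[idx];		/* did we race on the lock? */
	if (!p) {
		p = calloc(1, 4096);			/* set up the page */
		*(void * volatile *)&pages[idx] = p;	/* publish for other CPUs */
	}
	pthread_mutex_unlock(&init_lock);
	return p;
}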
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 4d17bacf4030..a2fcf037bd80 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -102,7 +102,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
static void install_ldt(struct mm_struct *current_mm,
struct ldt_struct *ldt)
{
- /* Synchronizes with lockless_dereference in load_mm_ldt. */
+ /* Synchronizes with READ_ONCE in load_mm_ldt. */
smp_store_release(&current_mm->context.ldt, ldt);
/* Activate the LDT for all CPUs using current_mm. */
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 35aafc95e4b8..18bc9b51ac9b 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -105,7 +105,7 @@ static void nmi_max_handler(struct irq_work *w)
{
struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
int remainder_ns, decimal_msecs;
- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
+ u64 whole_msecs = READ_ONCE(a->max_duration);
remainder_ns = do_div(whole_msecs, (1000 * 1000));
decimal_msecs = remainder_ns / 1000;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 19a3e8f961c7..041096bdef86 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -115,8 +115,18 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
return 5;
}
-/* Neat trick to map patch type back to the call within the
- * corresponding structure. */
+DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void __init native_pv_lock_init(void)
+{
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+ static_branch_disable(&virt_spin_lock_key);
+}
+
+/*
+ * Neat trick to map patch type back to the call within the
+ * corresponding structure.
+ */
static void *get_call_destination(u8 type)
{
struct paravirt_patch_template tmpl = {
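virt_spin_lock_key replaces a per-call static_cpu_has(X86_FEATURE_HYPERVISOR) test: the key defaults to true, and native_pv_lock_init() (or Xen, below) disables it once at boot, so bare-metal spinlock paths see a patched-out branch. A plain-C sketch of the resulting control flow, with an ordinary flag plus a branch hint standing in for the self-patching static branch (an assumption, not how static keys are implemented):

#include <stdbool.h>

static bool virt_spin_lock_enabled = true;	/* DEFINE_STATIC_KEY_TRUE analogue */

static void pv_lock_init(bool on_hypervisor)
{
	if (!on_hypervisor)
		virt_spin_lock_enabled = false;	/* static_branch_disable analogue */
}

static bool virt_spin_lock_path(void)
{
	if (__builtin_expect(!virt_spin_lock_enabled, 0))
		return false;			/* fall back to the native qspinlock path */

	/* ... hypervisor-friendly test-and-set spin loop would go here ... */
	return true;
}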
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 65a0ccdc3050..7ab6db86b573 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -77,6 +77,7 @@
#include <asm/i8259.h>
#include <asm/realmode.h>
#include <asm/misc.h>
+#include <asm/qspinlock.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -1095,7 +1096,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
unsigned long flags;
int err, ret = 0;
- WARN_ON(irqs_disabled());
+ lockdep_assert_irqs_enabled();
pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);
@@ -1358,6 +1359,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
pr_info("CPU0: ");
print_cpu_info(&cpu_data(0));
+ native_pv_lock_init();
+
uv_system_init();
set_mtrr_aps_delayed_init();
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7a69cf053711..a119b361b8b7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -443,7 +443,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
static u64 __get_spte_lockless(u64 *sptep)
{
- return ACCESS_ONCE(*sptep);
+ return READ_ONCE(*sptep);
}
#else
union split_spte {
@@ -4819,7 +4819,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
* If we don't have indirect shadow pages, it means no page is
* write-protected, so we can exit simply.
*/
- if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+ if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
return;
remote_flush = local_flush = false;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index ea67dc876316..01c1371f39f8 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -157,7 +157,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
return false;
index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
- return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
+ return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}
void kvm_page_track_cleanup(struct kvm *kvm)
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index bf2c6074efd2..dc2ab6ea6768 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -98,6 +98,18 @@ ENTRY(call_rwsem_down_read_failed)
ret
ENDPROC(call_rwsem_down_read_failed)
+ENTRY(call_rwsem_down_read_failed_killable)
+ FRAME_BEGIN
+ save_common_regs
+ __ASM_SIZE(push,) %__ASM_REG(dx)
+ movq %rax,%rdi
+ call rwsem_down_read_failed_killable
+ __ASM_SIZE(pop,) %__ASM_REG(dx)
+ restore_common_regs
+ FRAME_END
+ ret
+ENDPROC(call_rwsem_down_read_failed_killable)
+
ENTRY(call_rwsem_down_write_failed)
FRAME_BEGIN
save_common_regs
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index c3521e2be396..3321b446b66c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -67,12 +67,17 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
* wrapped around) will be set. Additionally, seeing the refcount
* reach 0 will set ZF (Zero Flag: result was zero). In each of
* these cases we want a report, since it's a boundary condition.
- *
+ * The SF case is not reported since it indicates post-boundary
+ * manipulations below zero or above INT_MAX. And if none of the
+ * flags are set, something has gone very wrong, so report it.
*/
if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) {
bool zero = regs->flags & X86_EFLAGS_ZF;
refcount_error_report(regs, zero ? "hit zero" : "overflow");
+ } else if ((regs->flags & X86_EFLAGS_SF) == 0) {
+ /* Report if none of OF, ZF, nor SF are set. */
+ refcount_error_report(regs, "unexpected saturation");
}
return true;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 6083ba462f35..13b4f19b9131 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -547,7 +547,7 @@ int xen_alloc_p2m_entry(unsigned long pfn)
if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
topidx = p2m_top_index(pfn);
top_mfn_p = &p2m_top_mfn[topidx];
- mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
+ mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);
BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 08324c64005d..02f3445a2b5f 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <asm/paravirt.h>
+#include <asm/qspinlock.h>
#include <xen/interface/xen.h>
#include <xen/events.h>
@@ -81,8 +82,11 @@ void xen_init_lock_cpu(int cpu)
int irq;
char *name;
- if (!xen_pvspin)
+ if (!xen_pvspin) {
+ if (cpu == 0)
+ static_branch_disable(&virt_spin_lock_key);
return;
+ }
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
cpu, per_cpu(lock_kicker_irq, cpu));
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index 3bb49681ee24..c6e1290dcbb7 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -33,8 +33,6 @@
#define arch_spin_is_locked(x) ((x)->slock != 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -97,8 +95,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
* 0x80000000 one writer owns the rwlock, no other writers, no readers
*/
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -200,7 +196,4 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
: "memory");
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _XTENSA_SPINLOCK_H */
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
index 4dc0c1b43f4b..2f7eb66c23ec 100644
--- a/arch/xtensa/platforms/xtfpga/lcd.c
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -34,23 +34,23 @@
static void lcd_put_byte(u8 *addr, u8 data)
{
#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
- ACCESS_ONCE(*addr) = data;
+ WRITE_ONCE(*addr, data);
#else
- ACCESS_ONCE(*addr) = data & 0xf0;
- ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
+ WRITE_ONCE(*addr, data & 0xf0);
+ WRITE_ONCE(*addr, (data << 4) & 0xf0);
#endif
}
static int __init lcd_init(void)
{
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
mdelay(5);
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
udelay(200);
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
udelay(50);
#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
diff --git a/block/bio.c b/block/bio.c
index 101c2a9b5481..cc60213e56d8 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -917,17 +917,9 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
-struct submit_bio_ret {
- struct completion event;
- int error;
-};
-
static void submit_bio_wait_endio(struct bio *bio)
{
- struct submit_bio_ret *ret = bio->bi_private;
-
- ret->error = blk_status_to_errno(bio->bi_status);
- complete(&ret->event);
+ complete(bio->bi_private);
}
/**
@@ -943,16 +935,15 @@ static void submit_bio_wait_endio(struct bio *bio)
*/
int submit_bio_wait(struct bio *bio)
{
- struct submit_bio_ret ret;
+ DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
- init_completion(&ret.event);
- bio->bi_private = &ret;
+ bio->bi_private = &done;
bio->bi_end_io = submit_bio_wait_endio;
bio->bi_opf |= REQ_SYNC;
submit_bio(bio);
- wait_for_completion_io(&ret.event);
+ wait_for_completion_io(&done);
- return ret.error;
+ return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
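submit_bio_wait() now puts a bare completion on the stack, lets the end_io callback complete it, and reads the error from the bio afterwards. A hedged pthread sketch of that submit / complete-from-callback / wait shape (the names and the submit() callback parameter are hypothetical):

#include <pthread.h>

struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

/* Runs in the completion context (another thread, standing in for the end_io callback). */
static void endio(void *private)
{
	struct waiter *w = private;

	pthread_mutex_lock(&w->lock);
	w->done = 1;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

static int submit_and_wait(void (*submit)(void (*cb)(void *), void *priv))
{
	struct waiter w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};

	submit(endio, &w);			/* asynchronous submission */

	pthread_mutex_lock(&w.lock);
	while (!w.done)
		pthread_cond_wait(&w.cond, &w.lock);
	pthread_mutex_unlock(&w.lock);

	return 0;				/* status is read back from the object, as above */
}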
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 6a9a0f03a67b..d822530e6aea 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -261,7 +261,7 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
- u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
+ u64 now, issue = READ_ONCE(rwb->sync_issue);
if (!issue || !rwb->sync_cookie)
return 0;
diff --git a/block/genhd.c b/block/genhd.c
index dd305c65ffb0..630c0da6cfcf 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1354,13 +1354,7 @@ dev_t blk_lookup_devt(const char *name, int partno)
}
EXPORT_SYMBOL(blk_lookup_devt);
-struct gendisk *alloc_disk(int minors)
-{
- return alloc_disk_node(minors, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(alloc_disk);
-
-struct gendisk *alloc_disk_node(int minors, int node_id)
+struct gendisk *__alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
struct disk_part_tbl *ptbl;
@@ -1411,7 +1405,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
}
return disk;
}
-EXPORT_SYMBOL(alloc_disk_node);
+EXPORT_SYMBOL(__alloc_disk_node);
struct kobject *get_disk(struct gendisk *disk)
{
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 12ebd055724c..4b8ba2a75a4d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev)
* so be careful about accessing it. dev->bus and dev->class should
* never change once they are set, so they don't need special care.
*/
- drv = ACCESS_ONCE(dev->driver);
+ drv = READ_ONCE(dev->driver);
return drv ? drv->name :
(dev->bus ? dev->bus->name :
(dev->class ? dev->class->name : ""));
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7bcf80fa9ada..41d7c2b99f69 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
if (!dev->power.use_autosuspend)
goto out;
- autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+ autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
goto out;
- last_busy = ACCESS_ONCE(dev->power.last_busy);
+ last_busy = READ_ONCE(dev->power.last_busy);
elapsed = jiffies - last_busy;
if (elapsed < 0)
goto out; /* jiffies has wrapped around. */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8ad92707e45f..6c7ccac2679e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -641,7 +641,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
return;
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
if (nfrac < 0) {
/* Debit */
entropy_count += nfrac;
@@ -1265,7 +1265,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
/* Can we pull enough? */
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
ibytes = nbytes;
/* never pull more than available */
have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
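Both hunks snapshot r->entropy_count with READ_ONCE() and later retry via cmpxchg if the counter changed underneath. A compact C11-atomics sketch of that optimistic retry loop (purely illustrative; the kernel's accounting is considerably more involved):

#include <stdatomic.h>

static _Atomic int entropy_count;

static void credit_entropy(int nfrac)
{
	int orig, new;

	do {
		orig = atomic_load_explicit(&entropy_count, memory_order_relaxed);
		new = orig + nfrac;
		if (new < 0)
			new = 0;	/* never let a debit go negative */
	} while (!atomic_compare_exchange_weak(&entropy_count, &orig, new));
}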
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 39e489a96ad7..60da2537bef9 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
if (readl_relaxed(timer->control) & timer->match_mask) {
writel_relaxed(timer->match_mask, timer->control);
- event_handler = ACCESS_ONCE(timer->evt.event_handler);
+ event_handler = READ_ONCE(timer->evt.event_handler);
if (event_handler)
event_handler(&timer->evt);
return IRQ_HANDLED;
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d258953ff488..f4f258075b89 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg)
while (rd_reg32(&jrp->rregs->outring_used)) {
- head = ACCESS_ONCE(jrp->head);
+ head = READ_ONCE(jrp->head);
spin_lock(&jrp->outlock);
@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
spin_lock_bh(&jrp->inplock);
head = jrp->head;
- tail = ACCESS_ONCE(jrp->tail);
+ tail = READ_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 874ddf5e9087..0f20f5ec9617 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
ktime_t start = wmem->start, now = ktime_get();
ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
- while (!(ACCESS_ONCE(csb->flags) & CSB_V)) {
+ while (!(READ_ONCE(csb->flags) & CSB_V)) {
cpu_relax();
now = ktime_get();
if (ktime_after(now, timeout))
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 346c4987b284..11d6419788c2 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -175,11 +175,11 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
/*
* To trigger the error, we need to read the data back
* (the data was written with errors above).
- * The ACCESS_ONCE macros and printk are used to prevent the
+ * The READ_ONCE macros and printk are used to prevent the
+ * compiler optimizing these reads out.
* the compiler optimizing these reads out.

*/
- reg = ACCESS_ONCE(ptemp[0]);
- read_reg = ACCESS_ONCE(ptemp[1]);
+ reg = READ_ONCE(ptemp[0]);
+ read_reg = READ_ONCE(ptemp[1]);
/* Force Read */
rmb();
@@ -618,7 +618,7 @@ static ssize_t altr_edac_device_trig(struct file *file,
for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) {
/* Read data so we're in the correct state */
rmb();
- if (ACCESS_ONCE(ptemp[i]))
+ if (READ_ONCE(ptemp[i]))
result = -1;
/* Toggle Error bit (it is latched), leave ECC enabled */
writel(error_mask, (drvdata->base + priv->set_err_ofst));
@@ -635,7 +635,7 @@ static ssize_t altr_edac_device_trig(struct file *file,
/* Read out written data. ECC error caused here */
for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++)
- if (ACCESS_ONCE(ptemp[i]) != i)
+ if (READ_ONCE(ptemp[i]) != i)
edac_printk(KERN_ERR, EDAC_DEVICE,
"Read doesn't match written data\n");
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8bf89267dc25..ccf52368a073 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
__le16 res_count, next_res_count;
i = ar_first_buffer_index(ctx);
- res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+ res_count = READ_ONCE(ctx->descriptors[i].res_count);
/* A buffer that is not yet completely filled must be the last one. */
while (i != last && res_count == 0) {
@@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
/* Peek at the next descriptor. */
next_i = ar_next_buffer_index(i);
rmb(); /* read descriptors in order */
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
/*
* If the next descriptor is still empty, we must stop at this
* descriptor.
@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
next_i = ar_next_buffer_index(next_i);
rmb();
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
if (next_res_count != cpu_to_le16(PAGE_SIZE))
goto next_buffer_is_active;
}
@@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context,
u32 buffer_dma;
req_count = le16_to_cpu(last->req_count);
- res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+ res_count = le16_to_cpu(READ_ONCE(last->res_count));
completed = req_count - res_count;
buffer_dma = le32_to_cpu(last->data_address);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
index a01461d63f68..00de793e6423 100644
--- a/drivers/firmware/tegra/ivc.c
+++ b/drivers/firmware/tegra/ivc.c
@@ -99,11 +99,11 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
{
/*
* This function performs multiple checks on the same values with
- * security implications, so create snapshots with ACCESS_ONCE() to
+ * security implications, so create snapshots with READ_ONCE() to
* ensure that these checks use the same values.
*/
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* Perform an over-full check to prevent denial of service attacks
@@ -124,8 +124,8 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
struct tegra_ivc_header *header)
{
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* Invalid cases where the counters indicate that the queue is over
@@ -137,8 +137,8 @@ static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
struct tegra_ivc_header *header)
{
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* This function isn't expected to be used in scenarios where an
@@ -151,8 +151,8 @@ static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
- ACCESS_ONCE(ivc->tx.channel->tx.count) =
- ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;
+ WRITE_ONCE(ivc->tx.channel->tx.count,
+ READ_ONCE(ivc->tx.channel->tx.count) + 1);
if (ivc->tx.position == ivc->num_frames - 1)
ivc->tx.position = 0;
@@ -162,8 +162,8 @@ static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
- ACCESS_ONCE(ivc->rx.channel->rx.count) =
- ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;
+ WRITE_ONCE(ivc->rx.channel->rx.count,
+ READ_ONCE(ivc->rx.channel->rx.count) + 1);
if (ivc->rx.position == ivc->num_frames - 1)
ivc->rx.position = 0;
@@ -428,7 +428,7 @@ int tegra_ivc_notified(struct tegra_ivc *ivc)
/* Copy the receiver's state out of shared memory. */
tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
- state = ACCESS_ONCE(ivc->rx.channel->tx.state);
+ state = READ_ONCE(ivc->rx.channel->tx.state);
if (state == TEGRA_IVC_STATE_SYNC) {
offset = offsetof(struct tegra_ivc_header, tx.count);
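The IVC helpers snapshot the shared tx/rx frame counters once per call (the job READ_ONCE() now does explicitly) and derive empty/full/available from their difference, with an over-full check to fail safe against a corrupted or hostile peer. A small sketch under an invented NUM_FRAMES queue depth:

#include <stdint.h>

#define NUM_FRAMES 16u				/* assumed queue depth */

struct ivc_hdr { volatile uint32_t tx_count, rx_count; };

static int ivc_empty(const struct ivc_hdr *h)
{
	uint32_t tx = h->tx_count;		/* snapshot each counter once */
	uint32_t rx = h->rx_count;

	if (tx - rx > NUM_FRAMES)		/* over-full: corrupted, treat as empty */
		return 1;
	return tx == rx;
}

static uint32_t ivc_available(const struct ivc_hdr *h)
{
	return h->tx_count - h->rx_count;	/* frames ready to be read */
}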
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 333bad749067..303b5e099a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+ uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
struct dma_fence *fence, **ptr;
int r;
@@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
- emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+ emitted += READ_ONCE(ring->fence_drv.sync_seq);
return lower_32_bits(emitted);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7171968f261e..6149a47fe63d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
- offset = ACCESS_ONCE(bo->tbo.mem.start);
+ offset = READ_ONCE(bo->tbo.mem.start);
if (offset != AMDGPU_BO_INVALID_OFFSET)
seq_printf(m, " @ 0x%010Lx", offset);
- pin_count = ACCESS_ONCE(bo->pin_count);
+ pin_count = READ_ONCE(bo->pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 38cea6fb25a8..a25f6c72f219 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
if (kfifo_is_empty(&entity->job_queue))
return false;
- if (ACCESS_ONCE(entity->dependency))
+ if (READ_ONCE(entity->dependency))
return false;
return true;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3386452bd2f0..cf3deb283da5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
else
r = 0;
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_put_unlocked(gobj);
return r;
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
r = ret;
/* Flush HDP cache via MMIO if necessary */
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index a552e4ea5440..6ac094ee8983 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+ if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
return -EACCES;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index d9a1e9893136..97bea2e1aa6a 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -380,7 +380,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
if (sc->flags & SCF_FROZEN) {
wait_event_interruptible_timeout(
dd->event_queue,
- !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
+ !(READ_ONCE(dd->flags) & HFI1_FROZEN),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
if (dd->flags & HFI1_FROZEN)
return -ENOLCK;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 7108a4b5e94c..75e740780285 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1423,14 +1423,14 @@ retry:
goto done;
}
/* copy from receiver cache line and recalculate */
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
avail =
(unsigned long)sc->credits -
(sc->fill - sc->alloc_free);
if (blocks > avail) {
/* still no room, actively update */
sc_release_update(sc);
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
trycount++;
goto retry;
}
@@ -1667,7 +1667,7 @@ void sc_release_update(struct send_context *sc)
/* call sent buffer callbacks */
code = -1; /* code not yet set */
- head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
+ head = READ_ONCE(sc->sr_head); /* snapshot the head */
tail = sc->sr_tail;
while (head != tail) {
pbuf = &sc->sr[tail].pbuf;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index b3291f0fde9a..a7fc664f0d4e 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 6781bcdb10b3..08346d25441c 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1725,7 +1725,7 @@ retry:
swhead = sde->descq_head & sde->sdma_mask;
/* this code is really bad for cache line trading */
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
cnt = sde->descq_cnt;
if (swhead < swtail)
@@ -1872,7 +1872,7 @@ retry:
if ((status & sde->idle_mask) && !idle_check_done) {
u16 swtail;
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
if (swtail != hwhead) {
hwhead = (u16)read_sde_csr(sde, SD(HEAD));
idle_check_done = 1;
@@ -2222,7 +2222,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
u16 len;
head = sde->descq_head & sde->sdma_mask;
- tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
seq_printf(s, SDE_FMT, sde->this_idx,
sde->cpu,
sdma_state_name(sde->state.current_state),
@@ -3305,7 +3305,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde)
return -EINVAL;
}
while (1) {
- nr = ffz(ACCESS_ONCE(sde->ahg_bits));
+ nr = ffz(READ_ONCE(sde->ahg_bits));
if (nr > 31) {
trace_hfi1_ahg_allocate(sde, -ENOSPC);
return -ENOSPC;
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 107011d8613b..374c59784950 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
return sde->descq_cnt -
(sde->descq_tail -
- ACCESS_ONCE(sde->descq_head)) - 1;
+ READ_ONCE(sde->descq_head)) - 1;
}
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 0b646173ca22..9a31c585427f 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -121,7 +121,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
+ if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
}
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 2ba74fdd6f15..7fec6b984e3e 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index c0c0e0445cbf..8ec6e8a8d6f7 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
/* Wait until all requests have been freed. */
wait_event_interruptible(
pq->wait,
- (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+ (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
kfree(pq->reqs);
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
@@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
if (ret != -EBUSY) {
req->status = ret;
WRITE_ONCE(req->has_error, 1);
- if (ACCESS_ONCE(req->seqcomp) ==
+ if (READ_ONCE(req->seqcomp) ==
req->seqsubmitted - 1)
goto free_req;
return ret;
@@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
*/
if (req->data_len) {
iovec = &req->iovs[req->iov_idx];
- if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
+ if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
goto free_txreq;
@@ -1390,7 +1390,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
} else {
if (status != SDMA_TXREQ_S_OK)
req->status = status;
- if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
+ if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
(READ_ONCE(req->done) ||
READ_ONCE(req->has_error))) {
user_sdma_free_request(req, false);
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 53efbb0b40c4..9a37e844d4c8 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 498e2202e72c..bddcc37ace44 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
@@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
* Start a new request.
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index be4907453ac4..15962ed193ce 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
@@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 22df09ae809e..b670cb9d2006 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1073,7 +1073,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
rdi->driver_f.notify_error_qp(qp);
/* Schedule the sending tasklet to drain the send work queue. */
- if (ACCESS_ONCE(qp->s_last) != qp->s_head)
+ if (READ_ONCE(qp->s_last) != qp->s_head)
rdi->driver_f.schedule_send(qp);
rvt_clear_mr_refs(qp, 0);
@@ -1686,7 +1686,7 @@ static inline int rvt_qp_is_avail(
if (likely(qp->s_avail))
return 0;
smp_read_barrier_depends(); /* see rc.c */
- slast = ACCESS_ONCE(qp->s_last);
+ slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
else
@@ -1917,7 +1917,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* ahead and kick the send engine into gear. Otherwise we will always
* just schedule the send to happen later.
*/
- call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
+ call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
for (; wr; wr = wr->next) {
err = rvt_post_one_wr(qp, wr, &call_send);
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 2e8f801932be..a1db1e5040dc 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev)
haptic->suspended = false;
- magnitude = ACCESS_ONCE(haptic->magnitude);
+ magnitude = READ_ONCE(haptic->magnitude);
if (magnitude)
regulator_haptic_set_voltage(haptic, magnitude);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d216a8f7bc22..33bb074d6941 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -347,7 +347,7 @@ static void __cache_size_refresh(void)
BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
BUG_ON(dm_bufio_client_count < 0);
- dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
+ dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
/*
* Use default if set to 0 and report the actual cache size used.
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
{
unsigned long buffers;
- if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+ if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
if (mutex_trylock(&dm_bufio_clients_lock)) {
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
@@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
- unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+ unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
@@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
+ return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
}
/*
@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
static unsigned get_max_age_hz(void)
{
- unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
+ unsigned max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 096fe9b66c50..8c5756e1df94 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -6,6 +6,7 @@
* This file is released under the GPL.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
@@ -80,13 +81,13 @@ struct journal_entry {
#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
#if BITS_PER_LONG == 64
-#define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
+#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
@@ -320,7 +321,7 @@ static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, in
static int dm_integrity_failed(struct dm_integrity_c *ic)
{
- return ACCESS_ONCE(ic->failed);
+ return READ_ONCE(ic->failed);
}
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
@@ -1545,7 +1546,7 @@ retry_kmap:
smp_mb();
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
wake_up(&ic->copy_to_journal_wait);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
queue_work(ic->commit_wq, &ic->commit_work);
} else {
schedule_autocommit(ic);
@@ -1798,7 +1799,7 @@ static void integrity_commit(struct work_struct *w)
ic->n_committed_sections += commit_sections;
spin_unlock_irq(&ic->endio_wait.lock);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
queue_work(ic->writer_wq, &ic->writer_work);
release_flush_bios:
@@ -1980,7 +1981,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (ACCESS_ONCE(ic->suspending))
+ if (READ_ONCE(ic->suspending))
return;
spin_lock_irq(&ic->endio_wait.lock);
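journal_entry_set_sector() stores the 64-bit sector directly on 64-bit builds, but on 32-bit builds it writes the low half, issues smp_wmb(), then WRITE_ONCE()s the high half, so a concurrent reader that observes the publishing high half also sees the low half. A user-space sketch of that split publish, with a C11 release fence and a volatile store standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdint.h>

struct je32 { uint32_t sector_lo; volatile uint32_t sector_hi; };

static void set_sector(struct je32 *je, uint64_t x)
{
	je->sector_lo = (uint32_t)x;			/* low half first */
	atomic_thread_fence(memory_order_release);	/* smp_wmb() analogue */
	je->sector_hi = (uint32_t)(x >> 32);		/* publish via the high half */
}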
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index cf2c67e35eaf..eb45cc3df31d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
try_again:
spin_lock_irq(&throttle_spinlock);
- throttle = ACCESS_ONCE(t->throttle);
+ throttle = READ_ONCE(t->throttle);
if (likely(throttle >= 100))
goto skip_limit;
@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
t->num_io_jobs--;
- if (likely(ACCESS_ONCE(t->throttle) >= 100))
+ if (likely(READ_ONCE(t->throttle) >= 100))
goto skip_limit;
if (!t->num_io_jobs) {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 11f273d2f018..3f88c9d32f7e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
pgpath = path_to_pgpath(path);
- if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+ if (unlikely(READ_ONCE(m->current_pg) != pg)) {
/* Only update current_pgpath if pg changed */
spin_lock_irqsave(&m->lock, flags);
m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
}
/* Were we instructed to switch PG? */
- if (lockless_dereference(m->next_pg)) {
+ if (READ_ONCE(m->next_pg)) {
spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
/* Don't change PG until it has no remaining paths */
check_current_pg:
- pg = lockless_dereference(m->current_pg);
+ pg = READ_ONCE(m->current_pg);
if (pg) {
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
struct request *clone;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, nr_bytes);
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
bool queue_io;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if (!pgpath || !queue_io)
pgpath = choose_pgpath(m, nr_bytes);
@@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
struct pgpath *current_pgpath;
int r;
- current_pgpath = lockless_dereference(m->current_pgpath);
+ current_pgpath = READ_ONCE(m->current_pgpath);
if (!current_pgpath)
current_pgpath = choose_pgpath(m, 0);
@@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
}
if (r == -ENOTCONN) {
- if (!lockless_dereference(m->current_pg)) {
+ if (!READ_ONCE(m->current_pg)) {
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
}
@@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti)
return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
/* Guess which priority_group will be used at next mapping time */
- pg = lockless_dereference(m->current_pg);
- next_pg = lockless_dereference(m->next_pg);
- if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+ pg = READ_ONCE(m->current_pg);
+ next_pg = READ_ONCE(m->next_pg);
+ if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
pg = next_pg;
if (!pg) {
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index a7868503d135..29bc51084c82 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -432,7 +432,7 @@ do_sync_free:
synchronize_rcu_expedited();
dm_stat_free(&s->rcu_head);
} else {
- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+ WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
call_rcu(&s->rcu_head, dm_stat_free);
}
return 0;
@@ -640,12 +640,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
*/
last = raw_cpu_ptr(stats->last);
stats_aux->merged =
- (bi_sector == (ACCESS_ONCE(last->last_sector) &&
+ (bi_sector == (READ_ONCE(last->last_sector) &&
((bi_rw == WRITE) ==
- (ACCESS_ONCE(last->last_rw) == WRITE))
+ (READ_ONCE(last->last_rw) == WRITE))
));
- ACCESS_ONCE(last->last_sector) = end_sector;
- ACCESS_ONCE(last->last_rw) = bi_rw;
+ WRITE_ONCE(last->last_sector, end_sector);
+ WRITE_ONCE(last->last_rw, bi_rw);
}
rcu_read_lock();
@@ -694,22 +694,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
for_each_possible_cpu(cpu) {
p = &s->stat_percpu[cpu][x];
- shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
- shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
- shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
- shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
- shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
- shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
- shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
- shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
- shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
- shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
- shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
- shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
+ shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
+ shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
+ shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
+ shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
+ shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
+ shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
+ shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
+ shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
+ shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
+ shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+ shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
unsigned i;
for (i = 0; i < s->n_histogram_entries + 1; i++)
- shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+ shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
}
}
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 4c8de1ff78ca..8d0ba879777e 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
switch_get_position(sctx, region_nr, &region_index, &bit);
- return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) &
+ return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
((1 << sctx->region_table_entry_bits) - 1);
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1e25705209c2..89e5dff9b4cf 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
struct pool_c *pt = pool->ti->private;
bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
enum pool_mode old_mode = get_pool_mode(pool);
- unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
+ unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
/*
* Never allow the pool to transition to PM_WRITE mode if user
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index bda3caca23ca..fba93237a780 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -589,7 +589,7 @@ static void verity_prefetch_io(struct work_struct *work)
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
- unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+ unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4be85324f44d..8aaffa19b29a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -114,7 +114,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
- int param = ACCESS_ONCE(*module_param);
+ int param = READ_ONCE(*module_param);
int modified_param = 0;
bool modified = true;
@@ -136,7 +136,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
- unsigned param = ACCESS_ONCE(*module_param);
+ unsigned param = READ_ONCE(*module_param);
unsigned modified_param = 0;
if (!param)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0ff1bbf6c90e..447ddcbc9566 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2651,7 +2651,7 @@ state_show(struct md_rdev *rdev, char *page)
{
char *sep = ",";
size_t len = 0;
- unsigned long flags = ACCESS_ONCE(rdev->flags);
+ unsigned long flags = READ_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
(!test_bit(ExternalBbl, &flags) &&
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 928e24a07133..7d9a50eed9db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6072,7 +6072,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
*/
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 2322af1b8742..53011629c9ad 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -66,12 +66,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
ssize_t free;
- /* ACCESS_ONCE() to load read pointer on writer side
+ /* READ_ONCE() to load read pointer on writer side
* this pairs with smp_store_release() in dvb_ringbuffer_read(),
* dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
* or dvb_ringbuffer_reset()
*/
- free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
+ free = READ_ONCE(rbuf->pread) - rbuf->pwrite;
if (free <= 0)
free += rbuf->size;
return free-1;
@@ -143,7 +143,7 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
- * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ * this pairs with READ_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
@@ -168,7 +168,7 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
- * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ * this pairs with READ_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
index 637cc4686742..b665757ca89a 100644
--- a/drivers/misc/mic/scif/scif_rb.c
+++ b/drivers/misc/mic/scif/scif_rb.c
@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb)
* the read barrier in scif_rb_count(..)
*/
wmb();
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si bug: For the case where a Core is performing an EXT_WR
@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb)
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}
@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
* scif_rb_space(..)
*/
mb();
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si Bug: For the case where a Core is performing an EXT_WR
@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c
index e1ef8daedd5a..a036dbb4101e 100644
--- a/drivers/misc/mic/scif/scif_rma_list.c
+++ b/drivers/misc/mic/scif/scif_rma_list.c
@@ -277,7 +277,7 @@ retry:
* Need to restart list traversal if there has been
* an asynchronous list entry deletion.
*/
- if (ACCESS_ONCE(ep->rma_info.async_list_del))
+ if (READ_ONCE(ep->rma_info.async_list_del))
goto retry;
}
mutex_unlock(&ep->rma_info.rma_lock);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c02cc817a490..1ed9529e7bd1 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
count];
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b2db581131b2..08a4f57cf409 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
- recv_probe = ACCESS_ONCE(bond->recv_probe);
+ recv_probe = READ_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
if (ret == RX_HANDLER_CONSUMED) {
@@ -3811,7 +3811,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
else
bond_xmit_slave_id(bond, skb, 0);
} else {
- int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+ int slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond);
@@ -3973,7 +3973,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count)) {
slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
bond_dev_queue_xmit(bond, skb, slave->dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 4ef68f69b58c..43f52a8fe708 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
*/
static inline int reclaimable(const struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
hw_cidx -= q->cidx;
return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
@@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb);
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
int reclaim = hw_cidx - q->cidx;
if (reclaim < 0)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0e3d9f39a807..c6e859a27ee6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
if (wrapped)
newacc += 65536;
- ACCESS_ONCE(*acc) = newacc;
+ WRITE_ONCE(*acc, newacc);
}
static void populate_erx_stats(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 0cec06bec63e..340e28211135 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
unsigned int count;
smp_rmb();
- count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+ count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
if (count == 0)
goto out;
@@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dma_addr_t phys;
smp_rmb();
- count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+ count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
if (count == (TX_DESC_NUM - 1)) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8f326f87a815..2cb9539c931e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
if (!rx_ring)
continue;
@@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 05e89864f781..e9e04a485e0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1570,7 +1570,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) {
- tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+ tx_ring = READ_ONCE(vsi->tx_rings[j]);
if (!tx_ring)
continue;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6498da8806cb..de1fcac7834d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -455,7 +455,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
u64 bytes, packets;
unsigned int start;
- tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
i40e_get_netdev_stats_struct_tx(tx_ring, stats);
@@ -791,7 +791,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
- p = ACCESS_ONCE(vsi->tx_rings[q]);
+ p = READ_ONCE(vsi->tx_rings[q]);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index d8456c381c99..97381238eb7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
}
smp_mb(); /* Force any pending update before accessing. */
- adj = ACCESS_ONCE(pf->ptp_base_adj);
+ adj = READ_ONCE(pf->ptp_base_adj);
freq = adj;
freq *= ppb;
@@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
/* Update the base adjustment value. */
- ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ WRITE_ONCE(pf->ptp_base_adj, incval);
smp_mb(); /* Force the above update. */
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 58adbf234e07..31a3f09df9f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg);
/* write operations, indexed using DWORDS */
#define wr32(reg, val) \
do { \
- u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
if (!E1000_REMOVED(hw_addr)) \
writel((val), &hw_addr[(reg)]); \
} while (0)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ea69af267d63..18b6c25d4705 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -750,7 +750,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
- u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
if (E1000_REMOVED(hw_addr))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index e083732adf64..a01409e2e06c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr)
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
@@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr)
static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d5f31e94358..935a2f15b0b0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
*/
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (ixgbe_removed(reg_addr))
@@ -8624,7 +8624,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
u64 bytes, packets;
unsigned int start;
@@ -8640,12 +8640,12 @@ static void ixgbe_get_stats64(struct net_device *netdev,
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 86d6924a2b71..ae312c45696a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
}
smp_mb();
- incval = ACCESS_ONCE(adapter->base_incval);
+ incval = READ_ONCE(adapter->base_incval);
freq = incval;
freq *= ppb;
@@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/* update the base incval used to calculate frequency adjustment */
- ACCESS_ONCE(adapter->base_incval) = incval;
+ WRITE_ONCE(adapter->base_incval, incval);
smp_mb();
/* need lock to prevent incorrect read while modifying cyclecounter */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 032f8ac06357..cacb30682434 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (IXGBE_REMOVED(reg_addr))
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 04d8d4ee4f04..c651fefcc3d2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -182,7 +182,7 @@ struct ixgbevf_info {
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (IXGBE_REMOVED(reg_addr))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8a32a8f7f9c0..3541a7f9d12e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
index = cons_index & size_mask;
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
- last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
- ring_cons = ACCESS_ONCE(ring->cons);
+ last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
+ ring_cons = READ_ONCE(ring->cons);
ring_index = ring_cons & size_mask;
stamp_index = ring_index;
@@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
wmb();
/* we want to dirty this cache line once */
- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+ WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
+ WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
return done < budget;
@@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
/* fetch ring->cons far ahead before needing it to avoid stall */
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
@@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_rmb();
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 50ea69d88480..5dd5f61e1114 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2629,7 +2629,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
ring = &vdev->vpaths[i].ring;
/* Truncated to machine word size number of frames */
- rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+ rx_frms = READ_ONCE(ring->stats.rx_frms);
/* Did this vpath receive any packets */
if (ring->stats.prev_rx_frms == rx_frms) {
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 13f72f5b18d2..a95a46bcd339 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2073,7 +2073,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
- if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ if (likely(READ_ONCE(efx->irq_soft_enabled))) {
/* Note test interrupts */
if (context->index == efx->irq_level)
efx->last_irq_cpu = raw_smp_processor_id();
@@ -2088,7 +2088,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
@@ -3291,7 +3291,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
bool rx_cont;
u16 flags = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
/* Basic packet information */
@@ -3428,7 +3428,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
unsigned int tx_ev_q_label;
int tx_descs = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
@@ -5316,7 +5316,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
int i;
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- if (ACCESS_ONCE(table->entry[i].spec) &
+ if (READ_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
rc = efx_ef10_filter_remove_internal(efx,
1U << EFX_FILTER_PRI_AUTO, i, true);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b9cb697b2818..016616a63880 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2809,7 +2809,7 @@ static void efx_reset_work(struct work_struct *data)
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if (method == RESET_TYPE_MC_BIST)
@@ -2874,7 +2874,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
/* efx_process_channel() will no longer read events once a
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 29614da91cbf..7263275fde4a 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data)
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
@@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
queue_work(reset_workqueue, &efx->reset_work);
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 93c713c1f627..cd8bb472d758 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Check to see if we have a serious error condition */
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
ef4_oword_t reg;
int link_speed, isolate;
- isolate = !!ACCESS_ONCE(efx->reset_pending);
+ isolate = !!READ_ONCE(efx->reset_pending);
switch (link_state->speed) {
case 10000: link_speed = 3; break;
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 05916c710d8c..494884f6af4a 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
struct ef4_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
struct ef4_rx_queue *rx_queue;
struct ef4_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{
struct ef4_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
ef4_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct ef4_channel *channel;
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index a4c4592f6023..54ca457cdb15 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_
static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
@@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 6a75f4140a4b..6486814e97dc 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index ba45150f53c7..86454d25a405 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
struct efx_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -979,7 +979,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1520,7 +1520,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
@@ -1612,7 +1612,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4d7fb8af880d..7b51b6371724 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 60cdb97f58e2..56c2db398def 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -658,7 +658,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
/* Write host time for specified period or until MC is done */
while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
- ACCESS_ONCE(*mc_running)) {
+ READ_ONCE(*mc_running)) {
struct timespec64 update_time;
unsigned int host_time;
@@ -668,7 +668,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
do {
pps_get_ts(&now);
} while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
- ACCESS_ONCE(*mc_running));
+ READ_ONCE(*mc_running));
/* Synchronise NIC with single word of time only */
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
@@ -832,14 +832,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
ptp->start.dma_addr);
/* Clear flag that signals MC ready */
- ACCESS_ONCE(*start) = 0;
+ WRITE_ONCE(*start, 0);
rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
EFX_WARN_ON_ONCE_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
- while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+ while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
udelay(20); /* Usually start MCDI execution quickly */
loops++;
}
@@ -849,7 +849,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
if (!time_before(jiffies, timeout))
++ptp->sync_timeouts;
- if (ACCESS_ONCE(*start))
+ if (READ_ONCE(*start))
efx_ptp_send_times(efx, &last_time);
/* Collect results */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 32bf1fecf864..efb66ea21f27 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 6a4e8e1bbd90..8ab0fb6892d5 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6245,7 +6245,7 @@ static void niu_get_rx_stats(struct niu *np,
pkts = dropped = errors = bytes = 0;
- rx_rings = ACCESS_ONCE(np->rx_rings);
+ rx_rings = READ_ONCE(np->rx_rings);
if (!rx_rings)
goto no_rings;
@@ -6276,7 +6276,7 @@ static void niu_get_tx_stats(struct niu *np,
pkts = errors = bytes = 0;
- tx_rings = ACCESS_ONCE(np->tx_rings);
+ tx_rings = READ_ONCE(np->tx_rings);
if (!tx_rings)
goto no_rings;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 6c0c84c33e1f..b13890953ebb 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap,
* and validate that the result isn't NULL - in case we are
* racing against queue removal.
*/
- int numvtaps = ACCESS_ONCE(tap->numvtaps);
+ int numvtaps = READ_ONCE(tap->numvtaps);
__u32 rxq;
if (!numvtaps)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 42bb820a56c9..c1685a6d7883 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -469,7 +469,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
u32 numqueues = 0;
rcu_read_lock();
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
if (txq) {
@@ -864,7 +864,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
tfile = rcu_dereference(tun->tfiles[txq]);
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
/* Drop packet if interface is not attached */
if (txq >= numqueues)
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index bd8d4392d68b..80f75139495f 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
tx_status = &desc->ud.ds_tx5212.tx_stat;
- txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
+ txstat1 = READ_ONCE(tx_status->tx_status_1);
/* No frame has been sent or an error occurred */
if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
return -EINPROGRESS;
- txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
+ txstat0 = READ_ONCE(tx_status->tx_status_0);
/*
* Get descriptor status
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
u32 rxstat0, rxstat1;
rx_status = &desc->ud.ds_rx.rx_stat;
- rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
+ rxstat1 = READ_ONCE(rx_status->rx_status_1);
/* No frame received / not ready */
if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
- rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
+ rxstat0 = READ_ONCE(rx_status->rx_status_0);
/*
* Frame receive status
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 613caca7dc02..785a0f33b7e6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3628,7 +3628,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
bus->dpc_running = true;
wmb();
- while (ACCESS_ONCE(bus->dpc_triggered)) {
+ while (READ_ONCE(bus->dpc_triggered)) {
bus->dpc_triggered = false;
brcmf_sdio_dpc(bus);
bus->idlecount = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 231878969332..0f45f34e39d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1118,7 +1118,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- bool calibrating = ACCESS_ONCE(mvm->calibrating);
+ bool calibrating = READ_ONCE(mvm->calibrating);
if (state)
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6f2e2af23219..6e9d3289b9d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return -1;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
- u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+ u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
@@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
- dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+ dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a06b6612b658..f25ce3a1ea50 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1247,7 +1247,7 @@ restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 2e3e013ec95a..9ad3f4fe5894 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2076,12 +2076,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
txq = trans_pcie->txq[txq_idx];
- wr_ptr = ACCESS_ONCE(txq->write_ptr);
+ wr_ptr = READ_ONCE(txq->write_ptr);
- while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
+ while (txq->read_ptr != READ_ONCE(txq->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
+ u8 write_ptr = READ_ONCE(txq->write_ptr);
if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
@@ -2553,7 +2553,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
spin_lock(&rxq->lock);
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
@@ -2814,7 +2814,7 @@ static struct iwl_trans_dump_data
/* Dumping RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
+ num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
& 0x0FFF;
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6467ffac9811..d2b3d6177a55 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, channel);
/* wmediumd mode check */
- _portid = ACCESS_ONCE(data->wmediumd);
+ _portid = READ_ONCE(data->wmediumd);
if (_portid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
@@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct ieee80211_channel *chan)
{
struct mac80211_hwsim_data *data = hw->priv;
- u32 _pid = ACCESS_ONCE(data->wmediumd);
+ u32 _pid = READ_ONCE(data->wmediumd);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f05cfc83c9c8..f946bf889015 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work)
if (logout_started) {
bool traced = false;
- while (!ACCESS_ONCE(sess->logout_completed)) {
+ while (!READ_ONCE(sess->logout_completed)) {
if (!traced) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
"%s: waiting for sess %p logout\n",
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 942d094269fb..9469695f5871 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -985,7 +985,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
mb = udev->mb_addr;
tcmu_flush_dcache_range(mb, sizeof(*mb));
- while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
+ while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
struct tcmu_cmd *cmd;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e865dbf878c..fbaa2a90d25d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -483,7 +483,7 @@ static ssize_t wdm_read
if (rv < 0)
return -ERESTARTSYS;
- cntr = ACCESS_ONCE(desc->length);
+ cntr = READ_ONCE(desc->length);
if (cntr == 0) {
desc->read = 0;
retry:
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e9326f31db8d..4ae667d8c238 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -150,7 +150,7 @@ static int usbfs_increase_memory_usage(u64 amount)
{
u64 lim;
- lim = ACCESS_ONCE(usbfs_memory_mb);
+ lim = READ_ONCE(usbfs_memory_mb);
lim <<= 20;
atomic64_add(amount, &usbfs_memory_usage);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d930bfda4010..58d59c5f8592 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -973,7 +973,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
char *string;
intf = to_usb_interface(dev);
- string = ACCESS_ONCE(intf->cur_altsetting->string);
+ string = READ_ONCE(intf->cur_altsetting->string);
if (!string)
return 0;
return sprintf(buf, "%s\n", string);
@@ -989,7 +989,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
intf = to_usb_interface(dev);
udev = interface_to_usbdev(intf);
- alt = ACCESS_ONCE(intf->cur_altsetting);
+ alt = READ_ONCE(intf->cur_altsetting);
return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
"ic%02Xisc%02Xip%02Xin%02X\n",
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 1f9941145746..0b59fa50aa30 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1261,7 +1261,7 @@ static int gr_handle_in_ep(struct gr_ep *ep)
if (!req->last_desc)
return 0;
- if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
+ if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
return 0; /* Not put in hardware buffers yet */
if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
@@ -1290,7 +1290,7 @@ static int gr_handle_out_ep(struct gr_ep *ep)
if (!req->curr_desc)
return 0;
- ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
+ ctrl = READ_ONCE(req->curr_desc->ctrl);
if (ctrl & GR_DESC_OUT_CTRL_EN)
return 0; /* Not received yet */
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 44924824fa41..c86f89babd57 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -785,7 +785,7 @@ static void io_watchdog_func(unsigned long _ohci)
}
/* find the last TD processed by the controller. */
- head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK;
+ head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
td_start = td;
td_next = list_prepare_entry(td, &ed->td_list, td_list);
list_for_each_entry_continue(td_next, &ed->td_list, td_list) {
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index d97f0d9b3ce6..f1cc47292a59 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -187,7 +187,7 @@ struct uhci_qh {
* We need a special accessor for the element pointer because it is
* subject to asynchronous updates by the controller.
*/
-#define qh_element(qh) ACCESS_ONCE((qh)->element)
+#define qh_element(qh) READ_ONCE((qh)->element)
#define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \
cpu_to_hc32((uhci), (qh)->dma_handle))
@@ -275,7 +275,7 @@ struct uhci_td {
* subject to asynchronous updates by the controller.
*/
#define td_status(uhci, td) hc32_to_cpu((uhci), \
- ACCESS_ONCE((td)->status))
+ READ_ONCE((td)->status))
#define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle))
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index f5a86f651f38..2bc3705a99bd 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
{
struct vfio_group *group = data;
struct vfio_device *device;
- struct device_driver *drv = ACCESS_ONCE(dev->driver);
+ struct device_driver *drv = READ_ONCE(dev->driver);
struct vfio_unbound_dev *unbound;
int ret = -EINVAL;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 046f6d280af5..35e929f132e8 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
continue;
}
- tpg = ACCESS_ONCE(vs_tpg[*target]);
+ tpg = READ_ONCE(vs_tpg[*target]);
if (unlikely(!tpg)) {
/* Target does not exist, fail the request */
vhost_scsi_send_bad_target(vs, vq, head, out);
diff --git a/fs/aio.c b/fs/aio.c
index 5a2487217072..e6de7715228c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -576,7 +576,7 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
* actually has a cancel function, hence the cmpxchg()
*/
- cancel = ACCESS_ONCE(kiocb->ki_cancel);
+ cancel = READ_ONCE(kiocb->ki_cancel);
do {
if (!cancel || cancel == KIOCB_CANCELLED)
return -EINVAL;
diff --git a/fs/buffer.c b/fs/buffer.c
index 170df856bdb9..32ce01f0f95f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1692,7 +1692,8 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
+ create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
+ b_state);
return page_buffers(page);
}
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index a38630214058..577dfaf0367f 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -374,7 +374,7 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
struct fscrypt_info *prev;
if (ci == NULL)
- ci = ACCESS_ONCE(inode->i_crypt_info);
+ ci = READ_ONCE(inode->i_crypt_info);
if (ci == NULL)
return;
diff --git a/fs/dcache.c b/fs/dcache.c
index f90141387f01..bcc9f6981569 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
{
/*
* Be careful about RCU walk racing with rename:
- * use 'lockless_dereference' to fetch the name pointer.
+ * use 'READ_ONCE' to fetch the name pointer.
*
* NOTE! Even if a rename will mean that the length
* was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
* early because the data cannot match (there can
* be no NUL in the ct/tcount data)
*/
- const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+ const unsigned char *cs = READ_ONCE(dentry->d_name.name);
return dentry_string_cmp(cs, ct, tcount);
}
@@ -630,7 +630,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
rcu_read_lock();
spin_unlock(&dentry->d_lock);
again:
- parent = ACCESS_ONCE(dentry->d_parent);
+ parent = READ_ONCE(dentry->d_parent);
spin_lock(&parent->d_lock);
/*
* We can't blindly lock dentry until we are sure
@@ -721,7 +721,7 @@ static inline bool fast_dput(struct dentry *dentry)
* around with a zero refcount.
*/
smp_rmb();
- d_flags = ACCESS_ONCE(dentry->d_flags);
+ d_flags = READ_ONCE(dentry->d_flags);
d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
/* Nothing to do? Dropping the reference was all we needed? */
@@ -850,11 +850,11 @@ struct dentry *dget_parent(struct dentry *dentry)
* locking.
*/
rcu_read_lock();
- ret = ACCESS_ONCE(dentry->d_parent);
+ ret = READ_ONCE(dentry->d_parent);
gotref = lockref_get_not_zero(&ret->d_lockref);
rcu_read_unlock();
if (likely(gotref)) {
- if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
+ if (likely(ret == READ_ONCE(dentry->d_parent)))
return ret;
dput(ret);
}
@@ -3040,7 +3040,7 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
* @buflen: allocated length of the buffer
* @name: name string and length qstr structure
*
- * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
+ * With RCU path tracing, it may race with d_move(). Use READ_ONCE() to
* make sure that either the old or the new name pointer and length are
* fetched. However, there may be mismatch between length and pointer.
* The length cannot be trusted, we need to copy it byte-by-byte until
@@ -3054,8 +3054,8 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
*/
static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
- const char *dname = ACCESS_ONCE(name->name);
- u32 dlen = ACCESS_ONCE(name->len);
+ const char *dname = READ_ONCE(name->name);
+ u32 dlen = READ_ONCE(name->len);
char *p;
smp_read_barrier_depends();
@@ -3120,7 +3120,7 @@ restart:
struct dentry * parent;
if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
- struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
+ struct mount *parent = READ_ONCE(mnt->mnt_parent);
/* Escaped? */
if (dentry != vfsmnt->mnt_root) {
bptr = *buffer;
@@ -3130,7 +3130,7 @@ restart:
}
/* Global root? */
if (mnt != parent) {
- dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
+ dentry = READ_ONCE(mnt->mnt_mountpoint);
mnt = parent;
vfsmnt = &mnt->mnt;
continue;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b53e66d9abd7..98fe1325da9d 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1152,7 +1152,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
{
- unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
+ unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
unsigned blkbits = i_blkbits;
unsigned blocksize_mask = (1 << blkbits) - 1;
ssize_t retval = -EINVAL;
diff --git a/fs/exec.c b/fs/exec.c
index 3e14ba25f678..1d6243d9f2b6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1911,7 +1911,7 @@ void set_dumpable(struct mm_struct *mm, int value)
return;
do {
- old = ACCESS_ONCE(mm->flags);
+ old = READ_ONCE(mm->flags);
new = (old & ~MMF_DUMPABLE_MASK) | value;
} while (cmpxchg(&mm->flags, old, new) != old);
}
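
The set_dumpable() hunk above is the common read-once-then-cmpxchg retry loop; the same shape appears later in inode_set_flags() and the bitops helpers. A hypothetical userspace version of that loop using GCC's __sync builtin:

#include <stdio.h>

#define my_read_once(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned long word;

static void set_field(unsigned long *p, unsigned long mask, unsigned long val)
{
        unsigned long old, new;

        do {
                old = my_read_once(*p);           /* one snapshot per attempt */
                new = (old & ~mask) | val;        /* derive the update from it */
        } while (__sync_val_compare_and_swap(p, old, new) != old);
}

int main(void)
{
        set_field(&word, 0x7, 0x5);
        printf("word = %#lx\n", word);
        return 0;
}
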
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 8d78ffd7b399..30f47d0f74a0 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -725,7 +725,7 @@ static void send_sigio_to_task(struct task_struct *p,
* F_SETSIG can change ->signum lockless in parallel, make
* sure we read it once and use the same value throughout.
*/
- int signum = ACCESS_ONCE(fown->signum);
+ int signum = READ_ONCE(fown->signum);
if (!sigio_perm(p, fown, signum))
return;
diff --git a/fs/fs_pin.c b/fs/fs_pin.c
index 0d285fd5b44a..a6497cf8ae53 100644
--- a/fs/fs_pin.c
+++ b/fs/fs_pin.c
@@ -79,7 +79,7 @@ void mnt_pin_kill(struct mount *m)
while (1) {
struct hlist_node *p;
rcu_read_lock();
- p = ACCESS_ONCE(m->mnt_pins.first);
+ p = READ_ONCE(m->mnt_pins.first);
if (!p) {
rcu_read_unlock();
break;
@@ -93,7 +93,7 @@ void group_pin_kill(struct hlist_head *p)
while (1) {
struct hlist_node *q;
rcu_read_lock();
- q = ACCESS_ONCE(p->first);
+ q = READ_ONCE(p->first);
if (!q) {
rcu_read_unlock();
break;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 13c65dd2d37d..a42d89371748 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -33,7 +33,7 @@ static struct fuse_dev *fuse_get_dev(struct file *file)
* Lockless access is OK, because file->private data is set
* once during mount and is valid until the file is released.
*/
- return ACCESS_ONCE(file->private_data);
+ return READ_ONCE(file->private_data);
}
static void fuse_request_init(struct fuse_req *req, struct page **pages,
diff --git a/fs/inode.c b/fs/inode.c
index d1e35b53bb23..fd401028a309 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -2090,7 +2090,7 @@ void inode_set_flags(struct inode *inode, unsigned int flags,
WARN_ON_ONCE(flags & ~mask);
do {
- old_flags = ACCESS_ONCE(inode->i_flags);
+ old_flags = READ_ONCE(inode->i_flags);
new_flags = (old_flags & ~mask) | flags;
} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
new_flags) != old_flags));
diff --git a/fs/namei.c b/fs/namei.c
index ed8b9488a890..5424b10cfdc4 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1210,7 +1210,7 @@ static int follow_managed(struct path *path, struct nameidata *nd)
/* Given that we're not holding a lock here, we retain the value in a
* local variable for each dentry as we look at it so that we don't see
* the components of that value change under us */
- while (managed = ACCESS_ONCE(path->dentry->d_flags),
+ while (managed = READ_ONCE(path->dentry->d_flags),
managed &= DCACHE_MANAGED_DENTRY,
unlikely(managed != 0)) {
/* Allow the filesystem to manage the transit without i_mutex
@@ -1395,7 +1395,7 @@ int follow_down(struct path *path)
unsigned managed;
int ret;
- while (managed = ACCESS_ONCE(path->dentry->d_flags),
+ while (managed = READ_ONCE(path->dentry->d_flags),
unlikely(managed & DCACHE_MANAGED_DENTRY)) {
/* Allow the filesystem to manage the transit without i_mutex
* being held.
diff --git a/fs/namespace.c b/fs/namespace.c
index d18deb4c410b..e158ec6b527b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -353,7 +353,7 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+ while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
cpu_relax();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index b5ec1d980dc9..0c57c5c5d40a 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -120,10 +120,6 @@ static inline int ncp_case_sensitive(const struct inode *i)
/*
* Note: leave the hash unchanged if the directory
* is case-sensitive.
- *
- * Accessing the parent inode can be racy under RCU pathwalking.
- * Use ACCESS_ONCE() to make sure we use _one_ particular inode,
- * the callers will handle races.
*/
static int
ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
@@ -148,11 +144,6 @@ ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
return 0;
}
-/*
- * Accessing the parent inode can be racy under RCU pathwalking.
- * Use ACCESS_ONCE() to make sure we use _one_ particular inode,
- * the callers will handle races.
- */
static int
ncp_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 5ceaeb1f6fb6..f439f1c45008 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1081,7 +1081,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
int error;
if (flags & LOOKUP_RCU) {
- parent = ACCESS_ONCE(dentry->d_parent);
+ parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
@@ -1168,7 +1168,7 @@ out_set_verifier:
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out_valid:
if (flags & LOOKUP_RCU) {
- if (parent != ACCESS_ONCE(dentry->d_parent))
+ if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
} else
dput(parent);
@@ -1582,7 +1582,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
struct inode *dir;
if (flags & LOOKUP_RCU) {
- parent = ACCESS_ONCE(dentry->d_parent);
+ parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
@@ -1596,7 +1596,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
ret = -ECHILD;
if (!(flags & LOOKUP_RCU))
dput(parent);
- else if (parent != ACCESS_ONCE(dentry->d_parent))
+ else if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
goto out;
}
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 25d9b5adcd42..36b49bd09264 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
{
- return lockless_dereference(oi->__upperdentry);
+ return READ_ONCE(oi->__upperdentry);
}
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 698b74dd750e..c310e3ff7f3f 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -754,7 +754,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
struct inode *inode = file_inode(file);
- realfile = lockless_dereference(od->upperfile);
+ realfile = READ_ONCE(od->upperfile);
if (!realfile) {
struct path upperpath;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 9390032a11e1..d82549e80402 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -454,7 +454,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
cutime = sig->cutime;
cstime = sig->cstime;
cgtime = sig->cgtime;
- rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
+ rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
/* add up live thread stats at the group level */
if (whole) {
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 7626ee11b06c..7b635d173213 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -28,7 +28,7 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
poll_wait(file, &p->ns->poll, wait);
- event = ACCESS_ONCE(ns->event);
+ event = READ_ONCE(ns->event);
if (m->poll_event != event) {
m->poll_event = event;
res |= POLLERR | POLLPRI;
diff --git a/fs/readdir.c b/fs/readdir.c
index d336db65a33e..1b83b0ad183b 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -37,13 +37,12 @@ int iterate_dir(struct file *file, struct dir_context *ctx)
if (res)
goto out;
- if (shared) {
- inode_lock_shared(inode);
- } else {
+ if (shared)
+ res = down_read_killable(&inode->i_rwsem);
+ else
res = down_write_killable(&inode->i_rwsem);
- if (res)
- goto out;
- }
+ if (res)
+ goto out;
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
diff --git a/fs/splice.c b/fs/splice.c
index f3084cce0ea6..39e2dc01ac12 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -253,7 +253,7 @@ EXPORT_SYMBOL(add_to_pipe);
*/
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
- unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+ unsigned int buffers = READ_ONCE(pipe->buffers);
spd->nr_pages_max = buffers;
if (buffers <= PIPE_DEF_BUFFERS)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1c713fd5b3e6..f46d133c0949 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -381,7 +381,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
* in __get_user_pages if userfaultfd_release waits on the
* caller of handle_userfault to release the mmap_sem.
*/
- if (unlikely(ACCESS_ONCE(ctx->released))) {
+ if (unlikely(READ_ONCE(ctx->released))) {
/*
* Don't return VM_FAULT_SIGBUS in this case, so a non
* cooperative manager can close the uffd after the
@@ -477,7 +477,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
vmf->flags, reason);
up_read(&mm->mmap_sem);
- if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
+ if (likely(must_wait && !READ_ONCE(ctx->released) &&
(return_to_userland ? !signal_pending(current) :
!fatal_signal_pending(current)))) {
wake_up_poll(&ctx->fd_wqh, POLLIN);
@@ -586,7 +586,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
set_current_state(TASK_KILLABLE);
if (ewq->msg.event == 0)
break;
- if (ACCESS_ONCE(ctx->released) ||
+ if (READ_ONCE(ctx->released) ||
fatal_signal_pending(current)) {
/*
* &ewq->wq may be queued in fork_event, but
@@ -833,7 +833,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
- ACCESS_ONCE(ctx->released) = true;
+ WRITE_ONCE(ctx->released, true);
if (!mmget_not_zero(mm))
goto wakeup;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 51bf7b827387..129975970d99 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -592,9 +592,9 @@ xlog_valid_lsn(
* a transiently forward state. Instead, we can see the LSN in a
* transiently behind state if we happen to race with a cycle wrap.
*/
- cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
+ cur_cycle = READ_ONCE(log->l_curr_cycle);
smp_rmb();
- cur_block = ACCESS_ONCE(log->l_curr_block);
+ cur_block = READ_ONCE(log->l_curr_block);
if ((CYCLE_LSN(lsn) > cur_cycle) ||
(CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
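
The xlog_valid_lsn() hunk keeps the smp_rmb() between the two READ_ONCE() loads so the cycle/block pair is sampled in a fixed order. A rough userspace C11 analogue of such fence pairing (the field names and single-threaded driver are illustrative only; the real xfs update order is not shown here):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int cycle, block;

static void writer(void)
{
        atomic_store_explicit(&block, 7, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);   /* pairs with the reader fence */
        atomic_store_explicit(&cycle, 2, memory_order_relaxed);
}

static void reader(void)
{
        int c = atomic_load_explicit(&cycle, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);   /* plays the role of smp_rmb() */
        int b = atomic_load_explicit(&block, memory_order_relaxed);

        printf("cycle=%d block=%d\n", c, b);
}

int main(void)
{
        writer();
        reader();
        return 0;
}
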
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 49be4bba1e96..34a028a7bcc5 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -244,4 +244,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#define atomic_long_inc_not_zero(l) \
ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
+#define atomic_long_cond_read_acquire(v, c) \
+ ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
+
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 7d026bf27713..0f7062bd55e5 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -26,51 +26,20 @@
/*
* Writer states & reader shift and bias.
- *
- * | +0 | +1 | +2 | +3 |
- * ----+----+----+----+----+
- * LE | 78 | 56 | 34 | 12 | 0x12345678
- * ----+----+----+----+----+
- * | wr | rd |
- * +----+----+----+----+
- *
- * ----+----+----+----+----+
- * BE | 12 | 34 | 56 | 78 | 0x12345678
- * ----+----+----+----+----+
- * | rd | wr |
- * +----+----+----+----+
*/
-#define _QW_WAITING 1 /* A writer is waiting */
-#define _QW_LOCKED 0xff /* A writer holds the lock */
-#define _QW_WMASK 0xff /* Writer mask */
-#define _QR_SHIFT 8 /* Reader count shift */
+#define _QW_WAITING 0x100 /* A writer is waiting */
+#define _QW_LOCKED 0x0ff /* A writer holds the lock */
+#define _QW_WMASK 0x1ff /* Writer mask */
+#define _QR_SHIFT 9 /* Reader count shift */
#define _QR_BIAS (1U << _QR_SHIFT)
/*
* External function declarations
*/
-extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
/**
- * queued_read_can_lock- would read_trylock() succeed?
- * @lock: Pointer to queue rwlock structure
- */
-static inline int queued_read_can_lock(struct qrwlock *lock)
-{
- return !(atomic_read(&lock->cnts) & _QW_WMASK);
-}
-
-/**
- * queued_write_can_lock- would write_trylock() succeed?
- * @lock: Pointer to queue rwlock structure
- */
-static inline int queued_write_can_lock(struct qrwlock *lock)
-{
- return !atomic_read(&lock->cnts);
-}
-
-/**
* queued_read_trylock - try to acquire read lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
* Return: 1 if lock acquired, 0 if failed
@@ -118,7 +87,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
return;
/* The slowpath will decrement the reader count, if necessary. */
- queued_read_lock_slowpath(lock, cnts);
+ queued_read_lock_slowpath(lock);
}
/**
@@ -147,30 +116,18 @@ static inline void queued_read_unlock(struct qrwlock *lock)
}
/**
- * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
- * @lock : Pointer to queue rwlock structure
- * Return: the write byte address of a queue rwlock
- */
-static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
-{
- return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
-}
-
-/**
* queued_write_unlock - release write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
static inline void queued_write_unlock(struct qrwlock *lock)
{
- smp_store_release(__qrwlock_write_byte(lock), 0);
+ smp_store_release(&lock->wlocked, 0);
}
/*
* Remapping rwlock architecture specific functions to the corresponding
* queue rwlock functions.
*/
-#define arch_read_can_lock(l) queued_read_can_lock(l)
-#define arch_write_can_lock(l) queued_write_can_lock(l)
#define arch_read_lock(l) queued_read_lock(l)
#define arch_write_lock(l) queued_write_lock(l)
#define arch_read_trylock(l) queued_read_trylock(l)
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index d93573eff162..137ecdd16daa 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -10,12 +10,23 @@
*/
typedef struct qrwlock {
- atomic_t cnts;
+ union {
+ atomic_t cnts;
+ struct {
+#ifdef __LITTLE_ENDIAN
+ u8 wlocked; /* Locked for write? */
+ u8 __lstate[3];
+#else
+ u8 __lstate[3];
+ u8 wlocked; /* Locked for write? */
+#endif
+ };
+ };
arch_spinlock_t wait_lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { \
- .cnts = ATOMIC_INIT(0), \
+ { .cnts = ATOMIC_INIT(0), }, \
.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
}
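
The reworked qrwlock layout above overlays a writer byte on the combined count word, which is what lets queued_write_unlock() become a plain smp_store_release() of lock->wlocked. A small endian-dependent sketch of that aliasing (assumes a little-endian host; qrw_sketch is an illustrative name):

#include <stdint.h>
#include <stdio.h>

union qrw_sketch {
        uint32_t cnts;                  /* whole lock word */
        struct {
                uint8_t wlocked;        /* low byte on little-endian */
                uint8_t lstate[3];
        };
};

int main(void)
{
        union qrw_sketch lock = { .cnts = 0x1ff };   /* _QW_WMASK-style pattern */

        /* On little-endian, clearing wlocked releases only the writer byte. */
        printf("wlocked=%#x cnts=%#x\n", lock.wlocked, lock.cnts);
        lock.wlocked = 0;
        printf("after unlock: cnts=%#x\n", lock.cnts);
        return 0;
}
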
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 66260777d644..b37b4ad7eb94 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -121,6 +121,5 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
#define arch_spin_lock(l) queued_spin_lock(l)
#define arch_spin_trylock(l) queued_spin_trylock(l)
#define arch_spin_unlock(l) queued_spin_unlock(l)
-#define arch_spin_lock_flags(l, f) queued_spin_lock(l)
#endif /* __ASM_GENERIC_QSPINLOCK_H */
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index bdbe43bac230..93e67a055a4d 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -38,6 +38,16 @@ static inline void __down_read(struct rw_semaphore *sem)
rwsem_down_read_failed(sem);
}
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+ }
+
+ return 0;
+}
+
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
long tmp;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8acfc1e099e1..e549bff87c5b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -459,6 +459,7 @@
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.text..refcount) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index cd18203d6ff3..8b276fd9a127 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -654,6 +654,8 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
@@ -1073,6 +1075,8 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v
}
#endif
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+
#include <asm-generic/atomic-long.h>
#endif /* _LINUX_ATOMIC_H */
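
The new atomic_cond_read_acquire()/atomic64_cond_read_acquire() wrappers above map onto smp_cond_load_acquire(): spin re-reading the counter with acquire semantics until a condition on the loaded value holds. A hypothetical C11 userspace analogue for a non-zero condition:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int counter;

static int cond_read_acquire_nonzero(atomic_int *v)
{
        int val;

        /* The kernel helper would cpu_relax() in this loop. */
        while (!(val = atomic_load_explicit(v, memory_order_acquire)))
                ;
        return val;
}

int main(void)
{
        atomic_store_explicit(&counter, 3, memory_order_release);
        printf("observed %d\n", cond_read_acquire_nonzero(&counter));
        return 0;
}
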
diff --git a/include/linux/average.h b/include/linux/average.h
index 1b6f5560c264..a1a8f09631ce 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -2,6 +2,10 @@
#ifndef _LINUX_AVERAGE_H
#define _LINUX_AVERAGE_H
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
/*
* Exponentially weighted moving average (EWMA)
*
@@ -49,7 +53,7 @@
static inline void ewma_##name##_add(struct ewma_##name *e, \
unsigned long val) \
{ \
- unsigned long internal = ACCESS_ONCE(e->internal); \
+ unsigned long internal = READ_ONCE(e->internal); \
unsigned long weight_rcp = ilog2(_weight_rcp); \
unsigned long precision = _precision; \
\
@@ -58,10 +62,10 @@
BUILD_BUG_ON((_precision) > 30); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
\
- ACCESS_ONCE(e->internal) = internal ? \
+ WRITE_ONCE(e->internal, internal ? \
(((internal << weight_rcp) - internal) + \
(val << precision)) >> weight_rcp : \
- (val << precision); \
+ (val << precision)); \
}
#endif /* _LINUX_AVERAGE_H */
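
The ewma_##name##_add() change above only swaps the ACCESS_ONCE() accessors for READ_ONCE()/WRITE_ONCE(); the fixed-point update itself is unchanged. A plain userspace rendering of that update (the weight_rcp_log2 and precision values are arbitrary examples):

#include <stdio.h>

static unsigned long ewma_internal;

static void ewma_add(unsigned long val, unsigned int weight_rcp_log2,
                     unsigned int precision)
{
        unsigned long internal = ewma_internal;

        /* new = old * (w-1)/w + sample/w, everything scaled by 2^precision */
        ewma_internal = internal ?
                (((internal << weight_rcp_log2) - internal) +
                 (val << precision)) >> weight_rcp_log2 :
                (val << precision);
}

int main(void)
{
        ewma_add(100, 2, 4);    /* first sample just seeds the average */
        ewma_add(50, 2, 4);
        printf("ewma (scaled by 16) = %lu\n", ewma_internal);
        return 0;
}
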
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index d03c5dd6185d..c537ac7435ad 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -237,7 +237,7 @@ static inline unsigned long __ffs64(u64 word)
typeof(*ptr) old, new; \
\
do { \
- old = ACCESS_ONCE(*ptr); \
+ old = READ_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
\
@@ -252,7 +252,7 @@ static inline unsigned long __ffs64(u64 word)
typeof(*ptr) old, new; \
\
do { \
- old = ACCESS_ONCE(*ptr); \
+ old = READ_ONCE(*ptr); \
new = old & ~clear; \
} while (!(old & test) && \
cmpxchg(ptr, old, new) != old); \
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 54dfef70a072..a06583e41f80 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_COMPILER_H
+#ifndef __LINUX_COMPILER_TYPES_H
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index bb78e5bdff26..2272ded07496 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_COMPILER_H
+#ifndef __LINUX_COMPILER_TYPES_H
#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
#endif
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 523d1b74550f..bfa08160db3a 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_COMPILER_H
+#ifndef __LINUX_COMPILER_TYPES_H
#error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead."
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 202710420d6d..3672353a0acd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -2,111 +2,12 @@
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H
-#ifndef __ASSEMBLY__
+#include <linux/compiler_types.h>
-#ifdef __CHECKER__
-# define __user __attribute__((noderef, address_space(1)))
-# define __kernel __attribute__((address_space(0)))
-# define __safe __attribute__((safe))
-# define __force __attribute__((force))
-# define __nocast __attribute__((nocast))
-# define __iomem __attribute__((noderef, address_space(2)))
-# define __must_hold(x) __attribute__((context(x,1,1)))
-# define __acquires(x) __attribute__((context(x,0,1)))
-# define __releases(x) __attribute__((context(x,1,0)))
-# define __acquire(x) __context__(x,1)
-# define __release(x) __context__(x,-1)
-# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu __attribute__((noderef, address_space(3)))
-# define __rcu __attribute__((noderef, address_space(4)))
-# define __private __attribute__((noderef))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
-# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
-#else /* __CHECKER__ */
-# ifdef STRUCTLEAK_PLUGIN
-# define __user __attribute__((user))
-# else
-# define __user
-# endif
-# define __kernel
-# define __safe
-# define __force
-# define __nocast
-# define __iomem
-# define __chk_user_ptr(x) (void)0
-# define __chk_io_ptr(x) (void)0
-# define __builtin_warning(x, y...) (1)
-# define __must_hold(x)
-# define __acquires(x)
-# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
-# define __cond_lock(x,c) (c)
-# define __percpu
-# define __rcu
-# define __private
-# define ACCESS_PRIVATE(p, member) ((p)->member)
-#endif /* __CHECKER__ */
-
-/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
-#define ___PASTE(a,b) a##b
-#define __PASTE(a,b) ___PASTE(a,b)
+#ifndef __ASSEMBLY__
#ifdef __KERNEL__
-#ifdef __GNUC__
-#include <linux/compiler-gcc.h>
-#endif
-
-#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
-#define notrace __attribute__((hotpatch(0,0)))
-#else
-#define notrace __attribute__((no_instrument_function))
-#endif
-
-/* Intel compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __INTEL_COMPILER
-# include <linux/compiler-intel.h>
-#endif
-
-/* Clang compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __clang__
-#include <linux/compiler-clang.h>
-#endif
-
-/*
- * Generic compiler-dependent macros required for kernel
- * build go below this comment. Actual compiler/compiler version
- * specific implementations come from the above header files
- */
-
-struct ftrace_branch_data {
- const char *func;
- const char *file;
- unsigned line;
- union {
- struct {
- unsigned long correct;
- unsigned long incorrect;
- };
- struct {
- unsigned long miss;
- unsigned long hit;
- };
- unsigned long miss_hit[2];
- };
-};
-
-struct ftrace_likely_data {
- struct ftrace_branch_data data;
- unsigned long constant;
-};
-
/*
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
@@ -333,6 +234,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*/
+#include <asm/barrier.h>
#define __READ_ONCE(x, check) \
({ \
@@ -341,6 +243,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
__read_once_size(&(x), __u.__c, sizeof(x)); \
else \
__read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
+ smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
__u.__val; \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
@@ -363,167 +266,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#endif /* __ASSEMBLY__ */
-#ifdef __KERNEL__
-/*
- * Allow us to mark functions as 'deprecated' and have gcc emit a nice
- * warning for each use, in hopes of speeding the functions removal.
- * Usage is:
- * int __deprecated foo(void)
- */
-#ifndef __deprecated
-# define __deprecated /* unimplemented */
-#endif
-
-#ifdef MODULE
-#define __deprecated_for_modules __deprecated
-#else
-#define __deprecated_for_modules
-#endif
-
-#ifndef __must_check
-#define __must_check
-#endif
-
-#ifndef CONFIG_ENABLE_MUST_CHECK
-#undef __must_check
-#define __must_check
-#endif
-#ifndef CONFIG_ENABLE_WARN_DEPRECATED
-#undef __deprecated
-#undef __deprecated_for_modules
-#define __deprecated
-#define __deprecated_for_modules
-#endif
-
-#ifndef __malloc
-#define __malloc
-#endif
-
-/*
- * Allow us to avoid 'defined but not used' warnings on functions and data,
- * as well as force them to be emitted to the assembly file.
- *
- * As of gcc 3.4, static functions that are not marked with attribute((used))
- * may be elided from the assembly file. As of gcc 3.4, static data not so
- * marked will not be elided, but this may change in a future gcc version.
- *
- * NOTE: Because distributions shipped with a backported unit-at-a-time
- * compiler in gcc 3.3, we must define __used to be __attribute__((used))
- * for gcc >=3.3 instead of 3.4.
- *
- * In prior versions of gcc, such functions and data would be emitted, but
- * would be warned about except with attribute((unused)).
- *
- * Mark functions that are referenced only in inline assembly as __used so
- * the code is emitted even though it appears to be unreferenced.
- */
-#ifndef __used
-# define __used /* unimplemented */
-#endif
-
-#ifndef __maybe_unused
-# define __maybe_unused /* unimplemented */
-#endif
-
-#ifndef __always_unused
-# define __always_unused /* unimplemented */
-#endif
-
-#ifndef noinline
-#define noinline
-#endif
-
-/*
- * Rather then using noinline to prevent stack consumption, use
- * noinline_for_stack instead. For documentation reasons.
- */
-#define noinline_for_stack noinline
-
-#ifndef __always_inline
-#define __always_inline inline
-#endif
-
-#endif /* __KERNEL__ */
-
-/*
- * From the GCC manual:
- *
- * Many functions do not examine any values except their arguments,
- * and have no effects except the return value. Basically this is
- * just slightly more strict class than the `pure' attribute above,
- * since function is not allowed to read global memory.
- *
- * Note that a function that has pointer arguments and examines the
- * data pointed to must _not_ be declared `const'. Likewise, a
- * function that calls a non-`const' function usually must not be
- * `const'. It does not make sense for a `const' function to return
- * `void'.
- */
-#ifndef __attribute_const__
-# define __attribute_const__ /* unimplemented */
-#endif
-
-#ifndef __designated_init
-# define __designated_init
-#endif
-
-#ifndef __latent_entropy
-# define __latent_entropy
-#endif
-
-#ifndef __randomize_layout
-# define __randomize_layout __designated_init
-#endif
-
-#ifndef __no_randomize_layout
-# define __no_randomize_layout
-#endif
-
-#ifndef randomized_struct_fields_start
-# define randomized_struct_fields_start
-# define randomized_struct_fields_end
-#endif
-
-/*
- * Tell gcc if a function is cold. The compiler will assume any path
- * directly leading to the call is unlikely.
- */
-
-#ifndef __cold
-#define __cold
-#endif
-
-/* Simple shorthand for a section definition */
-#ifndef __section
-# define __section(S) __attribute__ ((__section__(#S)))
-#endif
-
-#ifndef __visible
-#define __visible
-#endif
-
-#ifndef __nostackprotector
-# define __nostackprotector
-#endif
-
-/*
- * Assume alignment of return value.
- */
-#ifndef __assume_aligned
-#define __assume_aligned(a, ...)
-#endif
-
-
-/* Are two types/vars the same type (ignoring qualifiers)? */
-#ifndef __same_type
-# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
-#endif
-
-/* Is this type a native word size -- useful for atomic operations */
-#ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-#endif
-
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
@@ -605,24 +347,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
-/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
- *
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU. That
- * "something other" might be reference counting or simple immortality.
- *
- * The seemingly unused variable ___typecheck_p validates that @p is
- * indeed a pointer type by using a pointer to typeof(*p) as the type.
- * Taking a pointer to typeof(*p) again is needed in case p is void *.
- */
-#define lockless_dereference(p) \
-({ \
- typeof(p) _________p1 = READ_ONCE(p); \
- typeof(*(p)) *___typecheck_p __maybe_unused; \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- (_________p1); \
-})
-
#endif /* __LINUX_COMPILER_H */
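
With smp_read_barrier_depends() folded into __READ_ONCE() above, READ_ONCE() now provides the dependency ordering that lockless_dereference() used to add, which is why the helper is removed here and its users (dcache, overlayfs, rculist, rcupdate) are converted. A userspace C11 sketch of the underlying publish/consume pattern (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct item {
        int payload;
};

static struct item slot = { .payload = 42 };
static _Atomic(struct item *) published;

int main(void)
{
        /* Publisher: the payload is initialised before the pointer is released. */
        atomic_store_explicit(&published, &slot, memory_order_release);

        /* Consumer: one marked load of the pointer orders the dereference. */
        struct item *p = atomic_load_explicit(&published, memory_order_consume);
        if (p)
                printf("payload = %d\n", p->payload);
        return 0;
}
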
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
new file mode 100644
index 000000000000..6b79a9bba9a7
--- /dev/null
+++ b/include/linux/compiler_types.h
@@ -0,0 +1,274 @@
+#ifndef __LINUX_COMPILER_TYPES_H
+#define __LINUX_COMPILER_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef __CHECKER__
+# define __user __attribute__((noderef, address_space(1)))
+# define __kernel __attribute__((address_space(0)))
+# define __safe __attribute__((safe))
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __iomem __attribute__((noderef, address_space(2)))
+# define __must_hold(x) __attribute__((context(x,1,1)))
+# define __acquires(x) __attribute__((context(x,0,1)))
+# define __releases(x) __attribute__((context(x,1,0)))
+# define __acquire(x) __context__(x,1)
+# define __release(x) __context__(x,-1)
+# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu __attribute__((noderef, address_space(3)))
+# define __rcu __attribute__((noderef, address_space(4)))
+# define __private __attribute__((noderef))
+extern void __chk_user_ptr(const volatile void __user *);
+extern void __chk_io_ptr(const volatile void __iomem *);
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
+# ifdef STRUCTLEAK_PLUGIN
+# define __user __attribute__((user))
+# else
+# define __user
+# endif
+# define __kernel
+# define __safe
+# define __force
+# define __nocast
+# define __iomem
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+# define __builtin_warning(x, y...) (1)
+# define __must_hold(x)
+# define __acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x,c) (c)
+# define __percpu
+# define __rcu
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+#endif /* __CHECKER__ */
+
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
+#ifdef __KERNEL__
+
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
+#define notrace __attribute__((hotpatch(0,0)))
+#else
+#define notrace __attribute__((no_instrument_function))
+#endif
+
+/* Intel compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+ */
+#ifdef __INTEL_COMPILER
+# include <linux/compiler-intel.h>
+#endif
+
+/* Clang compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+ */
+#ifdef __clang__
+#include <linux/compiler-clang.h>
+#endif
+
+/*
+ * Generic compiler-dependent macros required for kernel
+ * build go below this comment. Actual compiler/compiler version
+ * specific implementations come from the above header files
+ */
+
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
+
+struct ftrace_likely_data {
+ struct ftrace_branch_data data;
+ unsigned long constant;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+/*
+ * Allow us to mark functions as 'deprecated' and have gcc emit a nice
+ * warning for each use, in hopes of speeding the functions removal.
+ * Usage is:
+ * int __deprecated foo(void)
+ */
+#ifndef __deprecated
+# define __deprecated /* unimplemented */
+#endif
+
+#ifdef MODULE
+#define __deprecated_for_modules __deprecated
+#else
+#define __deprecated_for_modules
+#endif
+
+#ifndef __must_check
+#define __must_check
+#endif
+
+#ifndef CONFIG_ENABLE_MUST_CHECK
+#undef __must_check
+#define __must_check
+#endif
+#ifndef CONFIG_ENABLE_WARN_DEPRECATED
+#undef __deprecated
+#undef __deprecated_for_modules
+#define __deprecated
+#define __deprecated_for_modules
+#endif
+
+#ifndef __malloc
+#define __malloc
+#endif
+
+/*
+ * Allow us to avoid 'defined but not used' warnings on functions and data,
+ * as well as force them to be emitted to the assembly file.
+ *
+ * As of gcc 3.4, static functions that are not marked with attribute((used))
+ * may be elided from the assembly file. As of gcc 3.4, static data not so
+ * marked will not be elided, but this may change in a future gcc version.
+ *
+ * NOTE: Because distributions shipped with a backported unit-at-a-time
+ * compiler in gcc 3.3, we must define __used to be __attribute__((used))
+ * for gcc >=3.3 instead of 3.4.
+ *
+ * In prior versions of gcc, such functions and data would be emitted, but
+ * would be warned about except with attribute((unused)).
+ *
+ * Mark functions that are referenced only in inline assembly as __used so
+ * the code is emitted even though it appears to be unreferenced.
+ */
+#ifndef __used
+# define __used /* unimplemented */
+#endif
+
+#ifndef __maybe_unused
+# define __maybe_unused /* unimplemented */
+#endif
+
+#ifndef __always_unused
+# define __always_unused /* unimplemented */
+#endif
+
+#ifndef noinline
+#define noinline
+#endif
+
+/*
+ * Rather then using noinline to prevent stack consumption, use
+ * noinline_for_stack instead. For documentation reasons.
+ */
+#define noinline_for_stack noinline
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+
+#endif /* __KERNEL__ */
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions do not examine any values except their arguments,
+ * and have no effects except the return value. Basically this is
+ * just slightly more strict class than the `pure' attribute above,
+ * since function is not allowed to read global memory.
+ *
+ * Note that a function that has pointer arguments and examines the
+ * data pointed to must _not_ be declared `const'. Likewise, a
+ * function that calls a non-`const' function usually must not be
+ * `const'. It does not make sense for a `const' function to return
+ * `void'.
+ */
+#ifndef __attribute_const__
+# define __attribute_const__ /* unimplemented */
+#endif
+
+#ifndef __designated_init
+# define __designated_init
+#endif
+
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
+#ifndef __randomize_layout
+# define __randomize_layout __designated_init
+#endif
+
+#ifndef __no_randomize_layout
+# define __no_randomize_layout
+#endif
+
+#ifndef randomized_struct_fields_start
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
+
+/*
+ * Tell gcc if a function is cold. The compiler will assume any path
+ * directly leading to the call is unlikely.
+ */
+
+#ifndef __cold
+#define __cold
+#endif
+
+/* Simple shorthand for a section definition */
+#ifndef __section
+# define __section(S) __attribute__ ((__section__(#S)))
+#endif
+
+#ifndef __visible
+#define __visible
+#endif
+
+#ifndef __nostackprotector
+# define __nostackprotector
+#endif
+
+/*
+ * Assume alignment of return value.
+ */
+#ifndef __assume_aligned
+#define __assume_aligned(a, ...)
+#endif
+
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
+/* Is this type a native word size -- useful for atomic operations */
+#ifndef __native_word
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#endif
+
+#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 7828451e161a..0662a417febe 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -50,15 +50,23 @@ static inline void complete_release_commit(struct completion *x)
lock_commit_crosslock((struct lockdep_map *)&x->map);
}
+#define init_completion_map(x, m) \
+do { \
+ lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
+ (m)->name, (m)->key, 0); \
+ __init_completion(x); \
+} while (0)
+
#define init_completion(x) \
do { \
static struct lock_class_key __key; \
lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
- "(complete)" #x, \
+ "(completion)" #x, \
&__key, 0); \
__init_completion(x); \
} while (0)
#else
+#define init_completion_map(x, m) __init_completion(x)
#define init_completion(x) __init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
@@ -68,12 +76,15 @@ static inline void complete_release_commit(struct completion *x) {}
#ifdef CONFIG_LOCKDEP_COMPLETIONS
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
- STATIC_CROSS_LOCKDEP_MAP_INIT("(complete)" #work, &(work)) }
+ STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
#else
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#endif
+#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
+ (*({ init_completion_map(&(work), &(map)); &(work); }))
+
#define COMPLETION_INITIALIZER_ONSTACK(work) \
(*({ init_completion(&work); &work; }))
@@ -103,8 +114,11 @@ static inline void complete_release_commit(struct completion *x) {}
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
+# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \
+ struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
#else
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work)
#endif
/**
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f05a659cdf34..65cd8ab60b7a 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -520,7 +520,7 @@ static inline struct inode *d_inode(const struct dentry *dentry)
}
/**
- * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE()
* @dentry: The dentry to query
*
* This is the helper normal filesystems should use to get at their own inodes
@@ -528,7 +528,7 @@ static inline struct inode *d_inode(const struct dentry *dentry)
*/
static inline struct inode *d_inode_rcu(const struct dentry *dentry)
{
- return ACCESS_ONCE(dentry->d_inode);
+ return READ_ONCE(dentry->d_inode);
}
/**
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 34c0a5464c74..023eae69398c 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -89,7 +89,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
- return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
+ return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued);
}
/* Record number of completed objects and recalculate the limit. */
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index b96dd4e1e663..ecc2928e8046 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -31,7 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq;
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * both the smp_read_barrier_depends() and the READ_ONCE(), because
* caller holds genl mutex.
*/
#define genl_dereference(p) \
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 44790523057f..eaefb7a62f83 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -207,6 +207,7 @@ struct gendisk {
#endif /* CONFIG_BLK_DEV_INTEGRITY */
int node_id;
struct badblocks *bb;
+ struct lockdep_map lockdep_map;
};
static inline struct gendisk *part_to_disk(struct hd_struct *part)
@@ -591,8 +592,7 @@ extern void __delete_partition(struct percpu_ref *);
extern void delete_partition(struct gendisk *, int);
extern void printk_all_partitions(void);
-extern struct gendisk *alloc_disk_node(int minors, int node_id);
-extern struct gendisk *alloc_disk(int minors);
+extern struct gendisk *__alloc_disk_node(int minors, int node_id);
extern struct kobject *get_disk(struct gendisk *disk);
extern void put_disk(struct gendisk *disk);
extern void blk_register_region(dev_t devt, unsigned long range,
@@ -616,6 +616,24 @@ extern ssize_t part_fail_store(struct device *dev,
const char *buf, size_t count);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
+#define alloc_disk_node(minors, node_id) \
+({ \
+ static struct lock_class_key __key; \
+ const char *__name; \
+ struct gendisk *__disk; \
+ \
+ __name = "(gendisk_completion)"#minors"("#node_id")"; \
+ \
+ __disk = __alloc_disk_node(minors, node_id); \
+ \
+ if (__disk) \
+ lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
+ \
+ __disk; \
+})
+
+#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
+
static inline int hd_ref_init(struct hd_struct *part)
{
if (percpu_ref_init(&part->ref, __delete_partition, 0,
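
The alloc_disk_node() wrapper above relies on a GCC statement expression so each call site gets its own static lock_class_key and lockdep name. A toy userspace illustration of that per-call-site pattern (counted_alloc is a made-up example, not a kernel API):

#include <stdio.h>
#include <stdlib.h>

#define counted_alloc(sz)                                              \
({                                                                     \
        static int __callsite_hits;   /* one counter per call site */  \
        __callsite_hits++;                                             \
        printf("this call site used %d time(s)\n", __callsite_hits);   \
        malloc(sz);                                                    \
})

int main(void)
{
        void *a = counted_alloc(16);    /* call site 1 */
        void *b = counted_alloc(32);    /* call site 2, separate counter */

        free(a);
        free(b);
        return 0;
}
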
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 87067d23a48b..a8a126259bc4 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -222,7 +222,7 @@ extern struct page *huge_zero_page;
static inline bool is_huge_zero_page(struct page *page)
{
- return ACCESS_ONCE(huge_zero_page) == page;
+ return READ_ONCE(huge_zero_page) == page;
}
static inline bool is_huge_zero_pmd(pmd_t pmd)
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 30294603526f..d95cae09dea0 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -247,7 +247,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
- int en_port_count = ACCESS_ONCE(team->en_port_count);
+ int en_port_count = READ_ONCE(team->en_port_count);
if (unlikely(!en_port_count))
return 0;
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 3b7675bcca64..c7b368c734af 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -82,9 +82,9 @@
extern bool static_key_initialized;
-#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
- "%s used before call to jump_label_init", \
- __func__)
+#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \
+ "%s(): static key '%pS' used before call to jump_label_init()", \
+ __func__, (key))
#ifdef HAVE_JUMP_LABEL
@@ -212,13 +212,13 @@ static __always_inline bool static_key_true(struct static_key *key)
static inline void static_key_slow_inc(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
atomic_inc(&key->enabled);
}
static inline void static_key_slow_dec(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
atomic_dec(&key->enabled);
}
@@ -237,7 +237,7 @@ static inline int jump_label_apply_nops(struct module *mod)
static inline void static_key_enable(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) != 0) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
@@ -248,7 +248,7 @@ static inline void static_key_enable(struct static_key *key)
static inline void static_key_disable(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) != 1) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index fc13ff289903..baa8eabbaa56 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -25,18 +25,18 @@ struct static_key_deferred {
};
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
static_key_slow_dec(&key->key);
}
static inline void static_key_deferred_flush(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
}
#endif /* HAVE_JUMP_LABEL */
#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 2e6f90bd52aa..f68db9e450eb 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_LINKAGE_H
#define _LINUX_LINKAGE_H
-#include <linux/compiler.h>
+#include <linux/compiler_types.h>
#include <linux/stringify.h>
#include <linux/export.h>
#include <asm/linkage.h>
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 1957635e6d5f..85abc2915e8d 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -198,7 +198,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
static inline bool llist_empty(const struct llist_head *head)
{
- return ACCESS_ONCE(head->first) == NULL;
+ return READ_ONCE(head->first) == NULL;
}
static inline struct llist_node *llist_next(struct llist_node *node)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f301d31b473c..a842551fe044 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -528,6 +528,11 @@ static inline void lockdep_on(void)
*/
struct lock_class_key { };
+/*
+ * The lockdep_map takes no space if lockdep is disabled:
+ */
+struct lockdep_map { };
+
#define lockdep_depth(tsk) (0)
#define lockdep_is_held_type(l, r) (1)
@@ -720,9 +725,24 @@ do { \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
+
+#define lockdep_assert_irqs_enabled() do { \
+ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
+ !current->hardirqs_enabled, \
+ "IRQs not enabled as expected\n"); \
+ } while (0)
+
+#define lockdep_assert_irqs_disabled() do { \
+ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
+ current->hardirqs_enabled, \
+ "IRQs not disabled as expected\n"); \
+ } while (0)
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
+# define lockdep_assert_irqs_enabled() do { } while (0)
+# define lockdep_assert_irqs_disabled() do { } while (0)
#endif
#ifdef CONFIG_LOCKDEP
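
The new lockdep_assert_irqs_enabled()/lockdep_assert_irqs_disabled() helpers above are built on WARN_ONCE(): check a predicate and complain only the first time it fails. A hypothetical userspace rendering of that warn-once shape:

#include <stdbool.h>
#include <stdio.h>

#define warn_once(cond, msg)                                   \
do {                                                           \
        static bool __warned;                                  \
        if ((cond) && !__warned) {                             \
                __warned = true;                               \
                fprintf(stderr, "WARNING: %s\n", (msg));       \
        }                                                      \
} while (0)

int main(void)
{
        int irqs_enabled = 0;

        warn_once(!irqs_enabled, "IRQs not enabled as expected");
        warn_once(!irqs_enabled, "IRQs not enabled as expected"); /* silent */
        return 0;
}
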
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 414a5e769fde..495ba4dd9da5 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -67,7 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
* @ss: The nfnetlink subsystem ID
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * both the smp_read_barrier_depends() and the READ_ONCE(), because
* caller holds the NFNL subsystem mutex.
*/
#define nfnl_dereference(p, ss) \
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 2efb08a60e63..f0fc4700b6ff 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
- ACCESS_ONCE(dev->power.last_busy) = jiffies;
+ WRITE_ONCE(dev->power.last_busy, jiffies);
}
static inline bool pm_runtime_is_irq_safe(struct device *dev)
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index c2cdd45a880a..127f534fec94 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -275,7 +275,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
- container_of(lockless_dereference(ptr), type, member)
+ container_of(READ_ONCE(ptr), type, member)
/*
* Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -368,7 +368,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* example is when items are added to the list, but never deleted.
*/
#define list_entry_lockless(ptr, type, member) \
- container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+ container_of((typeof(ptr))READ_ONCE(ptr), type, member)
/**
* list_for_each_entry_lockless - iterate over rcu list of given type
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1a9f70d44af9..a6ddc42f87a5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -346,7 +346,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#define __rcu_dereference_check(p, c, space) \
({ \
/* Dependency order vs. p above. */ \
- typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+ typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
@@ -360,7 +360,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_dereference_raw(p) \
({ \
/* Dependency order vs. p above. */ \
- typeof(p) ________p1 = lockless_dereference(p); \
+ typeof(p) ________p1 = READ_ONCE(p); \
((typeof(*p) __force __kernel *)(________p1)); \
})
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index ff3dd2ec44b4..54bcd970bfd3 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -68,7 +68,7 @@ static inline bool lockdep_rtnl_is_held(void)
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * both the smp_read_barrier_depends() and the READ_ONCE(), because
* caller holds RTNL.
*/
#define rtnl_dereference(p) \
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bc2994ed66e1..3dcd617e65ae 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -38,6 +38,15 @@ do { \
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
+
+#ifndef arch_read_lock_flags
+# define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#endif
+
+#ifndef arch_write_lock_flags
+# define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+#endif
+
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_read_lock_flags(lock, flags) \
do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
@@ -50,9 +59,6 @@ do { \
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
-#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
-
/*
* Define the various rw_lock methods. Note we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 5b9b84b20407..86ebb4bf9c6e 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -211,7 +211,7 @@ static inline void __raw_write_lock(rwlock_t *lock)
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
-#endif /* CONFIG_PREEMPT */
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
{
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index dfa34d803439..56707d5ff6ad 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -112,6 +112,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
* lock for reading
*/
extern void down_read(struct rw_semaphore *sem);
+extern int __must_check down_read_killable(struct rw_semaphore *sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 341e1a12bfc7..a39186194cd6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -166,6 +166,10 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
arch_spin_lock(&lock->raw_lock);
}
+#ifndef arch_spin_lock_flags
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#endif
+
static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
@@ -279,12 +283,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
1 : ({ local_irq_restore(flags); 0; }); \
})
-/**
- * raw_spin_can_lock - would raw_spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
-
/* Include rwlock functions */
#include <linux/rwlock.h>
@@ -397,11 +395,6 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
return raw_spin_is_contended(&lock->rlock);
}
-static __always_inline int spin_can_lock(spinlock_t *lock)
-{
- return raw_spin_can_lock(&lock->rlock);
-}
-
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
/*
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 612fb530af41..0ac9112c1bbe 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -32,14 +32,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
barrier();
}
-static inline void
-arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
- local_irq_save(flags);
- lock->slock = 0;
- barrier();
-}
-
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -77,7 +69,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define arch_spin_is_contended(lock) (((void)(lock), 0))
-#define arch_read_can_lock(lock) (((void)(lock), 1))
-#define arch_write_can_lock(lock) (((void)(lock), 1))
-
#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0eae11fc7a23..1cdabfb813ab 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -219,7 +219,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
\
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
- lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
+ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = (_func); \
} while (0)
@@ -399,7 +399,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
static struct lock_class_key __key; \
const char *__lock_name; \
\
- __lock_name = #fmt#args; \
+ __lock_name = "(wq_completion)"#fmt#args; \
\
__alloc_workqueue_key((fmt), (flags), (max_active), \
&__key, __lock_name, ##args); \
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 5d08c1950e7d..ff68cf288f9b 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -984,12 +984,12 @@ static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
- return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
+ return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
}
static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
- return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
+ return READ_ONCE(ipvs->sysctl_sync_refresh_period);
}
static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
@@ -1014,7 +1014,7 @@ static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
- return ACCESS_ONCE(ipvs->sysctl_sync_ports);
+ return READ_ONCE(ipvs->sysctl_sync_ports);
}
static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 079c69cae2f6..470c1c71e7f4 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1165,8 +1165,8 @@ static inline u8 nft_genmask_next(const struct net *net)
static inline u8 nft_genmask_cur(const struct net *net)
{
- /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */
- return 1 << ACCESS_ONCE(net->nft.gencursor);
+ /* Use READ_ONCE() to prevent refetching the value for atomicity */
+ return 1 << READ_ONCE(net->nft.gencursor);
}
#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1))
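The conversions in this and the preceding hunks are mechanical, but the motivation is worth spelling out: READ_ONCE() guarantees a single, non-refetched load of the word, where the old ACCESS_ONCE() could silently degrade for non-scalar types. A hedged sketch of the lockless-reader pattern (use_gen() is a hypothetical helper, not from the patch):

	u8 cur = READ_ONCE(net->nft.gencursor);	/* exactly one load */

	use_gen(1 << cur);	/* both uses see the same snapshot ...   */
	use_gen(1 << cur);	/* ... the compiler cannot reload gencursor */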
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index f65b92e0e1f9..ee8220f8dcf5 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <linux/compiler.h>
+#include <linux/compiler_types.h>
#ifndef __always_inline
#define __always_inline inline
diff --git a/kernel/acct.c b/kernel/acct.c
index 6670fbd3e466..d15c0ee4d955 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -147,7 +147,7 @@ static struct bsd_acct_struct *acct_get(struct pid_namespace *ns)
again:
smp_rmb();
rcu_read_lock();
- res = to_acct(ACCESS_ONCE(ns->bacct));
+ res = to_acct(READ_ONCE(ns->bacct));
if (!res) {
rcu_read_unlock();
return NULL;
@@ -159,7 +159,7 @@ again:
}
rcu_read_unlock();
mutex_lock(&res->lock);
- if (res != to_acct(ACCESS_ONCE(ns->bacct))) {
+ if (res != to_acct(READ_ONCE(ns->bacct))) {
mutex_unlock(&res->lock);
acct_put(res);
goto again;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 10cdb9c26b5d..c298847d4b85 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -209,7 +209,7 @@ static int event_function(void *info)
struct perf_event_context *task_ctx = cpuctx->task_ctx;
int ret = 0;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
perf_ctx_lock(cpuctx, task_ctx);
/*
@@ -306,7 +306,7 @@ static void event_function_local(struct perf_event *event, event_f func, void *d
struct task_struct *task = READ_ONCE(ctx->task);
struct perf_event_context *task_ctx = NULL;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
if (task) {
if (task == TASK_TOMBSTONE)
@@ -1006,7 +1006,7 @@ static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
struct perf_cpu_context *cpuctx;
int rotations = 0;
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
rotations = perf_rotate_context(cpuctx);
@@ -1093,7 +1093,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
struct list_head *head = this_cpu_ptr(&active_ctx_list);
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
WARN_ON(!list_empty(&ctx->active_ctx_list));
@@ -1102,7 +1102,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx)
static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
WARN_ON(list_empty(&ctx->active_ctx_list));
@@ -1202,7 +1202,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
again:
rcu_read_lock();
- ctx = ACCESS_ONCE(event->ctx);
+ ctx = READ_ONCE(event->ctx);
if (!atomic_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
@@ -3523,7 +3523,7 @@ void perf_event_task_tick(void)
struct perf_event_context *ctx, *tmp;
int throttled;
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
__this_cpu_inc(perf_throttled_seq);
throttled = __this_cpu_xchg(perf_throttled_count, 0);
@@ -4233,7 +4233,7 @@ static void perf_remove_from_owner(struct perf_event *event)
* indeed free this event, otherwise we need to serialize on
* owner->perf_event_mutex.
*/
- owner = lockless_dereference(event->owner);
+ owner = READ_ONCE(event->owner);
if (owner) {
/*
* Since delayed_put_task_struct() also drops the last
@@ -4330,7 +4330,7 @@ again:
* Cannot change, child events are not migrated, see the
* comment with perf_event_ctx_lock_nested().
*/
- ctx = lockless_dereference(child->ctx);
+ ctx = READ_ONCE(child->ctx);
/*
* Since child_mutex nests inside ctx::mutex, we must jump
* through hoops. We start by grabbing a reference on the ctx.
@@ -5304,8 +5304,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (!rb)
goto aux_unlock;
- aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
- aux_size = ACCESS_ONCE(rb->user_page->aux_size);
+ aux_offset = READ_ONCE(rb->user_page->aux_offset);
+ aux_size = READ_ONCE(rb->user_page->aux_size);
if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
goto aux_unlock;
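The WARN_ON(!irqs_disabled()) to lockdep_assert_irqs_disabled() conversions above trade an unconditional runtime check for one that only exists when lockdep is enabled and otherwise compiles away. A conceptual sketch of the assertion (hedged; not its exact in-tree definition):

	#ifdef CONFIG_LOCKDEP
	# define lockdep_assert_irqs_disabled()					\
		WARN_ONCE(debug_locks && current->hardirqs_enabled,		\
			  "IRQs not disabled as expected\n")
	#else
	# define lockdep_assert_irqs_disabled()	do { } while (0)
	#endif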
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index f684d8e5fa2b..f3e37971c842 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -381,7 +381,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
* (B) <-> (C) ordering is still observed by the pmu driver.
*/
if (!rb->aux_overwrite) {
- aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
+ aux_tail = READ_ONCE(rb->user_page->aux_tail);
handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
if (aux_head - aux_tail < perf_aux_size(rb))
handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
diff --git a/kernel/exit.c b/kernel/exit.c
index f6cad39f35df..6b4298a41167 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1339,7 +1339,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
* Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
* can't confuse the checks below.
*/
- int exit_state = ACCESS_ONCE(p->exit_state);
+ int exit_state = READ_ONCE(p->exit_state);
int ret;
if (unlikely(exit_state == EXIT_DEAD))
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
index c8c1d073fbf1..e0923fa4927a 100644
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -264,7 +264,7 @@ u64 irq_timings_next_event(u64 now)
* order to prevent the timings circular buffer to be updated
* while we are reading it.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
/*
* Number of elements in the circular buffer: If it happens it
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 9f20f6c72579..ec8ac337404d 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -191,7 +191,7 @@ void irq_work_tick(void)
*/
void irq_work_sync(struct irq_work *work)
{
- WARN_ON_ONCE(irqs_disabled());
+ lockdep_assert_irqs_enabled();
while (work->flags & IRQ_WORK_BUSY)
cpu_relax();
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 0bf2e8f5244a..8ff4ca4665ff 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -83,7 +83,7 @@ static void static_key_slow_inc_cpuslocked(struct static_key *key)
{
int v, v1;
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
/*
* Careful if we get concurrent static_key_slow_inc() calls;
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
void static_key_enable_cpuslocked(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) > 0) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(static_key_enable);
void static_key_disable_cpuslocked(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) != 1) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
@@ -224,21 +224,21 @@ static void jump_label_update_timeout(struct work_struct *work)
void static_key_slow_dec(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
void static_key_deferred_flush(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);
@@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(static_key_deferred_flush);
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
key->timeout = rl;
INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e36e652d996f..db933d063bfc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -76,6 +76,19 @@ module_param(lock_stat, int, 0644);
#define lock_stat 0
#endif
+#ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
+static int crossrelease_fullstack = 1;
+#else
+static int crossrelease_fullstack;
+#endif
+static int __init allow_crossrelease_fullstack(char *str)
+{
+ crossrelease_fullstack = 1;
+ return 0;
+}
+
+early_param("crossrelease_fullstack", allow_crossrelease_fullstack);
+
/*
* lockdep_lock: protects the lockdep graph, the hashes and the
* class/list/hash allocators.
@@ -4863,8 +4876,14 @@ static void add_xhlock(struct held_lock *hlock)
xhlock->trace.nr_entries = 0;
xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
xhlock->trace.entries = xhlock->trace_entries;
- xhlock->trace.skip = 3;
- save_stack_trace(&xhlock->trace);
+
+ if (crossrelease_fullstack) {
+ xhlock->trace.skip = 3;
+ save_stack_trace(&xhlock->trace);
+ } else {
+ xhlock->trace.nr_entries = 1;
+ xhlock->trace.entries[0] = hlock->acquire_ip;
+ }
}
static inline int same_context_xhlock(struct hist_lock *xhlock)
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 2655f26ec882..c7471c3fb798 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -23,49 +23,11 @@
#include <linux/spinlock.h>
#include <asm/qrwlock.h>
-/*
- * This internal data structure is used for optimizing access to some of
- * the subfields within the atomic_t cnts.
- */
-struct __qrwlock {
- union {
- atomic_t cnts;
- struct {
-#ifdef __LITTLE_ENDIAN
- u8 wmode; /* Writer mode */
- u8 rcnts[3]; /* Reader counts */
-#else
- u8 rcnts[3]; /* Reader counts */
- u8 wmode; /* Writer mode */
-#endif
- };
- };
- arch_spinlock_t lock;
-};
-
-/**
- * rspin_until_writer_unlock - inc reader count & spin until writer is gone
- * @lock : Pointer to queue rwlock structure
- * @writer: Current queue rwlock writer status byte
- *
- * In interrupt context or at the head of the queue, the reader will just
- * increment the reader count & wait until the writer releases the lock.
- */
-static __always_inline void
-rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
-{
- while ((cnts & _QW_WMASK) == _QW_LOCKED) {
- cpu_relax();
- cnts = atomic_read_acquire(&lock->cnts);
- }
-}
-
/**
* queued_read_lock_slowpath - acquire read lock of a queue rwlock
* @lock: Pointer to queue rwlock structure
- * @cnts: Current qrwlock lock value
*/
-void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
+void queued_read_lock_slowpath(struct qrwlock *lock)
{
/*
* Readers come here when they cannot get the lock without waiting
@@ -73,13 +35,11 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
if (unlikely(in_interrupt())) {
/*
* Readers in interrupt context will get the lock immediately
- * if the writer is just waiting (not holding the lock yet).
- * The rspin_until_writer_unlock() function returns immediately
- * in this case. Otherwise, they will spin (with ACQUIRE
- * semantics) until the lock is available without waiting in
- * the queue.
+ * if the writer is just waiting (not holding the lock yet),
+ * so spin with ACQUIRE semantics until the lock is available
+ * without waiting in the queue.
*/
- rspin_until_writer_unlock(lock, cnts);
+ atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
return;
}
atomic_sub(_QR_BIAS, &lock->cnts);
@@ -88,14 +48,14 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
* Put the reader into the wait queue
*/
arch_spin_lock(&lock->wait_lock);
+ atomic_add(_QR_BIAS, &lock->cnts);
/*
* The ACQUIRE semantics of the following spinning code ensure
* that accesses can't leak upwards out of our subsequent critical
* section in the case that the lock is currently held for write.
*/
- cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
- rspin_until_writer_unlock(lock, cnts);
+ atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
/*
* Signal the next one in queue to become queue head
@@ -110,8 +70,6 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
*/
void queued_write_lock_slowpath(struct qrwlock *lock)
{
- u32 cnts;
-
/* Put the writer into the wait queue */
arch_spin_lock(&lock->wait_lock);
@@ -120,30 +78,14 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
(atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
goto unlock;
- /*
- * Set the waiting flag to notify readers that a writer is pending,
- * or wait for a previous writer to go away.
- */
- for (;;) {
- struct __qrwlock *l = (struct __qrwlock *)lock;
-
- if (!READ_ONCE(l->wmode) &&
- (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
- break;
+ /* Set the waiting flag to notify readers that a writer is pending */
+ atomic_add(_QW_WAITING, &lock->cnts);
- cpu_relax();
- }
-
- /* When no more readers, set the locked flag */
- for (;;) {
- cnts = atomic_read(&lock->cnts);
- if ((cnts == _QW_WAITING) &&
- (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
- _QW_LOCKED) == _QW_WAITING))
- break;
-
- cpu_relax();
- }
+ /* When no more readers or writers, set the locked flag */
+ do {
+ atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
+ } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
+ _QW_LOCKED) != _QW_WAITING);
unlock:
arch_spin_unlock(&lock->wait_lock);
}
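The rewrite replaces the open-coded reader spin loops with atomic_cond_read_acquire(). A rough sketch of what that helper does on the reader path, assuming it maps onto smp_cond_load_acquire() as in this series (illustrative only):

	u32 val = atomic_read(&lock->cnts);

	while (val & _QW_LOCKED) {		/* i.e. the VAL-based condition is still false */
		cpu_relax();
		val = atomic_read(&lock->cnts);
	}
	smp_acquire__after_ctrl_dep();		/* the final load behaves as an ACQUIRE */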
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 15b6a39366c6..6ee477765e6c 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -61,21 +61,50 @@ struct pv_node {
#include "qspinlock_stat.h"
/*
+ * Hybrid PV queued/unfair lock
+ *
* By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
- * being queued. By allowing one lock stealing attempt here when the pending
- * bit is off, it helps to reduce the performance impact of lock waiter
- * preemption without the drawback of lock starvation.
+ * being queued.
+ *
+ * The pending bit is set by the queue head vCPU of the MCS wait queue in
+ * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
+ * When that bit becomes visible to the incoming waiters, no lock stealing
+ * is allowed. The function will return immediately to make the waiters
+ * enter the MCS wait queue. So lock starvation shouldn't happen as long
+ * as the queued mode vCPUs are actively running to set the pending bit
+ * and hence disabling lock stealing.
+ *
+ * When the pending bit isn't set, the lock waiters will stay in the unfair
+ * mode spinning on the lock unless the MCS wait queue is empty. In this
+ * case, the lock waiters will enter the queued mode slowpath trying to
+ * become the queue head and set the pending bit.
+ *
+ * This hybrid PV queued/unfair lock combines the best attributes of a
+ * queued lock (no lock starvation) and an unfair lock (good performance
+ * on not heavily contended locks).
*/
-#define queued_spin_trylock(l) pv_queued_spin_steal_lock(l)
-static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
+#define queued_spin_trylock(l) pv_hybrid_queued_unfair_trylock(l)
+static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
struct __qspinlock *l = (void *)lock;
- if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
- (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
- qstat_inc(qstat_pv_lock_stealing, true);
- return true;
+ /*
+ * Stay in unfair lock mode as long as queued mode waiters are
+ * present in the MCS wait queue but the pending bit isn't set.
+ */
+ for (;;) {
+ int val = atomic_read(&lock->val);
+
+ if (!(val & _Q_LOCKED_PENDING_MASK) &&
+ (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+ qstat_inc(qstat_pv_lock_stealing, true);
+ return true;
+ }
+ if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
+ break;
+
+ cpu_relax();
}
return false;
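For reference, the three outcomes of the new trylock loop can be summarized as follows (a paraphrase of the code above, not additional behaviour):

	/*
	 *  locked/pending both clear          -> try to steal the lock (unfair fast path)
	 *  queue empty or pending bit set     -> give up and enter the queued slowpath
	 *  otherwise (queued waiters present,
	 *             pending still clear)    -> keep spinning unfairly on the lock word
	 */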
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index a6c76a4832b4..f549c552dbf1 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -29,6 +29,22 @@ void __sched down_read(struct rw_semaphore *sem)
EXPORT_SYMBOL(down_read);
+int __sched down_read_killable(struct rw_semaphore *sem)
+{
+ might_sleep();
+ rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
+
+ if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
+ rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ return -EINTR;
+ }
+
+ rwsem_set_reader_owned(sem);
+ return 0;
+}
+
+EXPORT_SYMBOL(down_read_killable);
+
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
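A minimal usage sketch for the new interface (illustrative caller, not part of the patch): down_read_killable() returns 0 on success and -EINTR if a fatal signal arrives while the task is blocked.

	static int example_read_op(struct rw_semaphore *sem)
	{
		int ret = down_read_killable(sem);

		if (ret)			/* -EINTR: interrupted by a fatal signal */
			return ret;

		/* ... read-side critical section ... */

		up_read(sem);
		return 0;
	}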
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 6e40fdfba326..1fd1a7543cdd 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -30,11 +30,10 @@
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
* The __lock_function inlines are taken from
- * include/linux/spinlock_api_smp.h
+ * spinlock : include/linux/spinlock_api_smp.h
+ * rwlock : include/linux/rwlock_api_smp.h
*/
#else
-#define raw_read_can_lock(l) read_can_lock(l)
-#define raw_write_can_lock(l) write_can_lock(l)
/*
* Some architectures can relax in favour of the CPU owning the lock.
@@ -69,7 +68,7 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+ while ((lock)->break_lock) \
arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
@@ -89,7 +88,7 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+ while ((lock)->break_lock) \
arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e4fe06d42385..f9c0ca2ccf0c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -734,7 +734,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
int *fp = &rnp->need_future_gp[idx];
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
return READ_ONCE(*fp);
}
@@ -746,7 +746,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
if (rcu_gp_in_progress(rsp))
return false; /* No, a grace period is already in progress. */
if (rcu_future_needs_gp(rsp))
@@ -773,7 +773,7 @@ static void rcu_eqs_enter_common(bool user)
struct rcu_data *rdp;
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
!user && !is_idle_task(current)) {
@@ -843,7 +843,7 @@ static void rcu_eqs_enter(bool user)
*/
void rcu_idle_enter(void)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_idle_enter() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rcu_eqs_enter(false);
}
@@ -861,7 +861,7 @@ void rcu_idle_enter(void)
*/
void rcu_user_enter(void)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_user_enter() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */
@@ -889,7 +889,7 @@ void rcu_irq_exit(void)
{
struct rcu_dynticks *rdtp;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rdtp = this_cpu_ptr(&rcu_dynticks);
/* Page faults can happen in NMI handlers, so check... */
@@ -959,7 +959,7 @@ static void rcu_eqs_exit(bool user)
struct rcu_dynticks *rdtp;
long long oldval;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
@@ -1039,7 +1039,7 @@ void rcu_irq_enter(void)
struct rcu_dynticks *rdtp;
long long oldval;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rdtp = this_cpu_ptr(&rcu_dynticks);
/* Page faults can happen in NMI handlers, so check... */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 4c857e583802..dd4d0d390e5b 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -326,7 +326,7 @@ static void rcu_preempt_note_context_switch(bool preempt)
struct rcu_data *rdp;
struct rcu_node *rnp;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_preempt_note_context_switch() invoked with interrupts enabled!!!\n");
+ lockdep_assert_irqs_disabled();
WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
if (t->rcu_read_lock_nesting > 0 &&
!t->rcu_read_unlock_special.b.blocked) {
@@ -1420,7 +1420,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
unsigned long dj;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_needs_cpu() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
/* Snapshot to detect later posting of non-lazy callback. */
rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
@@ -1469,7 +1469,7 @@ static void rcu_prepare_for_idle(void)
struct rcu_state *rsp;
int tne;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_prepare_for_idle() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
if (rcu_is_nocb_cpu(smp_processor_id()))
return;
@@ -1524,7 +1524,7 @@ static void rcu_prepare_for_idle(void)
*/
static void rcu_cleanup_after_idle(void)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_cleanup_after_idle() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
if (rcu_is_nocb_cpu(smp_processor_id()))
return;
if (rcu_try_advance_all_cbs())
@@ -2016,7 +2016,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
struct rcu_data *rdp,
unsigned long flags)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_nocb_adopt_orphan_cbs() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
if (!rcu_is_nocb_cpu(smp_processor_id()))
return false; /* Not NOCBs CPU, caller must migrate CBs. */
__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index ca0f8fc945c6..e086babe6c61 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -388,7 +388,7 @@ void sched_clock_tick(void)
if (unlikely(!sched_clock_running))
return;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
scd = this_scd();
__scd_stamp(scd);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 14d2dbf97c53..9be8b68a66da 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -259,8 +259,7 @@ static inline u64 account_other_time(u64 max)
{
u64 accounted;
- /* Shall be converted to a lockdep-enabled lightweight check */
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
accounted = steal_account_process_time(max);
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 418a1c045933..5f0dfb2abb8d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -190,7 +190,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
u32 ret = SECCOMP_RET_ALLOW;
/* Make sure cross-thread synced filter points somewhere sane. */
struct seccomp_filter *f =
- lockless_dereference(current->seccomp.filter);
+ READ_ONCE(current->seccomp.filter);
/* Ensure unexpected behavior doesn't result in failing open. */
if (unlikely(WARN_ON(f == NULL)))
diff --git a/kernel/smp.c b/kernel/smp.c
index c94dd85c8d41..084c8b3a2681 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -213,7 +213,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
call_single_data_t *csd, *csd_next;
static bool warned;
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
head = this_cpu_ptr(&call_single_queue);
entry = llist_del_all(head);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 4e09821f9d9e..662f7b1b7a78 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__local_bh_disable_ip);
static void __local_bh_enable(unsigned int cnt)
{
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_on(_RET_IP_);
@@ -158,7 +158,8 @@ EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
- WARN_ON_ONCE(in_irq() || irqs_disabled());
+ WARN_ON_ONCE(in_irq());
+ lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
local_irq_disable();
#endif
@@ -396,9 +397,8 @@ void irq_exit(void)
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
local_irq_disable();
#else
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
#endif
-
account_irq_exit_time(current);
preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
@@ -488,7 +488,7 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
- BUG_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
t->next = __this_cpu_read(tasklet_hi_vec.head);
__this_cpu_write(tasklet_hi_vec.head, t);
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 5718b3ea202a..0fef395662a6 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -68,7 +68,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
* we raced with task_work_run(), *pprev == NULL/exited.
*/
raw_spin_lock_irqsave(&task->pi_lock, flags);
- while ((work = lockless_dereference(*pprev))) {
+ while ((work = READ_ONCE(*pprev))) {
if (work->func != func)
pprev = &work->next;
else if (cmpxchg(pprev, work, work->next) == work)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 88f75f92ef36..d32520840fde 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -758,9 +758,7 @@ void clock_was_set(void)
*/
void hrtimers_resume(void)
{
- WARN_ONCE(!irqs_disabled(),
- KERN_INFO "hrtimers_resume() called with IRQs enabled!");
-
+ lockdep_assert_irqs_disabled();
/* Retrigger on the local CPU */
retrigger_next_event(NULL);
/* And schedule a retrigger for all others */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 5b117110b55b..1f27887aa194 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -603,7 +603,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
/*
* Disarm any old timer after extracting its expiry time.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
ret = 0;
old_incr = timer->it.cpu.incr;
@@ -1034,7 +1034,7 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
/*
* Now re-arm for the new expiry time.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
arm_timer(timer);
unlock:
unlock_task_sighand(p, &flags);
@@ -1125,7 +1125,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
struct k_itimer *timer, *next;
unsigned long flags;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
/*
* The fast path checks that there are no expired thread or thread
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c7a899c5ce64..dd4b7b492c9b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -198,7 +198,7 @@ static bool check_tick_dependency(atomic_t *dep)
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
if (unlikely(!cpu_online(cpu)))
return false;
@@ -960,8 +960,7 @@ void tick_nohz_idle_enter(void)
{
struct tick_sched *ts;
- WARN_ON_ONCE(irqs_disabled());
-
+ lockdep_assert_irqs_enabled();
/*
* Update the idle state in the scheduler domain hierarchy
* when tick_nohz_stop_sched_tick() is called from the idle loop.
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 81279c6602ff..845f3805c73d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2724,7 +2724,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
* if it happened, we have to fail the write.
*/
barrier();
- if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
+ if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
local_dec(&cpu_buffer->committing);
local_dec(&cpu_buffer->commits);
return NULL;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 401b0639116f..6b0b343a36a2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1460,7 +1460,7 @@ extern struct trace_event_file *find_event_file(struct trace_array *tr,
static inline void *event_file_data(struct file *filp)
{
- return ACCESS_ONCE(file_inode(filp)->i_private);
+ return READ_ONCE(file_inode(filp)->i_private);
}
extern struct mutex event_mutex;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 719a52a4064a..734accc02418 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -78,7 +78,7 @@ check_stack(unsigned long ip, unsigned long *stack)
{
unsigned long this_size, flags; unsigned long *p, *top, *start;
static int tracer_frame;
- int frame_size = ACCESS_ONCE(tracer_frame);
+ int frame_size = READ_ONCE(tracer_frame);
int i, x;
this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index c490f1e4313b..d32b45662fb6 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -894,7 +894,7 @@ static bool new_idmap_permitted(const struct file *file,
int proc_setgroups_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
- unsigned long userns_flags = ACCESS_ONCE(ns->flags);
+ unsigned long userns_flags = READ_ONCE(ns->flags);
seq_printf(seq, "%s\n",
(userns_flags & USERNS_SETGROUPS_ALLOWED) ?
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a2dccfe1acec..13f67b5a0a0c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1376,7 +1376,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
debug_work_activate(work);
@@ -2491,15 +2491,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
- /*
- * Explicitly init the crosslock for wq_barrier::done, make its lock
- * key a subkey of the corresponding work. As a result we won't
- * build a dependency between wq_barrier::done and unrelated work.
- */
- lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
- "(complete)wq_barr::done",
- target->lockdep_map.key, 1);
- __init_completion(&barr->done);
+ init_completion_map(&barr->done, &target->lockdep_map);
+
barr->task = current;
/*
@@ -2605,16 +2598,13 @@ void flush_workqueue(struct workqueue_struct *wq)
struct wq_flusher this_flusher = {
.list = LIST_HEAD_INIT(this_flusher.list),
.flush_color = -1,
- .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
+ .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
};
int next_color;
if (WARN_ON(!wq_online))
return;
- lock_map_acquire(&wq->lockdep_map);
- lock_map_release(&wq->lockdep_map);
-
mutex_lock(&wq->mutex);
/*
@@ -2877,9 +2867,6 @@ bool flush_work(struct work_struct *work)
if (WARN_ON(!wq_online))
return false;
- lock_map_acquire(&work->lockdep_map);
- lock_map_release(&work->lockdep_map);
-
if (start_flush_work(work, &barr)) {
wait_for_completion(&barr.done);
destroy_work_on_stack(&barr.work);
@@ -4640,7 +4627,7 @@ static void rebind_workers(struct worker_pool *pool)
* concurrency management. Note that when or whether
* @worker clears REBOUND doesn't affect correctness.
*
- * ACCESS_ONCE() is necessary because @worker->flags may be
+ * WRITE_ONCE() is necessary because @worker->flags may be
* tested without holding any lock in
* wq_worker_waking_up(). Without it, NOT_RUNNING test may
* fail incorrectly leading to premature concurrency
@@ -4649,7 +4636,7 @@ static void rebind_workers(struct worker_pool *pool)
WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
worker_flags |= WORKER_REBOUND;
worker_flags &= ~WORKER_UNBOUND;
- ACCESS_ONCE(worker->flags) = worker_flags;
+ WRITE_ONCE(worker->flags, worker_flags);
}
spin_unlock_irq(&pool->lock);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dfdad67d8f6c..2b439a515c30 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1092,8 +1092,8 @@ config PROVE_LOCKING
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
- select LOCKDEP_CROSSRELEASE if BROKEN
- select LOCKDEP_COMPLETIONS if BROKEN
+ select LOCKDEP_CROSSRELEASE
+ select LOCKDEP_COMPLETIONS
select TRACE_IRQFLAGS
default n
help
@@ -1179,6 +1179,21 @@ config LOCKDEP_COMPLETIONS
A deadlock caused by wait_for_completion() and complete() can be
detected by lockdep using crossrelease feature.
+config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
+ bool "Enable the boot parameter, crossrelease_fullstack"
+ depends on LOCKDEP_CROSSRELEASE
+ default n
+ help
+ The lockdep "cross-release" feature needs to record stack traces
+ (of calling functions) for all acquisitions, for eventual later
+ use during analysis. By default only a single caller is recorded,
+ because the unwind operation can be very expensive with deeper
+ stack chains.
+
+ However a boot parameter, crossrelease_fullstack, was
+ introduced since sometimes deeper traces are required for full
+ analysis. This option turns on the boot parameter.
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 4e53be8bc590..b77d51da8c73 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -39,7 +39,7 @@ begin_node:
/* Descend through a shortcut */
shortcut = assoc_array_ptr_to_shortcut(cursor);
smp_read_barrier_depends();
- cursor = ACCESS_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node);
}
node = assoc_array_ptr_to_node(cursor);
@@ -55,7 +55,7 @@ begin_node:
*/
has_meta = 0;
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = ACCESS_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]);
has_meta |= (unsigned long)ptr;
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
@@ -89,7 +89,7 @@ continue_node:
smp_read_barrier_depends();
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = ACCESS_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]);
if (assoc_array_ptr_is_meta(ptr)) {
cursor = ptr;
goto begin_node;
@@ -98,7 +98,7 @@ continue_node:
finished_node:
/* Move up to the parent (may need to skip back over a shortcut) */
- parent = ACCESS_ONCE(node->back_pointer);
+ parent = READ_ONCE(node->back_pointer);
slot = node->parent_slot;
if (parent == stop)
return 0;
@@ -107,7 +107,7 @@ finished_node:
shortcut = assoc_array_ptr_to_shortcut(parent);
smp_read_barrier_depends();
cursor = parent;
- parent = ACCESS_ONCE(shortcut->back_pointer);
+ parent = READ_ONCE(shortcut->back_pointer);
slot = shortcut->parent_slot;
if (parent == stop)
return 0;
@@ -147,7 +147,7 @@ int assoc_array_iterate(const struct assoc_array *array,
void *iterator_data),
void *iterator_data)
{
- struct assoc_array_ptr *root = ACCESS_ONCE(array->root);
+ struct assoc_array_ptr *root = READ_ONCE(array->root);
if (!root)
return 0;
@@ -194,7 +194,7 @@ assoc_array_walk(const struct assoc_array *array,
pr_devel("-->%s()\n", __func__);
- cursor = ACCESS_ONCE(array->root);
+ cursor = READ_ONCE(array->root);
if (!cursor)
return assoc_array_walk_tree_empty;
@@ -220,7 +220,7 @@ consider_node:
slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
slot &= ASSOC_ARRAY_FAN_MASK;
- ptr = ACCESS_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]);
pr_devel("consider slot %x [ix=%d type=%lu]\n",
slot, level, (unsigned long)ptr & 3);
@@ -294,7 +294,7 @@ follow_shortcut:
} while (sc_level < shortcut->skip_to_level);
/* The shortcut matches the leaf's index to this point. */
- cursor = ACCESS_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node);
if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
level = sc_level;
goto jumped;
@@ -337,7 +337,7 @@ void *assoc_array_find(const struct assoc_array *array,
* the terminal node.
*/
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = ACCESS_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]);
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
* and dereferencing the pointer - but only if we are
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index 6a406fafb5d6..da4672a50a54 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -21,7 +21,7 @@ void dql_completed(struct dql *dql, unsigned int count)
unsigned int ovlimit, completed, num_queued;
bool all_prev_completed;
- num_queued = ACCESS_ONCE(dql->num_queued);
+ num_queued = READ_ONCE(dql->num_queued);
/* Can't complete more than what's in queue */
BUG_ON(count > num_queued - dql->num_completed);
diff --git a/lib/llist.c b/lib/llist.c
index ae5872b1df0c..7062e931a7bb 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -41,7 +41,7 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
struct llist_node *first;
do {
- new_last->next = first = ACCESS_ONCE(head->first);
+ new_last->next = first = READ_ONCE(head->first);
} while (cmpxchg(&head->first, first, new_first) != first);
return !first;
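For context, llist_add_batch() returns true when the list was previously empty, which callers typically use to decide whether a consumer needs waking. A hedged usage sketch (wake_consumer() and the surrounding structures are hypothetical):

	/* nodes a..b must already be linked through ->next before the call */
	if (llist_add_batch(&a->llnode, &b->llnode, &q->head))
		wake_consumer(q);	/* list went non-empty: kick the consumer */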
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 86c3385b9eb3..1746bae94d41 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -620,8 +620,8 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp
rcu_read_lock();
for (i = 0; i < depth; i++, d = p) {
- p = ACCESS_ONCE(d->d_parent);
- array[i] = ACCESS_ONCE(d->d_name.name);
+ p = READ_ONCE(d->d_parent);
+ array[i] = READ_ONCE(d->d_name.name);
if (p == d) {
if (i)
array[i] = "";
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1981ed697dab..b521ed1170f9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2718,7 +2718,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc)
{
struct pglist_data *pgdata = NODE_DATA(sc->nid);
- return ACCESS_ONCE(pgdata->split_queue_len);
+ return READ_ONCE(pgdata->split_queue_len);
}
static unsigned long deferred_split_scan(struct shrinker *shrink,
diff --git a/mm/memory.c b/mm/memory.c
index a728bed16c20..cae514e7dcfc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3891,9 +3891,9 @@ static int handle_pte_fault(struct vm_fault *vmf)
/*
* some architectures can have larger ptes than wordsize,
* e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
- * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee
- * atomic accesses. The code below just needs a consistent
- * view for the ifs and we later double check anyway with the
+ * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
+ * accesses. The code below just needs a consistent view
+ * for the ifs and we later double check anyway with the
* ptl lock held. So here a barrier will do.
*/
barrier();
diff --git a/mm/slab.h b/mm/slab.h
index 028cdc7df67e..86d7c7d860f9 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -259,7 +259,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
* memcg_caches issues a write barrier to match this (see
* memcg_create_kmem_cache()).
*/
- cachep = lockless_dereference(arr->entries[idx]);
+ cachep = READ_ONCE(arr->entries[idx]);
rcu_read_unlock();
return cachep;
diff --git a/net/core/dev.c b/net/core/dev.c
index 11596a302a26..61559ca3980b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3725,7 +3725,7 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (flow_table && flow_id <= flow_table->mask) {
rflow = &flow_table->flows[flow_id];
- cpu = ACCESS_ONCE(rflow->cpu);
+ cpu = READ_ONCE(rflow->cpu);
if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
((int)(per_cpu(softnet_data, cpu).input_queue_head -
rflow->last_qtail) <
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 912731bed7b7..57557a6a950c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -334,7 +334,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
/* It is up to the caller to keep npinfo alive. */
struct netpoll_info *npinfo;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
npinfo = rcu_dereference_bh(np->dev->npinfo);
if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 6e1e10ff433a..3b2034f6d49d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3377,7 +3377,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
- unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
+ unsigned int burst = READ_ONCE(pkt_dev->burst);
struct net_device *odev = pkt_dev->odev;
struct netdev_queue *txq;
struct sk_buff *skb;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index af74d0433453..f9597ba26599 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -164,7 +164,7 @@ static void inet_frag_worker(struct work_struct *work)
local_bh_disable();
- for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
+ for (i = READ_ONCE(f->next_bucket); budget; --budget) {
evicted += inet_evict_bucket(f, &f->hash[i]);
i = (i + 1) & (INETFRAGS_HASHSZ - 1);
if (evicted > INETFRAGS_EVICT_MAX)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3d9f1c2f81c5..c0864562083b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -495,7 +495,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
{
u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
- u32 old = ACCESS_ONCE(*p_tstamp);
+ u32 old = READ_ONCE(*p_tstamp);
u32 now = (u32)jiffies;
u32 new, delta = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b6bb3cdfad09..887585045b27 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -816,12 +816,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
if (likely(tp->srtt_us))
do_div(rate, tp->srtt_us);
- /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
+ /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
* without any lock. We want to make sure compiler wont store
* intermediate values in this location.
*/
- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
- sk->sk_max_pacing_rate);
+ WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
+ sk->sk_max_pacing_rate));
}
/* Calculate rto without backoff. This is the second half of Van Jacobson's
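As the updated comment notes, sch_fq reads sk_pacing_rate without the socket lock, so the WRITE_ONCE() above wants a matching READ_ONCE() on the reader side. Roughly, with pacing_rate a local of the field's type (illustrative):

	/* lockless reader side, e.g. in sch_fq's dequeue path */
	pacing_rate = READ_ONCE(sk->sk_pacing_rate);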
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 478909f4694d..5a42e873d44a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1910,7 +1910,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
goto send_now;
- win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
+ win_divisor = READ_ONCE(sysctl_tcp_tso_win_divisor);
if (win_divisor) {
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ebfbccae62fd..02ec9a349303 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1853,7 +1853,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
*/
/* if we're overly short, let UDP handle it */
- encap_rcv = ACCESS_ONCE(up->encap_rcv);
+ encap_rcv = READ_ONCE(up->encap_rcv);
if (encap_rcv) {
int ret;
@@ -2298,7 +2298,7 @@ void udp_destroy_sock(struct sock *sk)
unlock_sock_fast(sk, slow);
if (static_key_false(&udp_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
- encap_destroy = ACCESS_ONCE(up->encap_destroy);
+ encap_destroy = READ_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a1c24443cd9e..dab946554157 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -490,7 +490,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
if (!t)
goto out;
- tproto = ACCESS_ONCE(t->parms.proto);
+ tproto = READ_ONCE(t->parms.proto);
if (tproto != ipproto && tproto != 0)
goto out;
@@ -899,7 +899,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
if (t) {
- u8 tproto = ACCESS_ONCE(t->parms.proto);
+ u8 tproto = READ_ONCE(t->parms.proto);
if (tproto != ipproto && tproto != 0)
goto drop;
@@ -1233,7 +1233,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- tproto = ACCESS_ONCE(t->parms.proto);
+ tproto = READ_ONCE(t->parms.proto);
if (tproto != IPPROTO_IPIP && tproto != 0)
return -1;
@@ -1303,7 +1303,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
- tproto = ACCESS_ONCE(t->parms.proto);
+ tproto = READ_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
ip6_tnl_addr_conflict(t, ipv6h))
return -1;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 40d7234c27b9..3f30fa313bf2 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -606,7 +606,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
*/
/* if we're overly short, let UDP handle it */
- encap_rcv = ACCESS_ONCE(up->encap_rcv);
+ encap_rcv = READ_ONCE(up->encap_rcv);
if (encap_rcv) {
int ret;
@@ -1432,7 +1432,7 @@ void udpv6_destroy_sock(struct sock *sk)
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
- encap_destroy = ACCESS_ONCE(up->encap_destroy);
+ encap_destroy = READ_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index dd3e83328ad5..82cb93f66b9b 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -193,7 +193,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
*/
rcv = rcu_dereference(sap->rcv_func);
dest = llc_pdu_type(skb);
- sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL;
+ sap_handler = dest ? READ_ONCE(llc_type_handlers[dest - 1]) : NULL;
if (unlikely(!sap_handler)) {
if (rcv)
rcv(skb, dev, pt, orig_dev);
@@ -214,7 +214,7 @@ drop:
kfree_skb(skb);
goto out;
handle_station:
- sta_handler = ACCESS_ONCE(llc_station_handler);
+ sta_handler = READ_ONCE(llc_station_handler);
if (!sta_handler)
goto drop;
sta_handler(skb);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 69615016d5bf..214d2ba02877 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2008,7 +2008,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
- u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+ u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
if (rate == STA_STATS_RATE_INVALID)
return -EINVAL;
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 13f740875507..9ee71cb276d7 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -458,7 +458,7 @@ static inline bool in_persistence(struct ip_vs_conn *cp)
static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs,
struct ip_vs_conn *cp, int pkts)
{
- unsigned long orig = ACCESS_ONCE(cp->sync_endtime);
+ unsigned long orig = READ_ONCE(cp->sync_endtime);
unsigned long now = jiffies;
unsigned long n = (now + cp->timeout) & ~3UL;
unsigned int sync_refresh_period;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index c9796629858f..a16356cacec3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -401,7 +401,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
outdev = entry->state.out;
- switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
+ switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
case NFQNL_COPY_META:
case NFQNL_COPY_NONE:
break;
@@ -412,7 +412,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
skb_checksum_help(entskb))
return NULL;
- data_len = ACCESS_ONCE(queue->copy_range);
+ data_len = READ_ONCE(queue->copy_range);
if (data_len > entskb->len)
data_len = entskb->len;
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
index d177dd066504..4d748975117d 100644
--- a/net/netlabel/netlabel_calipso.c
+++ b/net/netlabel/netlabel_calipso.c
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(netlbl_calipso_ops_register);
static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
{
- return ACCESS_ONCE(calipso_ops);
+ return READ_ONCE(calipso_ops);
}
/**
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d396cb61a280..eb866647a27a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -14201,7 +14201,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct sk_buff *msg;
void *hdr;
- u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
+ u32 nlportid = READ_ONCE(wdev->ap_unexpected_nlportid);
if (!nlportid)
return false;
diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
index 49db1def1721..f42ce551bb48 100644
--- a/samples/mic/mpssd/mpssd.c
+++ b/samples/mic/mpssd/mpssd.c
@@ -65,7 +65,7 @@ static struct mic_info mic_list;
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))
#define GSO_ENABLED 1
#define MAX_GSO_SIZE (64 * 1024)
@@ -382,7 +382,7 @@ disp_iovec(struct mic_info *mic, struct mic_copy_desc *copy,
static inline __u16 read_avail_idx(struct mic_vring *vr)
{
- return ACCESS_ONCE(vr->info->avail_idx);
+ return READ_ONCE(vr->info->avail_idx);
}
static inline void txrx_prepare(int type, bool tx, struct mic_vring *vr,
@@ -523,7 +523,7 @@ spin_for_descriptors(struct mic_info *mic, struct mic_vring *vr)
{
__u16 avail_idx = read_avail_idx(vr);
- while (avail_idx == le16toh(ACCESS_ONCE(vr->vr.avail->idx))) {
+ while (avail_idx == le16toh(READ_ONCE(vr->vr.avail->idx))) {
#ifdef DEBUG
mpsslog("%s %s waiting for desc avail %d info_avail %d\n",
mic->name, __func__,
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 4d1ea96e8794..a18bca720995 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -34,7 +34,7 @@ do
sed -r \
-e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \
-e 's/__attribute_const__([ \t]|$)/\1/g' \
- -e 's@^#include <linux/compiler.h>@@' \
+ -e 's@^#include <linux/compiler(|_types).h>@@' \
-e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \
-e 's/(^|[ \t(])(inline|asm|volatile)([ \t(]|$)/\1__\2__\3/g' \
-e 's@#(ifndef|define|endif[ \t]*/[*])[ \t]*_UAPI@#\1 @' \
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 436b3a722357..f546707a2bbb 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -19,17 +19,6 @@
#include "match.h"
-/* Provide our own test for whether a write lock is held for asserts
- * this is because on none SMP systems write_can_lock will always
- * resolve to true, which is what you want for code making decisions
- * based on it, but wrong for asserts checking that the lock is held
- */
-#ifdef CONFIG_SMP
-#define write_is_locked(X) !write_can_lock(X)
-#else
-#define write_is_locked(X) (1)
-#endif /* CONFIG_SMP */
-
/*
* DEBUG remains global (no per profile flag) since it is mostly used in sysctl
* which is not related to profile accesses.
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index c5b99b954580..ad28e03a6f30 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -80,7 +80,7 @@ void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
AA_BUG(!orig);
AA_BUG(!new);
- AA_BUG(!write_is_locked(&labels_set(orig)->lock));
+ lockdep_assert_held_exclusive(&labels_set(orig)->lock);
tmp = rcu_dereference_protected(orig->proxy->label,
&labels_ns(orig)->lock);
@@ -571,7 +571,7 @@ static bool __label_remove(struct aa_label *label, struct aa_label *new)
AA_BUG(!ls);
AA_BUG(!label);
- AA_BUG(!write_is_locked(&ls->lock));
+ lockdep_assert_held_exclusive(&ls->lock);
if (new)
__aa_proxy_redirect(label, new);
@@ -608,7 +608,7 @@ static bool __label_replace(struct aa_label *old, struct aa_label *new)
AA_BUG(!ls);
AA_BUG(!old);
AA_BUG(!new);
- AA_BUG(!write_is_locked(&ls->lock));
+ lockdep_assert_held_exclusive(&ls->lock);
AA_BUG(new->flags & FLAG_IN_TREE);
if (!label_is_stale(old))
@@ -645,7 +645,7 @@ static struct aa_label *__label_insert(struct aa_labelset *ls,
AA_BUG(!ls);
AA_BUG(!label);
AA_BUG(labels_set(label) != ls);
- AA_BUG(!write_is_locked(&ls->lock));
+ lockdep_assert_held_exclusive(&ls->lock);
AA_BUG(label->flags & FLAG_IN_TREE);
/* Figure out where to put new node */
diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c
index 23ccddb20de1..4210e5c6262e 100644
--- a/sound/firewire/amdtp-am824.c
+++ b/sound/firewire/amdtp-am824.c
@@ -247,7 +247,7 @@ void amdtp_am824_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct amdtp_am824 *p = s->protocol;
if (port < p->midi_ports)
- ACCESS_ONCE(p->midi[port]) = midi;
+ WRITE_ONCE(p->midi[port], midi);
}
EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger);
@@ -336,7 +336,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, __be32 *buffe
unsigned int data_blocks, unsigned int *syt)
{
struct amdtp_am824 *p = s->protocol;
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
@@ -357,7 +357,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, __be32 *buffe
unsigned int data_blocks, unsigned int *syt)
{
struct amdtp_am824 *p = s->protocol;
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 3fc581a5ad62..4a1dc145327b 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -376,7 +376,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
ptr = s->pcm_buffer_pointer + frames;
if (ptr >= pcm->runtime->buffer_size)
ptr -= pcm->runtime->buffer_size;
- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
+ WRITE_ONCE(s->pcm_buffer_pointer, ptr);
s->pcm_period_pointer += frames;
if (s->pcm_period_pointer >= pcm->runtime->period_size) {
@@ -388,7 +388,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
static void pcm_period_tasklet(unsigned long data)
{
struct amdtp_stream *s = (void *)data;
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
if (pcm)
snd_pcm_period_elapsed(pcm);
@@ -453,7 +453,7 @@ static int handle_out_packet(struct amdtp_stream *s,
s->data_block_counter =
(s->data_block_counter + data_blocks) & 0xff;
- buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
+ buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
(s->data_block_quadlets << CIP_DBS_SHIFT) |
((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
s->data_block_counter);
@@ -472,7 +472,7 @@ static int handle_out_packet(struct amdtp_stream *s,
if (queue_out_packet(s, payload_length) < 0)
return -EIO;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
@@ -504,7 +504,7 @@ static int handle_out_packet_without_header(struct amdtp_stream *s,
if (queue_out_packet(s, payload_length) < 0)
return -EIO;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
@@ -621,7 +621,7 @@ end:
if (queue_in_packet(s) < 0)
return -EIO;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
@@ -649,7 +649,7 @@ static int handle_in_packet_without_header(struct amdtp_stream *s,
if (queue_in_packet(s) < 0)
return -EIO;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
@@ -947,7 +947,7 @@ unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
if (!in_interrupt() && amdtp_stream_running(s))
fw_iso_context_flush_completions(s->context);
- return ACCESS_ONCE(s->pcm_buffer_pointer);
+ return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
@@ -977,9 +977,8 @@ EXPORT_SYMBOL(amdtp_stream_pcm_ack);
void amdtp_stream_update(struct amdtp_stream *s)
{
/* Precomputing. */
- ACCESS_ONCE(s->source_node_id_field) =
- (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) &
- CIP_SID_MASK;
+ WRITE_ONCE(s->source_node_id_field,
+ (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);
@@ -1022,7 +1021,7 @@ void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
struct snd_pcm_substream *pcm;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm)
snd_pcm_stop_xrun(pcm);
}
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index a608dae83348..e45de3eecfe3 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -221,7 +221,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s)
static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s,
struct snd_pcm_substream *pcm)
{
- ACCESS_ONCE(s->pcm) = pcm;
+ WRITE_ONCE(s->pcm, pcm);
}
static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
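Every driver hunk in this series follows the same mechanical pattern: ACCESS_ONCE(x) = v becomes WRITE_ONCE(x, v) and v = ACCESS_ONCE(x) becomes v = READ_ONCE(x). A minimal standalone sketch of that pattern, using the simple volatile-cast macro bodies that the tools/ and selftests/ headers further down in this diff adopt (the in-kernel implementations are more elaborate):

/*
 * Sketch of the mechanical conversion used in the hunks above. The macro
 * bodies mirror the volatile-cast definitions adopted by the tools/ and
 * selftests/ headers later in this diff; struct stream is a stand-in for
 * struct amdtp_stream.
 */
#include <stdio.h>

#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	((*(volatile typeof(x) *)&(x)) = (val))

struct stream {
	void *pcm;			/* stands in for s->pcm */
	unsigned long buffer_pointer;	/* stands in for s->pcm_buffer_pointer */
};

int main(void)
{
	struct stream s = { 0 };
	int substream;

	/* trigger path: publish the substream and reset the pointer */
	WRITE_ONCE(s.pcm, &substream);
	WRITE_ONCE(s.buffer_pointer, 0);

	/* packet-handling path: single, non-torn loads */
	void *pcm = READ_ONCE(s.pcm);
	unsigned long ptr = READ_ONCE(s.buffer_pointer);

	printf("pcm=%p pointer=%lu\n", pcm, ptr);
	return 0;
}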
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index 1453c34ce99f..4a884a335248 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -327,7 +327,7 @@ void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct amdtp_dot *p = s->protocol;
if (port < MAX_MIDI_PORTS)
- ACCESS_ONCE(p->midi[port]) = midi;
+ WRITE_ONCE(p->midi[port], midi);
}
static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
@@ -338,7 +338,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm) {
read_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks;
@@ -359,7 +359,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm) {
write_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks;
diff --git a/sound/firewire/fireface/amdtp-ff.c b/sound/firewire/fireface/amdtp-ff.c
index 780da9deb2f0..77c7598b61ab 100644
--- a/sound/firewire/fireface/amdtp-ff.c
+++ b/sound/firewire/fireface/amdtp-ff.c
@@ -108,7 +108,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
unsigned int data_blocks,
unsigned int *syt)
{
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
@@ -127,7 +127,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
unsigned int data_blocks,
unsigned int *syt)
{
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
diff --git a/sound/firewire/fireface/ff-midi.c b/sound/firewire/fireface/ff-midi.c
index 949ee56b4e0e..6a49611ee462 100644
--- a/sound/firewire/fireface/ff-midi.c
+++ b/sound/firewire/fireface/ff-midi.c
@@ -22,7 +22,7 @@ static int midi_playback_open(struct snd_rawmidi_substream *substream)
ff->running_status[substream->number] = 0;
ff->rx_midi_error[substream->number] = false;
- ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = substream;
+ WRITE_ONCE(ff->rx_midi_substreams[substream->number], substream);
return 0;
}
@@ -38,7 +38,7 @@ static int midi_playback_close(struct snd_rawmidi_substream *substream)
struct snd_ff *ff = substream->rmidi->private_data;
cancel_work_sync(&ff->rx_midi_work[substream->number]);
- ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = NULL;
+ WRITE_ONCE(ff->rx_midi_substreams[substream->number], NULL);
return 0;
}
@@ -52,10 +52,10 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *substream,
spin_lock_irqsave(&ff->lock, flags);
if (up)
- ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) =
- substream;
+ WRITE_ONCE(ff->tx_midi_substreams[substream->number],
+ substream);
else
- ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) = NULL;
+ WRITE_ONCE(ff->tx_midi_substreams[substream->number], NULL);
spin_unlock_irqrestore(&ff->lock, flags);
}
diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c
index dd6c8e839647..332b29f8ed75 100644
--- a/sound/firewire/fireface/ff-transaction.c
+++ b/sound/firewire/fireface/ff-transaction.c
@@ -12,7 +12,7 @@ static void finish_transmit_midi_msg(struct snd_ff *ff, unsigned int port,
int rcode)
{
struct snd_rawmidi_substream *substream =
- ACCESS_ONCE(ff->rx_midi_substreams[port]);
+ READ_ONCE(ff->rx_midi_substreams[port]);
if (rcode_is_permanent_error(rcode)) {
ff->rx_midi_error[port] = true;
@@ -60,7 +60,7 @@ static inline void fill_midi_buf(struct snd_ff *ff, unsigned int port,
static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
{
struct snd_rawmidi_substream *substream =
- ACCESS_ONCE(ff->rx_midi_substreams[port]);
+ READ_ONCE(ff->rx_midi_substreams[port]);
u8 *buf = (u8 *)ff->msg_buf[port];
int i, len;
@@ -159,7 +159,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request,
*/
index = (quad >> 8) & 0xff;
if (index > 0) {
- substream = ACCESS_ONCE(ff->tx_midi_substreams[0]);
+ substream = READ_ONCE(ff->tx_midi_substreams[0]);
if (substream != NULL) {
byte = quad & 0xff;
snd_rawmidi_receive(substream, &byte, 1);
@@ -169,7 +169,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request,
/* Message in second port. */
index = (quad >> 24) & 0xff;
if (index > 0) {
- substream = ACCESS_ONCE(ff->tx_midi_substreams[1]);
+ substream = READ_ONCE(ff->tx_midi_substreams[1]);
if (substream != NULL) {
byte = (quad >> 16) & 0xff;
snd_rawmidi_receive(substream, &byte, 1);
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 5826aa8362f1..46092fa3ff9b 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
ptr += count;
if (ptr >= runtime->buffer_size)
ptr -= runtime->buffer_size;
- ACCESS_ONCE(isight->buffer_pointer) = ptr;
+ WRITE_ONCE(isight->buffer_pointer, ptr);
isight->period_counter += count;
if (isight->period_counter >= runtime->period_size) {
@@ -111,7 +111,7 @@ static void isight_samples(struct isight *isight,
struct snd_pcm_runtime *runtime;
unsigned int count1;
- if (!ACCESS_ONCE(isight->pcm_running))
+ if (!READ_ONCE(isight->pcm_running))
return;
runtime = isight->pcm->runtime;
@@ -131,7 +131,7 @@ static void isight_samples(struct isight *isight,
static void isight_pcm_abort(struct isight *isight)
{
- if (ACCESS_ONCE(isight->pcm_active))
+ if (READ_ONCE(isight->pcm_active))
snd_pcm_stop_xrun(isight->pcm);
}
@@ -141,7 +141,7 @@ static void isight_dropped_samples(struct isight *isight, unsigned int total)
u32 dropped;
unsigned int count1;
- if (!ACCESS_ONCE(isight->pcm_running))
+ if (!READ_ONCE(isight->pcm_running))
return;
runtime = isight->pcm->runtime;
@@ -293,7 +293,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
return err;
- ACCESS_ONCE(isight->pcm_active) = true;
+ WRITE_ONCE(isight->pcm_active, true);
return 0;
}
@@ -331,7 +331,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
{
struct isight *isight = substream->private_data;
- ACCESS_ONCE(isight->pcm_active) = false;
+ WRITE_ONCE(isight->pcm_active, false);
mutex_lock(&isight->mutex);
isight_stop_streaming(isight);
@@ -424,10 +424,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- ACCESS_ONCE(isight->pcm_running) = true;
+ WRITE_ONCE(isight->pcm_running, true);
break;
case SNDRV_PCM_TRIGGER_STOP:
- ACCESS_ONCE(isight->pcm_running) = false;
+ WRITE_ONCE(isight->pcm_running, false);
break;
default:
return -EINVAL;
@@ -439,7 +439,7 @@ static snd_pcm_uframes_t isight_pointer(struct snd_pcm_substream *substream)
{
struct isight *isight = substream->private_data;
- return ACCESS_ONCE(isight->buffer_pointer);
+ return READ_ONCE(isight->buffer_pointer);
}
static int isight_create_pcm(struct isight *isight)
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index 96f0091144bb..f0555a24d90e 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -310,7 +310,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
if (p->midi_ports)
read_midi_messages(s, buffer, data_blocks);
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (data_blocks > 0 && pcm)
read_pcm_s32(s, pcm->runtime, buffer, data_blocks);
@@ -374,7 +374,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
if (p->midi_ports)
write_midi_messages(s, buffer, data_blocks);
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm)
write_pcm_s32(s, pcm->runtime, buffer, data_blocks);
else
diff --git a/sound/firewire/oxfw/oxfw-scs1x.c b/sound/firewire/oxfw/oxfw-scs1x.c
index 02d595665898..f33497cdc706 100644
--- a/sound/firewire/oxfw/oxfw-scs1x.c
+++ b/sound/firewire/oxfw/oxfw-scs1x.c
@@ -112,7 +112,7 @@ static void handle_hss(struct fw_card *card, struct fw_request *request,
}
if (length >= 1) {
- stream = ACCESS_ONCE(scs->input);
+ stream = READ_ONCE(scs->input);
if (stream)
midi_input_packet(scs, stream, data, length);
}
@@ -183,7 +183,7 @@ static void scs_output_work(struct work_struct *work)
if (scs->transaction_running)
return;
- stream = ACCESS_ONCE(scs->output);
+ stream = READ_ONCE(scs->output);
if (!stream || scs->error) {
scs->output_idle = true;
wake_up(&scs->idle_wait);
@@ -291,9 +291,9 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *stream, int up)
if (up) {
scs->input_escape_count = 0;
- ACCESS_ONCE(scs->input) = stream;
+ WRITE_ONCE(scs->input, stream);
} else {
- ACCESS_ONCE(scs->input) = NULL;
+ WRITE_ONCE(scs->input, NULL);
}
}
@@ -319,10 +319,10 @@ static void midi_playback_trigger(struct snd_rawmidi_substream *stream, int up)
scs->transaction_bytes = 0;
scs->error = false;
- ACCESS_ONCE(scs->output) = stream;
+ WRITE_ONCE(scs->output, stream);
schedule_work(&scs->work);
} else {
- ACCESS_ONCE(scs->output) = NULL;
+ WRITE_ONCE(scs->output, NULL);
}
}
static void midi_playback_drain(struct snd_rawmidi_substream *stream)
diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c
index 6aff1fc1c72d..ab482423c165 100644
--- a/sound/firewire/tascam/amdtp-tascam.c
+++ b/sound/firewire/tascam/amdtp-tascam.c
@@ -124,7 +124,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
{
struct snd_pcm_substream *pcm;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (data_blocks > 0 && pcm)
read_pcm_s32(s, pcm, buffer, data_blocks);
@@ -143,7 +143,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
/* This field is not used. */
*syt = 0x0000;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm)
write_pcm_s32(s, pcm, buffer, data_blocks);
else
diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c
index 8967c52f5032..2ad692dd4b13 100644
--- a/sound/firewire/tascam/tascam-transaction.c
+++ b/sound/firewire/tascam/tascam-transaction.c
@@ -148,7 +148,7 @@ static void async_midi_port_callback(struct fw_card *card, int rcode,
void *callback_data)
{
struct snd_fw_async_midi_port *port = callback_data;
- struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
+ struct snd_rawmidi_substream *substream = READ_ONCE(port->substream);
/* This port is closed. */
if (substream == NULL)
@@ -173,7 +173,7 @@ static void midi_port_work(struct work_struct *work)
{
struct snd_fw_async_midi_port *port =
container_of(work, struct snd_fw_async_midi_port, work);
- struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
+ struct snd_rawmidi_substream *substream = READ_ONCE(port->substream);
int generation;
/* Under transacting or error state. */
@@ -282,7 +282,7 @@ static void handle_midi_tx(struct fw_card *card, struct fw_request *request,
bytes = 3;
}
- substream = ACCESS_ONCE(tscm->tx_midi_substreams[port]);
+ substream = READ_ONCE(tscm->tx_midi_substreams[port]);
if (substream != NULL)
snd_rawmidi_receive(substream, b + 1, bytes);
}
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index 8382ffa3bcaf..2472144b329e 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -165,7 +165,7 @@ static bool xtfpga_pcm_push_tx(struct xtfpga_i2s *i2s)
tx_substream = rcu_dereference(i2s->tx_substream);
tx_active = tx_substream && snd_pcm_running(tx_substream);
if (tx_active) {
- unsigned tx_ptr = ACCESS_ONCE(i2s->tx_ptr);
+ unsigned tx_ptr = READ_ONCE(i2s->tx_ptr);
unsigned new_tx_ptr = i2s->tx_fn(i2s, tx_substream->runtime,
tx_ptr);
@@ -437,7 +437,7 @@ static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ACCESS_ONCE(i2s->tx_ptr) = 0;
+ WRITE_ONCE(i2s->tx_ptr, 0);
rcu_assign_pointer(i2s->tx_substream, substream);
xtfpga_pcm_refill_fifo(i2s);
break;
@@ -459,7 +459,7 @@ static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct xtfpga_i2s *i2s = runtime->private_data;
- snd_pcm_uframes_t pos = ACCESS_ONCE(i2s->tx_ptr);
+ snd_pcm_uframes_t pos = READ_ONCE(i2s->tx_ptr);
return pos < runtime->buffer_size ? pos : 0;
}
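The xtfpga-i2s hunks keep rcu_assign_pointer()/rcu_dereference() for publishing tx_substream and use plain once-accesses only for the tx_ptr counter. A rough userspace analogue of that publish/consume pairing, modelled with C11 release/acquire atomics rather than the kernel RCU API; all names here are illustrative:

/*
 * Userspace analogue of the publish/consume pairing in xtfpga-i2s above:
 * rcu_assign_pointer() behaves like a store-release and rcu_dereference()
 * like a dependency-ordered load (modelled here as an acquire load for
 * simplicity). Illustrative C11 code, not the kernel RCU API.
 */
#include <stdatomic.h>
#include <stdio.h>

struct substream {
	int rate;
};

static _Atomic(struct substream *) tx_substream;

static void trigger_start(struct substream *s)
{
	s->rate = 48000;				/* initialise first ... */
	atomic_store_explicit(&tx_substream, s,
			      memory_order_release);	/* ... then publish */
}

static void push_tx(void)
{
	struct substream *s = atomic_load_explicit(&tx_substream,
						   memory_order_acquire);

	if (s)
		printf("pushing at %d Hz\n", s->rate);	/* rate is visible */
}

int main(void)
{
	static struct substream s;

	trigger_start(&s);
	push_tx();
	return 0;
}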
diff --git a/sound/usb/bcd2000/bcd2000.c b/sound/usb/bcd2000/bcd2000.c
index 7371e5b06035..fc579f330601 100644
--- a/sound/usb/bcd2000/bcd2000.c
+++ b/sound/usb/bcd2000/bcd2000.c
@@ -108,7 +108,7 @@ static void bcd2000_midi_handle_input(struct bcd2000 *bcd2k,
unsigned int payload_length, tocopy;
struct snd_rawmidi_substream *midi_receive_substream;
- midi_receive_substream = ACCESS_ONCE(bcd2k->midi_receive_substream);
+ midi_receive_substream = READ_ONCE(bcd2k->midi_receive_substream);
if (!midi_receive_substream)
return;
@@ -139,7 +139,7 @@ static void bcd2000_midi_send(struct bcd2000 *bcd2k)
BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE);
- midi_out_substream = ACCESS_ONCE(bcd2k->midi_out_substream);
+ midi_out_substream = READ_ONCE(bcd2k->midi_out_substream);
if (!midi_out_substream)
return;
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h
index 7d8c3261a50d..1f5e26aae9fc 100644
--- a/tools/arch/x86/include/asm/atomic.h
+++ b/tools/arch/x86/include/asm/atomic.h
@@ -25,7 +25,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
/**
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h
index 40b231fb95bd..4c1966f7c77a 100644
--- a/tools/include/asm-generic/atomic-gcc.h
+++ b/tools/include/asm-generic/atomic-gcc.h
@@ -22,7 +22,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
/**
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 33b5e6cdf38c..d19e11b68de7 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -378,7 +378,7 @@ struct addr_filters {
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->userpg;
- u64 head = ACCESS_ONCE(pc->aux_head);
+ u64 head = READ_ONCE(pc->aux_head);
/* Ensure all reads are done after we read the head */
rmb();
@@ -389,7 +389,7 @@ static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
- u64 head = ACCESS_ONCE(pc->aux_head);
+ u64 head = READ_ONCE(pc->aux_head);
#else
u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif
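The perf auxtrace hunks change only the head load; the rmb() that follows it is unchanged, so data reads still cannot be reordered before the head is sampled. A minimal consumer-side sketch of that read-head-then-fence pattern, with simplified stand-ins for perf's structures and barrier (a full barrier is used in place of the per-arch rmb()):

/*
 * Consumer-side sketch of the auxtrace pattern above: load the ring-buffer
 * head once, then fence before reading the data it covers. mmap_page and
 * rmb() are simplified stand-ins for perf's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))
#define rmb()		__sync_synchronize()

struct mmap_page {
	uint64_t aux_head;	/* advanced by the producer */
};

static uint64_t read_snapshot_head(struct mmap_page *pc)
{
	uint64_t head = READ_ONCE(pc->aux_head);

	rmb();	/* ensure all data reads are done after reading the head */
	return head;
}

int main(void)
{
	struct mmap_page pc = { .aux_head = 4096 };

	printf("head=%llu\n", (unsigned long long)read_snapshot_head(&pc));
	return 0;
}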
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 41caa098ed15..3f63ee12471d 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -114,7 +114,7 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
extern volatile int session_done;
-#define session_done() ACCESS_ONCE(session_done)
+#define session_done() READ_ONCE(session_done)
int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h b/tools/testing/selftests/powerpc/dscr/dscr.h
index 18ea223bd398..cdb840bc54f2 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr.h
+++ b/tools/testing/selftests/powerpc/dscr/dscr.h
@@ -39,7 +39,7 @@
#define rmb() asm volatile("lwsync":::"memory")
#define wmb() asm volatile("lwsync":::"memory")
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))
/* Prilvilege state DSCR access */
inline unsigned long get_dscr(void)
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
index df17c3bab0a7..9e1a37e93b63 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
@@ -27,7 +27,7 @@ static void *do_test(void *in)
unsigned long d, cur_dscr, cur_dscr_usr;
unsigned long s1, s2;
- s1 = ACCESS_ONCE(sequence);
+ s1 = READ_ONCE(sequence);
if (s1 & 1)
continue;
rmb();
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
index be3fdd351937..3f95a768a03b 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
@@ -35,8 +35,7 @@
#define rs_smp_mb() do {} while (0)
#endif
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *) &(x))
-#define READ_ONCE(x) ACCESS_ONCE(x)
-#define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val))
+#define READ_ONCE(x) (*(volatile typeof(x) *) &(x))
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
#endif
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 90b0133004e1..5706e075adf2 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -110,11 +110,15 @@ static inline void busy_wait(void)
barrier();
}
+#if defined(__x86_64__)
+#define smp_mb() asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
+#else
/*
* Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
* with other __ATOMIC_SEQ_CST calls.
*/
#define smp_mb() __sync_synchronize()
+#endif
/*
* This abuses the atomic builtins for thread fences, and
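The ringtest change gives x86-64 builds a cheaper smp_mb(): a locked add of zero to a dead stack slot is a full memory barrier and avoids MFENCE, and the -128 displacement stays inside the red zone so no live data is clobbered. A minimal sketch of the same idea, assuming an x86-64 target (%rsp); other targets fall back to the GCC builtin full barrier:

/*
 * Sketch of the smp_mb() choice above, assuming an x86-64 target: a locked
 * add of zero to a dead stack slot (-128(%rsp), inside the red zone) acts
 * as a full memory barrier and is cheaper than MFENCE.
 */
#include <stdio.h>

#if defined(__x86_64__)
#define smp_mb()	asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
#else
#define smp_mb()	__sync_synchronize()
#endif

static int data, flag;

int main(void)
{
	data = 42;
	smp_mb();	/* order the data store before the flag store */
	flag = 1;

	printf("data=%d flag=%d\n", data, flag);
	return 0;
}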
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9deb5a245b83..ce507ae1d4f5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2302,7 +2302,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
continue;
} else if (pass && i > last_boosted_vcpu)
break;
- if (!ACCESS_ONCE(vcpu->preempted))
+ if (!READ_ONCE(vcpu->preempted))
continue;
if (vcpu == me)
continue;