-rw-r--r--  .clang-format | 41
-rw-r--r--  CREDITS | 4
-rw-r--r--  Documentation/ABI/stable/sysfs-block | 2
-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-reboot | 10
-rw-r--r--  Documentation/arch/riscv/hwprobe.rst | 2
-rw-r--r--  Documentation/power/runtime_pm.rst | 2
-rw-r--r--  MAINTAINERS | 50
-rw-r--r--  Makefile | 8
-rw-r--r--  arch/loongarch/kernel/Makefile | 8
-rw-r--r--  arch/loongarch/kvm/Makefile | 2
-rw-r--r--  arch/riscv/include/asm/alternative-macros.h | 19
-rw-r--r--  arch/riscv/include/asm/kgdb.h | 9
-rw-r--r--  arch/riscv/include/asm/syscall.h | 7
-rw-r--r--  arch/riscv/kernel/Makefile | 4
-rw-r--r--  arch/riscv/kernel/kgdb.c | 6
-rw-r--r--  arch/riscv/kernel/module-sections.c | 13
-rw-r--r--  arch/riscv/kernel/module.c | 11
-rw-r--r--  arch/riscv/kernel/setup.c | 36
-rw-r--r--  arch/riscv/kernel/unaligned_access_speed.c | 35
-rw-r--r--  arch/x86/boot/compressed/mem.c | 5
-rw-r--r--  arch/x86/boot/compressed/sev.c | 67
-rw-r--r--  arch/x86/boot/compressed/sev.h | 2
-rw-r--r--  arch/x86/events/intel/core.c | 24
-rw-r--r--  arch/x86/events/intel/ds.c | 8
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c | 107
-rw-r--r--  arch/x86/include/asm/intel-family.h | 2
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 19
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 9
-rw-r--r--  arch/x86/kernel/i8253.c | 3
-rw-r--r--  arch/x86/xen/enlighten.c | 7
-rw-r--r--  block/bdev.c | 3
-rw-r--r--  block/bio-integrity.c | 17
-rw-r--r--  block/blk-sysfs.c | 2
-rw-r--r--  block/blk-throttle.h | 1
-rw-r--r--  crypto/scompress.c | 10
-rw-r--r--  crypto/testmgr.c | 147
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 10
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c | 17
-rw-r--r--  drivers/accel/ivpu/ivpu_hw.h | 14
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_btrs.c | 134
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_btrs.h | 7
-rw-r--r--  drivers/accel/ivpu/ivpu_job.c | 15
-rw-r--r--  drivers/accel/ivpu/ivpu_sysfs.c | 49
-rw-r--r--  drivers/accel/ivpu/vpu_boot_api.h | 13
-rw-r--r--  drivers/accel/ivpu/vpu_jsm_api.h | 53
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 4
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 2
-rw-r--r--  drivers/block/Kconfig | 6
-rw-r--r--  drivers/block/loop.c | 121
-rw-r--r--  drivers/block/ublk_drv.c | 532
-rw-r--r--  drivers/char/virtio_console.c | 7
-rw-r--r--  drivers/cpufreq/amd-pstate.c | 36
-rw-r--r--  drivers/cpufreq/cpufreq.c | 32
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 2
-rw-r--r--  drivers/crypto/atmel-sha204a.c | 6
-rw-r--r--  drivers/dma-buf/sw_sync.c | 19
-rw-r--r--  drivers/gpio/gpiolib.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 7
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 74
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml | 7
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c | 16
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_dma_buf.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ads.c | 75
-rw-r--r--  drivers/gpu/drm/xe/xe_hmm.c | 24
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp_debugfs.c | 13
-rw-r--r--  drivers/i2c/busses/i2c-cros-ec-tunnel.c | 3
-rw-r--r--  drivers/i2c/i2c-atr.c | 2
-rw-r--r--  drivers/iio/magnetometer/ak8974.c | 4
-rw-r--r--  drivers/input/joystick/magellan.c | 2
-rw-r--r--  drivers/irqchip/Kconfig | 4
-rw-r--r--  drivers/irqchip/irq-renesas-rzv2h.c | 8
-rw-r--r--  drivers/md/bcache/super.c | 2
-rw-r--r--  drivers/md/md-bitmap.c | 5
-rw-r--r--  drivers/md/raid1.c | 26
-rw-r--r--  drivers/md/raid10.c | 1
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/fw.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/d3.c | 9
-rw-r--r--  drivers/nvme/host/core.c | 2
-rw-r--r--  drivers/nvme/host/multipath.c | 14
-rw-r--r--  drivers/nvme/target/auth.c | 2
-rw-r--r--  drivers/nvme/target/pci-epf.c | 88
-rw-r--r--  drivers/pci/msi/msi.c | 3
-rw-r--r--  drivers/pci/pci.c | 4
-rw-r--r--  drivers/power/supply/power_supply_sysfs.c | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 8
-rw-r--r--  drivers/scsi/scsi_lib.c | 6
-rw-r--r--  drivers/spi/spi-imx.c | 5
-rw-r--r--  drivers/spi/spi-sun4i.c | 1
-rw-r--r--  drivers/spi/spi-tegra210-quad.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 2
-rw-r--r--  drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c | 7
-rw-r--r--  drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c | 33
-rw-r--r--  drivers/ufs/core/ufs-mcq.c | 12
-rw-r--r--  drivers/ufs/core/ufshcd.c | 31
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 43
-rw-r--r--  drivers/ufs/host/ufs-qcom.h | 18
-rw-r--r--  drivers/vfio/pci/vfio_pci_config.c | 2
-rw-r--r--  drivers/vhost/scsi.c | 74
-rw-r--r--  drivers/virtio/virtio.c | 6
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 4
-rw-r--r--  drivers/virtio/virtio_ring.c | 2
-rw-r--r--  fs/Kconfig | 1
-rw-r--r--  fs/bcachefs/bcachefs.h | 2
-rw-r--r--  fs/bcachefs/btree_update_interior.c | 2
-rw-r--r--  fs/bcachefs/buckets.c | 3
-rw-r--r--  fs/bcachefs/buckets.h | 5
-rw-r--r--  fs/bcachefs/errcode.h | 2
-rw-r--r--  fs/bcachefs/extents.c | 2
-rw-r--r--  fs/bcachefs/fs-ioctl.c | 2
-rw-r--r--  fs/bcachefs/fsck.c | 44
-rw-r--r--  fs/bcachefs/io_read.c | 24
-rw-r--r--  fs/bcachefs/recovery.c | 5
-rw-r--r--  fs/bcachefs/sb-errors_format.h | 4
-rw-r--r--  fs/bcachefs/super-io.c | 20
-rw-r--r--  fs/bcachefs/super.c | 9
-rw-r--r--  fs/btrfs/file.c | 9
-rw-r--r--  fs/btrfs/relocation.c | 2
-rw-r--r--  fs/btrfs/subpage.c | 4
-rw-r--r--  fs/btrfs/tree-checker.c | 2
-rw-r--r--  fs/btrfs/zoned.c | 19
-rw-r--r--  fs/cachefiles/key.c | 2
-rw-r--r--  fs/dax.c | 1
-rw-r--r--  fs/eventpoll.c | 10
-rw-r--r--  fs/namei.c | 81
-rw-r--r--  fs/netfs/fscache_cache.c | 2
-rw-r--r--  fs/netfs/fscache_cookie.c | 2
-rw-r--r--  fs/nfs/Kconfig | 2
-rw-r--r--  fs/nfs/internal.h | 7
-rw-r--r--  fs/nfs/nfs4session.h | 4
-rw-r--r--  fs/nfsd/Kconfig | 1
-rw-r--r--  fs/nfsd/nfs4state.c | 2
-rw-r--r--  fs/nfsd/nfsfh.h | 7
-rw-r--r--  fs/smb/client/cifsproto.h | 2
-rw-r--r--  fs/smb/client/connect.c | 34
-rw-r--r--  fs/smb/client/file.c | 28
-rw-r--r--  fs/smb/server/connection.c | 4
-rw-r--r--  fs/smb/server/oplock.c | 29
-rw-r--r--  fs/smb/server/oplock.h | 1
-rw-r--r--  fs/smb/server/smb2pdu.c | 4
-rw-r--r--  fs/smb/server/transport_ipc.c | 7
-rw-r--r--  fs/smb/server/transport_tcp.c | 14
-rw-r--r--  fs/smb/server/transport_tcp.h | 1
-rw-r--r--  fs/smb/server/vfs.c | 3
-rw-r--r--  fs/smb/server/vfs_cache.c | 8
-rw-r--r--  fs/stat.c | 32
-rw-r--r--  include/acpi/actbl.h | 2
-rw-r--r--  include/drm/drm_gem.h | 3
-rw-r--r--  include/linux/backing-dev.h | 1
-rw-r--r--  include/linux/blkdev.h | 73
-rw-r--r--  include/linux/local_lock_internal.h | 8
-rw-r--r--  include/linux/mm.h | 17
-rw-r--r--  include/linux/mmzone.h | 5
-rw-r--r--  include/linux/namei.h | 1
-rw-r--r--  include/linux/nfs.h | 7
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/virtio.h | 3
-rw-r--r--  include/uapi/drm/ivpu_accel.h | 4
-rw-r--r--  include/uapi/linux/io_uring.h | 4
-rw-r--r--  include/uapi/linux/landlock.h | 87
-rw-r--r--  include/uapi/linux/vhost.h | 4
-rw-r--r--  include/uapi/linux/virtio_pci.h | 1
-rw-r--r--  include/ufs/ufs_quirks.h | 6
-rw-r--r--  init/Kconfig | 20
-rw-r--r--  io_uring/rsrc.c | 92
-rw-r--r--  io_uring/zcrx.c | 37
-rw-r--r--  io_uring/zcrx.h | 1
-rw-r--r--  kernel/audit_watch.c | 16
-rw-r--r--  kernel/cgroup/cgroup.c | 31
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 49
-rw-r--r--  kernel/sched/ext.c | 50
-rw-r--r--  kernel/trace/ftrace.c | 27
-rw-r--r--  kernel/trace/trace_entries.h | 4
-rw-r--r--  kernel/trace/trace_events_filter.c | 4
-rw-r--r--  kernel/vhost_task.c | 2
-rw-r--r--  lib/Kconfig.debug | 2
-rw-r--r--  lib/Kconfig.ubsan | 1
-rw-r--r--  lib/string.c | 13
-rw-r--r--  lib/test_ubsan.c | 18
-rw-r--r--  mm/gup.c | 4
-rw-r--r--  mm/hugetlb_vmemmap.c | 6
-rw-r--r--  mm/internal.h | 1
-rw-r--r--  mm/kasan/kasan_test_c.c | 20
-rw-r--r--  mm/memcontrol-v1.c | 2
-rw-r--r--  mm/memory.c | 4
-rw-r--r--  mm/mm_init.c | 1
-rw-r--r--  mm/page_alloc.c | 40
-rw-r--r--  mm/vmscan.c | 29
-rw-r--r--  rust/Makefile | 2
-rw-r--r--  rust/helpers/dma.c | 16
-rw-r--r--  rust/helpers/helpers.c | 1
-rw-r--r--  rust/helpers/io.c | 34
-rw-r--r--  rust/pin-init/examples/pthread_mutex.rs | 2
-rw-r--r--  rust/pin-init/src/alloc.rs | 8
-rw-r--r--  rust/pin-init/src/lib.rs | 2
-rw-r--r--  scripts/Makefile.compiler | 4
-rw-r--r--  scripts/Makefile.extrawarn | 2
-rwxr-xr-x  scripts/generate_rust_analyzer.py | 12
-rw-r--r--  scripts/genksyms/keywords.c | 7
-rw-r--r--  scripts/genksyms/parse.y | 5
-rw-r--r--  security/Kconfig.hardening | 2
-rw-r--r--  security/integrity/ima/ima_main.c | 4
-rw-r--r--  security/landlock/domain.c | 4
-rw-r--r--  security/landlock/domain.h | 2
-rw-r--r--  security/landlock/syscalls.c | 27
-rw-r--r--  tools/objtool/check.c | 1
-rw-r--r--  tools/sched_ext/scx_flatcg.bpf.c | 2
-rw-r--r--  tools/testing/kunit/qemu_configs/sh.py | 4
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/dynevent_limitations.tc | 23
-rw-r--r--  tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc | 20
-rw-r--r--  tools/testing/selftests/landlock/audit.h | 21
-rw-r--r--  tools/testing/selftests/landlock/audit_test.c | 154
-rw-r--r--  tools/testing/selftests/landlock/fs_test.c | 3
-rw-r--r--  tools/testing/selftests/lib/config | 1
-rw-r--r--  tools/testing/selftests/ublk/Makefile | 9
-rw-r--r--  tools/testing/selftests/ublk/fault_inject.c | 98
-rw-r--r--  tools/testing/selftests/ublk/kublk.c | 343
-rw-r--r--  tools/testing/selftests/ublk/kublk.h | 47
-rw-r--r--  tools/testing/selftests/ublk/stripe.c | 28
-rwxr-xr-x  tools/testing/selftests/ublk/test_common.sh | 142
-rwxr-xr-x  tools/testing/selftests/ublk/test_generic_04.sh | 40
-rwxr-xr-x  tools/testing/selftests/ublk/test_generic_05.sh | 44
-rwxr-xr-x  tools/testing/selftests/ublk/test_generic_06.sh | 41
-rwxr-xr-x  tools/testing/selftests/ublk/test_loop_01.sh | 8
-rwxr-xr-x  tools/testing/selftests/ublk/test_loop_02.sh | 8
-rwxr-xr-x  tools/testing/selftests/ublk/test_loop_03.sh | 8
-rwxr-xr-x  tools/testing/selftests/ublk/test_loop_04.sh | 9
-rwxr-xr-x  tools/testing/selftests/ublk/test_loop_05.sh | 8
-rwxr-xr-x  tools/testing/selftests/ublk/test_stress_01.sh | 45
-rwxr-xr-x  tools/testing/selftests/ublk/test_stress_02.sh | 45
-rwxr-xr-x  tools/testing/selftests/ublk/test_stress_03.sh | 38
-rwxr-xr-x  tools/testing/selftests/ublk/test_stress_04.sh | 37
-rwxr-xr-x  tools/testing/selftests/ublk/test_stress_05.sh | 64
-rwxr-xr-x  tools/testing/selftests/ublk/test_stripe_01.sh | 12
-rwxr-xr-x  tools/testing/selftests/ublk/test_stripe_02.sh | 13
-rwxr-xr-x  tools/testing/selftests/ublk/test_stripe_03.sh | 12
-rwxr-xr-x  tools/testing/selftests/ublk/test_stripe_04.sh | 13
265 files changed, 3405 insertions(+), 1810 deletions(-)
diff --git a/.clang-format b/.clang-format
index fe1aa1a30d40..86c20ee744de 100644
--- a/.clang-format
+++ b/.clang-format
@@ -92,6 +92,7 @@ ForEachMacros:
- '__rq_for_each_bio'
- '__shost_for_each_device'
- '__sym_for_each'
+ - '_for_each_counter'
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
@@ -141,11 +142,14 @@ ForEachMacros:
- 'damon_for_each_target_safe'
- 'damos_for_each_filter'
- 'damos_for_each_filter_safe'
+ - 'damos_for_each_ops_filter'
+ - 'damos_for_each_ops_filter_safe'
- 'damos_for_each_quota_goal'
- 'damos_for_each_quota_goal_safe'
- 'data__for_each_file'
- 'data__for_each_file_new'
- 'data__for_each_file_start'
+ - 'def_for_each_cpu'
- 'device_for_each_child_node'
- 'device_for_each_child_node_scoped'
- 'dma_fence_array_for_each'
@@ -176,6 +180,7 @@ ForEachMacros:
- 'drm_for_each_privobj'
- 'drm_gem_for_each_gpuvm_bo'
- 'drm_gem_for_each_gpuvm_bo_safe'
+ - 'drm_gpusvm_for_each_range'
- 'drm_gpuva_for_each_op'
- 'drm_gpuva_for_each_op_from_reverse'
- 'drm_gpuva_for_each_op_reverse'
@@ -216,8 +221,10 @@ ForEachMacros:
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
+ - 'for_each_active_irq'
- 'for_each_active_route'
- 'for_each_aggr_pgid'
+ - 'for_each_alloc_capable_rdt_resource'
- 'for_each_and_bit'
- 'for_each_andnot_bit'
- 'for_each_available_child_of_node'
@@ -228,6 +235,7 @@ ForEachMacros:
- 'for_each_btf_ext_rec'
- 'for_each_btf_ext_sec'
- 'for_each_bvec'
+ - 'for_each_capable_rdt_resource'
- 'for_each_card_auxs'
- 'for_each_card_auxs_safe'
- 'for_each_card_components'
@@ -241,6 +249,7 @@ ForEachMacros:
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_child_of_node_scoped'
+ - 'for_each_child_of_node_with_prefix'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_clear_bitrange'
@@ -296,6 +305,7 @@ ForEachMacros:
- 'for_each_group_member_head'
- 'for_each_hstate'
- 'for_each_hwgpio'
+ - 'for_each_hwgpio_in_range'
- 'for_each_if'
- 'for_each_inject_fn'
- 'for_each_insn'
@@ -304,6 +314,7 @@ ForEachMacros:
- 'for_each_intid'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
+ - 'for_each_irq_desc'
- 'for_each_irq_nr'
- 'for_each_lang'
- 'for_each_link_ch_maps'
@@ -324,6 +335,8 @@ ForEachMacros:
- 'for_each_missing_reg'
- 'for_each_mle_subelement'
- 'for_each_mod_mem_type'
+ - 'for_each_mon_capable_rdt_resource'
+ - 'for_each_mp_bvec'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_net_rcu'
@@ -351,6 +364,7 @@ ForEachMacros:
- 'for_each_node_by_name'
- 'for_each_node_by_type'
- 'for_each_node_mask'
+ - 'for_each_node_numadist'
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
@@ -359,6 +373,8 @@ ForEachMacros:
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
+ - 'for_each_of_graph_port'
+ - 'for_each_of_graph_port_endpoint'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
@@ -372,9 +388,11 @@ ForEachMacros:
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_online_cpu'
+ - 'for_each_online_cpu_wrap'
- 'for_each_online_node'
- 'for_each_online_pgdat'
- 'for_each_or_bit'
+ - 'for_each_page_ext'
- 'for_each_path'
- 'for_each_pci_bridge'
- 'for_each_pci_dev'
@@ -382,8 +400,10 @@ ForEachMacros:
- 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
+ - 'for_each_possible_cpu_wrap'
- 'for_each_present_blessed_reg'
- 'for_each_present_cpu'
+ - 'for_each_present_section_nr'
- 'for_each_prime_number'
- 'for_each_prime_number_from'
- 'for_each_probe_cache_entry'
@@ -396,6 +416,7 @@ ForEachMacros:
- 'for_each_prop_dlc_cpus'
- 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
+ - 'for_each_rdt_resource'
- 'for_each_reg'
- 'for_each_reg_filtered'
- 'for_each_reloc'
@@ -434,10 +455,10 @@ ForEachMacros:
- 'for_each_subelement_id'
- 'for_each_sublist'
- 'for_each_subsystem'
+ - 'for_each_suite'
- 'for_each_supported_activate_fn'
- 'for_each_supported_inject_fn'
- 'for_each_sym'
- - 'for_each_test'
- 'for_each_thread'
- 'for_each_token'
- 'for_each_unicast_dest_pgid'
@@ -499,8 +520,10 @@ ForEachMacros:
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
+ - 'iio_for_each_active_channel'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
+ - 'in_dev_for_each_ifa_rtnl_net'
- 'inet_bind_bucket_for_each'
- 'interval_tree_for_each_span'
- 'intlist__for_each_entry'
@@ -542,7 +565,6 @@ ForEachMacros:
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_rcu'
- - 'list_for_each_reverse'
- 'list_for_each_safe'
- 'llist_for_each'
- 'llist_for_each_entry'
@@ -552,6 +574,7 @@ ForEachMacros:
- 'map__for_each_symbol'
- 'map__for_each_symbol_by_name'
- 'mas_for_each'
+ - 'mas_for_each_rev'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
@@ -561,10 +584,15 @@ ForEachMacros:
- 'media_pipeline_for_each_entity'
- 'media_pipeline_for_each_pad'
- 'mlx5_lag_for_each_peer_mdev'
+ - 'mptcp_for_each_subflow'
- 'msi_domain_for_each_desc'
- 'msi_for_each_desc'
- 'mt_for_each'
+ - 'nanddev_io_for_each_block'
- 'nanddev_io_for_each_page'
+ - 'neigh_for_each_in_bucket'
+ - 'neigh_for_each_in_bucket_rcu'
+ - 'neigh_for_each_in_bucket_safe'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
@@ -604,11 +632,11 @@ ForEachMacros:
- 'perf_evlist__for_each_entry_safe'
- 'perf_evlist__for_each_evsel'
- 'perf_evlist__for_each_mmap'
+ - 'perf_evsel_for_each_per_thread_period_safe'
- 'perf_hpp_list__for_each_format'
- 'perf_hpp_list__for_each_format_safe'
- 'perf_hpp_list__for_each_sort_list'
- 'perf_hpp_list__for_each_sort_list_safe'
- - 'perf_tool_event__for_each_event'
- 'plist_for_each'
- 'plist_for_each_continue'
- 'plist_for_each_entry'
@@ -627,7 +655,6 @@ ForEachMacros:
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'rdma_umem_for_each_dma_block'
- - 'resort_rb__for_each_entry'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
@@ -658,6 +685,7 @@ ForEachMacros:
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
+ - 'sk_for_each_bound_safe'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
@@ -680,7 +708,11 @@ ForEachMacros:
- 'tb_property_for_each'
- 'tcf_act_for_each_action'
- 'tcf_exts_for_each_action'
+ - 'test_suite__for_each_test_case'
+ - 'tool_pmu__for_each_event'
+ - 'ttm_bo_lru_for_each_reserved_guarded'
- 'ttm_resource_manager_for_each_res'
+ - 'udp_lrpa_for_each_entry_rcu'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
@@ -691,6 +723,7 @@ ForEachMacros:
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'while_for_each_ftrace_op'
+ - 'workloads__for_each'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
diff --git a/CREDITS b/CREDITS
index 1b77fba6c27e..f74d230992d6 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2071,6 +2071,10 @@ S: 660 Harvard Ave. #7
S: Santa Clara, CA 95051
S: USA
+N: Joonsoo Kim
+E: iamjoonsoo.kim@lge.com
+D: Slab allocators
+
N: Kukjin Kim
E: kgene@kernel.org
D: Samsung S3C, S5P and Exynos ARM architectures
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 3879963f0f01..11545c9e2e93 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -77,7 +77,7 @@ Description:
What: /sys/block/<disk>/diskseq
Date: February 2021
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description:
The /sys/block/<disk>/diskseq files reports the disk
sequence number, which is a monotonically increasing
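[Background for the entry above: diskseq reads back as a single decimal integer, so userspace can consume it with plain stdio. A minimal sketch, not part of this commit; the device name "sda" is an assumption.]

    /* Hypothetical reader for /sys/block/<disk>/diskseq ("sda" assumed). */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long seq;
            FILE *f = fopen("/sys/block/sda/diskseq", "r");

            if (!f) {
                    perror("open diskseq");
                    return 1;
            }
            if (fscanf(f, "%llu", &seq) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("diskseq: %llu\n", seq);
            return 0;
    }
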
diff --git a/Documentation/ABI/testing/sysfs-kernel-reboot b/Documentation/ABI/testing/sysfs-kernel-reboot
index e117aba46be0..52571fd5ddba 100644
--- a/Documentation/ABI/testing/sysfs-kernel-reboot
+++ b/Documentation/ABI/testing/sysfs-kernel-reboot
@@ -1,7 +1,7 @@
What: /sys/kernel/reboot
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: Interface to set the kernel reboot behavior, similarly to
what can be done via the reboot= cmdline option.
(see Documentation/admin-guide/kernel-parameters.txt)
@@ -9,25 +9,25 @@ Description: Interface to set the kernel reboot behavior, similarly to
What: /sys/kernel/reboot/mode
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: Reboot mode. Valid values are: cold warm hard soft gpio
What: /sys/kernel/reboot/type
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: Reboot type. Valid values are: bios acpi kbd triple efi pci
What: /sys/kernel/reboot/cpu
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: CPU number to use to reboot.
What: /sys/kernel/reboot/force
Date: November 2020
KernelVersion: 5.11
-Contact: Matteo Croce <mcroce@microsoft.com>
+Contact: Matteo Croce <teknoraver@meta.com>
Description: Don't wait for any other CPUs on reboot and
avoid anything that could hang.
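[The entries above take a plain string write, with the valid values listed in the ABI text ("cold warm hard soft gpio" for mode). A minimal sketch, not part of this commit; requires root.]

    /* Sketch: select the "soft" reboot mode via the sysfs knob above. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/reboot/mode", "w");

            if (!f) {
                    perror("/sys/kernel/reboot/mode");
                    return 1;
            }
            fputs("soft\n", f);
            return fclose(f) ? 1 : 0;
    }
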
diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst
index 53607d962653..f60bf5991755 100644
--- a/Documentation/arch/riscv/hwprobe.rst
+++ b/Documentation/arch/riscv/hwprobe.rst
@@ -51,7 +51,7 @@ The following keys are defined:
* :c:macro:`RISCV_HWPROBE_KEY_MARCHID`: Contains the value of ``marchid``, as
defined by the RISC-V privileged architecture specification.
-* :c:macro:`RISCV_HWPROBE_KEY_MIMPLID`: Contains the value of ``mimplid``, as
+* :c:macro:`RISCV_HWPROBE_KEY_MIMPID`: Contains the value of ``mimpid``, as
defined by the RISC-V privileged architecture specification.
* :c:macro:`RISCV_HWPROBE_KEY_BASE_BEHAVIOR`: A bitmask containing the base
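[The keys this document defines, including the corrected RISCV_HWPROBE_KEY_MIMPID, are queried through the riscv_hwprobe syscall. A sketch under the assumption that your uapi headers export __NR_riscv_hwprobe and struct riscv_hwprobe as current kernels do; it is illustrative, not taken from this commit.]

    /* RISC-V only: query the machine-ID keys via the hwprobe syscall. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/hwprobe.h>        /* struct riscv_hwprobe, RISCV_HWPROBE_KEY_* */

    int main(void)
    {
            struct riscv_hwprobe pairs[] = {
                    { .key = RISCV_HWPROBE_KEY_MVENDORID },
                    { .key = RISCV_HWPROBE_KEY_MARCHID },
                    { .key = RISCV_HWPROBE_KEY_MIMPID },
            };

            if (syscall(__NR_riscv_hwprobe, pairs, 3, 0, NULL, 0))
                    return 1;
            for (int i = 0; i < 3; i++)
                    printf("key %lld = 0x%llx\n", (long long)pairs[i].key,
                           (unsigned long long)pairs[i].value);
            return 0;
    }
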
diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
index 12f429359a82..63344bea8393 100644
--- a/Documentation/power/runtime_pm.rst
+++ b/Documentation/power/runtime_pm.rst
@@ -154,7 +154,7 @@ suspending the device are satisfied) and to queue up a suspend request for the
device in that case. If there is no idle callback, or if the callback returns
0, then the PM core will attempt to carry out a runtime suspend of the device,
also respecting devices configured for autosuspend. In essence this means a
-call to __pm_runtime_autosuspend() (do note that drivers needs to update the
+call to pm_runtime_autosuspend() (do note that drivers needs to update the
device last busy mark, pm_runtime_mark_last_busy(), to control the delay under
this circumstance). To prevent this (for example, if the callback routine has
started a delayed suspend), the routine must return a non-zero value. Negative
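[In driver code, the autosuspend flow this paragraph describes is the usual mark-last-busy-then-put sequence. The helpers below are the real runtime-PM API; the driver around them is hypothetical.]

    /* Hypothetical driver fragment showing the autosuspend pattern. */
    #include <linux/pm_runtime.h>

    static void mydrv_setup_pm(struct device *dev)
    {
            pm_runtime_set_autosuspend_delay(dev, 50);      /* ms */
            pm_runtime_use_autosuspend(dev);
            pm_runtime_enable(dev);
    }

    static void mydrv_io_done(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev);         /* restart the delay */
            pm_runtime_put_autosuspend(dev);        /* suspend once it expires */
    }
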
diff --git a/MAINTAINERS b/MAINTAINERS
index d980da72ec60..f31aeb6b452e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7027,6 +7027,7 @@ L: rust-for-linux@vger.kernel.org
S: Supported
W: https://rust-for-linux.com
T: git https://github.com/Rust-for-Linux/linux.git alloc-next
+F: rust/helpers/dma.c
F: rust/kernel/dma.rs
F: samples/rust/rust_dma.rs
@@ -15522,6 +15523,21 @@ F: mm/numa.c
F: mm/numa_emulation.c
F: mm/numa_memblks.c
+MEMORY MANAGEMENT - PAGE ALLOCATOR
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Suren Baghdasaryan <surenb@google.com>
+R: Michal Hocko <mhocko@suse.com>
+R: Brendan Jackman <jackmanb@google.com>
+R: Johannes Weiner <hannes@cmpxchg.org>
+R: Zi Yan <ziy@nvidia.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: mm/compaction.c
+F: mm/page_alloc.c
+F: include/linux/gfp.h
+F: include/linux/compaction.h
+
MEMORY MANAGEMENT - SECRETMEM
M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
@@ -15549,10 +15565,12 @@ M: Liam R. Howlett <Liam.Howlett@oracle.com>
M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
R: Vlastimil Babka <vbabka@suse.cz>
R: Jann Horn <jannh@google.com>
+R: Pedro Falcato <pfalcato@suse.de>
L: linux-mm@kvack.org
S: Maintained
W: http://www.linux-mm.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/trace/events/mmap.h
F: mm/mlock.c
F: mm/mmap.c
F: mm/mprotect.c
@@ -15563,6 +15581,36 @@ F: mm/vma.h
F: mm/vma_internal.h
F: tools/testing/vma/
+MEMORY MAPPING - LOCKING
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Suren Baghdasaryan <surenb@google.com>
+M: Liam R. Howlett <Liam.Howlett@oracle.com>
+M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Shakeel Butt <shakeel.butt@linux.dev>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: Documentation/mm/process_addrs.rst
+F: include/linux/mmap_lock.h
+F: include/trace/events/mmap_lock.h
+F: mm/mmap_lock.c
+
+MEMORY MAPPING - MADVISE (MEMORY ADVICE)
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Liam R. Howlett <Liam.Howlett@oracle.com>
+M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+M: David Hildenbrand <david@redhat.com>
+R: Vlastimil Babka <vbabka@suse.cz>
+R: Jann Horn <jannh@google.com>
+L: linux-mm@kvack.org
+S: Maintained
+W: http://www.linux-mm.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F: include/uapi/asm-generic/mman-common.h
+F: mm/madvise.c
+
MEMORY TECHNOLOGY DEVICES (MTD)
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Richard Weinberger <richard@nod.at>
@@ -22258,9 +22306,7 @@ F: drivers/nvmem/layouts/sl28vpd.c
SLAB ALLOCATOR
M: Christoph Lameter <cl@linux.com>
-M: Pekka Enberg <penberg@kernel.org>
M: David Rientjes <rientjes@google.com>
-M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
M: Andrew Morton <akpm@linux-foundation.org>
M: Vlastimil Babka <vbabka@suse.cz>
R: Roman Gushchin <roman.gushchin@linux.dev>
diff --git a/Makefile b/Makefile
index c91afd55099e..07f818186151 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -477,7 +477,6 @@ export rust_common_flags := --edition=2021 \
-Wclippy::ignored_unit_patterns \
-Wclippy::mut_mut \
-Wclippy::needless_bitwise_bool \
- -Wclippy::needless_continue \
-Aclippy::needless_lifetimes \
-Wclippy::no_mangle_with_rust_abi \
-Wclippy::undocumented_unsafe_blocks \
@@ -1054,9 +1053,12 @@ NOSTDINC_FLAGS += -nostdinc
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
#Currently, disable -Wstringop-overflow for GCC 11, globally.
-KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
+KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow)
KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
+#Currently, disable -Wunterminated-string-initialization as broken
+KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization)
+
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 4853e8b04c6f..f9dcaa60033d 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -21,10 +21,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
-CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_traps.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_module.o += $(call cc-disable-warning, override-init)
+CFLAGS_syscall.o += $(call cc-disable-warning, override-init)
+CFLAGS_traps.o += $(call cc-disable-warning, override-init)
+CFLAGS_perf_event.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
index f4c8e35c216a..cb41d9265662 100644
--- a/arch/loongarch/kvm/Makefile
+++ b/arch/loongarch/kvm/Makefile
@@ -21,4 +21,4 @@ kvm-y += intc/eiointc.o
kvm-y += intc/pch_pic.o
kvm-y += irqfd.o
-CFLAGS_exit.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_exit.o += $(call cc-disable-warning, override-init)
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index 721ec275ce57..231d777d936c 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -115,24 +115,19 @@
\old_c
.endm
-#define _ALTERNATIVE_CFG(old_c, ...) \
- ALTERNATIVE_CFG old_c
-
-#define _ALTERNATIVE_CFG_2(old_c, ...) \
- ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c
+#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c
#else /* !__ASSEMBLY__ */
-#define __ALTERNATIVE_CFG(old_c) \
- old_c "\n"
+#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n"
+#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n"
-#define _ALTERNATIVE_CFG(old_c, ...) \
- __ALTERNATIVE_CFG(old_c)
+#endif /* __ASSEMBLY__ */
-#define _ALTERNATIVE_CFG_2(old_c, ...) \
- __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c)
-#endif /* __ASSEMBLY__ */
#endif /* CONFIG_RISCV_ALTERNATIVE */
/*
diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h
index 46677daf708b..cc11c4544cff 100644
--- a/arch/riscv/include/asm/kgdb.h
+++ b/arch/riscv/include/asm/kgdb.h
@@ -19,16 +19,9 @@
#ifndef __ASSEMBLY__
+void arch_kgdb_breakpoint(void);
extern unsigned long kgdb_compiled_break;
-static inline void arch_kgdb_breakpoint(void)
-{
- asm(".global kgdb_compiled_break\n"
- ".option norvc\n"
- "kgdb_compiled_break: ebreak\n"
- ".option rvc\n");
-}
-
#endif /* !__ASSEMBLY__ */
#define DBG_REG_ZERO "zero"
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 121fff429dce..eceabf59ae48 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -62,8 +62,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
args[0] = regs->orig_a0;
- args++;
- memcpy(args, &regs->a1, 5 * sizeof(args[0]));
+ args[1] = regs->a1;
+ args[2] = regs->a2;
+ args[3] = regs->a3;
+ args[4] = regs->a4;
+ args[5] = regs->a5;
}
static inline int syscall_get_arch(struct task_struct *task)
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 8d186bfced45..f7480c9c6f8d 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -9,8 +9,8 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
endif
-CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall_table.o += $(call cc-disable-warning, override-init)
+CFLAGS_compat_syscall_table.o += $(call cc-disable-warning, override-init)
ifdef CONFIG_KEXEC_CORE
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
index 2e0266ae6bd7..9f3db3503dab 100644
--- a/arch/riscv/kernel/kgdb.c
+++ b/arch/riscv/kernel/kgdb.c
@@ -254,6 +254,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->epc = pc;
}
+noinline void arch_kgdb_breakpoint(void)
+{
+ asm(".global kgdb_compiled_break\n"
+ "kgdb_compiled_break: ebreak\n");
+}
+
void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
char *remcom_out_buffer)
{
diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c
index e264e59e596e..91d0b355ceef 100644
--- a/arch/riscv/kernel/module-sections.c
+++ b/arch/riscv/kernel/module-sections.c
@@ -73,16 +73,17 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx)
static void count_max_entries(Elf_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{
- unsigned int type, i;
-
- for (i = 0; i < num; i++) {
- type = ELF_RISCV_R_TYPE(relas[i].r_info);
- if (type == R_RISCV_CALL_PLT) {
+ for (int i = 0; i < num; i++) {
+ switch (ELF_R_TYPE(relas[i].r_info)) {
+ case R_RISCV_CALL_PLT:
+ case R_RISCV_PLT32:
if (!duplicate_rela(relas, i))
(*plts)++;
- } else if (type == R_RISCV_GOT_HI20) {
+ break;
+ case R_RISCV_GOT_HI20:
if (!duplicate_rela(relas, i))
(*gots)++;
+ break;
}
}
}
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 47d0ebeec93c..7f6147c18033 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -648,7 +648,7 @@ process_accumulated_relocations(struct module *me,
kfree(bucket_iter);
}
- kfree(*relocation_hashtable);
+ kvfree(*relocation_hashtable);
}
static int add_relocation_to_accumulate(struct module *me, int type,
@@ -752,9 +752,10 @@ initialize_relocation_hashtable(unsigned int num_relocations,
hashtable_size <<= should_double_size;
- *relocation_hashtable = kmalloc_array(hashtable_size,
- sizeof(**relocation_hashtable),
- GFP_KERNEL);
+ /* Number of relocations may be large, so kvmalloc it */
+ *relocation_hashtable = kvmalloc_array(hashtable_size,
+ sizeof(**relocation_hashtable),
+ GFP_KERNEL);
if (!*relocation_hashtable)
return 0;
@@ -859,7 +860,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
j++;
- if (j > sechdrs[relsec].sh_size / sizeof(*rel))
+ if (j == num_relocations)
j = 0;
} while (j_idx != j);
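[The hunk above moves the hashtable to kvmalloc_array()/kvfree() because the relocation count is unbounded: kvmalloc tries the page allocator first and falls back to vmalloc, and kvfree releases either. The general shape of that idiom, with hypothetical helper names:]

    /* Allocation whose size scales with input: kmalloc first, vmalloc fallback. */
    #include <linux/mm.h>
    #include <linux/slab.h>

    static unsigned long *table_alloc(size_t nents)
    {
            return kvmalloc_array(nents, sizeof(unsigned long), GFP_KERNEL);
    }

    static void table_free(unsigned long *tbl)
    {
            kvfree(tbl);    /* correct for either backing allocator */
    }
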
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index c174544eefc8..f7c9a1caa83e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -66,6 +66,9 @@ static struct resource bss_res = { .name = "Kernel bss", };
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif
+static int num_standard_resources;
+static struct resource *standard_resources;
+
static int __init add_resource(struct resource *parent,
struct resource *res)
{
@@ -139,7 +142,7 @@ static void __init init_resources(void)
struct resource *res = NULL;
struct resource *mem_res = NULL;
size_t mem_res_sz = 0;
- int num_resources = 0, res_idx = 0;
+ int num_resources = 0, res_idx = 0, non_resv_res = 0;
int ret = 0;
/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
@@ -193,6 +196,7 @@ static void __init init_resources(void)
/* Add /memory regions to the resource tree */
for_each_mem_region(region) {
res = &mem_res[res_idx--];
+ non_resv_res++;
if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
@@ -210,6 +214,9 @@ static void __init init_resources(void)
goto error;
}
+ num_standard_resources = non_resv_res;
+ standard_resources = &mem_res[res_idx + 1];
+
/* Clean-up any unused pre-allocated resources */
if (res_idx >= 0)
memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
@@ -221,6 +228,33 @@ static void __init init_resources(void)
memblock_free(mem_res, mem_res_sz);
}
+static int __init reserve_memblock_reserved_regions(void)
+{
+ u64 i, j;
+
+ for (i = 0; i < num_standard_resources; i++) {
+ struct resource *mem = &standard_resources[i];
+ phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+ if (!memblock_is_region_reserved(mem->start, mem_size))
+ continue;
+
+ for_each_reserved_mem_range(j, &r_start, &r_end) {
+ resource_size_t start, end;
+
+ start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+ end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+ if (start > mem->end || end < mem->start)
+ continue;
+
+ reserve_region_with_split(mem, start, end, "Reserved");
+ }
+ }
+
+ return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
static void __init parse_dtb(void)
{
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 585d2dcf2dab..b8ba13819d05 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -439,29 +439,36 @@ static int __init check_unaligned_access_all_cpus(void)
{
int cpu;
- if (unaligned_scalar_speed_param == RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN &&
- !check_unaligned_access_emulated_all_cpus()) {
- check_unaligned_access_speed_all_cpus();
- } else {
- pr_info("scalar unaligned access speed set to '%s' by command line\n",
- speed_str[unaligned_scalar_speed_param]);
+ if (unaligned_scalar_speed_param != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) {
+ pr_info("scalar unaligned access speed set to '%s' (%lu) by command line\n",
+ speed_str[unaligned_scalar_speed_param], unaligned_scalar_speed_param);
for_each_online_cpu(cpu)
per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param;
+ } else if (!check_unaligned_access_emulated_all_cpus()) {
+ check_unaligned_access_speed_all_cpus();
+ }
+
+ if (unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
+ if (!has_vector() &&
+ unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED) {
+ pr_warn("vector support is not available, ignoring unaligned_vector_speed=%s\n",
+ speed_str[unaligned_vector_speed_param]);
+ } else {
+ pr_info("vector unaligned access speed set to '%s' (%lu) by command line\n",
+ speed_str[unaligned_vector_speed_param], unaligned_vector_speed_param);
+ }
}
if (!has_vector())
unaligned_vector_speed_param = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
- if (unaligned_vector_speed_param == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN &&
- !check_vector_unaligned_access_emulated_all_cpus() &&
- IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
- kthread_run(vec_check_unaligned_access_speed_all_cpus,
- NULL, "vec_check_unaligned_access_speed_all_cpus");
- } else {
- pr_info("vector unaligned access speed set to '%s' by command line\n",
- speed_str[unaligned_vector_speed_param]);
+ if (unaligned_vector_speed_param != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
for_each_online_cpu(cpu)
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
+ } else if (!check_vector_unaligned_access_emulated_all_cpus() &&
+ IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
+ kthread_run(vec_check_unaligned_access_speed_all_cpus,
+ NULL, "vec_check_unaligned_access_speed_all_cpus");
}
/*
diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
index dbba332e4a12..f676156d9f3d 100644
--- a/arch/x86/boot/compressed/mem.c
+++ b/arch/x86/boot/compressed/mem.c
@@ -34,11 +34,14 @@ static bool early_is_tdx_guest(void)
void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
+ static bool sevsnp;
+
/* Platform-specific memory-acceptance call goes here */
if (early_is_tdx_guest()) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
- } else if (sev_snp_enabled()) {
+ } else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
+ sevsnp = true;
snp_accept_memory(start, end);
} else {
error("Cannot accept memory: unknown platform\n");
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index bb55934c1cee..89ba168f4f0f 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -164,10 +164,7 @@ bool sev_snp_enabled(void)
static void __page_state_change(unsigned long paddr, enum psc_op op)
{
- u64 val;
-
- if (!sev_snp_enabled())
- return;
+ u64 val, msr;
/*
* If private -> shared then invalidate the page before requesting the
@@ -176,6 +173,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if (op == SNP_PAGE_STATE_SHARED)
pvalidate_4k_page(paddr, paddr, false);
+ /* Save the current GHCB MSR value */
+ msr = sev_es_rd_ghcb_msr();
+
/* Issue VMGEXIT to change the page state in RMP table. */
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
VMGEXIT();
@@ -185,6 +185,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+ /* Restore the GHCB MSR value */
+ sev_es_wr_ghcb_msr(msr);
+
/*
* Now that page state is changed in the RMP table, validate it so that it is
* consistent with the RMP entry.
@@ -195,11 +198,17 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
void snp_set_page_private(unsigned long paddr)
{
+ if (!sev_snp_enabled())
+ return;
+
__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
}
void snp_set_page_shared(unsigned long paddr)
{
+ if (!sev_snp_enabled())
+ return;
+
__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}
@@ -223,56 +232,10 @@ static bool early_setup_ghcb(void)
return true;
}
-static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
- phys_addr_t pa, phys_addr_t pa_end)
-{
- struct psc_hdr *hdr;
- struct psc_entry *e;
- unsigned int i;
-
- hdr = &desc->hdr;
- memset(hdr, 0, sizeof(*hdr));
-
- e = desc->entries;
-
- i = 0;
- while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
- hdr->end_entry = i;
-
- e->gfn = pa >> PAGE_SHIFT;
- e->operation = SNP_PAGE_STATE_PRIVATE;
- if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
- e->pagesize = RMP_PG_SIZE_2M;
- pa += PMD_SIZE;
- } else {
- e->pagesize = RMP_PG_SIZE_4K;
- pa += PAGE_SIZE;
- }
-
- e++;
- i++;
- }
-
- if (vmgexit_psc(boot_ghcb, desc))
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-
- pvalidate_pages(desc);
-
- return pa;
-}
-
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
- struct snp_psc_desc desc = {};
- unsigned int i;
- phys_addr_t pa;
-
- if (!boot_ghcb && !early_setup_ghcb())
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
-
- pa = start;
- while (pa < end)
- pa = __snp_accept_memory(&desc, pa, end);
+ for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
+ __page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
}
void sev_es_shutdown_ghcb(void)
diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h
index fc725a981b09..4e463f33186d 100644
--- a/arch/x86/boot/compressed/sev.h
+++ b/arch/x86/boot/compressed/sev.h
@@ -12,11 +12,13 @@
bool sev_snp_enabled(void);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+u64 sev_get_status(void);
#else
static inline bool sev_snp_enabled(void) { return false; }
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
+static inline u64 sev_get_status(void) { return 0; }
#endif
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 09d2d66c9f21..00dfe487bd00 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3049,7 +3049,6 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int bit;
int handled = 0;
- u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
inc_irq_stat(apic_perf_irqs);
@@ -3093,7 +3092,6 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
handled++;
x86_pmu_handle_guest_pebs(regs, &data);
static_call(x86_pmu_drain_pebs)(regs, &data);
- status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/*
* PMI throttle may be triggered, which stops the PEBS event.
@@ -3104,6 +3102,15 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
*/
if (pebs_enabled != cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
+
+ /*
+ * Above PEBS handler (PEBS counters snapshotting) has updated fixed
+ * counter 3 and perf metrics counts if they are in counter group,
+ * unnecessary to update again.
+ */
+ if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] &&
+ is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS]))
+ status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT;
}
/*
@@ -3123,6 +3130,8 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
static_call(intel_pmu_update_topdown_event)(NULL, NULL);
}
+ status &= hybrid(cpuc->pmu, intel_ctrl);
+
/*
* Checkpointed counters can lead to 'spurious' PMIs because the
* rollback caused by the PMI will have cleared the overflow status
@@ -7305,8 +7314,17 @@ __init int intel_pmu_init(void)
name = "meteorlake_hybrid";
break;
+ case INTEL_PANTHERLAKE_L:
+ pr_cont("Pantherlake Hybrid events, ");
+ name = "pantherlake_hybrid";
+ goto lnl_common;
+
case INTEL_LUNARLAKE_M:
case INTEL_ARROWLAKE:
+ pr_cont("Lunarlake Hybrid events, ");
+ name = "lunarlake_hybrid";
+
+ lnl_common:
intel_pmu_init_hybrid(hybrid_big_small);
x86_pmu.pebs_latency_data = lnl_latency_data;
@@ -7328,8 +7346,6 @@ __init int intel_pmu_init(void)
intel_pmu_init_skt(&pmu->pmu);
intel_pmu_pebs_data_source_lnl();
- pr_cont("Lunarlake Hybrid events, ");
- name = "lunarlake_hybrid";
break;
case INTEL_ARROWLAKE_H:
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 1f7e1a692a7a..18c3ab579b8b 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1399,8 +1399,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
* + precise_ip < 2 for the non event IP
* + For RTM TSX weight we need GPRs for the abort code.
*/
- gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
- (attr->sample_regs_intr & PEBS_GP_REGS);
+ gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
+ (attr->sample_regs_intr & PEBS_GP_REGS)) ||
+ ((sample_type & PERF_SAMPLE_REGS_USER) &&
+ (attr->sample_regs_user & PEBS_GP_REGS));
tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
((attr->config & INTEL_ARCH_EVENT_MASK) ==
@@ -2123,7 +2125,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
regs->flags &= ~PERF_EFLAGS_EXACT;
}
- if (sample_type & PERF_SAMPLE_REGS_INTR)
+ if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
adaptive_pebs_save_regs(regs, gprs);
}
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 60973c209c0e..76d96df1475a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -4891,28 +4891,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
+ INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
@@ -5485,37 +5485,6 @@ static struct freerunning_counters icx_iio_freerunning[] = {
[ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
-static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
- /* Free-Running IIO CLOCKS Counter */
- INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
- /* Free-Running IIO BANDWIDTH IN Counters */
- INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
- { /* end: all zeroes */ },
-};
-
static struct intel_uncore_type icx_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 9,
@@ -5523,7 +5492,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = {
.num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
.freerunning = icx_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
- .event_descs = icx_uncore_iio_freerunning_events,
+ .event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};
@@ -6320,69 +6289,13 @@ static struct freerunning_counters spr_iio_freerunning[] = {
[SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
};
-static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
- /* Free-Running IIO CLOCKS Counter */
- INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
- /* Free-Running IIO BANDWIDTH IN Counters */
- INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
- /* Free-Running IIO BANDWIDTH OUT Counters */
- INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
- INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"),
- { /* end: all zeroes */ },
-};
-
static struct intel_uncore_type spr_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 17,
.num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
.freerunning = spr_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
- .event_descs = spr_uncore_iio_freerunning_events,
+ .event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 3a97a7eefb51..be10c188614f 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -126,6 +126,8 @@
#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD) /* Redwood Cove */
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
+#define INTEL_BARTLETTLAKE IFM(6, 0xD7) /* Raptor Cove */
+
/* "Hybrid" Processors (P-Core/E-Core) */
#define INTEL_LAKEFIELD IFM(6, 0x8A) /* Sunny Cove / Tremont */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a839ff506f45..2b36379ff675 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -869,6 +869,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c)
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
setup_force_cpu_bug(X86_BUG_DIV0);
+
+ /*
+ * Turn off the Instructions Retired free counter on machines that are
+ * susceptible to erratum #1054 "Instructions Retired Performance
+ * Counter May Be Inaccurate".
+ */
+ if (c->x86_model < 0x30) {
+ msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ clear_cpu_cap(c, X86_FEATURE_IRPERF);
+ }
}
static bool cpu_has_zenbleed_microcode(void)
@@ -1052,13 +1062,8 @@ static void init_amd(struct cpuinfo_x86 *c)
if (!cpu_feature_enabled(X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
- /*
- * Turn on the Instructions Retired free counter on machines not
- * susceptible to erratum #1054 "Instructions Retired Performance
- * Counter May Be Inaccurate".
- */
- if (cpu_has(c, X86_FEATURE_IRPERF) &&
- (boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
+ /* Enable the Instructions Retired free counter */
+ if (cpu_has(c, X86_FEATURE_IRPERF))
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index b61028cf5c8a..4a10d35e70aa 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -199,6 +199,12 @@ static bool need_sha_check(u32 cur_rev)
case 0xa70c0: return cur_rev <= 0xa70C009; break;
case 0xaa001: return cur_rev <= 0xaa00116; break;
case 0xaa002: return cur_rev <= 0xaa00218; break;
+ case 0xb0021: return cur_rev <= 0xb002146; break;
+ case 0xb1010: return cur_rev <= 0xb101046; break;
+ case 0xb2040: return cur_rev <= 0xb204031; break;
+ case 0xb4040: return cur_rev <= 0xb404031; break;
+ case 0xb6000: return cur_rev <= 0xb600031; break;
+ case 0xb7000: return cur_rev <= 0xb700031; break;
default: break;
}
@@ -214,8 +220,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
struct sha256_state s;
int i;
- if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
- x86_family(bsp_cpuid_1_eax) > 0x19)
+ if (x86_family(bsp_cpuid_1_eax) < 0x17)
return true;
if (!need_sha_check(cur_rev))
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 80e262bb627f..cb9852ad6098 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -46,7 +46,8 @@ bool __init pit_timer_init(void)
* VMMs otherwise steal CPU time just to pointlessly waggle
* the (masked) IRQ.
*/
- clockevent_i8253_disable();
+ scoped_guard(irq)
+ clockevent_i8253_disable();
return false;
}
clockevent_i8253_init(true);
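[scoped_guard(irq), used above, comes from the kernel's cleanup.h machinery: local interrupts are disabled on entry to the block and re-enabled on every exit path. A hypothetical illustration of the shape; program_pit() is an invented helper.]

    /* Sketch of the scoped_guard(irq) construct. */
    #include <linux/cleanup.h>
    #include <linux/irqflags.h>

    static void program_pit(void)
    {
            /* hypothetical hardware poke */
    }

    static void pit_update(void)
    {
            scoped_guard(irq) {
                    /* local IRQs are off for exactly this block */
                    program_pit();
            }
            /* IRQs are enabled again here, even on early scope exits */
    }
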
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 1b7710bd0d05..53282dc7d5ac 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -103,10 +103,6 @@ noinstr void *__xen_hypercall_setfunc(void)
void (*func)(void);
/*
- * Xen is supported only on CPUs with CPUID, so testing for
- * X86_FEATURE_CPUID is a test for early_cpu_init() having been
- * run.
- *
* Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
* dependency chain: it is being called via the xen_hypercall static
* call when running as a PVH or HVM guest. Hypercalls need to be
@@ -118,8 +114,7 @@ noinstr void *__xen_hypercall_setfunc(void)
*/
instrumentation_begin();
- if (!boot_cpu_has(X86_FEATURE_CPUID))
- xen_get_vendor();
+ xen_get_vendor();
if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
diff --git a/block/bdev.c b/block/bdev.c
index 4844d1e27b6f..6a34179192c9 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -1272,8 +1272,7 @@ void sync_bdevs(bool wait)
/*
* Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices.
*/
-void bdev_statx(struct path *path, struct kstat *stat,
- u32 request_mask)
+void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask)
{
struct inode *backing_inode;
struct block_device *bdev;
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 608594a154a5..43ef6bd06c85 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -66,16 +66,12 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
}
EXPORT_SYMBOL(bio_integrity_alloc);
-static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
- bool dirty)
+static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs)
{
int i;
- for (i = 0; i < nr_vecs; i++) {
- if (dirty && !PageCompound(bv[i].bv_page))
- set_page_dirty_lock(bv[i].bv_page);
+ for (i = 0; i < nr_vecs; i++)
unpin_user_page(bv[i].bv_page);
- }
}
static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
@@ -91,7 +87,7 @@ static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
WARN_ON_ONCE(ret != bytes);
- bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
+ bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs);
}
/**
@@ -111,8 +107,7 @@ void bio_integrity_unmap_user(struct bio *bio)
return;
}
- bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
- bio_data_dir(bio) == READ);
+ bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt);
}
/**
@@ -198,7 +193,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
}
if (write)
- bio_integrity_unpin_bvec(bvec, nr_vecs, false);
+ bio_integrity_unpin_bvec(bvec, nr_vecs);
else
memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));
@@ -319,7 +314,7 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
return 0;
release_pages:
- bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
+ bio_integrity_unpin_bvec(bvec, nr_bvecs);
free_bvec:
if (bvec != stack_vec)
kfree(bvec);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a2882751f0d2..1f9b45b0b9ee 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -909,6 +909,8 @@ out_unregister_ia_ranges:
out_debugfs_remove:
blk_debugfs_remove(disk);
mutex_unlock(&q->sysfs_lock);
+ if (queue_is_mq(q))
+ blk_mq_sysfs_unregister(disk);
out_put_queue_kobj:
kobject_put(&disk->queue_kobj);
return ret;
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 7964cc041e06..f9f8666891ab 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_THROTTLE_H
#define BLK_THROTTLE_H
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 5762fcc63b51..36934c78d127 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -215,8 +215,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
spage = nth_page(spage, soff / PAGE_SIZE);
soff = offset_in_page(soff);
- n = slen / PAGE_SIZE;
- n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE;
+ n = (slen - 1) / PAGE_SIZE;
+ n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
if (PageHighMem(nth_page(spage, n)) &&
size_add(soff, slen) > PAGE_SIZE)
break;
@@ -243,9 +243,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
dpage = nth_page(dpage, doff / PAGE_SIZE);
doff = offset_in_page(doff);
- n = dlen / PAGE_SIZE;
- n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
- if (PageHighMem(dpage + n) &&
+ n = (dlen - 1) / PAGE_SIZE;
+ n += (offset_in_page(dlen - 1) + doff) / PAGE_SIZE;
+ if (PageHighMem(nth_page(dpage, n)) &&
size_add(doff, dlen) > PAGE_SIZE)
break;
dst = kmap_local_page(dpage) + doff;
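The reworked index arithmetic computes, relative to spage/dpage, the page holding the last byte of the span. For soff < PAGE_SIZE that index is (soff + slen - 1) / PAGE_SIZE; writing slen - 1 = q * PAGE_SIZE + r gives n = q + (r + soff) / PAGE_SIZE, which is exactly the two new lines. The old form misbehaved when offset_in_page(slen) + soff was 0 (a page-aligned length at offset 0), where the "- 1" underflowed. A standalone check (userspace sketch, 4 KiB pages assumed):

#include <assert.h>

#define PAGE_SIZE 4096u
#define offset_in_page(x) ((x) & (PAGE_SIZE - 1))

/* Index, relative to the first page, of the page with the last byte. */
static unsigned int last_page_index(unsigned int soff, unsigned int slen)
{
	unsigned int n = (slen - 1) / PAGE_SIZE;

	n += (offset_in_page(slen - 1) + soff) / PAGE_SIZE;
	return n; /* == (soff + slen - 1) / PAGE_SIZE when soff < PAGE_SIZE */
}

int main(void)
{
	assert(last_page_index(0, 4096) == 0);   /* old formula underflowed here */
	assert(last_page_index(4000, 200) == 1); /* bytes 4000..4199: two pages */
	return 0;
}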
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index abd609d4c8ef..82977ea25db3 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -58,9 +58,6 @@ module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
#endif
-/* Multibuffer is unlimited. Set arbitrary limit for testing. */
-#define MAX_MB_MSGS 16
-
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
/* a perfect nop */
@@ -3329,48 +3326,27 @@ static int test_acomp(struct crypto_acomp *tfm,
int ctcount, int dtcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
- struct scatterlist *src = NULL, *dst = NULL;
- struct acomp_req *reqs[MAX_MB_MSGS] = {};
- char *decomp_out[MAX_MB_MSGS] = {};
- char *output[MAX_MB_MSGS] = {};
- struct crypto_wait wait;
- struct acomp_req *req;
- int ret = -ENOMEM;
unsigned int i;
+ char *output, *decomp_out;
+ int ret;
+ struct scatterlist src, dst;
+ struct acomp_req *req;
+ struct crypto_wait wait;
- src = kmalloc_array(MAX_MB_MSGS, sizeof(*src), GFP_KERNEL);
- if (!src)
- goto out;
- dst = kmalloc_array(MAX_MB_MSGS, sizeof(*dst), GFP_KERNEL);
- if (!dst)
- goto out;
-
- for (i = 0; i < MAX_MB_MSGS; i++) {
- reqs[i] = acomp_request_alloc(tfm);
- if (!reqs[i])
- goto out;
-
- acomp_request_set_callback(reqs[i],
- CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_req_done, &wait);
- if (i)
- acomp_request_chain(reqs[i], reqs[0]);
-
- output[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!output[i])
- goto out;
+ output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
- decomp_out[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
- if (!decomp_out[i])
- goto out;
+ decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!decomp_out) {
+ kfree(output);
+ return -ENOMEM;
}
for (i = 0; i < ctcount; i++) {
unsigned int dlen = COMP_BUF_SIZE;
int ilen = ctemplate[i].inlen;
void *input_vec;
- int j;
input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
if (!input_vec) {
@@ -3378,61 +3354,70 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+ memset(output, 0, dlen);
crypto_init_wait(&wait);
- sg_init_one(src, input_vec, ilen);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- sg_init_one(dst + j, output[j], dlen);
- acomp_request_set_params(reqs[j], src, dst + j, ilen, dlen);
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ kfree(input_vec);
+ ret = -ENOMEM;
+ goto out;
}
- req = reqs[0];
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
+ acomp_request_free(req);
goto out;
}
ilen = req->dlen;
dlen = COMP_BUF_SIZE;
+ sg_init_one(&src, output, ilen);
+ sg_init_one(&dst, decomp_out, dlen);
crypto_init_wait(&wait);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- sg_init_one(src + j, output[j], ilen);
- sg_init_one(dst + j, decomp_out[j], dlen);
- acomp_request_set_params(reqs[j], src + j, dst + j, ilen, dlen);
- }
-
- crypto_wait_req(crypto_acomp_decompress(req), &wait);
- for (j = 0; j < MAX_MB_MSGS; j++) {
- ret = reqs[j]->base.err;
- if (ret) {
- pr_err("alg: acomp: compression failed on test %d (%d) for %s: ret=%d\n",
- i + 1, j, algo, -ret);
- kfree(input_vec);
- goto out;
- }
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
- if (reqs[j]->dlen != ctemplate[i].inlen) {
- pr_err("alg: acomp: Compression test %d (%d) failed for %s: output len = %d\n",
- i + 1, j, algo, reqs[j]->dlen);
- ret = -EINVAL;
- kfree(input_vec);
- goto out;
- }
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
+ if (ret) {
+ pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
- if (memcmp(input_vec, decomp_out[j], reqs[j]->dlen)) {
- pr_err("alg: acomp: Compression test %d (%d) failed for %s\n",
- i + 1, j, algo);
- hexdump(output[j], reqs[j]->dlen);
- ret = -EINVAL;
- kfree(input_vec);
- goto out;
- }
+ if (req->dlen != ctemplate[i].inlen) {
+ pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(input_vec, decomp_out, req->dlen)) {
+ pr_err("alg: acomp: Compression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
}
kfree(input_vec);
+ acomp_request_free(req);
}
for (i = 0; i < dtcount; i++) {
@@ -3446,9 +3431,10 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
+ memset(output, 0, dlen);
crypto_init_wait(&wait);
- sg_init_one(src, input_vec, ilen);
- sg_init_one(dst, output[0], dlen);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm);
if (!req) {
@@ -3459,7 +3445,7 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
- acomp_request_set_params(req, src, dst, ilen, dlen);
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
@@ -3481,10 +3467,10 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out;
}
- if (memcmp(output[0], dtemplate[i].output, req->dlen)) {
+ if (memcmp(output, dtemplate[i].output, req->dlen)) {
pr_err("alg: acomp: Decompression test %d failed for %s\n",
i + 1, algo);
- hexdump(output[0], req->dlen);
+ hexdump(output, req->dlen);
ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req);
@@ -3498,13 +3484,8 @@ static int test_acomp(struct crypto_acomp *tfm,
ret = 0;
out:
- acomp_request_free(reqs[0]);
- for (i = 0; i < MAX_MB_MSGS; i++) {
- kfree(output[i]);
- kfree(decomp_out[i]);
- }
- kfree(dst);
- kfree(src);
+ kfree(decomp_out);
+ kfree(output);
return ret;
}
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 4fa73189502e..eff1d3ca075f 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
@@ -164,7 +164,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = vdev->platform;
break;
case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
+ args->value = ivpu_hw_dpu_max_freq_get(vdev);
break;
case DRM_IVPU_PARAM_NUM_CONTEXTS:
args->value = ivpu_get_context_count(vdev);
@@ -421,9 +421,9 @@ void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
- cancel_work_sync(&vdev->irq_ipc_work);
- cancel_work_sync(&vdev->irq_dct_work);
- cancel_work_sync(&vdev->context_abort_work);
+ flush_work(&vdev->irq_ipc_work);
+ flush_work(&vdev->irq_dct_work);
+ flush_work(&vdev->context_abort_work);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
}
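The cancel_work_sync() to flush_work() switch above is a semantic change, not a rename: a cancel may drop a queued-but-not-started item so its handler never runs, while a flush waits for the handler to finish, so IPC/DCT/context-abort events latched just before the reset are still fully processed. Summary of the workqueue semantics assumed here (not text from the patch):

/*
 * cancel_work_sync(w): if w is queued but not yet running, dequeue it and
 *                      never run it; if it is running, wait for it to end.
 * flush_work(w):       wait until any queued or running instance of w has
 *                      completed execution.
 */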
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 7a1bb92d8c81..5e1d709c6a46 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <linux/firmware.h>
@@ -233,10 +233,20 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->dvfs_mode = 0;
fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
+ if (fw_hdr->preemption_buffer_1_max_size)
+ fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
+ else
+ fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+
+ if (fw_hdr->preemption_buffer_2_max_size)
+ fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
+ else
+ fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+ ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n",
+ fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size);
+
if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
fw_hdr->ro_section_size,
fw_hdr->image_load_address,
@@ -566,7 +576,6 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
- boot_params->frequency = ivpu_hw_pll_freq_get(vdev);
/*
* This param is a debug firmware feature. It switches default clock
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index 16435f2756d0..d79668fe1609 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_HW_H__
@@ -82,19 +82,19 @@ static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
return range->end - range->start;
}
-static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+static inline u32 ivpu_hw_dpu_max_freq_get(struct ivpu_device *vdev)
{
- return ivpu_hw_btrs_ratio_to_freq(vdev, ratio);
+ return ivpu_hw_btrs_dpu_max_freq_get(vdev);
}
-static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
+static inline u32 ivpu_hw_dpu_freq_get(struct ivpu_device *vdev)
{
- ivpu_hw_ip_irq_clear(vdev);
+ return ivpu_hw_btrs_dpu_freq_get(vdev);
}
-static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev)
+static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
{
- return ivpu_hw_btrs_pll_freq_get(vdev);
+ ivpu_hw_ip_irq_clear(vdev);
}
static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index 56c56012b980..b236c7234daa 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
+#include <linux/units.h>
+
#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
@@ -28,17 +30,13 @@
#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)
-#define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3)
-#define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3)
-#define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3)
-#define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3)
-#define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_CONFIG_DEFAULT 0x0
-#define PLL_SIMULATION_FREQ 10000000
-#define PLL_REF_CLK_FREQ 50000000
+#define PLL_REF_CLK_FREQ 50000000ull
+#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
+
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIMEOUT_US (150 * USEC_PER_MSEC)
@@ -62,6 +60,8 @@
#define DCT_ENABLE 0x1
#define DCT_DISABLE 0x0
+static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio);
+
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
{
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
@@ -156,7 +156,7 @@ static int info_init_mtl(struct ivpu_device *vdev)
hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
hw->sku = BTRS_MTL_TILE_SKU_BOTH;
- hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;
+ hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3);
return 0;
}
@@ -334,8 +334,8 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
prepare_wp_request(vdev, &wp, enable);
- ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
- PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn);
+ ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
+ pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn);
ret = wp_request_send(vdev, &wp);
if (ret) {
@@ -573,6 +573,47 @@ int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}
+static u32 pll_config_get_mtl(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
+}
+
+static u32 pll_config_get_lnl(struct ivpu_device *vdev)
+{
+ return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
+}
+
+static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio)
+{
+ return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3;
+}
+
+static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio)
+{
+ return PLL_RATIO_TO_FREQ(ratio) / 2;
+}
+
+static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return pll_ratio_to_dpu_freq_mtl(ratio);
+ else
+ return pll_ratio_to_dpu_freq_lnl(ratio);
+}
+
+u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev)
+{
+ return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio);
+}
+
+u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return pll_ratio_to_dpu_freq_mtl(pll_config_get_mtl(vdev));
+ else
+ return pll_ratio_to_dpu_freq_lnl(pll_config_get_lnl(vdev));
+}
+
/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
{
@@ -582,9 +623,12 @@ bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
if (!status)
return false;
- if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
- REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL));
+ if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
+ u32 pll = pll_config_get_mtl(vdev);
+
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
+ pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ);
+ }
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
@@ -633,8 +677,12 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
queue_work(system_wq, &vdev->irq_dct_work);
}
- if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
- ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ));
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
+ u32 pll = pll_config_get_lnl(vdev);
+
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
+ pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ);
+ }
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
@@ -717,60 +765,6 @@ void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 acti
REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}
-static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
-{
- u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
- u32 cpu_clock;
-
- if ((config & 0xff) == MTL_PLL_RATIO_4_3)
- cpu_clock = pll_clock * 2 / 4;
- else
- cpu_clock = pll_clock * 2 / 5;
-
- return cpu_clock;
-}
-
-u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
-{
- struct ivpu_hw_info *hw = vdev->hw;
-
- if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
- return pll_ratio_to_freq_mtl(ratio, hw->config);
- else
- return PLL_RATIO_TO_FREQ(ratio);
-}
-
-static u32 pll_freq_get_mtl(struct ivpu_device *vdev)
-{
- u32 pll_curr_ratio;
-
- pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
- pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK;
-
- if (!ivpu_is_silicon(vdev))
- return PLL_SIMULATION_FREQ;
-
- return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config);
-}
-
-static u32 pll_freq_get_lnl(struct ivpu_device *vdev)
-{
- u32 pll_curr_ratio;
-
- pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
- pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK;
-
- return PLL_RATIO_TO_FREQ(pll_curr_ratio);
-}
-
-u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev)
-{
- if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
- return pll_freq_get_mtl(vdev);
- else
- return pll_freq_get_lnl(vdev);
-}
-
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
{
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
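Worked example for the new helpers above: with PLL_REF_CLK_FREQ = 50 MHz, a workpoint ratio R yields a PLL clock of R x 50 MHz, and the DPU runs at 2/3 of that on MTL and 1/2 of it on LNL. A standalone arithmetic check with a hypothetical ratio of 0x20:

#include <stdio.h>

#define PLL_REF_CLK_FREQ	50000000ull
#define PLL_RATIO_TO_FREQ(x)	((x) * PLL_REF_CLK_FREQ)

int main(void)
{
	unsigned int ratio = 0x20; /* hypothetical workpoint ratio */

	printf("PLL:     %llu Hz\n", PLL_RATIO_TO_FREQ(ratio));         /* 1600 MHz */
	printf("MTL DPU: %llu Hz\n", PLL_RATIO_TO_FREQ(ratio) * 2 / 3); /* ~1066 MHz */
	printf("LNL DPU: %llu Hz\n", PLL_RATIO_TO_FREQ(ratio) / 2);     /* 800 MHz */
	return 0;
}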
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index 1fd71b4d4ab0..300f749971d4 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_HW_BTRS_H__
@@ -13,7 +13,6 @@
#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
-#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
#define DCT_DEFAULT_ACTIVE_PERCENT 15u
#define DCT_PERIOD_US 35300u
@@ -32,12 +31,12 @@ int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev);
void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev);
+u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
-u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev);
-u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 004059e4f1e8..863e3cd6ace5 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -470,8 +470,8 @@ static void ivpu_job_destroy(struct ivpu_job *job)
struct ivpu_device *vdev = job->vdev;
u32 i;
- ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
- job->job_id, job->file_priv->ctx.id, job->engine_idx);
+ ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d cmdq_id %u engine %d",
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx);
for (i = 0; i < job->bo_count; i++)
if (job->bos[i])
@@ -564,8 +564,8 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
dma_fence_signal(job->done_fence);
trace_job("done", job);
- ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
- job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
+ ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n",
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status);
ivpu_job_destroy(job);
ivpu_stop_job_timeout_detection(vdev);
@@ -664,8 +664,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
}
trace_job("submit", job);
- ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
- job->job_id, file_priv->ctx.id, job->engine_idx, cmdq->priority,
+ ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d cmdq_id %u engine %d prio %d addr 0x%llx next %d\n",
+ job->job_id, file_priv->ctx.id, cmdq->id, job->engine_idx, cmdq->priority,
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
mutex_unlock(&file_priv->lock);
@@ -777,7 +777,8 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
goto err_free_handles;
}
- ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n", file_priv->ctx.id, buffer_count);
+ ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u cmdq_id %u buf_count %u\n",
+ file_priv->ctx.id, cmdq_id, buffer_count);
job = ivpu_job_create(file_priv, engine, buffer_count);
if (!job) {
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
index 97102feaf8dd..268ab7744a8b 100644
--- a/drivers/accel/ivpu/ivpu_sysfs.c
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/units.h>
#include "ivpu_drv.h"
#include "ivpu_gem.h"
@@ -90,10 +92,55 @@ sched_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(sched_mode);
+/**
+ * DOC: npu_max_frequency_mhz
+ *
+ * The npu_max_frequency_mhz file shows the maximum frequency in MHz of the
+ * NPU's data processing unit.
+ */
+static ssize_t
+npu_max_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ u32 freq = ivpu_hw_dpu_max_freq_get(vdev);
+
+ return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ);
+}
+
+static DEVICE_ATTR_RO(npu_max_frequency_mhz);
+
+/**
+ * DOC: npu_current_frequency_mhz
+ *
+ * The npu_current_frequency_mhz file shows the current frequency in MHz of
+ * the NPU's data processing unit.
+ */
+static ssize_t
+npu_current_frequency_mhz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ u32 freq = 0;
+
+ /* Read frequency only if device is active, otherwise frequency is 0 */
+ if (pm_runtime_get_if_active(vdev->drm.dev) > 0) {
+ freq = ivpu_hw_dpu_freq_get(vdev);
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+ }
+
+ return sysfs_emit(buf, "%lu\n", freq / HZ_PER_MHZ);
+}
+
+static DEVICE_ATTR_RO(npu_current_frequency_mhz);
+
static struct attribute *ivpu_dev_attrs[] = {
&dev_attr_npu_busy_time_us.attr,
&dev_attr_npu_memory_utilization.attr,
&dev_attr_sched_mode.attr,
+ &dev_attr_npu_max_frequency_mhz.attr,
+ &dev_attr_npu_current_frequency_mhz.attr,
NULL,
};
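Hedged usage sketch for the two new attributes: as device attributes they should appear in the NPU PCI device's sysfs directory (the BDF below is an assumption; substitute the real one):

#include <stdio.h>

int main(void)
{
	/* Illustrative path; the actual PCI address varies per system. */
	const char *path =
		"/sys/bus/pci/devices/0000:00:0b.0/npu_current_frequency_mhz";
	FILE *f = fopen(path, "r");
	unsigned int mhz = 0;

	if (f && fscanf(f, "%u", &mhz) == 1)
		printf("NPU DPU frequency: %u MHz\n", mhz); /* 0 when suspended */
	if (f)
		fclose(f);
	return 0;
}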
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
index 908e68ea1c39..218468bbbcad 100644
--- a/drivers/accel/ivpu/vpu_boot_api.h
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -26,7 +26,7 @@
* Minor version changes when API backward compatibility is preserved.
* Resets to 0 if Major version is incremented.
*/
-#define VPU_BOOT_API_VER_MINOR 26
+#define VPU_BOOT_API_VER_MINOR 28
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -76,8 +76,15 @@ struct vpu_firmware_header {
* submission queue size and device capabilities.
*/
u32 preemption_buffer_2_size;
+ /*
+ * Maximum preemption buffer size that the FW can use: no need for the host
+ * driver to allocate more space than that specified by these fields.
+ * A value of 0 means no declared limit.
+ */
+ u32 preemption_buffer_1_max_size;
+ u32 preemption_buffer_2_max_size;
/* Space reserved for future preemption-related fields. */
- u32 preemption_reserved[6];
+ u32 preemption_reserved[4];
/* FW image read only section start address, 4KB aligned */
u64 ro_section_start_address;
/* FW image read only section size, 4KB aligned */
@@ -134,7 +141,7 @@ enum vpu_trace_destination {
/*
* Processor bit shifts (for loggable HW components).
*/
-#define VPU_TRACE_PROC_BIT_ARM 0
+#define VPU_TRACE_PROC_BIT_RESERVED 0
#define VPU_TRACE_PROC_BIT_LRT 1
#define VPU_TRACE_PROC_BIT_LNN 2
#define VPU_TRACE_PROC_BIT_SHV_0 3
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 7215c144158c..4b6b2b3d2583 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -22,7 +22,7 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 25
+#define VPU_JSM_API_VER_MINOR 29
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -53,8 +53,7 @@
* Engine indexes.
*/
#define VPU_ENGINE_COMPUTE 0
-#define VPU_ENGINE_COPY 1
-#define VPU_ENGINE_NB 2
+#define VPU_ENGINE_NB 1
/*
* VPU status values.
@@ -126,11 +125,13 @@ enum {
* When set, indicates that job queue uses native fences (as inline commands
* in job queue). Such queues may also use legacy fences (as commands in batch buffers).
* When cleared, indicates the job queue only uses legacy fences.
- * NOTE: For queues using native fences, VPU expects that all jobs in the queue
- * are immediately followed by an inline command object. This object is expected
- * to be a fence signal command in most cases, but can also be a NOP in case the host
- * does not need per-job fence signalling. Other inline commands objects can be
- * inserted between "job and inline command" pairs.
+ * NOTES:
+ * 1. For queues using native fences, VPU expects that all jobs in the queue
+ * are immediately followed by an inline command object. This object is expected
+ * to be a fence signal command in most cases, but can also be a NOP in case the host
+ * does not need per-job fence signalling. Other inline commands objects can be
+ * inserted between "job and inline command" pairs.
+ * 2. Native fence queues are only supported on VPU 40xx onwards.
*/
VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
@@ -275,6 +276,8 @@ struct vpu_inline_cmd {
u64 value;
/* User VA of the log buffer in which to add log entry on completion. */
u64 log_buffer_va;
+ /* NPU private data. */
+ u64 npu_private_data;
} fence;
/* Other commands do not have a payload. */
/* Payload definition for future inline commands can be inserted here. */
@@ -791,12 +794,22 @@ struct vpu_jsm_metric_streamer_update {
/** Metric group mask that identifies metric streamer instance. */
u64 metric_group_mask;
/**
- * Address and size of the buffer where the VPU will write metric data. If
- * the buffer address is 0 or same as the currently used buffer the VPU will
- * continue writing metric data to the current buffer. In this case the
- * buffer size is ignored and the size of the current buffer is unchanged.
- * If the address is non-zero and differs from the current buffer address the
- * VPU will immediately switch data collection to the new buffer.
+ * Address and size of the buffer where the VPU will write metric data.
+ * These members determine which of two update operations is performed:
+ * 1. the client needs information about the number of collected samples
+ * and the amount of data written to the current buffer
+ * 2. the client wants to switch to a new buffer
+ *
+ * Case 1. is identified by the buffer address being 0 or the same as the
+ * currently used buffer address. In this case the buffer size is ignored and
+ * the size of the current buffer is unchanged. The VPU will return an update
+ * in the vpu_jsm_metric_streamer_done structure. The internal writing position
+ * into the buffer is not changed.
+ *
+ * Case 2. is identified by the address being non-zero and differing from the
+ * current buffer address. The VPU will immediately switch data collection to
+ * the new buffer. Then the VPU will return an update in the
+ * vpu_jsm_metric_streamer_done structure.
*/
u64 buffer_addr;
u64 buffer_size;
@@ -934,6 +947,7 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
* Default quantum in 100ns units for scheduling across processes
* within a priority band
+ * Minimum value supported by NPU is 1ms (10000 in 100ns units).
*/
u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
/*
@@ -946,8 +960,10 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* in situations when it's starved by the focus band.
*/
u32 normal_band_percentage;
- /* Reserved */
- u32 reserved_0;
+ /*
+ * TDR timeout value in milliseconds. Default value of 0 meaning no timeout.
+ */
+ u32 tdr_timeout;
};
/*
@@ -1024,7 +1040,10 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
s32 in_process_priority;
/* Zero padding / Reserved */
u32 reserved_1;
- /* Context quantum relative to other contexts of same priority in the same process */
+ /*
+ * Context quantum relative to other contexts of the same priority in the
+ * same process.
+ * Minimum value supported by NPU is 1ms (10000 in 100ns units).
+ */
u64 context_quantum;
/* Grace period when preempting context of the same priority within the same process */
u64 grace_period_same_priority;
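A sketch of the two metric-streamer update cases documented above, using only fields from this header; the mask and buffer variables are hypothetical and the message transport is elided:

/* Case 1: query progress only. A buffer_addr of 0 (or the current address)
 * means no switch; buffer_size is ignored. */
struct vpu_jsm_metric_streamer_update query = {
	.metric_group_mask = group_mask,	/* hypothetical */
	.buffer_addr = 0,
	.buffer_size = 0,
};

/* Case 2: rotate buffers. A non-zero address different from the current
 * one makes the VPU switch collection immediately. */
struct vpu_jsm_metric_streamer_update rotate = {
	.metric_group_mask = group_mask,
	.buffer_addr = new_buffer_va,		/* hypothetical */
	.buffer_size = new_buffer_size,		/* hypothetical */
};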
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 6f4fe47c955b..6481c48c22bb 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
* expected_return_btypes - Allowed type(s) for the return value
*/
struct acpi_name_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] __nonstring;
u16 argument_list;
u8 expected_btypes;
};
@@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
converted_object);
struct acpi_simple_repair_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] __nonstring;
u32 unexpected_btypes;
u32 package_index;
acpi_object_converter object_converter;
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 1bb7b71f07f1..330b5e4711da 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
return_object_ptr);
typedef struct acpi_repair_info {
- char name[ACPI_NAMESEG_SIZE];
+ char name[ACPI_NAMESEG_SIZE] __nonstring;
acpi_repair_function repair_function;
} acpi_repair_info;
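Context for the three __nonstring annotations: ACPI name segments are fixed 4-byte codes stored without a NUL terminator, and GCC 15's -Wunterminated-string-initialization warns when a string literal exactly fills a char array. The attribute records that the array is intentionally not a C string. Minimal illustration (the userspace fallback define is an assumption):

#ifndef __nonstring
#define __nonstring __attribute__((__nonstring__)) /* GCC >= 8 */
#endif

/* Exactly 4 bytes, deliberately no trailing NUL: unsafe for str*(). */
static const char seg[4] __nonstring = "_CRS";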
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 2551ebf88dda..e48b24be45ee 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -388,12 +388,6 @@ config BLK_DEV_UBLK
definition isn't finalized yet, and might change according to future
requirements, so mark it as experimental for now.
- Say Y if you want to get better performance because task_work_add()
- can be used in IO path for replacing io_uring cmd, which will become
- shared between IO tasks and ubq daemon, meantime task_work_add() can
- can handle batch more effectively, but task_work_add() isn't exported
- for module, so ublk has to be built to kernel.
-
config BLKDEV_UBLK_LEGACY_OPCODES
bool "Support legacy command opcode"
depends on BLK_DEV_UBLK
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 674527d770dc..46cba261075f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -211,72 +211,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
-static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
-{
- struct iov_iter i;
- ssize_t bw;
-
- iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
-
- bw = vfs_iter_write(file, &i, ppos, 0);
-
- if (likely(bw == bvec->bv_len))
- return 0;
-
- printk_ratelimited(KERN_ERR
- "loop: Write error at byte offset %llu, length %i.\n",
- (unsigned long long)*ppos, bvec->bv_len);
- if (bw >= 0)
- bw = -EIO;
- return bw;
-}
-
-static int lo_write_simple(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- int ret = 0;
-
- rq_for_each_segment(bvec, rq, iter) {
- ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
- if (ret < 0)
- break;
- cond_resched();
- }
-
- return ret;
-}
-
-static int lo_read_simple(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec;
- struct req_iterator iter;
- struct iov_iter i;
- ssize_t len;
-
- rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
- len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
- if (len < 0)
- return len;
-
- flush_dcache_page(bvec.bv_page);
-
- if (len != bvec.bv_len) {
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- zero_fill_bio(bio);
- break;
- }
- cond_resched();
- }
-
- return 0;
-}
-
static void loop_clear_limits(struct loop_device *lo, int mode)
{
struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
@@ -342,7 +276,7 @@ static void lo_complete_rq(struct request *rq)
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
blk_status_t ret = BLK_STS_OK;
- if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+ if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
req_op(rq) != REQ_OP_READ) {
if (cmd->ret < 0)
ret = errno_to_blk_status(cmd->ret);
@@ -358,14 +292,13 @@ static void lo_complete_rq(struct request *rq)
cmd->ret = 0;
blk_mq_requeue_request(rq, true);
} else {
- if (cmd->use_aio) {
- struct bio *bio = rq->bio;
+ struct bio *bio = rq->bio;
- while (bio) {
- zero_fill_bio(bio);
- bio = bio->bi_next;
- }
+ while (bio) {
+ zero_fill_bio(bio);
+ bio = bio->bi_next;
}
+
ret = BLK_STS_IOERR;
end_io:
blk_mq_end_request(rq, ret);
@@ -445,9 +378,14 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
- cmd->iocb.ki_complete = lo_rw_aio_complete;
- cmd->iocb.ki_flags = IOCB_DIRECT;
- cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ cmd->iocb.ki_ioprio = req_get_ioprio(rq);
+ if (cmd->use_aio) {
+ cmd->iocb.ki_complete = lo_rw_aio_complete;
+ cmd->iocb.ki_flags = IOCB_DIRECT;
+ } else {
+ cmd->iocb.ki_complete = NULL;
+ cmd->iocb.ki_flags = 0;
+ }
if (rw == ITER_SOURCE)
ret = file->f_op->write_iter(&cmd->iocb, &iter);
@@ -458,7 +396,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
if (ret != -EIOCBQUEUED)
lo_rw_aio_complete(&cmd->iocb, ret);
- return 0;
+ return -EIOCBQUEUED;
}
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
@@ -466,15 +404,6 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
- /*
- * lo_write_simple and lo_read_simple should have been covered
- * by io submit style function like lo_rw_aio(), one blocker
- * is that lo_read_simple() need to call flush_dcache_page after
- * the page is written from kernel, and it isn't easy to handle
- * this in io submit style function which submits all segments
- * of the req at one time. And direct read IO doesn't need to
- * run flush_dcache_page().
- */
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
@@ -490,15 +419,9 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
- else
- return lo_write_simple(lo, rq, pos);
+ return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
case REQ_OP_READ:
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, ITER_DEST);
- else
- return lo_read_simple(lo, rq, pos);
+ return lo_rw_aio(lo, cmd, pos, ITER_DEST);
default:
WARN_ON_ONCE(1);
return -EIO;
@@ -662,19 +585,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
* dependency.
*/
fput(old_file);
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
if (partscan)
loop_reread_partitions(lo);
error = 0;
done:
- /* enable and uncork uevent now that we are done */
- dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
return error;
out_err:
loop_global_unlock(lo, is_loop);
out_putf:
fput(file);
+ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
goto done;
}
@@ -1129,8 +1053,8 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
- /* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
loop_global_unlock(lo, is_loop);
if (partscan)
@@ -1921,7 +1845,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
- const bool use_aio = cmd->use_aio;
if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
@@ -1951,7 +1874,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
}
failed:
/* complete non-aio request */
- if (!use_aio || ret) {
+ if (ret != -EIOCBQUEUED) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else
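With the simple paths gone, every READ/WRITE goes through lo_rw_aio(), which now always returns -EIOCBQUEUED and owns completion: for direct I/O ki_complete fires asynchronously, while for buffered I/O ki_complete is NULL, ->read_iter/->write_iter runs synchronously and lo_rw_aio() invokes lo_rw_aio_complete() itself before returning. Condensed sketch of the resulting convention (names from the diff, error mapping abridged):

/* loop_handle_cmd(), condensed: */
ret = do_req_filebacked(lo, rq);
if (ret != -EIOCBQUEUED) {
	/* Only FLUSH/DISCARD/WRITE_ZEROES (or early errors) land here;
	 * READ/WRITE were completed via lo_rw_aio_complete(), either
	 * inline (buffered) or from aio completion (direct I/O). */
	cmd->ret = (ret == -EOPNOTSUPP) ? ret : (ret ? -EIO : 0);
	/* ...then complete the blk-mq request. */
}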
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index cdb1543fa4a9..2de7b2bd409d 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -123,15 +123,6 @@ struct ublk_uring_cmd_pdu {
#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
/*
- * IO command is aborted, so this flag is set in case of
- * !UBLK_IO_FLAG_ACTIVE.
- *
- * After this flag is observed, any pending or new incoming request
- * associated with this io command will be failed immediately
- */
-#define UBLK_IO_FLAG_ABORTED 0x04
-
-/*
* UBLK_IO_FLAG_NEED_GET_DATA is set because IO command requires
* get data buffer address from ublksrv.
*
@@ -199,8 +190,6 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
-
- struct work_struct nosrv_work;
};
/* header of ublk_params */
@@ -209,8 +198,8 @@ struct ublk_params_header {
__u32 types;
};
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq);
-
+static void ublk_stop_dev_unlocked(struct ublk_device *ub);
+static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
struct ublk_queue *ubq, int tag, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
@@ -1074,7 +1063,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
- return ubq->ubq_daemon->flags & PF_EXITING;
+ return !ubq->ubq_daemon || ubq->ubq_daemon->flags & PF_EXITING;
}
/* todo: handle partial completion */
@@ -1085,12 +1074,6 @@ static inline void __ublk_complete_rq(struct request *req)
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
- /* called from ublk_abort_queue() code path */
- if (io->flags & UBLK_IO_FLAG_ABORTED) {
- res = BLK_STS_IOERR;
- goto exit;
- }
-
/* failed read IO if nothing is read */
if (!io->res && req_op(req) == REQ_OP_READ)
io->res = -EIO;
@@ -1140,47 +1123,6 @@ static void ublk_complete_rq(struct kref *ref)
__ublk_complete_rq(req);
}
-static void ublk_do_fail_rq(struct request *req)
-{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
-
- if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
- blk_mq_requeue_request(req, false);
- else
- __ublk_complete_rq(req);
-}
-
-static void ublk_fail_rq_fn(struct kref *ref)
-{
- struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
- ref);
- struct request *req = blk_mq_rq_from_pdu(data);
-
- ublk_do_fail_rq(req);
-}
-
-/*
- * Since ublk_rq_task_work_cb always fails requests immediately during
- * exiting, __ublk_fail_req() is only called from abort context during
- * exiting. So lock is unnecessary.
- *
- * Also aborting may not be started yet, keep in mind that one failed
- * request may be issued by block layer again.
- */
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
- struct request *req)
-{
- WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
-
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
- kref_put(&data->ref, ublk_fail_rq_fn);
- } else {
- ublk_do_fail_rq(req);
- }
-}
-
static void ubq_complete_io_cmd(struct ublk_io *io, int res,
unsigned issue_flags)
{
@@ -1336,8 +1278,6 @@ static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
- unsigned int nr_inflight = 0;
- int i;
if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
if (!ubq->timeout) {
@@ -1348,26 +1288,6 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
return BLK_EH_DONE;
}
- if (!ubq_daemon_is_dying(ubq))
- return BLK_EH_RESET_TIMER;
-
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
-
- if (!(io->flags & UBLK_IO_FLAG_ACTIVE))
- nr_inflight++;
- }
-
- /* cancelable uring_cmd can't help us if all commands are in-flight */
- if (nr_inflight == ubq->q_depth) {
- struct ublk_device *ub = ubq->dev;
-
- if (ublk_abort_requests(ub, ubq)) {
- schedule_work(&ub->nosrv_work);
- }
- return BLK_EH_DONE;
- }
-
return BLK_EH_RESET_TIMER;
}
@@ -1470,6 +1390,37 @@ static const struct blk_mq_ops ublk_mq_ops = {
.timeout = ublk_timeout,
};
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ int i;
+
+ /* All old ioucmds have to be completed */
+ ubq->nr_io_ready = 0;
+
+ /*
+ * old daemon is PF_EXITING, put it now
+ *
+ * It could be NULL when closing a quiesced device.
+ */
+ if (ubq->ubq_daemon)
+ put_task_struct(ubq->ubq_daemon);
+ /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+ ubq->ubq_daemon = NULL;
+ ubq->timeout = false;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ /*
+ * UBLK_IO_FLAG_CANCELED is kept to avoid touching
+ * io->cmd
+ */
+ io->flags &= UBLK_IO_FLAG_CANCELED;
+ io->cmd = NULL;
+ io->addr = 0;
+ }
+}
+
static int ublk_ch_open(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = container_of(inode->i_cdev,
@@ -1481,10 +1432,119 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
return 0;
}
+static void ublk_reset_ch_dev(struct ublk_device *ub)
+{
+ int i;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+
+ /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+ ub->mm = NULL;
+ ub->nr_queues_ready = 0;
+ ub->nr_privileged_daemon = 0;
+}
+
+static struct gendisk *ublk_get_disk(struct ublk_device *ub)
+{
+ struct gendisk *disk;
+
+ spin_lock(&ub->lock);
+ disk = ub->ub_disk;
+ if (disk)
+ get_device(disk_to_dev(disk));
+ spin_unlock(&ub->lock);
+
+ return disk;
+}
+
+static void ublk_put_disk(struct gendisk *disk)
+{
+ if (disk)
+ put_device(disk_to_dev(disk));
+}
+
static int ublk_ch_release(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = filp->private_data;
+ struct gendisk *disk;
+ int i;
+
+ /*
+ * disk isn't attached yet: either the device isn't live, or it has
+ * been removed already, so there is nothing to do
+ */
+ disk = ublk_get_disk(ub);
+ if (!disk)
+ goto out;
+
+ /*
+ * All uring_cmd are done now, so abort any request outstanding to
+ * the ublk server
+ *
+ * This can be done in lockless way because ublk server has been
+ * gone
+ *
+ * More importantly, we have to guarantee forward progress without
+ * holding ub->mutex, otherwise a control task grabbing ub->mutex
+ * would deadlock
+ *
+ * All requests may be inflight, so ->canceling may not be set yet;
+ * set it now.
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+ ubq->canceling = true;
+ ublk_abort_queue(ub, ubq);
+ }
+ blk_mq_kick_requeue_list(disk->queue);
+
+ /*
+ * All inflight requests have been completed or requeued, and any new
+ * request will be failed or requeued via `->canceling`, so it is
+ * now safe to grab ub->mutex.
+ */
+ mutex_lock(&ub->mutex);
+
+ /* double check after grabbing lock */
+ if (!ub->ub_disk)
+ goto unlock;
+
+ /*
+ * Transition the device to the nosrv state. What exactly this
+ * means depends on the recovery flags
+ */
+ blk_mq_quiesce_queue(disk->queue);
+ if (ublk_nosrv_should_stop_dev(ub)) {
+ /*
+ * Allow any pending/future I/O to pass through quickly
+ * with an error. This is needed because del_gendisk
+ * waits for all pending I/O to complete
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->force_abort = true;
+ blk_mq_unquiesce_queue(disk->queue);
+
+ ublk_stop_dev_unlocked(ub);
+ } else {
+ if (ublk_nosrv_dev_should_queue_io(ub)) {
+ /* ->canceling is set and all requests are aborted */
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+ } else {
+ ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->fail_io = true;
+ }
+ blk_mq_unquiesce_queue(disk->queue);
+ }
+unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_disk(disk);
+
+ /* all uring_cmd are done now, reset device & ubq */
+ ublk_reset_ch_dev(ub);
+out:
clear_bit(UB_STATE_OPEN, &ub->state);
return 0;
}
@@ -1551,10 +1611,26 @@ static void ublk_commit_completion(struct ublk_device *ub,
ublk_put_req_ref(ubq, req);
}
+static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+ struct request *req)
+{
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
+
+ if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+ blk_mq_requeue_request(req, false);
+ else {
+ io->res = -EIO;
+ __ublk_complete_rq(req);
+ }
+}
+
/*
- * Called from ubq_daemon context via cancel fn, meantime quiesce ublk
- * blk-mq queue, so we are called exclusively with blk-mq and ubq_daemon
- * context, so everything is serialized.
+ * Called from the ublk char device release handler, after all uring_cmd
+ * are done; meanwhile the request queue is "quiesced" since inflight
+ * requests can't complete because the ublk server is dead.
+ *
+ * So no one can hold our request IO reference any more; simply ignore
+ * the reference and complete the request immediately
*/
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
{
@@ -1571,46 +1647,29 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
* will do it
*/
rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
- if (rq && blk_mq_request_started(rq)) {
- io->flags |= UBLK_IO_FLAG_ABORTED;
+ if (rq && blk_mq_request_started(rq))
__ublk_fail_req(ubq, io, rq);
- }
}
}
}
/* Must be called when queue is frozen */
-static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
+static void ublk_mark_queue_canceling(struct ublk_queue *ubq)
{
- bool canceled;
-
spin_lock(&ubq->cancel_lock);
- canceled = ubq->canceling;
- if (!canceled)
+ if (!ubq->canceling)
ubq->canceling = true;
spin_unlock(&ubq->cancel_lock);
-
- return canceled;
}
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+static void ublk_start_cancel(struct ublk_queue *ubq)
{
- bool was_canceled = ubq->canceling;
- struct gendisk *disk;
-
- if (was_canceled)
- return false;
-
- spin_lock(&ub->lock);
- disk = ub->ub_disk;
- if (disk)
- get_device(disk_to_dev(disk));
- spin_unlock(&ub->lock);
+ struct ublk_device *ub = ubq->dev;
+ struct gendisk *disk = ublk_get_disk(ub);
/* Our disk has been dead */
if (!disk)
- return false;
-
+ return;
/*
* Now we are serialized with ublk_queue_rq()
*
@@ -1619,15 +1678,9 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
* touch completed uring_cmd
*/
blk_mq_quiesce_queue(disk->queue);
- was_canceled = ublk_mark_queue_canceling(ubq);
- if (!was_canceled) {
- /* abort queue is for making forward progress */
- ublk_abort_queue(ub, ubq);
- }
+ ublk_mark_queue_canceling(ubq);
blk_mq_unquiesce_queue(disk->queue);
- put_device(disk_to_dev(disk));
-
- return !was_canceled;
+ ublk_put_disk(disk);
}
static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
@@ -1651,6 +1704,17 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
/*
* The ublk char device won't be closed when calling cancel fn, so both
* ublk device and queue are guaranteed to be live
+ *
+ * Two-stage cancel:
+ *
+ * - complete every active uring_cmd in ->cancel_fn()
+ *
+ * - abort inflight ublk IO requests in the ublk char device release
+ * handler, which depends on the 1st stage because the device can only
+ * be closed after all uring_cmd are done
+ *
+ * Do _not_ try to acquire ub->mutex before all inflight requests are
+ * aborted, otherwise a deadlock may occur.
*/
static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
unsigned int issue_flags)
@@ -1658,8 +1722,6 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
struct task_struct *task;
- struct ublk_device *ub;
- bool need_schedule;
struct ublk_io *io;
if (WARN_ON_ONCE(!ubq))
@@ -1672,16 +1734,12 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
if (WARN_ON_ONCE(task && task != ubq->ubq_daemon))
return;
- ub = ubq->dev;
- need_schedule = ublk_abort_requests(ub, ubq);
+ if (!ubq->canceling)
+ ublk_start_cancel(ubq);
io = &ubq->ios[pdu->tag];
WARN_ON_ONCE(io->cmd != cmd);
ublk_cancel_cmd(ubq, io, issue_flags);
-
- if (need_schedule) {
- schedule_work(&ub->nosrv_work);
- }
}
static inline bool ublk_queue_ready(struct ublk_queue *ubq)
@@ -1732,33 +1790,20 @@ static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
}
}
-static void __ublk_quiesce_dev(struct ublk_device *ub)
-{
- pr_devel("%s: quiesce ub: dev_id %d state %s\n",
- __func__, ub->dev_info.dev_id,
- ub->dev_info.state == UBLK_S_DEV_LIVE ?
- "LIVE" : "QUIESCED");
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ublk_wait_tagset_rqs_idle(ub);
- ub->dev_info.state = UBLK_S_DEV_QUIESCED;
-}
-
-static void ublk_unquiesce_dev(struct ublk_device *ub)
+static void ublk_force_abort_dev(struct ublk_device *ub)
{
int i;
- pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
+ pr_devel("%s: force abort ub: dev_id %d state %s\n",
__func__, ub->dev_info.dev_id,
ub->dev_info.state == UBLK_S_DEV_LIVE ?
"LIVE" : "QUIESCED");
- /* quiesce_work has run. We let requeued rqs be aborted
- * before running fallback_wq. "force_abort" must be seen
- * after request queue is unqiuesced. Then del_gendisk()
- * can move on.
- */
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
+ ublk_wait_tagset_rqs_idle(ub);
+
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->force_abort = true;
-
blk_mq_unquiesce_queue(ub->ub_disk->queue);
/* We may have requeued some rqs in ublk_quiesce_queue() */
blk_mq_kick_requeue_list(ub->ub_disk->queue);
@@ -1779,61 +1824,51 @@ static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
return disk;
}
-static void ublk_stop_dev(struct ublk_device *ub)
+static void ublk_stop_dev_unlocked(struct ublk_device *ub)
+ __must_hold(&ub->mutex)
{
struct gendisk *disk;
- mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
- goto unlock;
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- if (ub->dev_info.state == UBLK_S_DEV_LIVE)
- __ublk_quiesce_dev(ub);
- ublk_unquiesce_dev(ub);
- }
+ return;
+
+ if (ublk_nosrv_dev_should_queue_io(ub))
+ ublk_force_abort_dev(ub);
del_gendisk(ub->ub_disk);
disk = ublk_detach_disk(ub);
put_disk(disk);
- unlock:
+}
+
+static void ublk_stop_dev(struct ublk_device *ub)
+{
+ mutex_lock(&ub->mutex);
+ ublk_stop_dev_unlocked(ub);
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
}
-static void ublk_nosrv_work(struct work_struct *work)
+/* reset ublk io_uring queue & io flags */
+static void ublk_reset_io_flags(struct ublk_device *ub)
{
- struct ublk_device *ub =
- container_of(work, struct ublk_device, nosrv_work);
- int i;
+ int i, j;
- if (ublk_nosrv_should_stop_dev(ub)) {
- ublk_stop_dev(ub);
- return;
- }
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
- mutex_lock(&ub->mutex);
- if (ub->dev_info.state != UBLK_S_DEV_LIVE)
- goto unlock;
-
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- __ublk_quiesce_dev(ub);
- } else {
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- ublk_get_queue(ub, i)->fail_io = true;
- }
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ /* UBLK_IO_FLAG_CANCELED can be cleared now */
+ spin_lock(&ubq->cancel_lock);
+ for (j = 0; j < ubq->q_depth; j++)
+ ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
+ spin_unlock(&ubq->cancel_lock);
+ ubq->canceling = false;
+ ubq->fail_io = false;
}
-
- unlock:
- mutex_unlock(&ub->mutex);
- ublk_cancel_dev(ub);
}
/* device can only be started after all IOs are ready */
static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+ __must_hold(&ub->mutex)
{
- mutex_lock(&ub->mutex);
ubq->nr_io_ready++;
if (ublk_queue_ready(ubq)) {
ubq->ubq_daemon = current;
@@ -1843,9 +1878,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
if (capable(CAP_SYS_ADMIN))
ub->nr_privileged_daemon++;
}
- if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
+
+ if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+ /* now we are ready for handling ublk io requests */
+ ublk_reset_io_flags(ub);
complete_all(&ub->completion);
- mutex_unlock(&ub->mutex);
+ }
}
static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
@@ -1929,6 +1967,52 @@ static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
+ struct ublk_io *io, __u64 buf_addr)
+{
+ struct ublk_device *ub = ubq->dev;
+ int ret = 0;
+
+ /*
+ * When handling the FETCH command for setting up the ublk uring queue,
+ * ub->mutex is the innermost lock, and we won't block for handling
+ * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
+ */
+ mutex_lock(&ub->mutex);
+ /* UBLK_IO_FETCH_REQ is only allowed before the queue is set up */
+ if (ublk_queue_ready(ubq)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* allow each command to be FETCHed at most once */
+ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
+
+ if (ublk_need_map_io(ubq)) {
+ /*
+ * FETCH_RQ has to provide IO buffer if NEED GET
+ * DATA is not enabled
+ */
+ if (!buf_addr && !ublk_need_get_data(ubq)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else if (buf_addr) {
+ /* User copy requires addr to be unset */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ublk_fill_io_cmd(io, cmd, buf_addr);
+ ublk_mark_io_ready(ub, ubq);
+out:
+ mutex_unlock(&ub->mutex);
+ return ret;
+}
+
static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
const struct ublksrv_io_cmd *ub_cmd)
@@ -1985,33 +2069,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_IO_UNREGISTER_IO_BUF:
return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
case UBLK_IO_FETCH_REQ:
- /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
- if (ublk_queue_ready(ubq)) {
- ret = -EBUSY;
- goto out;
- }
- /*
- * The io is being handled by server, so COMMIT_RQ is expected
- * instead of FETCH_REQ
- */
- if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
- goto out;
-
- if (ublk_need_map_io(ubq)) {
- /*
- * FETCH_RQ has to provide IO buffer if NEED GET
- * DATA is not enabled
- */
- if (!ub_cmd->addr && !ublk_need_get_data(ubq))
- goto out;
- } else if (ub_cmd->addr) {
- /* User copy requires addr to be unset */
- ret = -EINVAL;
+ ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+ if (ret)
goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
@@ -2411,7 +2471,6 @@ static void ublk_remove(struct ublk_device *ub)
bool unprivileged;
ublk_stop_dev(ub);
- cancel_work_sync(&ub->nosrv_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
ublk_put_device(ub);
@@ -2696,7 +2755,6 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
- INIT_WORK(&ub->nosrv_work, ublk_nosrv_work);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2828,7 +2886,6 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
ublk_stop_dev(ub);
- cancel_work_sync(&ub->nosrv_work);
return 0;
}
@@ -2932,42 +2989,14 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
return ret;
}
-static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
-{
- int i;
-
- WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
-
- /* All old ioucmds have to be completed */
- ubq->nr_io_ready = 0;
- /* old daemon is PF_EXITING, put it now */
- put_task_struct(ubq->ubq_daemon);
- /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
- ubq->ubq_daemon = NULL;
- ubq->timeout = false;
- ubq->canceling = false;
-
- for (i = 0; i < ubq->q_depth; i++) {
- struct ublk_io *io = &ubq->ios[i];
-
- /* forget everything now and be ready for new FETCH_REQ */
- io->flags = 0;
- io->cmd = NULL;
- io->addr = 0;
- }
-}
-
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
const struct ublksrv_ctrl_cmd *header)
{
int ret = -EINVAL;
- int i;
mutex_lock(&ub->mutex);
if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
- if (!ub->nr_queues_ready)
- goto out_unlock;
/*
* START_RECOVERY is only allowed after:
*
@@ -2991,12 +3020,6 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
goto out_unlock;
}
pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_queue_reinit(ub, ublk_get_queue(ub, i));
- /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
- ub->mm = NULL;
- ub->nr_queues_ready = 0;
- ub->nr_privileged_daemon = 0;
init_completion(&ub->completion);
ret = 0;
out_unlock:
@@ -3009,7 +3032,6 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
{
int ublksrv_pid = (int)header->data[0];
int ret = -EINVAL;
- int i;
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -3029,24 +3051,10 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
goto out_unlock;
}
ub->dev_info.ublksrv_pid = ublksrv_pid;
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
__func__, ublksrv_pid, header->dev_id);
-
- if (ublk_nosrv_dev_should_queue_io(ub)) {
- ub->dev_info.state = UBLK_S_DEV_LIVE;
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- pr_devel("%s: queue unquiesced, dev id %d.\n",
- __func__, header->dev_id);
- blk_mq_kick_requeue_list(ub->ub_disk->queue);
- } else {
- blk_mq_quiesce_queue(ub->ub_disk->queue);
- ub->dev_info.state = UBLK_S_DEV_LIVE;
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- ublk_get_queue(ub, i)->fail_io = false;
- }
- blk_mq_unquiesce_queue(ub->ub_disk->queue);
- }
-
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
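
Several of the reworked ublk helpers above (ublk_stop_dev_unlocked(), ublk_mark_io_ready()) carry a __must_hold() annotation instead of taking ub->mutex themselves. As a minimal sketch of what that sparse annotation documents, with demo_dev and demo_op as illustrative names not taken from this patch:

	struct demo_dev {
		struct mutex lock;
		int count;
	};

	/* sparse flags callers that do not hold d->lock at the call site */
	static void demo_op_locked(struct demo_dev *d)
		__must_hold(&d->lock)
	{
		d->count++;
	}

	static void demo_op(struct demo_dev *d)
	{
		mutex_lock(&d->lock);
		demo_op_locked(d);
		mutex_unlock(&d->lock);
	}
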
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5f04951d0dd4..088182e54deb 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1576,8 +1576,8 @@ static void handle_control_message(struct virtio_device *vdev,
break;
case VIRTIO_CONSOLE_RESIZE: {
struct {
- __u16 rows;
- __u16 cols;
+ __virtio16 cols;
+ __virtio16 rows;
} size;
if (!is_console_port(port))
@@ -1585,7 +1585,8 @@ static void handle_control_message(struct virtio_device *vdev,
memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
sizeof(size));
- set_console_size(port, size.rows, size.cols);
+ set_console_size(port, virtio16_to_cpu(vdev, size.rows),
+ virtio16_to_cpu(vdev, size.cols));
port->cons.hvc->irq_requested = 1;
resize_console(port);
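
The virtio_console fix above corrects both the field order of the resize message (cols is moved ahead of rows, matching the device layout) and the endianness handling. As a rough sketch of the rule being applied: virtio fields are declared with the __virtio16 types and converted per device, since legacy devices use guest-native byte order while modern ones are little-endian. The struct below is illustrative, not the driver's:

	struct demo_resize {
		__virtio16 cols;
		__virtio16 rows;
	};

	static void demo_read_size(struct virtio_device *vdev,
				   const struct demo_resize *msg,
				   u16 *rows, u16 *cols)
	{
		*rows = virtio16_to_cpu(vdev, msg->rows);
		*cols = virtio16_to_cpu(vdev, msg->cols);
	}
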
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 6789eed1bb5b..b961f3a3b580 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -607,13 +607,16 @@ static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
union perf_cached perf = READ_ONCE(cpudata->perf);
perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
- perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+ WRITE_ONCE(cpudata->max_limit_freq, policy->max);
- if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq));
+ } else {
+ perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+ }
- WRITE_ONCE(cpudata->max_limit_freq, policy->max);
- WRITE_ONCE(cpudata->min_limit_freq, policy->min);
WRITE_ONCE(cpudata->perf, perf);
}
@@ -791,16 +794,6 @@ static void amd_perf_ctl_reset(unsigned int cpu)
wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}
-/*
- * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks
- * due to locking, so queue the work for later.
- */
-static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
-{
- sched_set_itmt_support();
-}
-static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
-
#define CPPC_MAX_PERF U8_MAX
static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
@@ -811,14 +804,8 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
cpudata->hw_prefcore = true;
- /*
- * The priorities can be set regardless of whether or not
- * sched_set_itmt_support(true) has been called and it is valid to
- * update them at any time after it has been called.
- */
+ /* Priorities must be initialized before ITMT support can be toggled on. */
sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
-
- schedule_work(&sched_prefcore_work);
}
static void amd_pstate_update_limits(unsigned int cpu)
@@ -1193,6 +1180,9 @@ static ssize_t show_energy_performance_preference(
static void amd_pstate_driver_cleanup(void)
{
+ if (amd_pstate_prefcore)
+ sched_clear_itmt_support();
+
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
@@ -1235,6 +1225,10 @@ static int amd_pstate_register_driver(int mode)
return ret;
}
+ /* Enable ITMT support once all CPUs have initialized their asym priorities. */
+ if (amd_pstate_prefcore)
+ sched_set_itmt_support();
+
return 0;
}
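
The ITMT rework above enforces an ordering: per-CPU core priorities are set as each CPU initializes, sched_set_itmt_support() is called once after all CPUs are done, and sched_clear_itmt_support() mirrors it on cleanup. A condensed sketch of that ordering, with prio_of() as a made-up stand-in for however a driver derives the priority:

	static int demo_enable_itmt(void)
	{
		int cpu;

		for_each_online_cpu(cpu)
			sched_set_itmt_core_prio(prio_of(cpu), cpu);

		/* only after every priority is in place */
		return sched_set_itmt_support();
	}

	static void demo_disable_itmt(void)
	{
		sched_clear_itmt_support();	/* mirror on teardown */
	}
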
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3841c9da6cac..acf19b0042bb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -540,8 +540,6 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
{
unsigned int idx;
- target_freq = clamp_val(target_freq, policy->min, policy->max);
-
if (!policy->freq_table)
return target_freq;
@@ -565,7 +563,22 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+ unsigned int min = READ_ONCE(policy->min);
+ unsigned int max = READ_ONCE(policy->max);
+
+ /*
+ * If this function runs in parallel with cpufreq_set_policy(), it may
+ * read policy->min before the update and policy->max after the update
+ * or the other way around, so there is no ordering guarantee.
+ *
+ * Resolve this by always honoring the max (in case it comes from
+ * thermal throttling or similar).
+ */
+ if (unlikely(min > max))
+ min = max;
+
+ return __resolve_freq(policy, clamp_val(target_freq, min, max),
+ CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -2384,6 +2397,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
target_freq = __resolve_freq(policy, target_freq, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
@@ -2708,11 +2722,15 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* Resolve policy min/max to available frequencies. It ensures
* that frequency resolution will neither overshoot the requested maximum
* nor undershoot the requested minimum.
+ *
+ * Avoid storing intermediate values in policy->max or policy->min, and
+ * prevent the compiler from optimizing accesses to them, because they
+ * may be read concurrently by cpufreq_driver_resolve_freq() during the
+ * update.
*/
- policy->min = new_data.min;
- policy->max = new_data.max;
- policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
- policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
+ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H));
+ new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L);
+ WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
+
trace_cpu_frequency_limits(policy);
cpufreq_update_pressure(policy);
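
The pattern above is a lockless reader/writer handshake: the writer publishes policy->min and policy->max with WRITE_ONCE() (max first, with min already clipped to max), and the reader repairs any torn pairing it still observes. A stripped-down sketch of the reader side, assuming both limits are only ever written with WRITE_ONCE():

	static unsigned int demo_resolve(unsigned int target,
					 const unsigned int *pmin,
					 const unsigned int *pmax)
	{
		unsigned int min = READ_ONCE(*pmin);
		unsigned int max = READ_ONCE(*pmax);

		/* a concurrent update may leave min > max; trust the max */
		if (unlikely(min > max))
			min = max;

		return clamp_val(target, min, max);
	}
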
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4aad79d26c64..f41ed0b9e610 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2209,7 +2209,7 @@ static int knl_get_turbo_pstate(int cpu)
static int hwp_get_cpu_scaling(int cpu)
{
if (hybrid_scaling_factor) {
- struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
u8 cpu_type = c->topo.intel_type;
/*
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 75bebec2c757..0fcf4a39de27 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -163,6 +163,12 @@ static int atmel_sha204a_probe(struct i2c_client *client)
i2c_priv->hwrng.name = dev_name(&client->dev);
i2c_priv->hwrng.read = atmel_sha204a_rng_read;
+ /*
+ * According to review by Bill Cox [1], this HWRNG has very low entropy.
+ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html
+ */
+ i2c_priv->hwrng.quality = 1;
+
ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng);
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index f5905d67dedb..22a808995f10 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -438,15 +438,17 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return -EINVAL;
pt = dma_fence_to_sync_pt(fence);
- if (!pt)
- return -EINVAL;
+ if (!pt) {
+ ret = -EINVAL;
+ goto put_fence;
+ }
spin_lock_irqsave(fence->lock, flags);
- if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
- data.deadline_ns = ktime_to_ns(pt->deadline);
- } else {
+ if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
ret = -ENOENT;
+ goto unlock;
}
+ data.deadline_ns = ktime_to_ns(pt->deadline);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_put(fence);
@@ -458,6 +460,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return -EFAULT;
return 0;
+
+unlock:
+ spin_unlock_irqrestore(fence->lock, flags);
+put_fence:
+ dma_fence_put(fence);
+
+ return ret;
}
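
The sw_sync change converts early returns into a single unwind path so the fence reference taken by the lookup is always dropped. The general shape, as a hedged sketch with a hypothetical demo_obj and reference helpers:

	static int demo_query(struct demo_obj *obj, u64 *out)
	{
		unsigned long flags;
		int ret = 0;

		demo_get(obj);			/* hypothetical reference helper */
		spin_lock_irqsave(&obj->lock, flags);
		if (!obj->ready) {
			ret = -ENOENT;
			goto unlock;
		}
		*out = obj->value;
	unlock:
		spin_unlock_irqrestore(&obj->lock, flags);
		demo_put(obj);			/* every path drops the reference */
		return ret;
	}
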
static long sw_sync_ioctl(struct file *file, unsigned int cmd,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index b8197502a5ac..cd4fecbb41f2 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2879,7 +2879,7 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* output-only, but if there is then not even a .set() operation it
* is pretty tricky to drive the output line.
*/
- if (!guard.gc->set && !guard.gc->direction_output) {
+ if (!guard.gc->set && !guard.gc->set_rv && !guard.gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() and direction_output() operations\n",
__func__);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2c04ae133848..ef6e78224fdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1123,6 +1123,7 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ suspend_state_t last_suspend_state;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b34b915203f2..7f354cd532dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3510,6 +3510,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_device_mem_scratch_fini(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_seq64_fini(adev);
+ amdgpu_doorbell_fini(adev);
}
if (adev->ip_blocks[i].version->funcs->sw_fini) {
r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
@@ -4858,7 +4859,6 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
drm_dev_exit(idx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 26bf896f1444..24ee4710f807 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2548,8 +2548,20 @@ static int amdgpu_pmops_suspend(struct device *dev)
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
- if (!adev->in_s0ix && !adev->in_s3)
+ if (!adev->in_s0ix && !adev->in_s3) {
+ /* don't allow a deep suspend state the first time followed by s2idle the next time */
+ if (adev->last_suspend_state != PM_SUSPEND_ON &&
+ adev->last_suspend_state != pm_suspend_target_state) {
+ drm_err_once(drm_dev, "Unsupported suspend state %d\n",
+ pm_suspend_target_state);
+ return -EINVAL;
+ }
return 0;
+ }
+
+ /* cache the state last used for suspend */
+ adev->last_suspend_state = pm_suspend_target_state;
+
return amdgpu_device_suspend(drm_dev, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 72af5e5a894a..cf2df7790077 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1438,9 +1438,11 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct drm_gpu_scheduler *sched = &ring->sched;
struct drm_sched_entity entity;
+ static atomic_t counter;
struct dma_fence *f;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
+ void *owner;
int i, r;
/* Initialize the scheduler entity */
@@ -1451,9 +1453,15 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
goto err;
}
- r = amdgpu_job_alloc_with_ib(ring->adev, &entity, NULL,
- 64, 0,
- &job);
+ /*
+ * Use some unique dummy value as the owner to make sure we execute
+ * the cleaner shader on each submission. The value just needs to change
+ * for each submission and is otherwise meaningless.
+ */
+ owner = (void *)(unsigned long)atomic_inc_return(&counter);
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
+ 64, 0, &job);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index a63ce747863f..23e6a05359c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -6114,7 +6114,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -6192,7 +6192,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -6269,7 +6269,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -6644,7 +6644,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index d57db42f9536..2a5c2a1ae3c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -2428,7 +2428,7 @@ static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2472,7 +2472,7 @@ static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2517,7 +2517,7 @@ static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
}
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -3153,7 +3153,7 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -3371,7 +3371,7 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -4541,7 +4541,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index e7b58e470292..62a257a4a3e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -2324,7 +2324,7 @@ static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
@@ -2468,7 +2468,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
if (amdgpu_emu_mode == 1)
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
@@ -3426,7 +3426,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
if (r)
return r;
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 95d894a231fc..809b3a882d0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -268,7 +268,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -969,7 +969,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
adev->hdp.funcs->init_registers(adev);
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index ad099f136f84..e74e26b6a4f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -229,7 +229,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -899,7 +899,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index 05c026d0b0d9..c6f290704d47 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -297,7 +297,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
/* flush hdp cache */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
/* This is necessary for SRIOV as well as for GFXOFF to function
* properly under bare metal
@@ -881,7 +881,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
return r;
/* Flush HDP after it is initialized */
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 783e0c3b86b4..5effe8327d29 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -2435,7 +2435,7 @@ static int gmc_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
adev->hdp.funcs->init_registers(adev);
/* After HDP is initialized, flush HDP.*/
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index bb5dfc410a66..215543575f47 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -533,7 +533,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index cc621064610f..afdf8ce3b4c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -610,7 +610,7 @@ static int psp_v13_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
index 7c49c3f3c388..256288c6cd78 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
@@ -498,7 +498,7 @@ static int psp_v14_0_memory_training(struct psp_context *psp, uint32_t ops)
}
memcpy_toio(adev->mman.aper_base_kaddr, buf, sz);
- adev->hdp.funcs->flush_hdp(adev, NULL);
+ amdgpu_device_flush_hdp(adev, NULL);
vfree(buf);
drm_dev_exit(idx);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 86d8bc10d90a..9b3510e53112 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -239,6 +239,13 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
};
static const struct amdgpu_video_codecs cz_video_codecs_decode =
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 94e99e540691..5d16f36ec95c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -2,6 +2,7 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
+#include <linux/vmalloc.h>
#include "dml2_internal_types.h"
#include "dml_top.h"
@@ -13,11 +14,11 @@
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
- *dml_ctx = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ *dml_ctx = vzalloc(sizeof(struct dml2_context));
if (!(*dml_ctx))
return false;
- (*dml_ctx)->v21.dml_init.dml2_instance = kzalloc(sizeof(struct dml2_instance), GFP_KERNEL);
+ (*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance));
if (!((*dml_ctx)->v21.dml_init.dml2_instance))
return false;
@@ -27,7 +28,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
- (*dml_ctx)->v21.mode_programming.programming = kzalloc(sizeof(struct dml2_display_cfg_programming), GFP_KERNEL);
+ (*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming));
if (!((*dml_ctx)->v21.mode_programming.programming))
return false;
@@ -115,8 +116,8 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const s
void dml21_destroy(struct dml2_context *dml2)
{
- kfree(dml2->v21.dml_init.dml2_instance);
- kfree(dml2->v21.mode_programming.programming);
+ vfree(dml2->v21.dml_init.dml2_instance);
+ vfree(dml2->v21.mode_programming.programming);
}
static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index f549a778f6f1..e89571874185 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -24,6 +24,8 @@
*
*/
+#include <linux/vmalloc.h>
+
#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
@@ -747,7 +749,7 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2
static inline struct dml2_context *dml2_allocate_memory(void)
{
- return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ return (struct dml2_context *) vzalloc(sizeof(struct dml2_context));
}
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
@@ -821,7 +823,7 @@ void dml2_destroy(struct dml2_context *dml2)
if (dml2->architecture == dml2_architecture_21)
dml21_destroy(dml2);
- kfree(dml2);
+ vfree(dml2);
}
void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 717286981687..7a3bb77c7af7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -161,6 +161,7 @@ struct intel_display_platforms {
#define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13)
#define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb)
#define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc)
+#define HAS_DSC_3ENGINES(__display) (DISPLAY_VERx100(__display) == 1401 && HAS_DSC(__display))
#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
#define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 9476aaa91900..392c3653d0d7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1050,10 +1050,11 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes;
/*
- * 3 DSC Slices per pipe need 3 DSC engines,
- * which is supported only with Ultrajoiner.
+ * 3 DSC slices per pipe need 3 DSC engines, which is supported with
+ * the Ultrajoiner only on some platforms.
*/
- if (valid_dsc_slicecount[i] == 3 && num_joined_pipes != 4)
+ if (valid_dsc_slicecount[i] == 3 &&
+ (!HAS_DSC_3ENGINES(display) || num_joined_pipes != 4))
continue;
if (test_slice_count >
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index fb71658c3117..6067d08aeee3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -223,7 +223,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
vsyncstr = mode->crtc_vsync_start - 1;
vsyncend = mode->crtc_vsync_end - 1;
vtotal = mode->crtc_vtotal - 2;
- vblkstr = mode->crtc_vblank_start;
+ vblkstr = mode->crtc_vblank_start - 1;
vblkend = vtotal + 1;
linecomp = vdispend;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 06465bc2d0b4..242d02d48c0c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -242,10 +242,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
- OUT_RING(ring, submit->cmd[i].size);
+ OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
ibs++;
break;
}
@@ -377,10 +377,10 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
- OUT_RING(ring, submit->cmd[i].size);
+ OUT_RING(ring, A5XX_CP_INDIRECT_BUFFER_2_IB_SIZE(submit->cmd[i].size));
ibs++;
break;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
index 1f32807bb5e5..ad60089f18ea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_14_msm8937.h
@@ -132,7 +132,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x268,
@@ -141,7 +140,6 @@ static const struct dpu_intf_cfg msm8937_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
index 42131959ff22..a1cf89a0a42d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_15_msm8917.h
@@ -118,7 +118,6 @@ static const struct dpu_intf_cfg msm8917_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
index 2b4723a5c676..eea9b80e2287 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_16_msm8953.h
@@ -131,7 +131,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x268,
@@ -140,7 +139,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x268,
@@ -149,7 +147,6 @@ static const struct dpu_intf_cfg msm8953_intf[] = {
.prog_fetch_lines_worst_case = 14,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
index 5cf19de71f06..ae18a354e5d2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_1_7_msm8996.h
@@ -241,7 +241,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x268,
@@ -250,7 +249,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x268,
@@ -259,7 +257,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x268,
@@ -267,7 +264,6 @@ static const struct dpu_intf_cfg msm8996_intf[] = {
.prog_fetch_lines_worst_case = 25,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
index 4f2f68b07f20..bb89da0a481d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -202,7 +202,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x280,
@@ -211,7 +210,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x280,
@@ -220,7 +218,6 @@ static const struct dpu_intf_cfg sdm660_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
index c70bef025ac4..7caf876ca3e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -147,7 +147,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
- .intr_tear_rd_ptr = -1,
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x280,
@@ -156,7 +155,6 @@ static const struct dpu_intf_cfg sdm630_intf[] = {
.prog_fetch_lines_worst_case = 21,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
- .intr_tear_rd_ptr = -1,
},
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 8610bbf2b87c..862e9e6bf0a5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1666,7 +1666,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
*/
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
- struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys->parent);
+ struct dpu_encoder_virt *dpu_enc;
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
@@ -1678,6 +1678,8 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
return;
}
+ dpu_enc = to_dpu_encoder_virt(phys->parent);
+
if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
dpu_enc->cwb_mask) {
DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent));
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index af3e541f60c3..e03d6091f736 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -729,12 +729,40 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
struct dpu_sw_pipe *pipe,
struct dpu_sw_pipe_cfg *pipe_cfg,
- const struct msm_format *fmt,
- const struct drm_display_mode *mode)
+ const struct drm_display_mode *mode,
+ struct drm_plane_state *new_plane_state)
{
uint32_t min_src_size;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
+ const struct msm_format *fmt;
+ uint32_t supported_rotations;
+ const struct dpu_sspp_cfg *pipe_hw_caps;
+ const struct dpu_sspp_sub_blks *sblk;
+
+ pipe_hw_caps = pipe->sspp->cap;
+ sblk = pipe->sspp->cap->sblk;
+
+ /*
+ * We have already verified scaling against platform limitations.
+ * Now check if the SSPP supports scaling at all.
+ */
+ if (!sblk->scaler_blk.len &&
+ ((drm_rect_width(&new_plane_state->src) >> 16 !=
+ drm_rect_width(&new_plane_state->dst)) ||
+ (drm_rect_height(&new_plane_state->src) >> 16 !=
+ drm_rect_height(&new_plane_state->dst))))
+ return -ERANGE;
+
+ fmt = msm_framebuffer_format(new_plane_state->fb);
+
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_90;
+
+ pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation,
+ supported_rotations);
min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -923,47 +951,20 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
- const struct msm_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
- uint32_t supported_rotations;
- const struct dpu_sspp_cfg *pipe_hw_caps;
- const struct dpu_sspp_sub_blks *sblk;
int ret = 0;
- pipe_hw_caps = pipe->sspp->cap;
- sblk = pipe->sspp->cap->sblk;
-
- /*
- * We already have verified scaling against platform limitations.
- * Now check if the SSPP supports scaling at all.
- */
- if (!sblk->scaler_blk.len &&
- ((drm_rect_width(&new_plane_state->src) >> 16 !=
- drm_rect_width(&new_plane_state->dst)) ||
- (drm_rect_height(&new_plane_state->src) >> 16 !=
- drm_rect_height(&new_plane_state->dst))))
- return -ERANGE;
-
- fmt = msm_framebuffer_format(new_plane_state->fb);
-
- supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
-
- if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
- supported_rotations |= DRM_MODE_ROTATE_90;
-
- pipe_cfg->rotation = drm_rotation_simplify(new_plane_state->rotation,
- supported_rotations);
- r_pipe_cfg->rotation = pipe_cfg->rotation;
-
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt,
- &crtc_state->adjusted_mode);
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
+ &crtc_state->adjusted_mode,
+ new_plane_state);
if (ret)
return ret;
if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
- ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
- &crtc_state->adjusted_mode);
+ ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg,
+ &crtc_state->adjusted_mode,
+ new_plane_state);
if (ret)
return ret;
}
@@ -1059,6 +1060,9 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state;
int ret;
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
if (plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
plane_state->crtc);
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 55a35182858c..5a6ae9fc3194 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -2259,5 +2259,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
+<domain name="CP_INDIRECT_BUFFER" width="32" varset="chip" prefix="chip" variants="A5XX-">
+ <reg64 offset="0" name="IB_BASE" type="address"/>
+ <reg32 offset="2" name="2">
+ <bitfield name="IB_SIZE" low="0" high="19"/>
+ </reg32>
+</domain>
+
</database>
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 34c42d6e12cd..4a7701a33cf8 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -428,7 +428,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
- u32 *wg_counts;
+ struct v3d_dev *v3d = job->base.v3d;
+ u32 num_batches, *wg_counts;
v3d_get_bo_vaddr(bo);
v3d_get_bo_vaddr(indirect);
@@ -441,8 +442,17 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
- args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
- (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
+
+ num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+ (wg_counts[0] * wg_counts[1] * wg_counts[2]);
+
+ /* V3D 7.1.6 and later don't subtract 1 from the number of batches */
+ if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6))
+ args->cfg[4] = num_batches - 1;
+ else
+ args->cfg[4] = num_batches;
+
+ WARN_ON(args->cfg[4] == ~0);
for (int i = 0; i < 3; i++) {
/* 0xffffffff indicates that the uniform rewrite is not needed */
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 2d88e390feb4..e32e680c7197 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -128,6 +128,14 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
drm_dev_put(dev);
}
+static void virtio_gpu_shutdown(struct virtio_device *vdev)
+{
+ /*
+ * drm does its own synchronization on shutdown.
+ * Do nothing here and opt out of the device reset.
+ */
+}
+
static void virtio_gpu_config_changed(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
@@ -162,6 +170,7 @@ static struct virtio_driver virtio_gpu_driver = {
.id_table = id_table,
.probe = virtio_gpu_probe,
.remove = virtio_gpu_remove,
+ .shutdown = virtio_gpu_shutdown,
.config_changed = virtio_gpu_config_changed
};
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index f67803e15a0e..f7a20264ea33 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -145,10 +145,7 @@ static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct dma_buf *dma_buf = attach->dmabuf;
- struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);
-
- if (!xe_bo_is_vram(bo)) {
+ if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index e7c9e095a19f..7031542a70ce 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -490,24 +490,52 @@ static void fill_engine_enable_masks(struct xe_gt *gt,
engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
}
-static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads)
+/*
+ * Write the offsets corresponding to the golden LRCs. The actual data is
+ * populated later by guc_golden_lrc_populate().
+ */
+static void guc_golden_lrc_init(struct xe_guc_ads *ads)
{
struct xe_device *xe = ads_to_xe(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
offsetof(struct __guc_ads_blob, system_info));
- u8 guc_class;
+ size_t alloc_size, real_size;
+ u32 addr_ggtt, offset;
+ int class;
+
+ offset = guc_ads_golden_lrc_offset(ads);
+ addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+
+ for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ u8 guc_class;
+
+ guc_class = xe_engine_class_to_guc_class(class);
- for (guc_class = 0; guc_class <= GUC_MAX_ENGINE_CLASSES; ++guc_class) {
if (!info_map_read(xe, &info_map,
engine_enabled_masks[guc_class]))
continue;
+ real_size = xe_gt_lrc_size(gt, class);
+ alloc_size = PAGE_ALIGN(real_size);
+
+ /*
+ * This interface is slightly confusing. We need to pass the
+ * base address of the full golden context and the size of just
+ * the engine state, which is the section of the context image
+ * that starts after the execlists LRC registers. This is
+ * required to allow the GuC to restore just the engine state
+ * when a watchdog reset occurs.
+ * We calculate the engine state size by removing the size of
+ * what comes before it in the context image (which is identical
+ * on all engines).
+ */
ads_blob_write(ads, ads.eng_state_size[guc_class],
- guc_ads_golden_lrc_size(ads) -
- xe_lrc_skip_size(xe));
+ real_size - xe_lrc_skip_size(xe));
ads_blob_write(ads, ads.golden_context_lrca[guc_class],
- xe_bo_ggtt_addr(ads->bo) +
- guc_ads_golden_lrc_offset(ads));
+ addr_ggtt);
+
+ addr_ggtt += alloc_size;
}
}
@@ -857,7 +885,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
guc_policies_init(ads);
- guc_prep_golden_lrc_null(ads);
+ guc_golden_lrc_init(ads);
guc_mapping_table_init_invalid(gt, &info_map);
guc_doorbell_init(ads);
@@ -883,7 +911,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
guc_policies_init(ads);
fill_engine_enable_masks(gt, &info_map);
guc_mmio_reg_state_init(ads);
- guc_prep_golden_lrc_null(ads);
+ guc_golden_lrc_init(ads);
guc_mapping_table_init(gt, &info_map);
guc_capture_prep_lists(ads);
guc_doorbell_init(ads);
@@ -903,18 +931,22 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
guc_ads_private_data_offset(ads));
}
-static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
+/*
+ * After the golden LRCs are recorded for each engine class by the first
+ * submission, copy them to the ADS, as initialized earlier by
+ * guc_golden_lrc_init().
+ */
+static void guc_golden_lrc_populate(struct xe_guc_ads *ads)
{
struct xe_device *xe = ads_to_xe(ads);
struct xe_gt *gt = ads_to_gt(ads);
struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
offsetof(struct __guc_ads_blob, system_info));
size_t total_size = 0, alloc_size, real_size;
- u32 addr_ggtt, offset;
+ u32 offset;
int class;
offset = guc_ads_golden_lrc_offset(ads);
- addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
u8 guc_class;
@@ -931,26 +963,9 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
alloc_size = PAGE_ALIGN(real_size);
total_size += alloc_size;
- /*
- * This interface is slightly confusing. We need to pass the
- * base address of the full golden context and the size of just
- * the engine state, which is the section of the context image
- * that starts after the execlists LRC registers. This is
- * required to allow the GuC to restore just the engine state
- * when a watchdog reset occurs.
- * We calculate the engine state size by removing the size of
- * what comes before it in the context image (which is identical
- * on all engines).
- */
- ads_blob_write(ads, ads.eng_state_size[guc_class],
- real_size - xe_lrc_skip_size(xe));
- ads_blob_write(ads, ads.golden_context_lrca[guc_class],
- addr_ggtt);
-
xe_map_memcpy_to(xe, ads_to_map(ads), offset,
gt->default_lrc[class], real_size);
- addr_ggtt += alloc_size;
offset += alloc_size;
}
@@ -959,7 +974,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
{
- guc_populate_golden_lrc(ads);
+ guc_golden_lrc_populate(ads);
}
static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
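
The refactoring above splits ADS setup into two phases that must agree on layout: guc_golden_lrc_init() walks the engine classes, records each class's GGTT address, and advances by the page-aligned slot size, while guc_golden_lrc_populate() later performs the same walk to copy the recorded context images into those slots. A tiny sketch of the invariant, factoring the shared layout rule into one hypothetical helper so the two walks cannot drift apart (demo_slot_size() stands in for xe_gt_lrc_size()):

	static u32 demo_slot_offset(u32 base, int class)
	{
		u32 off = base;
		int i;

		/* both phases derive a class's slot from the same rule */
		for (i = 0; i < class; i++)
			off += PAGE_ALIGN(demo_slot_size(i));

		return off;
	}
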
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index c3cc0fa105e8..57b71956ddf4 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -19,29 +19,6 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
return (end - start) >> PAGE_SHIFT;
}
-/**
- * xe_mark_range_accessed() - mark a range is accessed, so core mm
- * have such information for memory eviction or write back to
- * hard disk
- * @range: the range to mark
- * @write: if write to this range, we mark pages in this range
- * as dirty
- */
-static void xe_mark_range_accessed(struct hmm_range *range, bool write)
-{
- struct page *page;
- u64 i, npages;
-
- npages = xe_npages_in_range(range->start, range->end);
- for (i = 0; i < npages; i++) {
- page = hmm_pfn_to_page(range->hmm_pfns[i]);
- if (write)
- set_page_dirty_lock(page);
-
- mark_page_accessed(page);
- }
-}
-
static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
struct hmm_range *range, struct rw_semaphore *notifier_sem)
{
@@ -331,7 +308,6 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
if (ret)
goto out_unlock;
- xe_mark_range_accessed(&hmm_range, write);
userptr->sg = &userptr->sgt;
xe_hmm_userptr_set_mapped(uvma);
userptr->notifier_seq = hmm_range.notifier_seq;
diff --git a/drivers/gpu/drm/xe/xe_pxp_debugfs.c b/drivers/gpu/drm/xe/xe_pxp_debugfs.c
index ccfbacf08efc..525a2f6bb076 100644
--- a/drivers/gpu/drm/xe/xe_pxp_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_pxp_debugfs.c
@@ -66,9 +66,18 @@ static int pxp_terminate(struct seq_file *m, void *data)
{
struct xe_pxp *pxp = node_to_pxp(m->private);
struct drm_printer p = drm_seq_file_printer(m);
+ int ready = xe_pxp_get_readiness_status(pxp);
- if (!xe_pxp_is_enabled(pxp))
- return -ENODEV;
+ if (ready < 0)
+ return ready; /* disabled or error occurred */
+ else if (!ready)
+ return -EBUSY; /* init still in progress */
+
+ /* no need for a termination if PXP is not active */
+ if (pxp->status != XE_PXP_ACTIVE) {
+ drm_printf(&p, "PXP not active\n");
+ return 0;
+ }
/* simulate a termination interrupt */
spin_lock_irq(&pxp->xe->irq.lock);
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 43bf90d90eeb..208ce4f9e782 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -247,6 +247,9 @@ static int ec_i2c_probe(struct platform_device *pdev)
u32 remote_bus;
int err;
+ if (!ec)
+ return dev_err_probe(dev, -EPROBE_DEFER, "couldn't find parent EC device\n");
+
if (!ec->cmd_xfer) {
dev_err(dev, "Missing sendrecv\n");
return -EINVAL;
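
dev_err_probe(), used above, both records the deferred-probe reason and returns the error code, and for -EPROBE_DEFER it logs at debug level so a retried probe does not spam the console. A minimal sketch of the idiom, assuming a probe that must find some parent handle first:

	static int demo_probe(struct platform_device *pdev)
	{
		void *parent = dev_get_drvdata(pdev->dev.parent);

		if (!parent)
			return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
					     "parent device not ready\n");

		return 0;
	}
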
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index 8fe9ddff8e96..783fb8df2ebe 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -8,12 +8,12 @@
* Originally based on i2c-mux.c
*/
-#include <linux/fwnode.h>
#include <linux/i2c-atr.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 08975c60e325..7bc341c69697 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -535,8 +535,8 @@ static int ak8974_detect(struct ak8974 *ak8974)
fab_data2, sizeof(fab_data2));
for (i = 0; i < 3; ++i) {
- static const char axis[3] = "XYZ";
- static const char pgaxis[6] = "ZYZXYX";
+ static const char axis[] = "XYZ";
+ static const char pgaxis[] = "ZYZXYX";
unsigned offz = le16_to_cpu(fab_data2[i]) & 0x7F;
unsigned fine = le16_to_cpu(fab_data1[i]);
unsigned sens = le16_to_cpu(fab_data1[i + 3]);
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index 2eaa25c9c68c..d73389af4dd5 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -48,7 +48,7 @@ struct magellan {
static int magellan_crunch_nibbles(unsigned char *data, int count)
{
- static unsigned char nibbles[16] = "0AB3D56GH9:K<MN?";
+ static unsigned char nibbles[16] __nonstring = "0AB3D56GH9:K<MN?";
do {
if (data[count] == nibbles[data[count] & 0xf])
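
The magellan hunk (and the carl9170 one further down) tags a char array with __nonstring, the kernel's wrapper for GCC's "nonstring" attribute: the array is a lookup table, not a C string, and the 16-character literal exactly fills the 16-byte array, leaving no room for a NUL terminator. Recent GCC versions warn about such initializers unless the attribute is present. A standalone sketch using the raw attribute (available since GCC 8):

#include <stdio.h>

/* 16-byte lookup table, deliberately unterminated: the literal exactly
 * fills the array. The attribute tells the compiler this is intentional
 * and that string functions must not be used on it. */
static const char nibbles[16] __attribute__((nonstring)) = "0AB3D56GH9:K<MN?";

int main(void)
{
        /* Index or iterate with an explicit bound only; strlen() or
         * printf("%s") would run past the array, as there is no NUL. */
        for (size_t i = 0; i < sizeof(nibbles); i++)
                putchar(nibbles[i]);
        putchar('\n');
        return 0;
}
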
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index cec05e443083..08bb3b031f23 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -114,8 +114,8 @@ config I8259
config BCM2712_MIP
tristate "Broadcom BCM2712 MSI-X Interrupt Peripheral support"
- depends on ARCH_BRCMSTB || COMPILE_TEST
- default m if ARCH_BRCMSTB
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
+ default m if ARCH_BRCMSTB || ARCH_BCM2835
depends on ARM_GIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN_HIERARCHY
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index 3d5b5fdf9bde..0f0fd7d4dfdf 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -170,6 +170,14 @@ static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
else
tssr &= ~ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k));
+
+ /*
+ * A glitch in the edge detection circuit can cause a spurious
+ * interrupt. Clear the status flag after setting the ICU_TSSRk
+ * registers, which is recommended by the hardware manual as a
+ * countermeasure.
+ */
+ writel_relaxed(BIT(tint_nr), priv->base + priv->info->t_offs + ICU_TSCLR);
}
static void rzv2h_icu_irq_disable(struct irq_data *d)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e42f1400cea9..813b38aec3e4 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -546,7 +546,7 @@ static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
- static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
+ static const char zero_uuid[16] = { 0 };
return uuid_find(c, zero_uuid);
}
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 44ec9b17cfd3..37b08f26c62f 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2357,9 +2357,8 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats)
if (!bitmap)
return -ENOENT;
- if (bitmap->mddev->bitmap_info.external)
- return -ENOENT;
- if (!bitmap->storage.sb_page) /* no superblock */
+ if (!bitmap->mddev->bitmap_info.external &&
+ !bitmap->storage.sb_page)
return -EINVAL;
sb = kmap_local_page(bitmap->storage.sb_page);
stats->sync_size = le64_to_cpu(sb->sync_size);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0efc03cea24e..de9bccbe7337 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2200,14 +2200,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (!rdev_set_badblocks(rdev, sect, s, 0))
abort = 1;
}
- if (abort) {
- conf->recovery_disabled =
- mddev->recovery_disabled;
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_done_sync(mddev, r1_bio->sectors, 0);
- put_buf(r1_bio);
+ if (abort)
return 0;
- }
+
/* Try next page */
sectors -= s;
sect += s;
@@ -2346,10 +2341,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
int disks = conf->raid_disks * 2;
struct bio *wbio;
- if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
- /* ouch - failed to read all of that. */
- if (!fix_sync_read_error(r1_bio))
+ if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
+ /*
+ * ouch - failed to read all of that.
+ * No need to fix read error for check/repair
+ * because all member disks are read.
+ */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) ||
+ !fix_sync_read_error(r1_bio)) {
+ conf->recovery_disabled = mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_done_sync(mddev, r1_bio->sectors, 0);
+ put_buf(r1_bio);
return;
+ }
+ }
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
process_checks(r1_bio);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 846c5f29486e..ba32bac975b8 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1735,6 +1735,7 @@ retry_discard:
* The discard bio returns only first r10bio finishes
*/
if (first_copy) {
+ md_account_bio(mddev, &bio);
r10_bio->master_bio = bio;
set_bit(R10BIO_Discard, &r10_bio->state);
first_copy = false;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 73c07f10f053..379b6e90121d 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9064,6 +9064,8 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
msi_vec[i].entry = i;
}
+ pdev->dev_flags |= PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST;
+
num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 4c1aecd1163c..419f5530f885 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -15,7 +15,7 @@
#include "fwcmd.h"
#include "version.h"
-static const u8 otus_magic[4] = { OTUS_MAGIC };
+static const u8 otus_magic[4] __nonstring = { OTUS_MAGIC };
static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4],
const unsigned int len, const u8 compatible_revision)
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index 2c6e8ecd93b7..ee99298eebf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -1099,7 +1099,8 @@ iwl_mld_set_netdetect_info(struct iwl_mld *mld,
if (!match)
return;
- netdetect_info->matches[netdetect_info->n_matches++] = match;
+ netdetect_info->matches[netdetect_info->n_matches] = match;
+ netdetect_info->n_matches++;
/* We inverted the order of the SSIDs in the scan
* request, so invert the index here.
@@ -1116,9 +1117,11 @@ iwl_mld_set_netdetect_info(struct iwl_mld *mld,
for_each_set_bit(j,
(unsigned long *)&matches[i].matching_channels[0],
- sizeof(matches[i].matching_channels))
- match->channels[match->n_channels++] =
+ sizeof(matches[i].matching_channels)) {
+ match->channels[match->n_channels] =
netdetect_cfg->channels[j]->center_freq;
+ match->n_channels++;
+ }
}
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b502ac07483b..eb6ea8acb3cc 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4300,7 +4300,7 @@ static void nvme_scan_work(struct work_struct *work)
if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
nvme_queue_scan(ctrl);
#ifdef CONFIG_NVME_MULTIPATH
- else
+ else if (ctrl->ana_log_buf)
/* Re-read the ANA log page to not miss updates */
queue_work(nvme_wq, &ctrl->ana_work);
#endif
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 05eccd96d34a..250f3da67cc9 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -1051,6 +1051,13 @@ void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head)
list_for_each_entry_rcu(ns, &head->list, siblings) {
/*
+ * Ensure that the ns path disk node is already added, otherwise we
+ * may get an invalid kobj name for the target
+ */
+ if (!test_bit(GD_ADDED, &ns->disk->state))
+ continue;
+
+ /*
* Avoid creating link if it already exists for the given path.
* When path ana state transitions from optimized to non-
* optimized or vice-versa, the nvme_mpath_set_live() is
@@ -1065,13 +1072,6 @@ void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head)
if (test_and_set_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags))
continue;
- /*
- * Ensure that ns path disk node is already added otherwise we
- * may get invalid kobj name for target
- */
- if (!test_bit(GD_ADDED, &ns->disk->state))
- continue;
-
target = disk_to_dev(ns->disk);
/*
* Create sysfs link from head gendisk kobject @kobj to the
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 0b0645ac5df4..cef8d77f477b 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -240,7 +240,7 @@ void nvmet_auth_sq_free(struct nvmet_sq *sq)
{
cancel_delayed_work(&sq->auth_expired_work);
#ifdef CONFIG_NVME_TARGET_TCP_TLS
- sq->tls_key = 0;
+ sq->tls_key = NULL;
#endif
kfree(sq->dhchap_c1);
sq->dhchap_c1 = NULL;
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 51c27b32248d..7fab7f3d79b7 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1648,16 +1648,17 @@ static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
{
struct nvmet_pci_epf_iod *iod;
int ret, n = 0;
+ u16 head = sq->head;
sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
- while (sq->head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
+ while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
iod = nvmet_pci_epf_alloc_iod(sq);
if (!iod)
break;
/* Get the NVMe command submitted by the host. */
ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
- sq->pci_addr + sq->head * sq->qes,
+ sq->pci_addr + head * sq->qes,
sq->qes, DMA_FROM_DEVICE);
if (ret) {
/* Not much we can do... */
@@ -1666,12 +1667,13 @@ static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
}
dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
- sq->qid, sq->head, sq->tail,
+ sq->qid, head, sq->tail,
nvmet_pci_epf_iod_name(iod));
- sq->head++;
- if (sq->head == sq->depth)
- sq->head = 0;
+ head++;
+ if (head == sq->depth)
+ head = 0;
+ WRITE_ONCE(sq->head, head);
n++;
queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
@@ -1761,8 +1763,17 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
if (!iod)
break;
- /* Post the IOD completion entry. */
+ /*
+ * Post the IOD completion entry. If the IOD request was
+ * executed (req->execute() called), the CQE is already
+ * initialized. However, the IOD may have failed before
+ * that, leaving the CQE not properly initialized. So always
+ * initialize it here.
+ */
cqe = &iod->cqe;
+ cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head));
+ cqe->sq_id = cpu_to_le16(iod->sq->qid);
+ cqe->command_id = iod->cmd.common.command_id;
cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
dev_dbg(ctrl->dev,
@@ -1800,6 +1811,21 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
}
+static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl)
+{
+ struct nvmet_ctrl *tctrl = ctrl->tctrl;
+
+ /* Initialize controller status. */
+ tctrl->csts = 0;
+ ctrl->csts = 0;
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+
+ /* Initialize controller configuration and start polling. */
+ tctrl->cc = 0;
+ ctrl->cc = 0;
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
+}
+
static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
u64 pci_addr, asq, acq;
@@ -1865,18 +1891,20 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
return 0;
err:
- ctrl->csts = 0;
+ nvmet_pci_epf_clear_ctrl_config(ctrl);
return -EINVAL;
}
-static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl,
+ bool shutdown)
{
int qid;
if (!ctrl->enabled)
return;
- dev_info(ctrl->dev, "Disabling controller\n");
+ dev_info(ctrl->dev, "%s controller\n",
+ shutdown ? "Shutting down" : "Disabling");
ctrl->enabled = false;
cancel_delayed_work_sync(&ctrl->poll_sqs);
@@ -1893,6 +1921,11 @@ static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
ctrl->csts &= ~NVME_CSTS_RDY;
+ if (shutdown) {
+ ctrl->csts |= NVME_CSTS_SHST_CMPLT;
+ ctrl->cc &= ~NVME_CC_ENABLE;
+ nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
+ }
}
static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
@@ -1919,12 +1952,10 @@ static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
}
if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc))
- nvmet_pci_epf_disable_ctrl(ctrl);
+ nvmet_pci_epf_disable_ctrl(ctrl, false);
- if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) {
- nvmet_pci_epf_disable_ctrl(ctrl);
- ctrl->csts |= NVME_CSTS_SHST_CMPLT;
- }
+ if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc))
+ nvmet_pci_epf_disable_ctrl(ctrl, true);
if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc))
ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
@@ -1963,16 +1994,10 @@ static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl)
/* Clear Controller Memory Buffer Supported (CMBS). */
ctrl->cap &= ~(0x1ULL << 57);
- /* Controller configuration. */
- ctrl->cc = tctrl->cc & (~NVME_CC_ENABLE);
-
- /* Controller status. */
- ctrl->csts = ctrl->tctrl->csts;
-
nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap);
nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver);
- nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
- nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
+
+ nvmet_pci_epf_clear_ctrl_config(ctrl);
}
static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
@@ -2070,14 +2095,22 @@ out_mempool_exit:
static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
+ dev_info(ctrl->dev, "PCI link up\n");
+ ctrl->link_up = true;
+
schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
}
static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
{
+ dev_info(ctrl->dev, "PCI link down\n");
+ ctrl->link_up = false;
+
cancel_delayed_work_sync(&ctrl->poll_cc);
- nvmet_pci_epf_disable_ctrl(ctrl);
+ nvmet_pci_epf_disable_ctrl(ctrl, false);
+ nvmet_pci_epf_clear_ctrl_config(ctrl);
}
static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
@@ -2300,10 +2333,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
if (ret)
goto out_clear_bar;
- if (!epc_features->linkup_notifier) {
- ctrl->link_up = true;
+ if (!epc_features->linkup_notifier)
nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
- }
return 0;
@@ -2319,7 +2350,6 @@ static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
- ctrl->link_up = false;
nvmet_pci_epf_destroy_ctrl(ctrl);
nvmet_pci_epf_deinit_dma(nvme_epf);
@@ -2331,7 +2361,6 @@ static int nvmet_pci_epf_link_up(struct pci_epf *epf)
struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
- ctrl->link_up = true;
nvmet_pci_epf_start_ctrl(ctrl);
return 0;
@@ -2342,7 +2371,6 @@ static int nvmet_pci_epf_link_down(struct pci_epf *epf)
struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
- ctrl->link_up = false;
nvmet_pci_epf_stop_ctrl(ctrl);
return 0;
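
The pci-epf submission-queue rework above walks the queue with a local head copy and publishes it once per consumed entry with WRITE_ONCE(), so the completion path can sample a torn-free value via READ_ONCE() when filling cqe->sq_head. A rough userspace analogue of that single-producer publish pattern using C11 atomics; the queue layout is invented for the sketch, not the nvmet structure:

#include <stdatomic.h>
#include <stdio.h>

#define QDEPTH 8

struct sq {
        _Atomic unsigned short head;    /* published to the completion side */
        unsigned short tail;
        int entries[QDEPTH];
};

/* Single producer: advance a local copy, publish after each entry so
 * readers never observe a half-updated index (the WRITE_ONCE analogue). */
static int process_sq(struct sq *sq)
{
        unsigned short head = atomic_load_explicit(&sq->head, memory_order_relaxed);
        int n = 0;

        while (head != sq->tail) {
                printf("consuming entry %d at slot %u\n", sq->entries[head], head);
                if (++head == QDEPTH)
                        head = 0;
                atomic_store_explicit(&sq->head, head, memory_order_relaxed);
                n++;
        }
        return n;
}

int main(void)
{
        struct sq sq = { .tail = 3, .entries = { 10, 11, 12 } };

        printf("processed %d entries\n", process_sq(&sq));
        return 0;
}
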
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 6569ba3577fe..8b8848788618 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -615,6 +615,9 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
void __iomem *addr = pci_msix_desc_addr(desc);
desc->pci.msi_attrib.can_mask = 1;
+ /* Workaround for SUN NIU insanity, which requires write before read */
+ if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST)
+ writel(0, addr + PCI_MSIX_ENTRY_DATA);
desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4d7c9f64ea24..e77d5b53c0ce 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5429,8 +5429,6 @@ static bool pci_bus_resettable(struct pci_bus *bus)
return false;
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (!pci_reset_supported(dev))
- return false;
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
(dev->subordinate && !pci_bus_resettable(dev->subordinate)))
return false;
@@ -5507,8 +5505,6 @@ static bool pci_slot_resettable(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- if (!pci_reset_supported(dev))
- return false;
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
(dev->subordinate && !pci_bus_resettable(dev->subordinate)))
return false;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index edb058c19c9c..439dd0bf8644 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -33,7 +33,7 @@ struct power_supply_attr {
[POWER_SUPPLY_PROP_ ## _name] = \
{ \
.prop_name = #_name, \
- .attr_name = #_name "\0", \
+ .attr_name = #_name, \
.text_values = _text, \
.text_values_len = _len, \
}
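
The power-supply hunk drops a "\0" that string-literal concatenation had been appending to #_name: every string literal already carries its own terminator, so the extra byte was dead weight, not extra safety. A two-line demonstration with sizeof (the macro mirrors the stringizing trick above):

#include <stdio.h>

#define NAME_STR(_name) #_name

int main(void)
{
        printf("%zu\n", sizeof("VOLTAGE_NOW"));        /* 12: 11 chars + implicit NUL */
        printf("%zu\n", sizeof("VOLTAGE_NOW" "\0"));   /* 13: a second, useless NUL */
        printf("%s\n", NAME_STR(VOLTAGE_NOW));
        return 0;
}
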
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 3fcb1ad3b070..1d7901a8f0e4 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
char *desc = NULL;
u16 event;
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
+ return;
+
event = event_reply->event;
switch (event) {
@@ -451,6 +454,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
return 0;
}
+ atomic_set(&mrioc->admin_pend_isr, 0);
reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
admin_reply_ci;
@@ -565,7 +569,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
reply_qidx);
- atomic_dec(&op_reply_q->pend_ios);
+
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++;
@@ -2925,6 +2929,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
mrioc->admin_reply_ci = 0;
mrioc->admin_reply_ephase = 1;
atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ atomic_set(&mrioc->admin_pend_isr, 0);
if (!mrioc->admin_req_base) {
mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
@@ -4653,6 +4658,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
if (mrioc->admin_reply_base)
memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ atomic_set(&mrioc->admin_pend_isr, 0);
if (mrioc->init_cmds.reply) {
memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0d29470e86b0..1b43013d72c0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1253,8 +1253,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request);
*/
static void scsi_cleanup_rq(struct request *rq)
{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->flags = 0;
+
if (rq->rq_flags & RQF_DONTPREP) {
- scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+ scsi_mq_uninit_cmd(cmd);
rq->rq_flags &= ~RQF_DONTPREP;
}
}
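
The scsi_cleanup_rq() change clears cmd->flags unconditionally, so per-use state from an aborted or requeued command cannot leak into the next user of the recycled request; only the prep teardown stays conditional. The same reset-on-recycle pattern in plain C, with an invented struct standing in for the SCSI command:

#include <stdio.h>

struct cmd {
        unsigned int flags;     /* per-use state, analogous to cmd->flags */
        int prepped;            /* analogous to RQF_DONTPREP */
};

/* Object is being returned for reuse: always wipe per-use state,
 * tear down prep work only when it was actually done. */
static void cleanup_cmd(struct cmd *c)
{
        c->flags = 0;
        if (c->prepped) {
                /* undo prep resources here */
                c->prepped = 0;
        }
}

int main(void)
{
        struct cmd c = { .flags = 0x5, .prepped = 1 };

        cleanup_cmd(&c);
        printf("flags=%u prepped=%d\n", c.flags, c.prepped);
        return 0;
}
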
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 832d6e9009eb..c93d80a4d734 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1695,9 +1695,12 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
+ int ret;
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
- spi_imx_setupxfer(spi, transfer);
+ ret = spi_imx_setupxfer(spi, transfer);
+ if (ret < 0)
+ return ret;
transfer->effective_speed_hz = spi_imx->spi_bus_clk;
/* flush rxfifo before transfer */
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index fcbe864c9b7d..f89826d7dc49 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -462,6 +462,7 @@ static int sun4i_spi_probe(struct platform_device *pdev)
sspi->host = host;
host->max_speed_hz = 100 * 1000 * 1000;
host->min_speed_hz = 3 * 1000;
+ host->use_gpio_descriptors = true;
host->set_cs = sun4i_spi_set_cs;
host->transfer_one = sun4i_spi_transfer_one;
host->num_chipselect = 4;
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 08e49a876894..64e1b2f8a000 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -1117,9 +1117,9 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
(&tqspi->xfer_completion,
QSPI_DMA_TIMEOUT);
- if (WARN_ON(ret == 0)) {
- dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
- ret);
+ if (WARN_ON_ONCE(ret == 0)) {
+ dev_err_ratelimited(tqspi->dev,
+ "QSPI Transfer failed with timeout\n");
if (tqspi->is_curr_dma_xfer &&
(tqspi->cur_direction & DATA_DIR_TX))
dmaengine_terminate_all
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 1244ef3aa86c..620ba6e0ab07 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4263,8 +4263,8 @@ int iscsit_close_connection(
spin_unlock(&iscsit_global->ts_bitmap_lock);
iscsit_stop_timers_for_cmds(conn);
- iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
+ iscsit_stop_nopin_response_timer(conn);
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index a55aaa8cef42..2097aae39946 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -485,7 +485,7 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
{ PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT |
- PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR |
+ PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS |
PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
{ PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR |
@@ -495,8 +495,9 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
{ PCI_DEVICE_DATA(INTEL, PTL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
- PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_MSI_SUPPORT |
- PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
+ PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS |
+ PROC_THERMAL_FEATURE_MSI_SUPPORT | PROC_THERMAL_FEATURE_WT_HINT |
+ PROC_THERMAL_FEATURE_POWER_FLOOR) },
{ },
};
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index dad63f2d5f90..3a028b78d9af 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -166,15 +166,18 @@ static const struct mmio_reg adl_dvfs_mmio_regs[] = {
{ 0, 0x5A40, 1, 0x1, 0}, /* rfi_disable */
};
+static const struct mapping_table *dlvr_mapping;
+static const struct mmio_reg *dlvr_mmio_regs_table;
+
#define RFIM_SHOW(suffix, table)\
static ssize_t suffix##_show(struct device *dev,\
struct device_attribute *attr,\
char *buf)\
{\
- const struct mapping_table *mapping = NULL;\
+ const struct mmio_reg *mmio_regs = dlvr_mmio_regs_table;\
+ const struct mapping_table *mapping = dlvr_mapping;\
struct proc_thermal_device *proc_priv;\
struct pci_dev *pdev = to_pci_dev(dev);\
- const struct mmio_reg *mmio_regs;\
const char **match_strs;\
int ret, err;\
u32 reg_val;\
@@ -186,12 +189,6 @@ static ssize_t suffix##_show(struct device *dev,\
mmio_regs = adl_dvfs_mmio_regs;\
} else if (table == 2) { \
match_strs = (const char **)dlvr_strings;\
- if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\
- mmio_regs = lnl_dlvr_mmio_regs;\
- mapping = lnl_dlvr_mapping;\
- } else {\
- mmio_regs = dlvr_mmio_regs;\
- } \
} else {\
match_strs = (const char **)fivr_strings;\
mmio_regs = tgl_fivr_mmio_regs;\
@@ -214,12 +211,12 @@ static ssize_t suffix##_store(struct device *dev,\
struct device_attribute *attr,\
const char *buf, size_t count)\
{\
- const struct mapping_table *mapping = NULL;\
+ const struct mmio_reg *mmio_regs = dlvr_mmio_regs_table;\
+ const struct mapping_table *mapping = dlvr_mapping;\
struct proc_thermal_device *proc_priv;\
struct pci_dev *pdev = to_pci_dev(dev);\
unsigned int input;\
const char **match_strs;\
- const struct mmio_reg *mmio_regs;\
int ret, err;\
u32 reg_val;\
u32 mask;\
@@ -230,12 +227,6 @@ static ssize_t suffix##_store(struct device *dev,\
mmio_regs = adl_dvfs_mmio_regs;\
} else if (table == 2) { \
match_strs = (const char **)dlvr_strings;\
- if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\
- mmio_regs = lnl_dlvr_mmio_regs;\
- mapping = lnl_dlvr_mapping;\
- } else {\
- mmio_regs = dlvr_mmio_regs;\
- } \
} else {\
match_strs = (const char **)fivr_strings;\
mmio_regs = tgl_fivr_mmio_regs;\
@@ -448,6 +439,16 @@ int proc_thermal_rfim_add(struct pci_dev *pdev, struct proc_thermal_device *proc
}
if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DLVR) {
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_LNLM_THERMAL:
+ case PCI_DEVICE_ID_INTEL_PTL_THERMAL:
+ dlvr_mmio_regs_table = lnl_dlvr_mmio_regs;
+ dlvr_mapping = lnl_dlvr_mapping;
+ break;
+ default:
+ dlvr_mmio_regs_table = dlvr_mmio_regs;
+ break;
+ }
ret = sysfs_create_group(&pdev->dev.kobj, &dlvr_attribute_group);
if (ret)
return ret;
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..f1294c29f484 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -677,13 +677,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int err;
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
- dev_err(hba->dev,
- "%s: skip abort. cmd at tag %d already completed.\n",
- __func__, tag);
- return FAILED;
- }
-
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +685,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq) {
+ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ return FAILED;
+ }
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 44156041d88f..5cb6132b8147 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+ UFS_DEVICE_QUIRK_PA_HIBER8TIME |
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
@@ -5677,6 +5678,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
continue;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ continue;
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -8470,6 +8473,31 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by 100us.
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+ u32 pa_h8time;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+ return;
+ }
+
+ /* Increment by 1 to increase hibernation time by 100 µs */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+ if (ret)
+ dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
ufshcd_vops_apply_dev_quirks(hba);
@@ -8480,6 +8508,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+ ufshcd_quirk_override_pa_h8time(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 1b37449fbffc..c0761ccc1381 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -33,6 +33,10 @@
((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
#define MCQ_QCFG_SIZE 0x40
+/* De-emphasis for gear-5 */
+#define DEEMPHASIS_3_5_dB 0x04
+#define NO_DEEMPHASIS 0x0
+
enum {
TSTBUS_UAWM,
TSTBUS_UARM,
@@ -795,6 +799,23 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
}
+static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_lanes)
+{
+ u32 equalizer_val;
+ int ret, i;
+
+ /* Determine the equalizer value based on the gear */
+ equalizer_val = (gear == 5) ? DEEMPHASIS_3_5_dB : NO_DEEMPHASIS;
+
+ for (i = 0; i < tx_lanes; i++) {
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i),
+ equalizer_val);
+ if (ret)
+ dev_err(hba->dev, "%s: failed equalizer lane %d\n",
+ __func__, i);
+ }
+}
+
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
const struct ufs_pa_layer_attr *dev_max_params,
@@ -846,6 +867,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING)
+ ufs_qcom_set_tx_hs_equalizer(hba,
+ dev_req_params->gear_tx, dev_req_params->lane_tx);
+
break;
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, false)) {
@@ -893,6 +919,16 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
(pa_vs_config_reg1 | (1 << 12)));
}
+static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH),
+ PA_TX_HSG1_SYNC_LENGTH_VAL);
+ if (err)
+ dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
+}
+
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
int err = 0;
@@ -900,6 +936,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH)
+ ufs_qcom_override_pa_tx_hsg1_sync_len(hba);
+
return err;
}
@@ -914,6 +953,10 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_WDC,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
+ { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
+ .model = UFS_ANY_MODEL,
+ .quirk = UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH |
+ UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING },
{}
};
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index d0e6ec9128e7..05d4cb569c50 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -122,8 +122,11 @@ enum {
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* QUniPro Vendor specific attributes */
+#define PA_TX_HSG1_SYNC_LENGTH 0x1552
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
+#define TX_HS_EQUALIZER 0x0037
+
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define CLK_1US_CYCLES_MASK_V4 GENMASK(27, 16)
#define CLK_1US_CYCLES_MASK GENMASK(7, 0)
@@ -141,6 +144,21 @@ enum {
#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202
#define UNIPRO_CORE_CLK_FREQ_403_MHZ 403
+/* TX_HSG1_SYNC_LENGTH attr value */
+#define PA_TX_HSG1_SYNC_LENGTH_VAL 0x4A
+
+/*
+ * Some UFS device vendors need a different TSync length.
+ * Enable this quirk to give an additional TX_HS_SYNC_LENGTH.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH BIT(16)
+
+/*
+ * Some UFS device vendors need a different de-emphasis setting.
+ * Enable this quirk to tune the TX de-emphasis parameters.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING BIT(17)
+
/* ICE allocator type to share AES engines among TX stream and RX stream */
#define ICE_ALLOCATOR_TYPE 2
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 14437396d721..8f02f236b5b4 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1815,7 +1815,7 @@ int vfio_config_init(struct vfio_pci_core_device *vdev)
}
if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx ||
- vdev->pdev->irq == IRQ_NOTCONNECTED)
+ !vdev->pdev->irq || vdev->pdev->irq == IRQ_NOTCONNECTED)
vconfig[PCI_INTERRUPT_PIN] = 0;
ret = vfio_cap_init(vdev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index f6f5a7ac7894..26bcf3a7f70c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -627,6 +627,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
int ret;
llnode = llist_del_all(&svq->completion_list);
+
+ mutex_lock(&svq->vq.mutex);
+
llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
@@ -660,6 +663,8 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vhost_scsi_release_cmd_res(se_cmd);
}
+ mutex_unlock(&svq->vq.mutex);
+
if (signal)
vhost_signal(&svq->vs->dev, &svq->vq);
}
@@ -994,39 +999,66 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_nexus *nexus,
static void
vhost_scsi_send_status(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
- int head, unsigned int out, u8 status)
+ struct vhost_scsi_ctx *vc, u8 status)
{
- struct virtio_scsi_cmd_resp __user *resp;
struct virtio_scsi_cmd_resp rsp;
+ struct iov_iter iov_iter;
int ret;
memset(&rsp, 0, sizeof(rsp));
rsp.status = status;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ sizeof(rsp));
+
+ ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
+
+ if (likely(ret == sizeof(rsp)))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
+#define TYPE_IO_CMD 0
+#define TYPE_CTRL_TMF 1
+#define TYPE_CTRL_AN 2
+
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
struct vhost_virtqueue *vq,
- int head, unsigned out)
+ struct vhost_scsi_ctx *vc, int type)
{
- struct virtio_scsi_cmd_resp __user *resp;
- struct virtio_scsi_cmd_resp rsp;
+ union {
+ struct virtio_scsi_cmd_resp cmd;
+ struct virtio_scsi_ctrl_tmf_resp tmf;
+ struct virtio_scsi_ctrl_an_resp an;
+ } rsp;
+ struct iov_iter iov_iter;
+ size_t rsp_size;
int ret;
memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+
+ if (type == TYPE_IO_CMD) {
+ rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+ rsp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else if (type == TYPE_CTRL_TMF) {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+ rsp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ } else {
+ rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+ rsp.an.response = VIRTIO_SCSI_S_BAD_TARGET;
+ }
+
+ iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[vc->out], vc->in,
+ rsp_size);
+
+ ret = copy_to_iter(&rsp, rsp_size, &iov_iter);
+
+ if (likely(ret == rsp_size))
+ vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
else
- pr_err("Faulted on virtio_scsi_cmd_resp\n");
+ pr_err("Faulted on virtio scsi type=%d\n", type);
}
static int
@@ -1390,9 +1422,9 @@ err:
if (ret == -ENXIO)
break;
else if (ret == -EIO)
- vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
else if (ret == -ENOMEM)
- vhost_scsi_send_status(vs, vq, vc.head, vc.out,
+ vhost_scsi_send_status(vs, vq, &vc,
SAM_STAT_TASK_SET_FULL);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
@@ -1432,8 +1464,11 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
else
resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+ mutex_lock(&tmf->svq->vq.mutex);
vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
tmf->vq_desc, &tmf->resp_iov, resp_code);
+ mutex_unlock(&tmf->svq->vq.mutex);
+
vhost_scsi_release_tmf_res(tmf);
}
@@ -1623,7 +1658,10 @@ err:
if (ret == -ENXIO)
break;
else if (ret == -EIO)
- vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+ vhost_scsi_send_bad_target(vs, vq, &vc,
+ v_req.type == VIRTIO_SCSI_T_TMF ?
+ TYPE_CTRL_TMF :
+ TYPE_CTRL_AN);
} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
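
The vhost-scsi rework replaces a single __copy_to_user() into iov_base with an iov_iter, which walks every response iovec and bounds the copy to the declared size, so short or split guest buffers are handled instead of overrun. A simplified userspace analogue of the bounded scatter copy, roughly what iov_iter_init() plus copy_to_iter() do in-kernel; the function name and layout are invented for the sketch:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static size_t copy_to_iovec(const struct iovec *iov, int iovcnt,
                            const void *src, size_t len)
{
        const char *p = src;
        size_t done = 0;

        for (int i = 0; i < iovcnt && done < len; i++) {
                size_t chunk = iov[i].iov_len;

                if (chunk > len - done)
                        chunk = len - done;
                memcpy(iov[i].iov_base, p + done, chunk);
                done += chunk;
        }
        return done;    /* caller verifies done == len, like copy_to_iter() */
}

int main(void)
{
        char a[4], b[16];
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        const char rsp[] = "response-payload";

        size_t n = copy_to_iovec(iov, 2, rsp, sizeof(rsp));
        printf("copied %zu of %zu bytes\n", n, sizeof(rsp));
        return 0;
}
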
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 150753c3b578..95d5d7993e5b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -407,6 +407,12 @@ static void virtio_dev_shutdown(struct device *_d)
if (!drv)
return;
+ /* If the driver has its own shutdown method, use that. */
+ if (drv->shutdown) {
+ drv->shutdown(dev);
+ return;
+ }
+
/*
* Some devices get wedged if you kick them after they are
* reset. Mark all vqs as broken to make sure we don't.
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 5eaade757860..d50fe030d825 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -247,7 +247,7 @@ virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
sg_init_one(&data_sg, get_data, sizeof(*get_data));
sg_init_one(&result_sg, result, sizeof(*result));
cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
- cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
cmd.data_sg = &data_sg;
cmd.result_sg = &result_sg;
ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
@@ -305,7 +305,7 @@ static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
sg_init_one(&result_sg, data, sizeof(*data));
cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
- cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+ cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
cmd.result_sg = &result_sg;
ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fdd2d2b07b5a..b784aab66867 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2650,7 +2650,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->event_triggered)
- vq->event_triggered = false;
+ data_race(vq->event_triggered = false);
return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
virtqueue_enable_cb_delayed_split(_vq);
diff --git a/fs/Kconfig b/fs/Kconfig
index c718b2e2de0e..5b4847bd2fbb 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -368,6 +368,7 @@ config GRACE_PERIOD
config LOCKD
tristate
depends on FILE_LOCKING
+ select CRC32
select GRACE_PERIOD
config LOCKD_V4
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 5cb0fc384ac0..75f7408da173 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -788,6 +788,8 @@ struct bch_fs {
unsigned long errors_silent[BITS_TO_LONGS(BCH_FSCK_ERR_MAX)];
u64 btrees_lost_data;
} sb;
+ DARRAY(enum bcachefs_metadata_version)
+ incompat_versions_requested;
#ifdef CONFIG_UNICODE
struct unicode_map *cf_encoding;
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 55fbeeb8eaaa..44b5fe430370 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1221,7 +1221,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
ret = bch2_disk_reservation_get(c, &as->disk_res,
(nr_nodes[0] + nr_nodes[1]) * btree_sectors(c),
- c->opts.metadata_replicas,
+ READ_ONCE(c->opts.metadata_replicas),
disk_res_flags);
if (ret)
goto err;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index fea61e60a9ee..4ef261e8db4f 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -37,7 +37,8 @@ void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
{
memset(usage, 0, sizeof(*usage));
- acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
+ acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage,
+ sizeof(struct bch_dev_usage_full) / sizeof(u64));
}
static u64 reserve_factor(u64 r)
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 1c38b165f48b..8d75b27a1418 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -242,11 +242,6 @@ static inline u64 dev_buckets_available(struct bch_dev *ca,
/* Filesystem usage: */
-static inline unsigned dev_usage_u64s(void)
-{
- return sizeof(struct bch_dev_usage) / sizeof(u64);
-}
-
struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index c8696f01eb14..a615e4852ded 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -287,7 +287,7 @@
x(EIO, mark_stripe) \
x(EIO, stripe_reconstruct) \
x(EIO, key_type_error) \
- x(EIO, extent_poisened) \
+ x(EIO, extent_poisoned) \
x(EIO, missing_indirect_extent) \
x(EIO, invalidate_stripe_to_dev) \
x(EIO, no_encryption_key) \
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index ae7c7a177e10..dca2b8425cc0 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -139,7 +139,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
if (bch2_bkey_extent_ptrs_flags(ptrs) & BIT_ULL(BCH_EXTENT_FLAG_poisoned))
- return -BCH_ERR_extent_poisened;
+ return -BCH_ERR_extent_poisoned;
rcu_read_lock();
const union bch_extent_entry *entry;
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index c1553e44e049..14886e1d4d6d 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -69,7 +69,7 @@ static int bch2_inode_flags_set(struct btree_trans *trans,
if (ret < 0)
return ret;
- ret = bch2_request_incompat_feature(c,bcachefs_metadata_version_casefolding);
+ ret = bch2_request_incompat_feature(c, bcachefs_metadata_version_casefolding);
if (ret)
return ret;
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 18308f3d64a1..7b25cedd3e40 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -321,6 +321,31 @@ static inline bool inode_should_reattach(struct bch_inode_unpacked *inode)
inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)
return false;
+ /*
+ * Subvolume roots are special: older versions of subvolume roots may be
+ * disconnected, it's only the newest version that matters.
+ *
+ * We only keep a single dirent pointing to a subvolume root, i.e.
+ * older versions of snapshots will not have a different dirent pointing
+ * to the same subvolume root.
+ *
+ * This is because dirents that point to subvolumes are only visible in
+ * the parent subvolume - versioning is not needed - and keeping them
+ * around would break fsck, because when we're crossing subvolumes we
+ * don't have a consistent snapshot ID to check the inode <-> dirent
+ * relationships.
+ *
+ * Thus, a subvolume root that's been renamed after a snapshot will have
+ * a disconnected older version - that's expected.
+ *
+ * Note that taking a snapshot always updates the root inode (to update
+ * the dirent backpointer), so a subvolume root inode with
+ * BCH_INODE_has_child_snapshot is never visible.
+ */
+ if (inode->bi_subvol &&
+ (inode->bi_flags & BCH_INODE_has_child_snapshot))
+ return false;
+
return !inode->bi_dir && !(inode->bi_flags & BCH_INODE_unlinked);
}
@@ -1007,6 +1032,23 @@ static int check_inode_dirent_inode(struct btree_trans *trans,
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
+ if ((ret || dirent_points_to_inode_nowarn(d, inode)) &&
+ inode->bi_subvol &&
+ (inode->bi_flags & BCH_INODE_has_child_snapshot)) {
+ /* Older version of a renamed subvolume root: we won't have a
+ * correct dirent for it. That's expected, see
+ * inode_should_reattach().
+ *
+ * We don't clear the backpointer field when doing the rename
+ * because there might be arbitrarily many versions in older
+ * snapshots.
+ */
+ inode->bi_dir = 0;
+ inode->bi_dir_offset = 0;
+ *write_inode = true;
+ goto out;
+ }
+
if (fsck_err_on(ret,
trans, inode_points_to_missing_dirent,
"inode points to missing dirent\n%s",
@@ -1027,7 +1069,7 @@ static int check_inode_dirent_inode(struct btree_trans *trans,
inode->bi_dir_offset = 0;
*write_inode = true;
}
-
+out:
ret = 0;
fsck_err:
bch2_trans_iter_exit(trans, &dirent_iter);
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index fd627c8d1053..def4a26a3b45 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -487,6 +487,8 @@ static void bch2_rbio_retry(struct work_struct *work)
.inum = rbio->read_pos.inode,
};
struct bch_io_failures failed = { .nr = 0 };
+ int orig_error = rbio->ret;
+
struct btree_trans *trans = bch2_trans_get(c);
trace_io_read_retry(&rbio->bio);
@@ -519,7 +521,9 @@ static void bch2_rbio_retry(struct work_struct *work)
if (ret) {
rbio->ret = ret;
rbio->bio.bi_status = BLK_STS_IOERR;
- } else {
+ } else if (orig_error != -BCH_ERR_data_read_retry_csum_err_maybe_userspace &&
+ orig_error != -BCH_ERR_data_read_ptr_stale_race &&
+ !failed.nr) {
struct printbuf buf = PRINTBUF;
lockrestart_do(trans,
@@ -1345,14 +1349,16 @@ err:
bch2_trans_iter_exit(trans, &iter);
- if (ret) {
- struct printbuf buf = PRINTBUF;
- lockrestart_do(trans,
- bch2_inum_offset_err_msg_trans(trans, &buf, inum,
- bvec_iter.bi_sector << 9));
- prt_printf(&buf, "read error: %s", bch2_err_str(ret));
- bch_err_ratelimited(c, "%s", buf.buf);
- printbuf_exit(&buf);
+ if (unlikely(ret)) {
+ if (ret != -BCH_ERR_extent_poisoned) {
+ struct printbuf buf = PRINTBUF;
+ lockrestart_do(trans,
+ bch2_inum_offset_err_msg_trans(trans, &buf, inum,
+ bvec_iter.bi_sector << 9));
+ prt_printf(&buf, "data read error: %s", bch2_err_str(ret));
+ bch_err_ratelimited(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
rbio->bio.bi_status = BLK_STS_IOERR;
rbio->ret = ret;
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index d2b07f602da9..606d684e6f23 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -1125,7 +1125,10 @@ int bch2_fs_initialize(struct bch_fs *c)
* journal_res_get() will crash if called before this has
* set up the journal.pin FIFO and journal.cur pointer:
*/
- bch2_fs_journal_start(&c->journal, 1);
+ ret = bch2_fs_journal_start(&c->journal, 1);
+ if (ret)
+ goto err;
+
set_bit(BCH_FS_accounting_replay_done, &c->flags);
bch2_journal_set_replay_done(&c->journal);
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
index 5d43e3504386..dc53d25c7cbb 100644
--- a/fs/bcachefs/sb-errors_format.h
+++ b/fs/bcachefs/sb-errors_format.h
@@ -290,8 +290,8 @@ enum bch_fsck_flags {
x(btree_node_bkey_bad_u64s, 260, 0) \
x(btree_node_topology_empty_interior_node, 261, 0) \
x(btree_ptr_v2_min_key_bad, 262, 0) \
- x(btree_root_unreadable_and_scan_found_nothing, 263, 0) \
- x(snapshot_node_missing, 264, 0) \
+ x(btree_root_unreadable_and_scan_found_nothing, 263, FSCK_AUTOFIX) \
+ x(snapshot_node_missing, 264, FSCK_AUTOFIX) \
x(dup_backpointer_to_bad_csum_extent, 265, 0) \
x(btree_bitmap_not_marked, 266, FSCK_AUTOFIX) \
x(sb_clean_entry_overrun, 267, 0) \
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index e27422b6d9c6..25b6bce05c3c 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -73,14 +73,30 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v
? 0
: -BCH_ERR_may_not_use_incompat_feature;
+ mutex_lock(&c->sb_lock);
if (!ret) {
- mutex_lock(&c->sb_lock);
SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb,
max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version));
bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
+ } else {
+ darray_for_each(c->incompat_versions_requested, i)
+ if (version == *i)
+ goto out;
+
+ darray_push(&c->incompat_versions_requested, version);
+ struct printbuf buf = PRINTBUF;
+ prt_str(&buf, "requested incompat feature ");
+ bch2_version_to_text(&buf, version);
+ prt_str(&buf, " currently not enabled");
+ prt_printf(&buf, "\n set version_upgrade=incompat to enable");
+
+ bch_notice(c, "%s", buf.buf);
+ printbuf_exit(&buf);
}
+out:
+ mutex_unlock(&c->sb_lock);
+
return ret;
}
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index b79e80a435e0..e8a17ed1615d 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -589,6 +589,7 @@ static void __bch2_fs_free(struct bch_fs *c)
free_percpu(c->online_reserved);
}
+ darray_exit(&c->incompat_versions_requested);
darray_exit(&c->btree_roots_extra);
free_percpu(c->pcpu);
free_percpu(c->usage);
@@ -1013,6 +1014,11 @@ static void print_mount_opts(struct bch_fs *c)
bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
}
+ if (c->sb.version_incompat_allowed != c->sb.version) {
+ prt_printf(&p, "\n allowing incompatible features above ");
+ bch2_version_to_text(&p, c->sb.version_incompat_allowed);
+ }
+
bch_info(c, "%s", p.buf);
printbuf_exit(&p);
}
@@ -1757,7 +1763,8 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
up_write(&c->state_lock);
return 0;
err:
- if (ca->mi.state == BCH_MEMBER_STATE_rw &&
+ if (test_bit(BCH_FS_rw, &c->flags) &&
+ ca->mi.state == BCH_MEMBER_STATE_rw &&
!percpu_ref_is_zero(&ca->io_ref[READ]))
__bch2_dev_read_write(c, ca);
up_write(&c->state_lock);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 262a707d8990..71b8a825c447 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2104,15 +2104,20 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
* will always return true.
* So here we need to do extra page alignment for
* filemap_range_has_page().
+ *
+ * And do not decrease page_lockend right now, as it can be 0.
*/
const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
- const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
+ const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE);
while (1) {
truncate_pagecache_range(inode, lockstart, lockend);
lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
cached_state);
+ /* The same page or adjacent pages. */
+ if (page_lockend <= page_lockstart)
+ break;
/*
* We can't have ordered extents in the range, nor dirty/writeback
* pages, because we have locked the inode's VFS lock in exclusive
@@ -2124,7 +2129,7 @@ static void btrfs_punch_hole_lock_range(struct inode *inode,
* we do, unlock the range and retry.
*/
if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
- page_lockend))
+ page_lockend - 1))
break;
unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
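
The punch-hole hunk keeps page_lockend exclusive instead of computing round_down(lockend + 1, PAGE_SIZE) - 1, which wraps to a huge u64 whenever the round_down() result is 0, and it compares the bounds before using them. A standalone sketch of the guard; ROUND_UP/ROUND_DOWN here are local stand-ins for the kernel's round_up()/round_down() helpers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE        4096u
#define ROUND_UP(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define ROUND_DOWN(x, a) (((x) / (a)) * (a))

static void check_range(uint64_t lockstart, uint64_t lockend)
{
        uint64_t page_lockstart = ROUND_UP(lockstart, PAGE_SIZE);
        /* Keep the end exclusive: subtracting 1 here would wrap to
         * UINT64_MAX when the rounded value is 0. */
        uint64_t page_lockend = ROUND_DOWN(lockend + 1, PAGE_SIZE);

        if (page_lockend <= page_lockstart) {
                printf("[%llu, %llu]: same or adjacent pages, skip the page check\n",
                       (unsigned long long)lockstart, (unsigned long long)lockend);
                return;
        }
        printf("check pages [%llu, %llu)\n",
               (unsigned long long)page_lockstart, (unsigned long long)page_lockend);
}

int main(void)
{
        check_range(100, 200);  /* inside the first page: old code would wrap */
        check_range(0, 8191);   /* spans two pages: page check runs */
        return 0;
}
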
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index f948f4f6431c..e17bcb034595 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3803,7 +3803,7 @@ out:
if (ret) {
if (inode)
iput(&inode->vfs_inode);
- inode = ERR_PTR(ret);
+ return ERR_PTR(ret);
}
return &inode->vfs_inode;
}
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 11dbd7be6a3b..c0a0b8b063d0 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -204,7 +204,7 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
btrfs_blocks_per_folio(fs_info, folio); \
\
btrfs_subpage_assert(fs_info, folio, start, len); \
- __start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \
+ __start_bit = offset_in_folio(folio, start) >> fs_info->sectorsize_bits; \
__start_bit += blocks_per_folio * btrfs_bitmap_nr_##name; \
__start_bit; \
})
@@ -666,7 +666,7 @@ IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked,
btrfs_blocks_per_folio(fs_info, folio); \
const struct btrfs_subpage *subpage = folio_get_private(folio); \
\
- ASSERT(blocks_per_folio < BITS_PER_LONG); \
+ ASSERT(blocks_per_folio <= BITS_PER_LONG); \
*dst = bitmap_read(subpage->bitmaps, \
blocks_per_folio * btrfs_bitmap_nr_##name, \
blocks_per_folio); \
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 43979891f7c8..2b66a6130269 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -2235,7 +2235,7 @@ int btrfs_verify_level_key(struct extent_buffer *eb,
btrfs_err(fs_info,
"tree level mismatch detected, bytenr=%llu level expected=%u has=%u",
eb->start, check->level, found_level);
- return -EIO;
+ return -EUCLEAN;
}
if (!check->has_first_key)
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index fb8b8b29c169..4a3e02b49f29 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1277,7 +1277,7 @@ struct zone_info {
static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
struct zone_info *info, unsigned long *active,
- struct btrfs_chunk_map *map)
+ struct btrfs_chunk_map *map, bool new)
{
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
struct btrfs_device *device;
@@ -1307,6 +1307,8 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
return 0;
}
+ ASSERT(!new || btrfs_dev_is_empty_zone(device, info->physical));
+
/* This zone will be used for allocation, so mark this zone non-empty. */
btrfs_dev_clear_zone_empty(device, info->physical);
@@ -1319,6 +1321,18 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
* to determine the allocation offset within the zone.
*/
WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
+
+ if (new) {
+ sector_t capacity;
+
+ capacity = bdev_zone_capacity(device->bdev, info->physical >> SECTOR_SHIFT);
+ up_read(&dev_replace->rwsem);
+ info->alloc_offset = 0;
+ info->capacity = capacity << SECTOR_SHIFT;
+
+ return 0;
+ }
+
nofs_flag = memalloc_nofs_save();
ret = btrfs_get_dev_zone(device, info->physical, &zone);
memalloc_nofs_restore(nofs_flag);
@@ -1588,7 +1602,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
for (i = 0; i < map->num_stripes; i++) {
- ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
+ ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map, new);
if (ret)
goto out;
@@ -1659,7 +1673,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
* stripe.
*/
cache->alloc_offset = cache->zone_capacity;
- ret = 0;
}
out:
diff --git a/fs/cachefiles/key.c b/fs/cachefiles/key.c
index bf935e25bdbe..b48525680e73 100644
--- a/fs/cachefiles/key.c
+++ b/fs/cachefiles/key.c
@@ -8,7 +8,7 @@
#include <linux/slab.h>
#include "internal.h"
-static const char cachefiles_charmap[64] =
+static const char cachefiles_charmap[64] __nonstring =
"0123456789" /* 0 - 9 */
"abcdefghijklmnopqrstuvwxyz" /* 10 - 35 */
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" /* 36 - 61 */
diff --git a/fs/dax.c b/fs/dax.c
index af5045b0f476..676303419e9e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -396,6 +396,7 @@ static inline unsigned long dax_folio_put(struct folio *folio)
order = folio_order(folio);
if (!order)
return 0;
+ folio_reset_order(folio);
for (i = 0; i < (1UL << order); i++) {
struct dev_pagemap *pgmap = page_pgmap(&folio->page);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 100376863a44..4bc264b854c4 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1996,6 +1996,14 @@ static int ep_try_send_events(struct eventpoll *ep,
return res;
}
+static int ep_schedule_timeout(ktime_t *to)
+{
+ if (to)
+ return ktime_after(*to, ktime_get());
+ else
+ return 1;
+}
+
/**
* ep_poll - Retrieves ready events, and delivers them to the caller-supplied
* event buffer.
@@ -2103,7 +2111,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
write_unlock_irq(&ep->lock);
- if (!eavail)
+ if (!eavail && ep_schedule_timeout(to))
timed_out = !schedule_hrtimeout_range(to, slack,
HRTIMER_MODE_ABS);
__set_current_state(TASK_RUNNING);
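
ep_schedule_timeout() above makes ep_poll() skip the sleep entirely when the absolute deadline has already passed, instead of scheduling with an elapsed timer; a NULL timeout still means wait forever. A userspace sketch of the same deadline test, with a hypothetical helper name and CLOCK_MONOTONIC standing in for ktime_get():

#include <stdio.h>
#include <time.h>

/* Return 1 if sleeping until *deadline still makes sense, 0 if the
 * deadline has passed; NULL means "no timeout", mirroring epoll. */
static int deadline_in_future(const struct timespec *deadline)
{
        struct timespec now;

        if (!deadline)
                return 1;
        clock_gettime(CLOCK_MONOTONIC, &now);
        if (now.tv_sec != deadline->tv_sec)
                return now.tv_sec < deadline->tv_sec;
        return now.tv_nsec < deadline->tv_nsec;
}

int main(void)
{
        struct timespec past;

        clock_gettime(CLOCK_MONOTONIC, &past);  /* deadline already reached */
        printf("elapsed: %d, infinite: %d\n",
               deadline_in_future(&past), deadline_in_future(NULL));
        return 0;
}
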
diff --git a/fs/namei.c b/fs/namei.c
index 8510ff53f12e..84a0e0b0111c 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1665,27 +1665,20 @@ static struct dentry *lookup_dcache(const struct qstr *name,
return dentry;
}
-/*
- * Parent directory has inode locked exclusive. This is one
- * and only case when ->lookup() gets called on non in-lookup
- * dentries - as the matter of fact, this only gets called
- * when directory is guaranteed to have no in-lookup children
- * at all.
- * Will return -ENOENT if name isn't found and LOOKUP_CREATE wasn't passed.
- * Will return -EEXIST if name is found and LOOKUP_EXCL was passed.
- */
-struct dentry *lookup_one_qstr_excl(const struct qstr *name,
- struct dentry *base,
- unsigned int flags)
+static struct dentry *lookup_one_qstr_excl_raw(const struct qstr *name,
+ struct dentry *base,
+ unsigned int flags)
{
- struct dentry *dentry = lookup_dcache(name, base, flags);
+ struct dentry *dentry;
struct dentry *old;
- struct inode *dir = base->d_inode;
+ struct inode *dir;
+ dentry = lookup_dcache(name, base, flags);
if (dentry)
- goto found;
+ return dentry;
/* Don't create child dentry for a dead directory. */
+ dir = base->d_inode;
if (unlikely(IS_DEADDIR(dir)))
return ERR_PTR(-ENOENT);
@@ -1698,7 +1691,24 @@ struct dentry *lookup_one_qstr_excl(const struct qstr *name,
dput(dentry);
dentry = old;
}
-found:
+ return dentry;
+}
+
+/*
+ * Parent directory has its inode locked exclusive. This is the one
+ * and only case when ->lookup() gets called on non-in-lookup
+ * dentries - as a matter of fact, this only gets called
+ * when the directory is guaranteed to have no in-lookup children
+ * at all.
+ * Will return -ENOENT if name isn't found and LOOKUP_CREATE wasn't passed.
+ * Will return -EEXIST if name is found and LOOKUP_EXCL was passed.
+ */
+struct dentry *lookup_one_qstr_excl(const struct qstr *name,
+ struct dentry *base, unsigned int flags)
+{
+ struct dentry *dentry;
+
+ dentry = lookup_one_qstr_excl_raw(name, base, flags);
if (IS_ERR(dentry))
return dentry;
if (d_is_negative(dentry) && !(flags & LOOKUP_CREATE)) {
@@ -2742,23 +2752,48 @@ static int filename_parentat(int dfd, struct filename *name,
/* does lookup, returns the object with parent locked */
static struct dentry *__kern_path_locked(int dfd, struct filename *name, struct path *path)
{
+ struct path parent_path __free(path_put) = {};
struct dentry *d;
struct qstr last;
int type, error;
- error = filename_parentat(dfd, name, 0, path, &last, &type);
+ error = filename_parentat(dfd, name, 0, &parent_path, &last, &type);
if (error)
return ERR_PTR(error);
- if (unlikely(type != LAST_NORM)) {
- path_put(path);
+ if (unlikely(type != LAST_NORM))
return ERR_PTR(-EINVAL);
+ inode_lock_nested(parent_path.dentry->d_inode, I_MUTEX_PARENT);
+ d = lookup_one_qstr_excl(&last, parent_path.dentry, 0);
+ if (IS_ERR(d)) {
+ inode_unlock(parent_path.dentry->d_inode);
+ return d;
}
- inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
- d = lookup_one_qstr_excl(&last, path->dentry, 0);
+ path->dentry = no_free_ptr(parent_path.dentry);
+ path->mnt = no_free_ptr(parent_path.mnt);
+ return d;
+}
+
+struct dentry *kern_path_locked_negative(const char *name, struct path *path)
+{
+ struct path parent_path __free(path_put) = {};
+ struct filename *filename __free(putname) = getname_kernel(name);
+ struct dentry *d;
+ struct qstr last;
+ int type, error;
+
+ error = filename_parentat(AT_FDCWD, filename, 0, &parent_path, &last, &type);
+ if (error)
+ return ERR_PTR(error);
+ if (unlikely(type != LAST_NORM))
+ return ERR_PTR(-EINVAL);
+ inode_lock_nested(parent_path.dentry->d_inode, I_MUTEX_PARENT);
+ d = lookup_one_qstr_excl_raw(&last, parent_path.dentry, 0);
if (IS_ERR(d)) {
- inode_unlock(path->dentry->d_inode);
- path_put(path);
+ inode_unlock(parent_path.dentry->d_inode);
+ return d;
}
+ path->dentry = no_free_ptr(parent_path.dentry);
+ path->mnt = no_free_ptr(parent_path.mnt);
return d;
}
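
Both rewritten helpers lean on the scope-based cleanup support from
<linux/cleanup.h>: __free(path_put) drops parent_path on every early return,
and no_free_ptr() disarms the cleanup once ownership moves to the caller. A
minimal sketch of the idiom, with hypothetical helpers work_on() and consume():

    #include <linux/cleanup.h>
    #include <linux/slab.h>

    static bool work_on(char *buf);     /* hypothetical */
    static void consume(char *buf);     /* hypothetical */

    static int demo(void)
    {
            char *buf __free(kfree) = kmalloc(64, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            if (!work_on(buf))
                    return -EIO;            /* buf is freed automatically */

            consume(no_free_ptr(buf));      /* cleanup disarmed, ownership moved */
            return 0;
    }
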
diff --git a/fs/netfs/fscache_cache.c b/fs/netfs/fscache_cache.c
index 9397ed39b0b4..8f70f8da064b 100644
--- a/fs/netfs/fscache_cache.c
+++ b/fs/netfs/fscache_cache.c
@@ -372,7 +372,7 @@ void fscache_withdraw_cache(struct fscache_cache *cache)
EXPORT_SYMBOL(fscache_withdraw_cache);
#ifdef CONFIG_PROC_FS
-static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";
+static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] __nonstring = "-PAEW";
/*
* Generate a list of caches in /proc/fs/fscache/caches
diff --git a/fs/netfs/fscache_cookie.c b/fs/netfs/fscache_cookie.c
index d4d4b3a8b106..3d56fc73435f 100644
--- a/fs/netfs/fscache_cookie.c
+++ b/fs/netfs/fscache_cookie.c
@@ -29,7 +29,7 @@ static LIST_HEAD(fscache_cookie_lru);
static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
-static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
+static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] __nonstring = "-LCAIFUWRD";
static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index d3f76101ad4b..07932ce9246c 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -2,6 +2,7 @@
config NFS_FS
tristate "NFS client support"
depends on INET && FILE_LOCKING && MULTIUSER
+ select CRC32
select LOCKD
select SUNRPC
select NFS_COMMON
@@ -196,7 +197,6 @@ config NFS_USE_KERNEL_DNS
config NFS_DEBUG
bool
depends on NFS_FS && SUNRPC_DEBUG
- select CRC32
default y
config NFS_DISABLE_UDP_SUPPORT
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index ec8d32d0e2e9..6655e5f32ec6 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -899,18 +899,11 @@ u64 nfs_timespec_to_change_attr(const struct timespec64 *ts)
return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
}
-#ifdef CONFIG_CRC32
static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid)
{
return ~crc32_le(0xFFFFFFFF, &stateid->other[0],
NFS4_STATEID_OTHER_SIZE);
}
-#else
-static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
-{
- return 0;
-}
-#endif
static inline bool nfs_current_task_exiting(void)
{
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 351616c61df5..f9c291e2165c 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -148,16 +148,12 @@ static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN);
}
-#ifdef CONFIG_CRC32
/*
* nfs_session_id_hash - calculate the crc32 hash for the session id
* @session - pointer to session
*/
#define nfs_session_id_hash(sess_id) \
(~crc32_le(0xFFFFFFFF, &(sess_id)->data[0], sizeof((sess_id)->data)))
-#else
-#define nfs_session_id_hash(session) (0)
-#endif
#else /* defined(CONFIG_NFS_V4_1) */
static inline int nfs4_init_session(struct nfs_client *clp)
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 792d3fed1b45..731a88f6313e 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -4,6 +4,7 @@ config NFSD
depends on INET
depends on FILE_LOCKING
depends on FSNOTIFY
+ select CRC32
select LOCKD
select SUNRPC
select EXPORTFS
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 2041268b398a..59a693f22452 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -5430,7 +5430,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
queued = nfsd4_run_cb(&dp->dl_recall);
WARN_ON_ONCE(!queued);
if (!queued)
- nfs4_put_stid(&dp->dl_stid);
+ refcount_dec(&dp->dl_stid.sc_count);
}
/* Called from break_lease() with flc_lock held. */
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 876152a91f12..5103c2f4d225 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -267,7 +267,6 @@ static inline bool fh_fsid_match(const struct knfsd_fh *fh1,
return true;
}
-#ifdef CONFIG_CRC32
/**
* knfsd_fh_hash - calculate the crc32 hash for the filehandle
* @fh - pointer to filehandle
@@ -279,12 +278,6 @@ static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
{
return ~crc32_le(0xFFFFFFFF, fh->fh_raw, fh->fh_size);
}
-#else
-static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
-{
- return 0;
-}
-#endif
/**
* fh_clear_pre_post_attrs - Reset pre/post attributes
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index cfcc07905bdf..59f6fdfe560e 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -163,6 +163,8 @@ extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
struct cifsFileInfo **ret_file);
+extern int cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
+ struct file *file);
extern unsigned int smbCalcSize(void *buf);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
struct TCP_Server_Info *server);
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 4a0b2d220fe8..df976ce6aed9 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -300,7 +300,6 @@ cifs_abort_connection(struct TCP_Server_Info *server)
server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
- put_net(cifs_net_ns(server));
}
server->sequence_number = 0;
server->session_estab = false;
@@ -1074,13 +1073,9 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
msleep(125);
if (cifs_rdma_enabled(server))
smbd_destroy(server);
-
if (server->ssocket) {
sock_release(server->ssocket);
server->ssocket = NULL;
-
- /* Release netns reference for the socket. */
- put_net(cifs_net_ns(server));
}
if (!list_empty(&server->pending_mid_q)) {
@@ -1128,7 +1123,6 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
*/
}
- /* Release netns reference for this server. */
put_net(cifs_net_ns(server));
kfree(server->leaf_fullpath);
kfree(server->hostname);
@@ -1774,8 +1768,6 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
tcp_ses->ops = ctx->ops;
tcp_ses->vals = ctx->vals;
-
- /* Grab netns reference for this server. */
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
tcp_ses->sign = ctx->sign;
@@ -1903,7 +1895,6 @@ smbd_connected:
out_err_crypto_release:
cifs_crypto_secmech_release(tcp_ses);
- /* Release netns reference for this server. */
put_net(cifs_net_ns(tcp_ses));
out_err:
@@ -1912,10 +1903,8 @@ out_err:
cifs_put_tcp_session(tcp_ses->primary_server, false);
kfree(tcp_ses->hostname);
kfree(tcp_ses->leaf_fullpath);
- if (tcp_ses->ssocket) {
+ if (tcp_ses->ssocket)
sock_release(tcp_ses->ssocket);
- put_net(cifs_net_ns(tcp_ses));
- }
kfree(tcp_ses);
}
return ERR_PTR(rc);
@@ -3359,24 +3348,20 @@ generic_ip_connect(struct TCP_Server_Info *server)
socket = server->ssocket;
} else {
struct net *net = cifs_net_ns(server);
+ struct sock *sk;
- rc = sock_create_kern(net, sfamily, SOCK_STREAM, IPPROTO_TCP, &server->ssocket);
+ rc = __sock_create(net, sfamily, SOCK_STREAM,
+ IPPROTO_TCP, &server->ssocket, 1);
if (rc < 0) {
cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
return rc;
}
- /*
- * Grab netns reference for the socket.
- *
- * This reference will be released in several situations:
- * - In the failure path before the cifsd thread is started.
- * - In the all place where server->socket is released, it is
- * also set to NULL.
- * - Ultimately in clean_demultiplex_info(), during the final
- * teardown.
- */
- get_net(net);
+ sk = server->ssocket->sk;
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ sk->sk_net_refcnt = 1;
+ get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+ sock_inuse_add(net, 1);
/* BB other socket options to set KEEPALIVE, NODELAY? */
cifs_dbg(FYI, "Socket created\n");
@@ -3428,7 +3413,6 @@ generic_ip_connect(struct TCP_Server_Info *server)
if (rc < 0) {
cifs_dbg(FYI, "Error %d connecting to server\n", rc);
trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
- put_net(cifs_net_ns(server));
sock_release(socket);
server->ssocket = NULL;
return rc;
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 8407fb108664..9e8f404b9e56 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -1007,6 +1007,11 @@ int cifs_open(struct inode *inode, struct file *file)
} else {
_cifsFileInfo_put(cfile, true, false);
}
+ } else {
+ /* hard link on a file with a deferred close pending */
+ rc = cifs_get_hardlink_path(tcon, inode, file);
+ if (rc)
+ cifs_close_deferred_file(CIFS_I(inode));
}
if (server->oplocks)
@@ -2071,6 +2076,29 @@ cifs_move_llist(struct list_head *source, struct list_head *dest)
list_move(li, dest);
}
+int
+cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
+ struct file *file)
+{
+ struct cifsFileInfo *open_file = NULL;
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+ int rc = 0;
+
+ spin_lock(&tcon->open_file_lock);
+ spin_lock(&cinode->open_file_lock);
+
+ list_for_each_entry(open_file, &cinode->openFileList, flist) {
+ if (file->f_flags == open_file->f_flags) {
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+ spin_unlock(&cinode->open_file_lock);
+ spin_unlock(&tcon->open_file_lock);
+ return rc;
+}
+
void
cifs_free_llist(struct list_head *llist)
{
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index c1f22c129111..83764c230e9d 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -39,8 +39,10 @@ void ksmbd_conn_free(struct ksmbd_conn *conn)
xa_destroy(&conn->sessions);
kvfree(conn->request_buf);
kfree(conn->preauth_info);
- if (atomic_dec_and_test(&conn->refcnt))
+ if (atomic_dec_and_test(&conn->refcnt)) {
+ ksmbd_free_transport(conn->transport);
kfree(conn);
+ }
}
/**
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index f103b1bd0400..81a29857b1e3 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -129,14 +129,6 @@ static void free_opinfo(struct oplock_info *opinfo)
kfree(opinfo);
}
-static inline void opinfo_free_rcu(struct rcu_head *rcu_head)
-{
- struct oplock_info *opinfo;
-
- opinfo = container_of(rcu_head, struct oplock_info, rcu_head);
- free_opinfo(opinfo);
-}
-
struct oplock_info *opinfo_get(struct ksmbd_file *fp)
{
struct oplock_info *opinfo;
@@ -157,8 +149,8 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
if (list_empty(&ci->m_op_list))
return NULL;
- rcu_read_lock();
- opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
+ down_read(&ci->m_lock);
+ opinfo = list_first_entry(&ci->m_op_list, struct oplock_info,
op_entry);
if (opinfo) {
if (opinfo->conn == NULL ||
@@ -171,8 +163,7 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
}
}
}
-
- rcu_read_unlock();
+ up_read(&ci->m_lock);
return opinfo;
}
@@ -185,7 +176,7 @@ void opinfo_put(struct oplock_info *opinfo)
if (!atomic_dec_and_test(&opinfo->refcount))
return;
- call_rcu(&opinfo->rcu_head, opinfo_free_rcu);
+ free_opinfo(opinfo);
}
static void opinfo_add(struct oplock_info *opinfo)
@@ -193,7 +184,7 @@ static void opinfo_add(struct oplock_info *opinfo)
struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
down_write(&ci->m_lock);
- list_add_rcu(&opinfo->op_entry, &ci->m_op_list);
+ list_add(&opinfo->op_entry, &ci->m_op_list);
up_write(&ci->m_lock);
}
@@ -207,7 +198,7 @@ static void opinfo_del(struct oplock_info *opinfo)
write_unlock(&lease_list_lock);
}
down_write(&ci->m_lock);
- list_del_rcu(&opinfo->op_entry);
+ list_del(&opinfo->op_entry);
up_write(&ci->m_lock);
}
@@ -1347,8 +1338,8 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
ci = fp->f_ci;
op = opinfo_get(fp);
- rcu_read_lock();
- list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
+ down_read(&ci->m_lock);
+ list_for_each_entry(brk_op, &ci->m_op_list, op_entry) {
if (brk_op->conn == NULL)
continue;
@@ -1358,7 +1349,6 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
if (ksmbd_conn_releasing(brk_op->conn))
continue;
- rcu_read_unlock();
if (brk_op->is_lease && (brk_op->o_lease->state &
(~(SMB2_LEASE_READ_CACHING_LE |
SMB2_LEASE_HANDLE_CACHING_LE)))) {
@@ -1388,9 +1378,8 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE, NULL);
next:
opinfo_put(brk_op);
- rcu_read_lock();
}
- rcu_read_unlock();
+ up_read(&ci->m_lock);
if (op)
opinfo_put(op);
diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h
index 3f64f0787263..9a56eaadd0dd 100644
--- a/fs/smb/server/oplock.h
+++ b/fs/smb/server/oplock.h
@@ -71,7 +71,6 @@ struct oplock_info {
struct list_head lease_entry;
wait_queue_head_t oplock_q; /* Other server threads */
wait_queue_head_t oplock_brk; /* oplock breaking wait */
- struct rcu_head rcu_head;
};
struct lease_break_info {
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index d24d95d15d87..57839f9708bb 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1602,8 +1602,10 @@ static int krb5_authenticate(struct ksmbd_work *work,
if (prev_sess_id && prev_sess_id != sess->id)
destroy_previous_session(conn, sess->user, prev_sess_id);
- if (sess->state == SMB2_SESSION_VALID)
+ if (sess->state == SMB2_SESSION_VALID) {
ksmbd_free_user(sess->user);
+ sess->user = NULL;
+ }
retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
out_blob, &out_len);
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index 3f185ae60dc5..2a3e2b0ce557 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -310,7 +310,11 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
server_conf.signing = req->signing;
server_conf.tcp_port = req->tcp_port;
server_conf.ipc_timeout = req->ipc_timeout * HZ;
- server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL;
+ if (check_mul_overflow(req->deadtime, SMB_ECHO_INTERVAL,
+ &server_conf.deadtime)) {
+ ret = -EINVAL;
+ goto out;
+ }
server_conf.share_fake_fscaps = req->share_fake_fscaps;
ksmbd_init_domain(req->sub_auth);
@@ -337,6 +341,7 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
server_conf.bind_interfaces_only = req->bind_interfaces_only;
ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
req->ifc_list_sz);
+out:
if (ret) {
pr_err("Server configuration error: %s %s %s\n",
req->netbios_name, req->server_string,
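
check_mul_overflow() from <linux/overflow.h> stores the product in its third
argument and returns true when it wrapped, which is what rejects oversized
deadtime values above. A small sketch, with 60 standing in for
SMB_ECHO_INTERVAL:

    #include <linux/overflow.h>

    static int set_deadtime(u32 user_val, unsigned int *deadtime)
    {
            /* On overflow the stored value is the wrapped product: bail out. */
            if (check_mul_overflow(user_val, 60u, deadtime))
                    return -EINVAL;
            return 0;
    }
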
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index 7f38a3c3f5bd..abedf510899a 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -93,17 +93,21 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
return t;
}
-static void free_transport(struct tcp_transport *t)
+void ksmbd_free_transport(struct ksmbd_transport *kt)
{
- kernel_sock_shutdown(t->sock, SHUT_RDWR);
- sock_release(t->sock);
- t->sock = NULL;
+ struct tcp_transport *t = TCP_TRANS(kt);
- ksmbd_conn_free(KSMBD_TRANS(t)->conn);
+ sock_release(t->sock);
kfree(t->iov);
kfree(t);
}
+static void free_transport(struct tcp_transport *t)
+{
+ kernel_sock_shutdown(t->sock, SHUT_RDWR);
+ ksmbd_conn_free(KSMBD_TRANS(t)->conn);
+}
+
/**
* kvec_array_init() - initialize an IO vector segment
* @new: IO vector to be initialized
diff --git a/fs/smb/server/transport_tcp.h b/fs/smb/server/transport_tcp.h
index 8c9aa624cfe3..1e51675ee1b2 100644
--- a/fs/smb/server/transport_tcp.h
+++ b/fs/smb/server/transport_tcp.h
@@ -8,6 +8,7 @@
int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz);
struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name);
+void ksmbd_free_transport(struct ksmbd_transport *kt);
int ksmbd_tcp_init(void);
void ksmbd_tcp_destroy(void);
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 8554aa5a1059..391d07da586c 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -479,7 +479,8 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp,
int err = 0;
if (work->conn->connection_type) {
- if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) {
+ if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE)) ||
+ S_ISDIR(file_inode(fp->filp)->i_mode)) {
pr_err("no right to write(%pD)\n", fp->filp);
err = -EACCES;
goto out;
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index 8d1f30dcba7e..1f8fa3468173 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -713,12 +713,8 @@ static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
static bool ksmbd_durable_scavenger_alive(void)
{
- mutex_lock(&durable_scavenger_lock);
- if (!durable_scavenger_running) {
- mutex_unlock(&durable_scavenger_lock);
+ if (!durable_scavenger_running)
return false;
- }
- mutex_unlock(&durable_scavenger_lock);
if (kthread_should_stop())
return false;
@@ -799,9 +795,7 @@ static int ksmbd_durable_scavenger(void *dummy)
break;
}
- mutex_lock(&durable_scavenger_lock);
durable_scavenger_running = false;
- mutex_unlock(&durable_scavenger_lock);
module_put(THIS_MODULE);
diff --git a/fs/stat.c b/fs/stat.c
index f13308bfdc98..3d9222807214 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -204,12 +204,25 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
STATX_ATTR_DAX);
idmap = mnt_idmap(path->mnt);
- if (inode->i_op->getattr)
- return inode->i_op->getattr(idmap, path, stat,
- request_mask,
- query_flags);
+ if (inode->i_op->getattr) {
+ int ret;
+
+ ret = inode->i_op->getattr(idmap, path, stat, request_mask,
+ query_flags);
+ if (ret)
+ return ret;
+ } else {
+ generic_fillattr(idmap, request_mask, inode, stat);
+ }
+
+ /*
+ * If this is a block device inode, override the filesystem attributes
+ * with the block device specific parameters that need to be obtained
+ * from the bdev backing inode.
+ */
+ if (S_ISBLK(stat->mode))
+ bdev_statx(path, stat, request_mask);
- generic_fillattr(idmap, request_mask, inode, stat);
return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);
@@ -295,15 +308,6 @@ static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
if (path_mounted(path))
stat->attributes |= STATX_ATTR_MOUNT_ROOT;
stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
-
- /*
- * If this is a block device inode, override the filesystem
- * attributes with the block device specific parameters that need to be
- * obtained from the bdev backing inode.
- */
- if (S_ISBLK(stat->mode))
- bdev_statx(path, stat, request_mask);
-
return 0;
}
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 451f6276da49..2fc89704be17 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -66,7 +66,7 @@
******************************************************************************/
struct acpi_table_header {
- char signature[ACPI_NAMESEG_SIZE]; /* ASCII table signature */
+ char signature[ACPI_NAMESEG_SIZE] __nonstring; /* ASCII table signature */
u32 length; /* Length of table in bytes, including this header */
u8 revision; /* ACPI Specification minor version number */
u8 checksum; /* To make sum of entire table == 0 */
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 2bf893eabb4b..bcd54020d6ba 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -585,8 +585,7 @@ static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_obje
*/
static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
{
- /* The dma-buf's priv field points to the original GEM object. */
- return obj->dma_buf && (obj->dma_buf->priv != obj);
+ return !!obj->import_attach;
}
#ifdef CONFIG_LOCKDEP
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 8e7af9a03b41..e721148c95d0 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -249,6 +249,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
+ (inode->i_sb->s_iflags & SB_I_CGROUPWB) &&
(!lockdep_is_held(&inode->i_lock) &&
!lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
!lockdep_is_held(&inode->i_wb->list_lock)));
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e39c45bc0a97..2b8875a82ff8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -712,23 +712,6 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
(q->limits.features & BLK_FEAT_ZONED);
}
-#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
- return disk->nr_zones;
-}
-bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
- return 0;
-}
-static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
-{
- return false;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
if (!blk_queue_is_zoned(disk->queue))
@@ -736,11 +719,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- return disk_nr_zones(bdev->bd_disk);
-}
-
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
return bdev->bd_disk->queue->limits.max_open_zones;
@@ -847,6 +825,51 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
(sb->s_blocksize_bits - SECTOR_SHIFT);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return disk->nr_zones;
+}
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+
+/**
+ * disk_zone_capacity - returns the zone capacity of the zone containing @sector
+ * @disk: disk to work with
+ * @sector: sector number within the queried zone
+ *
+ * Returns the capacity of the zone containing @sector. @sector can be any
+ * sector in the zone.
+ */
+static inline unsigned int disk_zone_capacity(struct gendisk *disk,
+ sector_t sector)
+{
+ sector_t zone_sectors = disk->queue->limits.chunk_sectors;
+
+ if (sector + zone_sectors >= get_capacity(disk))
+ return disk->last_zone_capacity;
+ return disk->zone_capacity;
+}
+
+static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
+ sector_t pos)
+{
+ return disk_zone_capacity(bdev->bd_disk, pos);
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return 0;
+}
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return disk_nr_zones(bdev->bd_disk);
+}
+
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
void put_disk(struct gendisk *disk);
@@ -1685,7 +1708,7 @@ int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
-void bdev_statx(struct path *, struct kstat *, u32);
+void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
@@ -1703,8 +1726,8 @@ static inline int sync_blockdev_nowait(struct block_device *bdev)
static inline void sync_bdevs(bool wait)
{
}
-static inline void bdev_statx(struct path *path, struct kstat *stat,
- u32 request_mask)
+static inline void bdev_statx(const struct path *path, struct kstat *stat,
+ u32 request_mask)
{
}
static inline void printk_all_partitions(void)
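
disk_zone_capacity() special-cases the last zone, the only zone on a zoned
device whose capacity may differ. A worked example with assumed geometry,
written as compile-time checks:

    /* Assumed geometry: 10 zones of 524288 sectors each. */
    #define ZONE_SECTORS  524288UL
    #define DISK_CAPACITY (10 * ZONE_SECTORS)

    /* 4980736 lies in the last zone: sector + zone_sectors >= capacity. */
    _Static_assert(4980736UL + ZONE_SECTORS >= DISK_CAPACITY,
                   "last-zone sectors select last_zone_capacity");
    /* 4456448 lies in zone 8, so the common zone_capacity is used. */
    _Static_assert(4456448UL + ZONE_SECTORS < DISK_CAPACITY,
                   "earlier sectors fall through to zone_capacity");
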
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index bf2bf40d7b18..8d5ac16a9b17 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -102,11 +102,11 @@ do { \
l = (local_lock_t *)this_cpu_ptr(lock); \
tl = (local_trylock_t *)l; \
_Generic((lock), \
- local_trylock_t *: ({ \
+ __percpu local_trylock_t *: ({ \
lockdep_assert(tl->acquired == 0); \
WRITE_ONCE(tl->acquired, 1); \
}), \
- default:(void)0); \
+ __percpu local_lock_t *: (void)0); \
local_lock_acquire(l); \
} while (0)
@@ -171,11 +171,11 @@ do { \
tl = (local_trylock_t *)l; \
local_lock_release(l); \
_Generic((lock), \
- local_trylock_t *: ({ \
+ __percpu local_trylock_t *: ({ \
lockdep_assert(tl->acquired == 1); \
WRITE_ONCE(tl->acquired, 0); \
}), \
- default:(void)0); \
+ __percpu local_lock_t *: (void)0); \
} while (0)
#define __local_unlock(lock) \
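
The fix works because _Generic dispatches on the static type of the lock
pointer, and the __percpu-qualified associations now reject unexpected pointer
types at compile time instead of silently matching a default branch. A
standalone C11 sketch of type-directed dispatch, with hypothetical lock types
and no percpu involved:

    #include <stdio.h>

    struct plain_lock { int owner; };
    struct try_lock   { int owner; int acquired; };

    /* The branch is selected at compile time from the argument's type. */
    #define lock_kind(p) _Generic((p),              \
            struct try_lock *:   "trylock",         \
            struct plain_lock *: "plain lock")

    int main(void)
    {
            struct plain_lock pl = { 0 };
            struct try_lock   tl = { 0, 0 };

            printf("%s\n", lock_kind(&pl));   /* plain lock */
            printf("%s\n", lock_kind(&tl));   /* trylock */
            return 0;
    }
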
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b7f13f087954..bf55206935c4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1218,6 +1218,23 @@ static inline unsigned int folio_order(const struct folio *folio)
return folio_large_order(folio);
}
+/**
+ * folio_reset_order - Reset the folio order and derived _nr_pages
+ * @folio: The folio.
+ *
+ * Reset the order and derived _nr_pages to 0. Must only be used in the
+ * process of splitting large folios.
+ */
+static inline void folio_reset_order(struct folio *folio)
+{
+ if (WARN_ON_ONCE(!folio_test_large(folio)))
+ return;
+ folio->_flags_1 &= ~0xffUL;
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+ folio->_nr_pages = 0;
+#endif
+}
+
#include <linux/huge_mm.h>
/*
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 25e80b2ca7f4..6ccec1bf2896 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -967,6 +967,9 @@ struct zone {
#ifdef CONFIG_UNACCEPTED_MEMORY
/* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
struct list_head unaccepted_pages;
+
+ /* To be called once the last page in the zone is accepted */
+ struct work_struct unaccepted_cleanup;
#endif
/* zone flags, see below */
@@ -1499,8 +1502,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
-bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int highest_zoneidx);
/*
* Memory initialization context, use to differentiate memory added by
* the platform statically or via memory hotplug interface.
diff --git a/include/linux/namei.h b/include/linux/namei.h
index e3042176cdf4..bbaf55fb3101 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -62,6 +62,7 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne
extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
+extern struct dentry *kern_path_locked_negative(const char *, struct path *);
extern struct dentry *user_path_locked_at(int , const char __user *, struct path *);
int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
struct path *parent, struct qstr *last, int *type,
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 9ad727ddfedb..0906a0b40c6a 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -55,7 +55,6 @@ enum nfs3_stable_how {
NFS_INVALID_STABLE_HOW = -1
};
-#ifdef CONFIG_CRC32
/**
* nfs_fhandle_hash - calculate the crc32 hash for the filehandle
* @fh - pointer to filehandle
@@ -67,10 +66,4 @@ static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
{
return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
}
-#else /* CONFIG_CRC32 */
-static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
-{
- return 0;
-}
-#endif /* CONFIG_CRC32 */
#endif /* _LINUX_NFS_H */
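
With CRC32 now selected unconditionally by the NFS and NFSD Kconfig entries,
the stubs that hashed everything to 0 can go, and every hash helper keeps the
same shape. A hedged sketch with a hypothetical fh_hash():

    #include <linux/crc32.h>

    /* Seed with all ones and invert the result, as the helpers above do. */
    static inline u32 fh_hash(const u8 *data, unsigned int size)
    {
            return ~crc32_le(0xFFFFFFFF, data, size);
    }
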
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0e8e3fd77e96..51e2bd6405cd 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -245,6 +245,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
/* Device does honor MSI masking despite saying otherwise */
PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
+ /* Device requires a write to PCI_MSIX_ENTRY_DATA before any MSI-X reads */
+ PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
};
enum pci_irq_reroute_variant {
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 4d16c13d0df5..64cb4b04be7a 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -220,6 +220,8 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev);
* occurs.
* @reset_done: optional function to call after transport specific reset
* operation has finished.
+ * @shutdown: synchronize with the device on shutdown. If provided, replaces
+ * the virtio core implementation.
*/
struct virtio_driver {
struct device_driver driver;
@@ -237,6 +239,7 @@ struct virtio_driver {
int (*restore)(struct virtio_device *dev);
int (*reset_prepare)(struct virtio_device *dev);
int (*reset_done)(struct virtio_device *dev);
+ void (*shutdown)(struct virtio_device *dev);
};
#define drv_to_virtio(__drv) container_of_const(__drv, struct virtio_driver, driver)
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 746c43bd3eb6..2f24103f4533 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __UAPI_IVPU_DRM_H__
@@ -147,7 +147,7 @@ struct drm_ivpu_param {
* platform type when executing on a simulator or emulator (read-only)
*
* %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
- * Current PLL frequency (read-only)
+ * Maximum frequency of the NPU data processing unit clock (read-only)
*
* %DRM_IVPU_PARAM_NUM_CONTEXTS:
* Maximum number of simultaneously existing contexts (read-only)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index ed2beb4def3f..8f1fc12bac46 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -1010,7 +1010,9 @@ struct io_uring_zcrx_ifq_reg {
__u64 region_ptr; /* struct io_uring_region_desc * */
struct io_uring_zcrx_offsets offsets;
- __u64 __resv[4];
+ __u32 zcrx_id;
+ __u32 __resv2;
+ __u64 __resv[3];
};
#ifdef __cplusplus
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index d9d0cb827117..f030adc462ee 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -53,43 +53,70 @@ struct landlock_ruleset_attr {
__u64 scoped;
};
-/*
- * sys_landlock_create_ruleset() flags:
+/**
+ * DOC: landlock_create_ruleset_flags
+ *
+ * **Flags**
*
- * - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI
- * version.
- * - %LANDLOCK_CREATE_RULESET_ERRATA: Get a bitmask of fixed issues.
+ * %LANDLOCK_CREATE_RULESET_VERSION
+ * Get the highest supported Landlock ABI version (starting at 1).
+ *
+ * %LANDLOCK_CREATE_RULESET_ERRATA
+ * Get a bitmask of fixed issues for the current Landlock ABI version.
*/
/* clang-format off */
#define LANDLOCK_CREATE_RULESET_VERSION (1U << 0)
#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1)
/* clang-format on */
-/*
- * sys_landlock_restrict_self() flags:
- *
- * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF: Do not create any log related to the
- * enforced restrictions. This should only be set by tools launching unknown
- * or untrusted programs (e.g. a sandbox tool, container runtime, system
- * service manager). Because programs sandboxing themselves should fix any
- * denied access, they should not set this flag to be aware of potential
- * issues reported by system's logs (i.e. audit).
- * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON: Explicitly ask to continue
- * logging denied access requests even after an :manpage:`execve(2)` call.
- * This flag should only be set if all the programs than can legitimately be
- * executed will not try to request a denied access (which could spam audit
- * logs).
- * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF: Do not create any log related
- * to the enforced restrictions coming from future nested domains created by
- * the caller or its descendants. This should only be set according to a
- * runtime configuration (i.e. not hardcoded) by programs launching other
- * unknown or untrusted programs that may create their own Landlock domains
- * and spam logs. The main use case is for container runtimes to enable users
- * to mute buggy sandboxed programs for a specific container image. Other use
- * cases include sandboxer tools and init systems. Unlike
- * %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF,
- * %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF does not impact the requested
- * restriction (if any) but only the future nested domains.
+/**
+ * DOC: landlock_restrict_self_flags
+ *
+ * **Flags**
+ *
+ * By default, denied accesses originating from programs that sandbox themselves
+ * are logged via the audit subsystem. Such events typically indicate unexpected
+ * behavior, such as bugs or exploitation attempts. However, to avoid excessive
+ * logging, access requests denied by a domain not created by the originating
+ * program are not logged by default. The rationale is that programs should know
+ * their own behavior, but not necessarily the behavior of other programs. This
+ * default configuration is suitable for most programs that sandbox themselves.
+ * For specific use cases, the following flags allow programs to modify this
+ * default logging behavior.
+ *
+ * The %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF and
+ * %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON flags apply to the newly created
+ * Landlock domain.
+ *
+ * %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF
+ * Disables logging of denied accesses originating from the thread creating
+ * the Landlock domain, as well as its children, as long as they continue
+ * running the same executable code (i.e., without an intervening
+ * :manpage:`execve(2)` call). This is intended for programs that execute
+ * unknown code without invoking :manpage:`execve(2)`, such as script
+ * interpreters. Programs that only sandbox themselves should not set this
+ * flag, so users can be notified of unauthorized access attempts via system
+ * logs.
+ *
+ * %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON
+ * Enables logging of denied accesses after an :manpage:`execve(2)` call,
+ * providing visibility into unauthorized access attempts by newly executed
+ * programs within the created Landlock domain. This flag is recommended
+ * only when all potential executables in the domain are expected to comply
+ * with the access restrictions, as excessive audit log entries could make
+ * it more difficult to identify critical events.
+ *
+ * %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
+ * Disables logging of denied accesses originating from nested Landlock
+ * domains created by the caller or its descendants. This flag should be set
+ * according to runtime configuration, not hardcoded, to avoid suppressing
+ * important security events. It is useful for container runtimes or
+ * sandboxing tools that may launch programs which themselves create
+ * Landlock domains and could otherwise generate excessive logs. Unlike
+ * ``LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF``, this flag only affects
+ * future nested domains, not the one being created. It can also be used
+ * with a @ruleset_fd value of -1 to mute subdomain logs without creating a
+ * domain.
*/
/* clang-format off */
#define LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF (1U << 0)
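
Probing the ABI version is the documented first step for any Landlock user. A
hedged sketch of the raw syscall usage, assuming a kernel with Landlock
enabled:

    #include <linux/landlock.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Returns the highest supported Landlock ABI version, or -1 with
     * errno set (e.g. ENOSYS when Landlock is not available). */
    static long landlock_abi_version(void)
    {
            return syscall(SYS_landlock_create_ruleset, NULL, 0,
                           LANDLOCK_CREATE_RULESET_VERSION);
    }
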
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index b95dd84eef2d..d4b3e2ae1314 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -28,10 +28,10 @@
/* Set current process as the (exclusive) owner of this file descriptor. This
* must be called before any other vhost command. Further calls to
- * VHOST_OWNER_SET fail until VHOST_OWNER_RESET is called. */
+ * VHOST_SET_OWNER fail until VHOST_RESET_OWNER is called. */
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
/* Give up ownership, and reset the device to default values.
- * Allows subsequent call to VHOST_OWNER_SET to succeed. */
+ * Allows subsequent call to VHOST_SET_OWNER to succeed. */
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
/* Set up/modify memory layout */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 8549d4571257..c691ac210ce2 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -246,6 +246,7 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_ADMIN_CMD_LIST_USE 0x1
/* Admin command group type. */
+#define VIRTIO_ADMIN_GROUP_TYPE_SELF 0x0
#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1
/* Transitional device admin command. */
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index 41ff44dfa1db..f52de5ed1b3b 100644
--- a/include/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -107,4 +107,10 @@ struct ufs_dev_quirk {
*/
#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
+/*
+ * Some UFS devices may need more time in hibern8 before exiting it.
+ * Enable this quirk to add an extra 100 us of delay for such devices.
+ */
+#define UFS_DEVICE_QUIRK_PA_HIBER8TIME (1 << 12)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/init/Kconfig b/init/Kconfig
index dd2ea3b9a799..63f5974b9fa6 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1555,6 +1555,16 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
the unaligned access emulation.
see arch/parisc/kernel/unaligned.c for reference
+config SYSFS_SYSCALL
+ bool "Sysfs syscall support"
+ default n
+ help
+ sys_sysfs is an obsolete system call no longer supported in libc.
+ Note that disabling this option is more secure but might break
+ compatibility with some systems.
+
+ If unsure, say N here.
+
config HAVE_PCSPKR_PLATFORM
bool
@@ -1599,16 +1609,6 @@ config SGETMASK_SYSCALL
If unsure, leave the default option here.
-config SYSFS_SYSCALL
- bool "Sysfs syscall support" if EXPERT
- default y
- help
- sys_sysfs is an obsolete system call no longer supported in libc.
- Note that disabling this option is more secure but might break
- compatibility with some systems.
-
- If unsure say Y here.
-
config FHANDLE
bool "open by fhandle syscalls" if EXPERT
select EXPORTFS
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index b36c8825550e..f80a77c4973f 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1032,10 +1032,33 @@ static int validate_fixed_range(u64 buf_addr, size_t len,
return 0;
}
+static int io_import_kbuf(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu, size_t len, size_t offset)
+{
+ size_t count = len + offset;
+
+ iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, count);
+ iov_iter_advance(iter, offset);
+
+ if (count < imu->len) {
+ const struct bio_vec *bvec = iter->bvec;
+
+ while (len > bvec->bv_len) {
+ len -= bvec->bv_len;
+ bvec++;
+ }
+ iter->nr_segs = 1 + bvec - iter->bvec;
+ }
+ return 0;
+}
+
static int io_import_fixed(int ddir, struct iov_iter *iter,
struct io_mapped_ubuf *imu,
u64 buf_addr, size_t len)
{
+ const struct bio_vec *bvec;
+ size_t folio_mask;
+ unsigned nr_segs;
size_t offset;
int ret;
@@ -1047,56 +1070,35 @@ static int io_import_fixed(int ddir, struct iov_iter *iter,
if (!(imu->dir & (1 << ddir)))
return -EFAULT;
- /*
- * Might not be a start of buffer, set size appropriately
- * and advance us to the beginning.
- */
offset = buf_addr - imu->ubuf;
- iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
- if (offset) {
- /*
- * Don't use iov_iter_advance() here, as it's really slow for
- * using the latter parts of a big fixed buffer - it iterates
- * over each segment manually. We can cheat a bit here for user
- * registered nodes, because we know that:
- *
- * 1) it's a BVEC iter, we set it up
- * 2) all bvecs are the same in size, except potentially the
- * first and last bvec
- *
- * So just find our index, and adjust the iterator afterwards.
- * If the offset is within the first bvec (or the whole first
- * bvec, just use iov_iter_advance(). This makes it easier
- * since we can just skip the first segment, which may not
- * be folio_size aligned.
- */
- const struct bio_vec *bvec = imu->bvec;
-
- /*
- * Kernel buffer bvecs, on the other hand, don't necessarily
- * have the size property of user registered ones, so we have
- * to use the slow iter advance.
- */
- if (offset < bvec->bv_len) {
- iter->count -= offset;
- iter->iov_offset = offset;
- } else if (imu->is_kbuf) {
- iov_iter_advance(iter, offset);
- } else {
- unsigned long seg_skip;
+ if (imu->is_kbuf)
+ return io_import_kbuf(ddir, iter, imu, len, offset);
- /* skip first vec */
- offset -= bvec->bv_len;
- seg_skip = 1 + (offset >> imu->folio_shift);
+ /*
+ * Don't use iov_iter_advance() here, as it's really slow when
+ * using the latter parts of a big fixed buffer - it iterates
+ * over each segment manually. We can cheat a bit here for user
+ * registered nodes, because we know that:
+ *
+ * 1) it's a BVEC iter, and we set it up
+ * 2) all bvecs are the same size, except potentially the
+ * first and last bvec
+ */
+ folio_mask = (1UL << imu->folio_shift) - 1;
+ bvec = imu->bvec;
+ if (offset >= bvec->bv_len) {
+ unsigned long seg_skip;
- iter->bvec += seg_skip;
- iter->nr_segs -= seg_skip;
- iter->count -= bvec->bv_len + offset;
- iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
- }
+ /* skip first vec */
+ offset -= bvec->bv_len;
+ seg_skip = 1 + (offset >> imu->folio_shift);
+ bvec += seg_skip;
+ offset &= folio_mask;
}
-
+ nr_segs = (offset + len + bvec->bv_offset + folio_mask) >> imu->folio_shift;
+ iov_iter_bvec(iter, ddir, bvec, nr_segs, len);
+ iter->iov_offset = offset;
return 0;
}
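
The new segment math can be checked by hand. Assuming 4 KiB folios
(folio_shift = 12, folio_mask = 4095), bv_offset = 0, a masked offset of 1000
bytes and len = 10000 bytes, the import spans folios 0 through 2, so nr_segs
must be 3:

    /* Worked example of the nr_segs computation (assumed geometry). */
    #define FOLIO_SHIFT 12
    #define FOLIO_MASK  ((1UL << FOLIO_SHIFT) - 1)

    _Static_assert(((1000 + 10000 + 0 + FOLIO_MASK) >> FOLIO_SHIFT) == 3,
                   "10000 bytes starting 1000 bytes into a folio span 3 folios");
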
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 0f46e0404c04..fe86606b9f30 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -26,6 +26,11 @@
#include "zcrx.h"
#include "rsrc.h"
+static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
+{
+ return pp->mp_priv;
+}
+
#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
@@ -46,14 +51,21 @@ static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
+ guard(mutex)(&ifq->dma_lock);
+
if (area->is_mapped)
__io_zcrx_unmap_area(ifq, area, area->nia.num_niovs);
+ area->is_mapped = false;
}
static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
int i;
+ guard(mutex)(&ifq->dma_lock);
+ if (area->is_mapped)
+ return 0;
+
for (i = 0; i < area->nia.num_niovs; i++) {
struct net_iov *niov = &area->nia.niovs[i];
dma_addr_t dma;
@@ -275,6 +287,7 @@ static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
ifq->ctx = ctx;
spin_lock_init(&ifq->lock);
spin_lock_init(&ifq->rq_lock);
+ mutex_init(&ifq->dma_lock);
return ifq;
}
@@ -324,6 +337,7 @@ static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
put_device(ifq->dev);
io_free_rbuf_ring(ifq);
+ mutex_destroy(&ifq->dma_lock);
kfree(ifq);
}
@@ -354,7 +368,8 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
return -EFAULT;
if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
return -EFAULT;
- if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)))
+ if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)) ||
+ reg.__resv2 || reg.zcrx_id)
return -EINVAL;
if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
return -EINVAL;
@@ -394,10 +409,6 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
goto err;
get_device(ifq->dev);
- ret = io_zcrx_map_area(ifq, ifq->area);
- if (ret)
- goto err;
-
mp_param.mp_ops = &io_uring_pp_zc_ops;
mp_param.mp_priv = ifq;
ret = net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param);
@@ -585,7 +596,7 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
{
- struct io_zcrx_ifq *ifq = pp->mp_priv;
+ struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
/* pp should already be ensuring that */
if (unlikely(pp->alloc.count))
@@ -617,7 +628,8 @@ static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
static int io_pp_zc_init(struct page_pool *pp)
{
- struct io_zcrx_ifq *ifq = pp->mp_priv;
+ struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
+ int ret;
if (WARN_ON_ONCE(!ifq))
return -EINVAL;
@@ -630,13 +642,17 @@ static int io_pp_zc_init(struct page_pool *pp)
if (pp->p.dma_dir != DMA_FROM_DEVICE)
return -EOPNOTSUPP;
+ ret = io_zcrx_map_area(ifq, ifq->area);
+ if (ret)
+ return ret;
+
percpu_ref_get(&ifq->ctx->refs);
return 0;
}
static void io_pp_zc_destroy(struct page_pool *pp)
{
- struct io_zcrx_ifq *ifq = pp->mp_priv;
+ struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
struct io_zcrx_area *area = ifq->area;
if (WARN_ON_ONCE(area->free_count != area->nia.num_niovs))
@@ -665,6 +681,9 @@ static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
struct io_zcrx_ifq *ifq = mp_priv;
io_zcrx_drop_netdev(ifq);
+ if (ifq->area)
+ io_zcrx_unmap_area(ifq, ifq->area);
+
p->mp_ops = NULL;
p->mp_priv = NULL;
}
@@ -791,7 +810,7 @@ static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
niov = netmem_to_net_iov(frag->netmem);
if (niov->pp->mp_ops != &io_uring_pp_zc_ops ||
- niov->pp->mp_priv != ifq)
+ io_pp_to_ifq(niov->pp) != ifq)
return -EFAULT;
if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
diff --git a/io_uring/zcrx.h b/io_uring/zcrx.h
index 47f1c0e8c197..f2bc811f022c 100644
--- a/io_uring/zcrx.h
+++ b/io_uring/zcrx.h
@@ -38,6 +38,7 @@ struct io_zcrx_ifq {
struct net_device *netdev;
netdevice_tracker netdev_tracker;
spinlock_t lock;
+ struct mutex dma_lock;
};
#if defined(CONFIG_IO_URING_ZCRX)
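
The new dma_lock serializes mapping and unmapping of the area; the
guard(mutex)() form used in io_zcrx_map_area() and io_zcrx_unmap_area() comes
from the kernel's scope-based cleanup support and releases the mutex
automatically at every return. A minimal sketch of the idiom:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);
    static bool mapped;

    static int map_once(void)
    {
            guard(mutex)(&demo_lock);       /* unlocked at scope exit */

            if (mapped)
                    return 0;               /* early return is covered too */
            mapped = true;
            return 0;
    }
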
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 367eaf2c78b7..0ebbbe37a60f 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -347,12 +347,17 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
/* Get path information necessary for adding watches. */
static int audit_get_nd(struct audit_watch *watch, struct path *parent)
{
- struct dentry *d = kern_path_locked(watch->path, parent);
+ struct dentry *d;
+
+ d = kern_path_locked_negative(watch->path, parent);
if (IS_ERR(d))
return PTR_ERR(d);
- /* update watch filter fields */
- watch->dev = d->d_sb->s_dev;
- watch->ino = d_backing_inode(d)->i_ino;
+
+ if (d_is_positive(d)) {
+ /* update watch filter fields */
+ watch->dev = d->d_sb->s_dev;
+ watch->ino = d_backing_inode(d)->i_ino;
+ }
inode_unlock(d_backing_inode(parent->dentry));
dput(d);
@@ -418,11 +423,10 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
/* caller expects mutex locked */
mutex_lock(&audit_filter_mutex);
- if (ret && ret != -ENOENT) {
+ if (ret) {
audit_put_watch(watch);
return ret;
}
- ret = 0;
/* either find an old parent or attach a new one */
parent = audit_find_parent(d_backing_inode(parent_path.dentry));
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 3caf2cd86e65..63e5b90da1f3 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -90,7 +90,7 @@
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);
-#ifdef CONFIG_PROVE_RCU
+#if (defined CONFIG_PROVE_RCU || defined CONFIG_LOCKDEP)
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#endif
@@ -2353,9 +2353,37 @@ static struct file_system_type cgroup2_fs_type = {
};
#ifdef CONFIG_CPUSETS_V1
+enum cpuset_param {
+ Opt_cpuset_v2_mode,
+};
+
+static const struct fs_parameter_spec cpuset_fs_parameters[] = {
+ fsparam_flag ("cpuset_v2_mode", Opt_cpuset_v2_mode),
+ {}
+};
+
+static int cpuset_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, cpuset_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_cpuset_v2_mode:
+ ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
+ return 0;
+ }
+ return -EINVAL;
+}
+
static const struct fs_context_operations cpuset_fs_context_ops = {
.get_tree = cgroup1_get_tree,
.free = cgroup_fs_context_free,
+ .parse_param = cpuset_parse_param,
};
/*
@@ -2392,6 +2420,7 @@ static int cpuset_init_fs_context(struct fs_context *fc)
static struct file_system_type cpuset_fs_type = {
.name = "cpuset",
.init_fs_context = cpuset_init_fs_context,
+ .parameters = cpuset_fs_parameters,
.fs_flags = FS_USERNS_MOUNT,
};
#endif
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 1a19d69b91ed..816f07f9d30f 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -81,9 +81,23 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
if (!cpufreq_this_cpu_can_update(sg_policy->policy))
return false;
- if (unlikely(sg_policy->limits_changed)) {
- sg_policy->limits_changed = false;
- sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
+ if (unlikely(READ_ONCE(sg_policy->limits_changed))) {
+ WRITE_ONCE(sg_policy->limits_changed, false);
+ sg_policy->need_freq_update = true;
+
+ /*
+ * The above limits_changed update must occur before the reads
+ * of policy limits in cpufreq_driver_resolve_freq() or a policy
+ * limits update might be missed, so use a memory barrier to
+ * ensure it.
+ *
+ * This pairs with the write memory barrier in sugov_limits().
+ */
+ smp_mb();
+
+ return true;
+ } else if (sg_policy->need_freq_update) {
+ /* ignore_dl_rate_limit() wants a new frequency to be found. */
return true;
}
@@ -95,10 +109,22 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq)
{
- if (sg_policy->need_freq_update)
+ if (sg_policy->need_freq_update) {
sg_policy->need_freq_update = false;
- else if (sg_policy->next_freq == next_freq)
+ /*
+ * The policy limits have changed, but if the return value of
+ * cpufreq_driver_resolve_freq() after applying the new limits
+ * is still equal to the previously selected frequency, the
+ * driver callback need not be invoked unless the driver
+ * specifically wants that to happen on every update of the
+ * policy limits.
+ */
+ if (sg_policy->next_freq == next_freq &&
+ !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
+ return false;
+ } else if (sg_policy->next_freq == next_freq) {
return false;
+ }
sg_policy->next_freq = next_freq;
sg_policy->last_freq_update_time = time;
@@ -365,7 +391,7 @@ static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
- sg_cpu->sg_policy->limits_changed = true;
+ sg_cpu->sg_policy->need_freq_update = true;
}
static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
@@ -871,7 +897,16 @@ static void sugov_limits(struct cpufreq_policy *policy)
mutex_unlock(&sg_policy->work_lock);
}
- sg_policy->limits_changed = true;
+ /*
+ * The limits_changed update below must take place before the updates
+ * of policy limits in cpufreq_set_policy() or a policy limits update
+ * might be missed, so use a memory barrier to ensure it.
+ *
+ * This pairs with the memory barrier in sugov_should_update_freq().
+ */
+ smp_wmb();
+
+ WRITE_ONCE(sg_policy->limits_changed, true);
}
struct cpufreq_governor schedutil_gov = {
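
The barrier pairing can be mirrored with portable C11 atomics, using
release/acquire fences in place of smp_wmb()/smp_mb(). A hedged userspace
analogue with hypothetical policy fields:

    #include <stdatomic.h>
    #include <stdbool.h>

    static int policy_min, policy_max;
    static atomic_bool limits_changed;

    static void writer(int min, int max)        /* sugov_limits() analogue */
    {
            policy_min = min;
            policy_max = max;
            atomic_thread_fence(memory_order_release); /* pairs with fence below */
            atomic_store_explicit(&limits_changed, true, memory_order_relaxed);
    }

    static bool reader(int *min, int *max)  /* should_update_freq() analogue */
    {
            if (!atomic_exchange_explicit(&limits_changed, false,
                                          memory_order_relaxed))
                    return false;
            atomic_thread_fence(memory_order_acquire);
            *min = policy_min;      /* sees the values published by writer() */
            *max = policy_max;
            return true;
    }
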
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 66bcd40a28ca..fdbf249d1c68 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -163,7 +163,7 @@ enum scx_ops_flags {
/*
* CPU cgroup support flags
*/
- SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* cpu.weight */
+ SCX_OPS_HAS_CGROUP_WEIGHT = 1LLU << 16, /* DEPRECATED, will be removed in 6.18 */
SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
SCX_OPS_ENQ_LAST |
@@ -3899,35 +3899,6 @@ bool scx_can_stop_tick(struct rq *rq)
DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
static bool scx_cgroup_enabled;
-static bool cgroup_warned_missing_weight;
-static bool cgroup_warned_missing_idle;
-
-static void scx_cgroup_warn_missing_weight(struct task_group *tg)
-{
- if (scx_ops_enable_state() == SCX_OPS_DISABLED ||
- cgroup_warned_missing_weight)
- return;
-
- if ((scx_ops.flags & SCX_OPS_HAS_CGROUP_WEIGHT) || !tg->css.parent)
- return;
-
- pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.weight\n",
- scx_ops.name);
- cgroup_warned_missing_weight = true;
-}
-
-static void scx_cgroup_warn_missing_idle(struct task_group *tg)
-{
- if (!scx_cgroup_enabled || cgroup_warned_missing_idle)
- return;
-
- if (!tg->idle)
- return;
-
- pr_warn("sched_ext: \"%s\" does not implement cgroup cpu.idle\n",
- scx_ops.name);
- cgroup_warned_missing_idle = true;
-}
int scx_tg_online(struct task_group *tg)
{
@@ -3937,8 +3908,6 @@ int scx_tg_online(struct task_group *tg)
percpu_down_read(&scx_cgroup_rwsem);
- scx_cgroup_warn_missing_weight(tg);
-
if (scx_cgroup_enabled) {
if (SCX_HAS_OP(cgroup_init)) {
struct scx_cgroup_init_args args =
@@ -4076,9 +4045,7 @@ void scx_group_set_weight(struct task_group *tg, unsigned long weight)
void scx_group_set_idle(struct task_group *tg, bool idle)
{
- percpu_down_read(&scx_cgroup_rwsem);
- scx_cgroup_warn_missing_idle(tg);
- percpu_up_read(&scx_cgroup_rwsem);
+ /* TODO: Implement ops->cgroup_set_idle() */
}
static void scx_cgroup_lock(void)
@@ -4272,9 +4239,6 @@ static int scx_cgroup_init(void)
percpu_rwsem_assert_held(&scx_cgroup_rwsem);
- cgroup_warned_missing_weight = false;
- cgroup_warned_missing_idle = false;
-
/*
* scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk
* cgroups and init, all online cgroups are initialized.
@@ -4284,9 +4248,6 @@ static int scx_cgroup_init(void)
struct task_group *tg = css_tg(css);
struct scx_cgroup_init_args args = { .weight = tg->scx_weight };
- scx_cgroup_warn_missing_weight(tg);
- scx_cgroup_warn_missing_idle(tg);
-
if ((tg->scx_flags &
(SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE)
continue;
@@ -4623,7 +4584,7 @@ unlock:
static void free_exit_info(struct scx_exit_info *ei)
{
- kfree(ei->dump);
+ kvfree(ei->dump);
kfree(ei->msg);
kfree(ei->bt);
kfree(ei);
@@ -4639,7 +4600,7 @@ static struct scx_exit_info *alloc_exit_info(size_t exit_dump_len)
ei->bt = kcalloc(SCX_EXIT_BT_LEN, sizeof(ei->bt[0]), GFP_KERNEL);
ei->msg = kzalloc(SCX_EXIT_MSG_LEN, GFP_KERNEL);
- ei->dump = kzalloc(exit_dump_len, GFP_KERNEL);
+ ei->dump = kvzalloc(exit_dump_len, GFP_KERNEL);
if (!ei->bt || !ei->msg || !ei->dump) {
free_exit_info(ei);
@@ -5252,6 +5213,9 @@ static int validate_ops(const struct sched_ext_ops *ops)
return -EINVAL;
}
+ if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT)
+ pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n");
+
return 0;
}
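For context on the kzalloc()/kvzalloc() switch above: exit_dump_len can be large enough that a physically contiguous allocation may fail, and kvfree() is the matching free for either backing store. A rough userspace analogue of the size-based fallback follows; alloc_big()/free_big() and the 256 KiB threshold are invented for illustration, and both paths return zeroed memory like kvzalloc().

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define LARGE (256 * 1024)      /* illustrative cutover point */

static void *alloc_big(size_t size)
{
        void *p;

        if (size < LARGE)
                return calloc(1, size);        /* small: plain heap */
        /* large: anonymous mapping, the moral equivalent of vmalloc */
        p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
}

/* the free must match whichever path allocated the buffer */
static void free_big(void *p, size_t size)
{
        if (size < LARGE)
                free(p);
        else
                munmap(p, size);
}

int main(void)
{
        void *dump = alloc_big(1 << 20);
        if (!dump)
                return 1;
        free_big(dump, 1 << 20);
        puts("ok");
        return 0;
}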
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a8a02868b435..61130bb34d6c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1297,6 +1297,8 @@ void ftrace_free_filter(struct ftrace_ops *ops)
return;
free_ftrace_hash(ops->func_hash->filter_hash);
free_ftrace_hash(ops->func_hash->notrace_hash);
+ ops->func_hash->filter_hash = EMPTY_HASH;
+ ops->func_hash->notrace_hash = EMPTY_HASH;
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);
@@ -3443,6 +3445,7 @@ static int add_next_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **
size_bits);
if (ret < 0) {
free_ftrace_hash(*filter_hash);
+ *filter_hash = EMPTY_HASH;
return ret;
}
}
@@ -3472,6 +3475,7 @@ static int add_next_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **
subops_hash->notrace_hash);
if (ret < 0) {
free_ftrace_hash(*notrace_hash);
+ *notrace_hash = EMPTY_HASH;
return ret;
}
}
@@ -3490,8 +3494,8 @@ static int add_next_hash(struct ftrace_hash **filter_hash, struct ftrace_hash **
*/
int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
{
- struct ftrace_hash *filter_hash;
- struct ftrace_hash *notrace_hash;
+ struct ftrace_hash *filter_hash = EMPTY_HASH;
+ struct ftrace_hash *notrace_hash = EMPTY_HASH;
struct ftrace_hash *save_filter_hash;
struct ftrace_hash *save_notrace_hash;
int ret;
@@ -3605,6 +3609,9 @@ static int rebuild_hashes(struct ftrace_hash **filter_hash, struct ftrace_hash *
}
}
+ free_ftrace_hash(temp_hash.filter_hash);
+ free_ftrace_hash(temp_hash.notrace_hash);
+
temp_hash.filter_hash = *filter_hash;
temp_hash.notrace_hash = *notrace_hash;
}
@@ -3625,8 +3632,8 @@ static int rebuild_hashes(struct ftrace_hash **filter_hash, struct ftrace_hash *
*/
int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
{
- struct ftrace_hash *filter_hash;
- struct ftrace_hash *notrace_hash;
+ struct ftrace_hash *filter_hash = EMPTY_HASH;
+ struct ftrace_hash *notrace_hash = EMPTY_HASH;
int ret;
if (unlikely(ftrace_disabled))
@@ -3699,8 +3706,11 @@ static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
}
ret = rebuild_hashes(&filter_hash, &notrace_hash, ops);
- if (!ret)
+ if (!ret) {
ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
+ free_ftrace_hash(filter_hash);
+ free_ftrace_hash(notrace_hash);
+ }
if (ret) {
/* Put back the original hash */
@@ -5954,9 +5964,10 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
/* Make a copy hash to place the new and the old entries in */
size = hash->count + direct_functions->count;
- if (size > 32)
- size = 32;
- new_hash = alloc_ftrace_hash(fls(size));
+ size = fls(size);
+ if (size > FTRACE_HASH_MAX_BITS)
+ size = FTRACE_HASH_MAX_BITS;
+ new_hash = alloc_ftrace_hash(size);
if (!new_hash)
goto out_unlock;
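The register_ftrace_direct() change sizes the hash in bits: take fls() of the entry count and clamp the bit count, instead of clamping the count before converting. A small sketch of that sizing rule; HASH_MAX_BITS here is an illustrative stand-in, not the kernel's FTRACE_HASH_MAX_BITS value.

#include <stdio.h>

#define HASH_MAX_BITS 12        /* illustrative clamp only */

/* find last set bit, 1-based; fls(0) == 0, like the kernel helper */
static int fls_user(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int counts[] = { 0, 1, 31, 32, 5000, 1u << 20 };

        for (size_t i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
                int bits = fls_user(counts[i]);

                if (bits > HASH_MAX_BITS)
                        bits = HASH_MAX_BITS;
                printf("%u entries -> %d bits (%u buckets)\n",
                       counts[i], bits, 1u << bits);
        }
        return 0;
}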
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index ee40d4e6ad1c..4ef4df6623a8 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -80,11 +80,11 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
F_STRUCT(
__field_struct( struct ftrace_graph_ent, graph_ent )
__field_packed( unsigned long, graph_ent, func )
- __field_packed( unsigned long, graph_ent, depth )
+ __field_packed( unsigned int, graph_ent, depth )
__dynamic_array(unsigned long, args )
),
- F_printk("--> %ps (%lu)", (void *)__entry->func, __entry->depth)
+ F_printk("--> %ps (%u)", (void *)__entry->func, __entry->depth)
);
#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0993dfc1c5c1..2048560264bb 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -808,7 +808,7 @@ static __always_inline char *test_string(char *str)
kstr = ubuf->buffer;
/* For safety, do not trust the string pointer */
- if (!strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE))
+ if (strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE) < 0)
return NULL;
return kstr;
}
@@ -827,7 +827,7 @@ static __always_inline char *test_ustring(char *str)
/* user space address? */
ustr = (char __user *)str;
- if (!strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE))
+ if (strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE) < 0)
return NULL;
return kstr;
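The filter fix above is about the return contract: the nofault copy helpers return a count on success and a negative errno on a fault, so the old `!ret` test let faults slip through as success. A toy helper with a simplified version of the same contract; copy_string_nofault() is invented for the demo.

#include <stdio.h>

/* toy contract: >= 0 means success, negative errno means fault */
static long copy_string_nofault(char *dst, const char *src, long size)
{
        long n = 0;

        if (!src)
                return -14;     /* simulated fault (-EFAULT) */
        while (n < size - 1 && src[n]) {
                dst[n] = src[n];
                n++;
        }
        dst[n] = '\0';
        return n;
}

int main(void)
{
        char buf[16];
        long ret;

        /* fault: negative return; "!ret" is false here, missing the error */
        ret = copy_string_nofault(buf, NULL, sizeof(buf));
        printf("fault:   ret=%ld ok=%d\n", ret, ret >= 0);

        /* success: "ret < 0" is the check that distinguishes the cases */
        ret = copy_string_nofault(buf, "hello", sizeof(buf));
        printf("success: ret=%ld ok=%d\n", ret, ret >= 0);
        return 0;
}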
diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
index 2ef2e1b80091..2f844c279a3e 100644
--- a/kernel/vhost_task.c
+++ b/kernel/vhost_task.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(vhost_task_stop);
* @arg: data to be passed to fn and handled_kill
* @name: the thread's name
*
- * This returns a specialized task for use by the vhost layer or NULL on
+ * This returns a specialized task for use by the vhost layer or ERR_PTR() on
* failure. The returned task is inactive, and the caller must fire it up
* through vhost_task_start().
*/
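The doc fix above reflects the kernel's ERR_PTR() convention: failures come back as small negative errnos encoded in the pointer itself, so callers must test with IS_ERR() rather than comparing against NULL. A userspace sketch of the encoding, simplified and assuming the usual 4095-errno window.

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
/* error pointers live in the top MAX_ERRNO bytes of the address space */
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_task(int fail)
{
        static int task = 42;

        return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)&task;
}

int main(void)
{
        void *t = create_task(1);

        if (IS_ERR(t))
                printf("failed: %ld\n", PTR_ERR(t));    /* not NULL! */
        return 0;
}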
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9fe4d8dfe578..f9051ab610d5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -3290,7 +3290,7 @@ config GCD_KUNIT_TEST
config PRIME_NUMBERS_KUNIT_TEST
tristate "Prime number generator test" if !KUNIT_ALL_TESTS
depends on KUNIT
- select PRIME_NUMBERS
+ depends on PRIME_NUMBERS
default KUNIT_ALL_TESTS
help
This option enables the KUnit test suite for the {is,next}_prime_number
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 4216b3a4ff21..f6ea0c5b5da3 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -118,7 +118,6 @@ config UBSAN_UNREACHABLE
config UBSAN_INTEGER_WRAP
bool "Perform checking for integer arithmetic wrap-around"
- default UBSAN
depends on !COMPILE_TEST
depends on $(cc-option,-fsanitize-undefined-ignore-overflow-pattern=all)
depends on $(cc-option,-fsanitize=signed-integer-overflow)
diff --git a/lib/string.c b/lib/string.c
index eb4486ed40d2..b632c71df1a5 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -119,6 +119,7 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
return -E2BIG;
+#ifndef CONFIG_DCACHE_WORD_ACCESS
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/*
* If src is unaligned, don't cross a page boundary,
@@ -134,11 +135,13 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
if (((long) dest | (long) src) & (sizeof(long) - 1))
max = 0;
#endif
+#endif
/*
- * read_word_at_a_time() below may read uninitialized bytes after the
- * trailing zero and use them in comparisons. Disable this optimization
- * under KMSAN to prevent false positive reports.
+ * load_unaligned_zeropad() or read_word_at_a_time() below may read
+ * uninitialized bytes after the trailing zero and use them in
+ * comparisons. Disable this optimization under KMSAN to prevent
+ * false positive reports.
*/
if (IS_ENABLED(CONFIG_KMSAN))
max = 0;
@@ -146,7 +149,11 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count)
while (max >= sizeof(unsigned long)) {
unsigned long c, data;
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+ c = load_unaligned_zeropad(src+res);
+#else
c = read_word_at_a_time(src+res);
+#endif
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
data = create_zero_mask(data);
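Both load_unaligned_zeropad() and read_word_at_a_time() feed the classic word-at-a-time zero-byte search used here. A standalone sketch of the underlying bit trick; it assumes little-endian and a GCC-style builtin, and has_zero_byte() is a simplified stand-in for the kernel's has_zero()/create_zero_mask() machinery.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL
#define HIGHS 0x8080808080808080ULL

/* nonzero iff some byte of x is zero: the (x - 0x01..) & ~x & 0x80.. trick */
static uint64_t has_zero_byte(uint64_t x)
{
        return (x - ONES) & ~x & HIGHS;
}

int main(void)
{
        const char s[] = "kernel\0junk";        /* bytes past the NUL are noise */
        uint64_t word, mask;

        memcpy(&word, s, sizeof(word));         /* one 8-byte "word" read */
        mask = has_zero_byte(word);
        if (mask) {
                /* index of the first zero byte, little-endian layout */
                int idx = __builtin_ctzll(mask) / 8;
                printf("terminator at byte %d\n", idx);  /* prints 6 */
        }
        return 0;
}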
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 8772e5edaa4f..a4b6f52b9c57 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -77,18 +77,22 @@ static void test_ubsan_shift_out_of_bounds(void)
static void test_ubsan_out_of_bounds(void)
{
- volatile int i = 4, j = 5, k = -1;
- volatile char above[4] = { }; /* Protect surrounding memory. */
- volatile int arr[4];
- volatile char below[4] = { }; /* Protect surrounding memory. */
+ int i = 4, j = 4, k = -1;
+ volatile struct {
+ char above[4]; /* Protect surrounding memory. */
+ int arr[4];
+ char below[4]; /* Protect surrounding memory. */
+ } data;
- above[0] = below[0];
+ OPTIMIZER_HIDE_VAR(i);
+ OPTIMIZER_HIDE_VAR(j);
+ OPTIMIZER_HIDE_VAR(k);
UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above");
- arr[j] = i;
+ data.arr[j] = i;
UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below");
- arr[k] = i;
+ data.arr[k] = i;
}
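The rewritten test leans on OPTIMIZER_HIDE_VAR() so the compiler cannot constant-fold the out-of-bounds index away before UBSAN instruments the access. In the kernel that macro is essentially an empty asm that makes the value opaque; a minimal sketch of the idea, assuming GCC/Clang inline asm, with HIDE() as a local stand-in.

#include <stdio.h>

/* empty asm with a "+r" operand: the optimizer must treat var as unknown */
#define HIDE(var) __asm__ volatile("" : "+r"(var))

int main(void)
{
        int arr[4] = { 0 };
        int j = 3;

        HIDE(j);                /* compiler can no longer prove j == 3 */
        arr[j] = 42;            /* the indexed store must be emitted as written */
        printf("%d\n", arr[3]);
        return 0;
}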
enum ubsan_test_enum {
diff --git a/mm/gup.c b/mm/gup.c
index 92351e2fa876..84461d384ae2 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2207,8 +2207,8 @@ size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
} while (start != end);
mmap_read_unlock(mm);
- if (size > (unsigned long)uaddr - start)
- return size - ((unsigned long)uaddr - start);
+ if (size > start - (unsigned long)uaddr)
+ return size - (start - (unsigned long)uaddr);
return 0;
}
EXPORT_SYMBOL(fault_in_safe_writeable);
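The fault_in_safe_writeable() fix swaps the operands: `start` has advanced past `uaddr`, so the number of bytes faulted in is `start - uaddr`, and the function's return value is whatever remains. Worked through with illustrative numbers:

#include <stdio.h>

int main(void)
{
        unsigned long uaddr = 0x1000, size = 0x3000;
        unsigned long start = 0x2000;   /* page loop stopped here */

        /* faulted-in bytes grow from uaddr toward start */
        unsigned long done = start - uaddr;                    /* 0x1000 */
        unsigned long left = size > done ? size - done : 0;    /* 0x2000 */

        printf("faulted in %#lx of %#lx, %#lx left\n", done, size, left);
        return 0;
}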
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 9a99dfa3c495..27245e86df25 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -238,11 +238,11 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
* struct page, the special metadata (e.g. page->flags or page->mapping)
* cannot copy to the tail struct page structs. The invalid value will be
* checked in the free_tail_page_prepare(). In order to avoid the message
- * of "corrupted mapping in tail page". We need to reset at least 3 (one
- * head struct page struct and two tail struct page structs) struct page
+ * of "corrupted mapping in tail page". We need to reset at least 4 (one
+ * head struct page struct and three tail struct page structs) struct page
* structs.
*/
-#define NR_RESET_STRUCT_PAGE 3
+#define NR_RESET_STRUCT_PAGE 4
static inline void reset_struct_pages(struct page *start)
{
diff --git a/mm/internal.h b/mm/internal.h
index 50c2f590b2d0..e9695baa5922 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1595,6 +1595,7 @@ unsigned long move_page_tables(struct pagetable_move_control *pmc);
#ifdef CONFIG_UNACCEPTED_MEMORY
void accept_page(struct page *page);
+void unaccepted_cleanup_work(struct work_struct *work);
#else /* CONFIG_UNACCEPTED_MEMORY */
static inline void accept_page(struct page *page)
{
diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index f24e3bef72a4..5f922dd38ffa 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -1567,6 +1567,7 @@ static void kasan_memcmp(struct kunit *test)
static void kasan_strings(struct kunit *test)
{
char *ptr;
+ char *src;
size_t size = 24;
/*
@@ -1578,6 +1579,25 @@ static void kasan_strings(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ src = kmalloc(KASAN_GRANULE_SIZE, GFP_KERNEL | __GFP_ZERO);
+ strscpy(src, "f0cacc1a0000000", KASAN_GRANULE_SIZE);
+
+ /*
+ * Make sure that strscpy() does not trigger KASAN if it overreads into
+ * poisoned memory.
+ *
+ * The expected size does not include the terminator '\0',
+ * so it is (KASAN_GRANULE_SIZE - 2) ==
+ * KASAN_GRANULE_SIZE - ("initial removed character" + "\0").
+ */
+ KUNIT_EXPECT_EQ(test, KASAN_GRANULE_SIZE - 2,
+ strscpy(ptr, src + 1, KASAN_GRANULE_SIZE));
+
+ /* strscpy should fail if the first byte is unreadable. */
+ KUNIT_EXPECT_KASAN_FAIL(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,
+ KASAN_GRANULE_SIZE));
+
+ kfree(src);
kfree(ptr);
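The expected return value in the new assertion follows strscpy() semantics: the copied length, excluding the NUL terminator. A toy re-implementation to check the arithmetic; toy_strscpy() is invented, and a 16-byte granule is assumed as in the test.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* returns chars copied (excluding NUL), or -E2BIG on truncation */
static long toy_strscpy(char *dst, const char *src, size_t size)
{
        size_t len = strnlen(src, size);

        if (len == size) {
                if (size) {
                        memcpy(dst, src, size - 1);
                        dst[size - 1] = '\0';
                }
                return -E2BIG;
        }
        memcpy(dst, src, len + 1);
        return len;
}

int main(void)
{
        char src[16] = "f0cacc1a0000000";       /* 15 chars + NUL fill 16 bytes */
        char dst[16];

        /* copying from src + 1 drops one char: 16 - 2 = 14 is returned */
        printf("%ld\n", toy_strscpy(dst, src + 1, sizeof(dst)));
        return 0;
}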
/*
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 8660908850dc..4a9cf27a70af 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -620,7 +620,7 @@ void memcg1_swapout(struct folio *folio, swp_entry_t entry)
mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
- swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
+ swap_cgroup_record(folio, mem_cgroup_id(swap_memcg), entry);
folio_unqueue_deferred_split(folio);
folio->memcg_data = 0;
diff --git a/mm/memory.c b/mm/memory.c
index 44481fe7c629..ba3ea0a82f7f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3734,8 +3734,6 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
return false;
VM_WARN_ON_ONCE(folio_test_ksm(folio));
- VM_WARN_ON_ONCE(folio_mapcount(folio) > folio_nr_pages(folio));
- VM_WARN_ON_ONCE(folio_entire_mapcount(folio));
if (unlikely(folio_test_swapcache(folio))) {
/*
@@ -3760,6 +3758,8 @@ static bool __wp_can_reuse_large_anon_folio(struct folio *folio,
if (folio_large_mapcount(folio) != folio_ref_count(folio))
goto unlock;
+ VM_WARN_ON_ONCE_FOLIO(folio_large_mapcount(folio) > folio_nr_pages(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_entire_mapcount(folio), folio);
VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != vma->vm_mm->mm_id &&
folio_mm_id(folio, 1) != vma->vm_mm->mm_id);
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 84f14fa12d0d..9659689b8ace 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1441,6 +1441,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
#ifdef CONFIG_UNACCEPTED_MEMORY
INIT_LIST_HEAD(&zone->unaccepted_pages);
+ INIT_WORK(&zone->unaccepted_cleanup, unaccepted_cleanup_work);
#endif
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1715e34b91af..5669baf2a6fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3470,18 +3470,6 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
return false;
}
-bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int highest_zoneidx)
-{
- long free_pages = zone_page_state(z, NR_FREE_PAGES);
-
- if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
- free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
-
- return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
- free_pages);
-}
-
#ifdef CONFIG_NUMA
int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
@@ -7191,6 +7179,11 @@ static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);
static bool lazy_accept = true;
+void unaccepted_cleanup_work(struct work_struct *work)
+{
+ static_branch_dec(&zones_with_unaccepted_pages);
+}
+
static int __init accept_memory_parse(char *p)
{
if (!strcmp(p, "lazy")) {
@@ -7229,8 +7222,27 @@ static void __accept_page(struct zone *zone, unsigned long *flags,
__free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
- if (last)
- static_branch_dec(&zones_with_unaccepted_pages);
+ if (last) {
+ /*
+ * There are two corner cases:
+ *
+ * - If allocation occurs during the CPU bring up,
+ * static_branch_dec() cannot be used directly as
+ * it causes a deadlock on cpu_hotplug_lock.
+ *
+ * Instead, use schedule_work() to prevent deadlock.
+ *
+ * - If allocation occurs before workqueues are initialized,
+ * static_branch_dec() should be called directly.
+ *
+ * Workqueues are initialized before CPU bring up, so this
+ * will not conflict with the first scenario.
+ */
+ if (system_wq)
+ schedule_work(&zone->unaccepted_cleanup);
+ else
+ unaccepted_cleanup_work(&zone->unaccepted_cleanup);
+ }
}
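The corner-case handling above boils down to: defer the cleanup when the worker infrastructure exists, run it inline when it does not yet. A rough userspace analogue with pthreads; worker_ready and finish_last_range() are invented for the demo, and the join exists only to keep the output deterministic, whereas schedule_work() is fire-and-forget.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool worker_ready;       /* stands in for the system_wq check */

static void *cleanup_work(void *arg)
{
        puts("cleanup ran");    /* the deferred action */
        return NULL;
}

static void finish_last_range(void)
{
        if (worker_ready) {
                /* like schedule_work(): run the cleanup in another context */
                pthread_t t;

                pthread_create(&t, NULL, cleanup_work, NULL);
                pthread_join(t, NULL);
        } else {
                /* "early boot": no worker yet, call the function directly */
                cleanup_work(NULL);
        }
}

int main(void)
{
        finish_last_range();            /* inline path */
        worker_ready = true;
        finish_last_range();            /* deferred path */
        return 0;
}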
void accept_page(struct page *page)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b620d74b0f66..3783e45bfc92 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6736,6 +6736,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
* meet watermarks.
*/
for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
+ enum zone_stat_item item;
unsigned long free_pages;
if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
@@ -6746,11 +6747,33 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
/*
* In defrag_mode, watermarks must be met in whole
* blocks to avoid polluting allocator fallbacks.
+ *
+ * However, kswapd usually cannot accomplish this on
+ * its own and needs kcompactd support. Once it's
+ * reclaimed a compaction gap, and kswapd_shrink_node
+ * has dropped order, simply ensure there are enough
+ * base pages for compaction, wake kcompactd & sleep.
*/
- if (defrag_mode)
- free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS);
+ if (defrag_mode && order)
+ item = NR_FREE_PAGES_BLOCKS;
else
- free_pages = zone_page_state(zone, NR_FREE_PAGES);
+ item = NR_FREE_PAGES;
+
+ /*
+ * When there is a high number of CPUs in the system,
+ * the cumulative error from the vmstat per-cpu cache
+ * can blur the line between the watermarks. In that
+ * case, be safe and get an accurate snapshot.
+ *
+ * TODO: NR_FREE_PAGES_BLOCKS moves in steps of
+ * pageblock_nr_pages, while the vmstat pcp threshold
+ * is limited to 125. On many configurations that
+ * counter won't actually be per-cpu cached. But keep
+ * things simple for now; revisit when somebody cares.
+ */
+ free_pages = zone_page_state(zone, item);
+ if (zone->percpu_drift_mark && free_pages < zone->percpu_drift_mark)
+ free_pages = zone_page_state_snapshot(zone, item);
if (__zone_watermark_ok(zone, order, mark, highest_zoneidx,
0, free_pages))
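The snapshot logic mirrors the deleted zone_watermark_ok_safe(): read the cheap, possibly stale counter first, and only fold in the per-CPU deltas when the value is close enough to the threshold to be ambiguous. A toy model of that two-tier read; all values are illustrative.

#include <stdio.h>

#define NCPU 4

/* global counter plus per-cpu deltas, like vmstat's cached counters */
static long global_count = 10000;
static long pcp_delta[NCPU] = { 40, -25, 60, -10 };

static long read_fast(void)            /* cheap, possibly stale */
{
        return global_count;
}

static long read_snapshot(void)        /* accurate: fold in per-cpu deltas */
{
        long v = global_count;

        for (int cpu = 0; cpu < NCPU; cpu++)
                v += pcp_delta[cpu];
        return v;
}

int main(void)
{
        long drift_mark = 10050;        /* illustrative threshold */
        long free_pages = read_fast();

        /* only pay for the accurate read when the cheap one is ambiguous */
        if (free_pages < drift_mark)
                free_pages = read_snapshot();
        printf("free_pages = %ld\n", free_pages);
        return 0;
}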
diff --git a/rust/Makefile b/rust/Makefile
index fa0eea8a9eca..3aca903a7d08 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -368,7 +368,7 @@ $(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ;
$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers/helpers.c FORCE
$(call if_changed_dep,bindgen)
-rust_exports = $(NM) -p --defined-only $(1) | awk '$$2~/(T|R|D|B)/ && $$3!~/__cfi/ && $$3!~/__odr_asan/ { printf $(2),$$3 }'
+rust_exports = $(NM) -p --defined-only $(1) | awk '$$2~/(T|R|D|B)/ && $$3!~/__(pfx|cfi|odr_asan)/ { printf $(2),$$3 }'
quiet_cmd_exports = EXPORTS $@
cmd_exports = \
diff --git a/rust/helpers/dma.c b/rust/helpers/dma.c
new file mode 100644
index 000000000000..df8b8a77355a
--- /dev/null
+++ b/rust/helpers/dma.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-mapping.h>
+
+void *rust_helper_dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ unsigned long attrs)
+{
+ return dma_alloc_attrs(dev, size, dma_handle, flag, attrs);
+}
+
+void rust_helper_dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, attrs);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index e1c21eba9b15..1e7c84df7252 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -14,6 +14,7 @@
#include "cpumask.c"
#include "cred.c"
#include "device.c"
+#include "dma.c"
#include "err.c"
#include "fs.c"
#include "io.c"
diff --git a/rust/helpers/io.c b/rust/helpers/io.c
index 4c2401ccd720..15ea187c5466 100644
--- a/rust/helpers/io.c
+++ b/rust/helpers/io.c
@@ -7,94 +7,94 @@ void __iomem *rust_helper_ioremap(phys_addr_t offset, size_t size)
return ioremap(offset, size);
}
-void rust_helper_iounmap(volatile void __iomem *addr)
+void rust_helper_iounmap(void __iomem *addr)
{
iounmap(addr);
}
-u8 rust_helper_readb(const volatile void __iomem *addr)
+u8 rust_helper_readb(const void __iomem *addr)
{
return readb(addr);
}
-u16 rust_helper_readw(const volatile void __iomem *addr)
+u16 rust_helper_readw(const void __iomem *addr)
{
return readw(addr);
}
-u32 rust_helper_readl(const volatile void __iomem *addr)
+u32 rust_helper_readl(const void __iomem *addr)
{
return readl(addr);
}
#ifdef CONFIG_64BIT
-u64 rust_helper_readq(const volatile void __iomem *addr)
+u64 rust_helper_readq(const void __iomem *addr)
{
return readq(addr);
}
#endif
-void rust_helper_writeb(u8 value, volatile void __iomem *addr)
+void rust_helper_writeb(u8 value, void __iomem *addr)
{
writeb(value, addr);
}
-void rust_helper_writew(u16 value, volatile void __iomem *addr)
+void rust_helper_writew(u16 value, void __iomem *addr)
{
writew(value, addr);
}
-void rust_helper_writel(u32 value, volatile void __iomem *addr)
+void rust_helper_writel(u32 value, void __iomem *addr)
{
writel(value, addr);
}
#ifdef CONFIG_64BIT
-void rust_helper_writeq(u64 value, volatile void __iomem *addr)
+void rust_helper_writeq(u64 value, void __iomem *addr)
{
writeq(value, addr);
}
#endif
-u8 rust_helper_readb_relaxed(const volatile void __iomem *addr)
+u8 rust_helper_readb_relaxed(const void __iomem *addr)
{
return readb_relaxed(addr);
}
-u16 rust_helper_readw_relaxed(const volatile void __iomem *addr)
+u16 rust_helper_readw_relaxed(const void __iomem *addr)
{
return readw_relaxed(addr);
}
-u32 rust_helper_readl_relaxed(const volatile void __iomem *addr)
+u32 rust_helper_readl_relaxed(const void __iomem *addr)
{
return readl_relaxed(addr);
}
#ifdef CONFIG_64BIT
-u64 rust_helper_readq_relaxed(const volatile void __iomem *addr)
+u64 rust_helper_readq_relaxed(const void __iomem *addr)
{
return readq_relaxed(addr);
}
#endif
-void rust_helper_writeb_relaxed(u8 value, volatile void __iomem *addr)
+void rust_helper_writeb_relaxed(u8 value, void __iomem *addr)
{
writeb_relaxed(value, addr);
}
-void rust_helper_writew_relaxed(u16 value, volatile void __iomem *addr)
+void rust_helper_writew_relaxed(u16 value, void __iomem *addr)
{
writew_relaxed(value, addr);
}
-void rust_helper_writel_relaxed(u32 value, volatile void __iomem *addr)
+void rust_helper_writel_relaxed(u32 value, void __iomem *addr)
{
writel_relaxed(value, addr);
}
#ifdef CONFIG_64BIT
-void rust_helper_writeq_relaxed(u64 value, volatile void __iomem *addr)
+void rust_helper_writeq_relaxed(u64 value, void __iomem *addr)
{
writeq_relaxed(value, addr);
}
diff --git a/rust/pin-init/examples/pthread_mutex.rs b/rust/pin-init/examples/pthread_mutex.rs
index 9164298c44c0..5ac22f1880d2 100644
--- a/rust/pin-init/examples/pthread_mutex.rs
+++ b/rust/pin-init/examples/pthread_mutex.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
-// inspired by https://github.com/nbdd0121/pin-init/blob/trunk/examples/pthread_mutex.rs
+// inspired by <https://github.com/nbdd0121/pin-init/blob/trunk/examples/pthread_mutex.rs>
#![allow(clippy::undocumented_unsafe_blocks)]
#![cfg_attr(feature = "alloc", feature(allocator_api))]
#[cfg(not(windows))]
diff --git a/rust/pin-init/src/alloc.rs b/rust/pin-init/src/alloc.rs
index e16baa3b434e..5017f57442d8 100644
--- a/rust/pin-init/src/alloc.rs
+++ b/rust/pin-init/src/alloc.rs
@@ -17,11 +17,9 @@ use crate::{
pub extern crate alloc;
-// SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
-//
-// In this case we are allowed to use `T: ?Sized`, since all zeros is the `None` variant and there
-// is no problem with a VTABLE pointer being null.
-unsafe impl<T: ?Sized> ZeroableOption for Box<T> {}
+// SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee:
+// <https://doc.rust-lang.org/stable/std/option/index.html#representation>).
+unsafe impl<T> ZeroableOption for Box<T> {}
/// Smart pointer that can initialize memory in-place.
pub trait InPlaceInit<T>: Sized {
diff --git a/rust/pin-init/src/lib.rs b/rust/pin-init/src/lib.rs
index 05c44514765e..0806c689f693 100644
--- a/rust/pin-init/src/lib.rs
+++ b/rust/pin-init/src/lib.rs
@@ -1447,7 +1447,7 @@ impl_zeroable! {
{<T: ?Sized + Zeroable>} UnsafeCell<T>,
// SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee:
- // https://doc.rust-lang.org/stable/std/option/index.html#representation).
+ // <https://doc.rust-lang.org/stable/std/option/index.html#representation>).
Option<NonZeroU8>, Option<NonZeroU16>, Option<NonZeroU32>, Option<NonZeroU64>,
Option<NonZeroU128>, Option<NonZeroUsize>,
Option<NonZeroI8>, Option<NonZeroI16>, Option<NonZeroI32>, Option<NonZeroI64>,
diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
index 8956587b8547..f4fcc1eaaeae 100644
--- a/scripts/Makefile.compiler
+++ b/scripts/Makefile.compiler
@@ -79,8 +79,8 @@ ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
# Usage: MY_RUSTFLAGS += $(call __rustc-option,$(RUSTC),$(MY_RUSTFLAGS),-Cinstrument-coverage,-Zinstrument-coverage)
# TODO: remove RUSTC_BOOTSTRAP=1 when we raise the minimum GNU Make version to 4.4
__rustc-option = $(call try-run,\
- echo '#![allow(missing_docs)]#![feature(no_core)]#![no_core]' | RUSTC_BOOTSTRAP=1\
- $(1) --sysroot=/dev/null $(filter-out --sysroot=/dev/null,$(2)) $(3)\
+ echo '$(pound)![allow(missing_docs)]$(pound)![feature(no_core)]$(pound)![no_core]' | RUSTC_BOOTSTRAP=1\
+ $(1) --sysroot=/dev/null $(filter-out --sysroot=/dev/null --target=%,$(2)) $(3)\
--crate-type=rlib --out-dir=$(TMPOUT) --emit=obj=- - >/dev/null,$(3),$(4))
# rustc-option
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index d75897559d18..2d6e59561c9d 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -15,7 +15,7 @@ KBUILD_CFLAGS += -Werror=return-type
KBUILD_CFLAGS += -Werror=strict-prototypes
KBUILD_CFLAGS += -Wno-format-security
KBUILD_CFLAGS += -Wno-trigraphs
-KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+KBUILD_CFLAGS += $(call cc-disable-warning, frame-address)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += -Wmissing-declarations
KBUILD_CFLAGS += -Wmissing-prototypes
diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
index cd41bc906fbd..fe663dd0c43b 100755
--- a/scripts/generate_rust_analyzer.py
+++ b/scripts/generate_rust_analyzer.py
@@ -112,6 +112,12 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
cfg=["kernel"],
)
+ append_crate(
+ "ffi",
+ srctree / "rust" / "ffi.rs",
+ ["core", "compiler_builtins"],
+ )
+
def append_crate_with_generated(
display_name,
deps,
@@ -131,9 +137,9 @@ def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
"exclude_dirs": [],
}
- append_crate_with_generated("bindings", ["core"])
- append_crate_with_generated("uapi", ["core"])
- append_crate_with_generated("kernel", ["core", "macros", "build_error", "pin_init", "bindings", "uapi"])
+ append_crate_with_generated("bindings", ["core", "ffi"])
+ append_crate_with_generated("uapi", ["core", "ffi"])
+ append_crate_with_generated("kernel", ["core", "macros", "build_error", "pin_init", "ffi", "bindings", "uapi"])
def is_root_crate(build_file, target):
try:
diff --git a/scripts/genksyms/keywords.c b/scripts/genksyms/keywords.c
index b85e0979a00c..ee1499d27061 100644
--- a/scripts/genksyms/keywords.c
+++ b/scripts/genksyms/keywords.c
@@ -17,6 +17,8 @@ static struct resword {
{ "__signed__", SIGNED_KEYW },
{ "__typeof", TYPEOF_KEYW },
{ "__typeof__", TYPEOF_KEYW },
+ { "__typeof_unqual", TYPEOF_KEYW },
+ { "__typeof_unqual__", TYPEOF_KEYW },
{ "__volatile", VOLATILE_KEYW },
{ "__volatile__", VOLATILE_KEYW },
{ "__builtin_va_list", VA_LIST_KEYW },
@@ -40,6 +42,10 @@ static struct resword {
// KAO. },
// { "attribute", ATTRIBUTE_KEYW },
+ // X86 named address space qualifiers
+ { "__seg_gs", X86_SEG_KEYW },
+ { "__seg_fs", X86_SEG_KEYW },
+
{ "auto", AUTO_KEYW },
{ "char", CHAR_KEYW },
{ "const", CONST_KEYW },
@@ -57,6 +63,7 @@ static struct resword {
{ "struct", STRUCT_KEYW },
{ "typedef", TYPEDEF_KEYW },
{ "typeof", TYPEOF_KEYW },
+ { "typeof_unqual", TYPEOF_KEYW },
{ "union", UNION_KEYW },
{ "unsigned", UNSIGNED_KEYW },
{ "void", VOID_KEYW },
diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
index ee600a804fa1..efdcf07c4eb6 100644
--- a/scripts/genksyms/parse.y
+++ b/scripts/genksyms/parse.y
@@ -91,6 +91,8 @@ static void record_compound(struct string_list **keyw,
%token TYPEOF_KEYW
%token VA_LIST_KEYW
+%token X86_SEG_KEYW
+
%token EXPORT_SYMBOL_KEYW
%token ASM_PHRASE
@@ -292,7 +294,8 @@ type_qualifier_seq:
;
type_qualifier:
- CONST_KEYW | VOLATILE_KEYW
+ X86_SEG_KEYW
+ | CONST_KEYW | VOLATILE_KEYW
| RESTRICT_KEYW
{ /* restrict has no effect in prototypes so ignore it */
remove_node($1);
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index c17366ce8224..3fe9d7b945c4 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -344,7 +344,7 @@ config CC_HAS_RANDSTRUCT
choice
prompt "Randomize layout of sensitive kernel structures"
- default RANDSTRUCT_FULL if COMPILE_TEST && (GCC_PLUGINS || CC_HAS_RANDSTRUCT)
+ default RANDSTRUCT_FULL if COMPILE_TEST && CC_HAS_RANDSTRUCT
default RANDSTRUCT_NONE
help
If you enable this, the layouts of structures that are entirely
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index f3e7ac513db3..f99ab1a3b0f0 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -245,7 +245,9 @@ static int process_measurement(struct file *file, const struct cred *cred,
&allowed_algos);
violation_check = ((func == FILE_CHECK || func == MMAP_CHECK ||
func == MMAP_CHECK_REQPROT) &&
- (ima_policy_flag & IMA_MEASURE));
+ (ima_policy_flag & IMA_MEASURE) &&
+ ((action & IMA_MEASURE) ||
+ (file->f_mode & FMODE_WRITE)));
if (!action && !violation_check)
return 0;
diff --git a/security/landlock/domain.c b/security/landlock/domain.c
index bae2e9909013..a647b68e8d06 100644
--- a/security/landlock/domain.c
+++ b/security/landlock/domain.c
@@ -16,6 +16,7 @@
#include <linux/path.h>
#include <linux/pid.h>
#include <linux/sched.h>
+#include <linux/signal.h>
#include <linux/uidgid.h>
#include "access.h"
@@ -99,8 +100,7 @@ static struct landlock_details *get_current_details(void)
return ERR_PTR(-ENOMEM);
memcpy(details->exe_path, path_str, path_size);
- WARN_ON_ONCE(current_cred() != current_real_cred());
- details->pid = get_pid(task_pid(current));
+ details->pid = get_pid(task_tgid(current));
details->uid = from_kuid(&init_user_ns, current_uid());
get_task_comm(details->comm, current);
return details;
diff --git a/security/landlock/domain.h b/security/landlock/domain.h
index ed0d348e214c..7fb70b25f85a 100644
--- a/security/landlock/domain.h
+++ b/security/landlock/domain.h
@@ -130,7 +130,7 @@ int landlock_init_hierarchy_log(struct landlock_hierarchy *const hierarchy);
static inline void
landlock_free_hierarchy_details(struct landlock_hierarchy *const hierarchy)
{
- if (WARN_ON_ONCE(!hierarchy || !hierarchy->details))
+ if (!hierarchy || !hierarchy->details)
return;
put_pid(hierarchy->details->pid);
diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
index 54a9f29e6ebb..b9561e3417ae 100644
--- a/security/landlock/syscalls.c
+++ b/security/landlock/syscalls.c
@@ -169,20 +169,16 @@ const int landlock_abi_version = 7;
* the new ruleset.
* @size: Size of the pointed &struct landlock_ruleset_attr (needed for
* backward and forward compatibility).
- * @flags: Supported value:
+ * @flags: Supported values:
+ *
* - %LANDLOCK_CREATE_RULESET_VERSION
* - %LANDLOCK_CREATE_RULESET_ERRATA
*
* This system call enables to create a new Landlock ruleset, and returns the
* related file descriptor on success.
*
- * If @flags is %LANDLOCK_CREATE_RULESET_VERSION and @attr is NULL and @size is
- * 0, then the returned value is the highest supported Landlock ABI version
- * (starting at 1).
- *
- * If @flags is %LANDLOCK_CREATE_RULESET_ERRATA and @attr is NULL and @size is
- * 0, then the returned value is a bitmask of fixed issues for the current
- * Landlock ABI version.
+ * If %LANDLOCK_CREATE_RULESET_VERSION or %LANDLOCK_CREATE_RULESET_ERRATA is
+ * set, then @attr must be NULL and @size must be 0.
*
* Possible returned errors are:
*
@@ -191,6 +187,9 @@ const int landlock_abi_version = 7;
* - %E2BIG: @attr or @size inconsistencies;
* - %EFAULT: @attr or @size inconsistencies;
* - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs.
+ *
+ * .. kernel-doc:: include/uapi/linux/landlock.h
+ * :identifiers: landlock_create_ruleset_flags
*/
SYSCALL_DEFINE3(landlock_create_ruleset,
const struct landlock_ruleset_attr __user *const, attr,
@@ -452,18 +451,15 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
* @ruleset_fd: File descriptor tied to the ruleset to merge with the target.
* @flags: Supported values:
*
- * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF
- * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON
- * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
+ * - %LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF
+ * - %LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON
+ * - %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
*
* This system call enables to enforce a Landlock ruleset on the current
* thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its
* namespace or is running with no_new_privs. This avoids scenarios where
* unprivileged tasks can affect the behavior of privileged children.
*
- * It is allowed to only pass the %LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF
- * flag with a @ruleset_fd value of -1.
- *
* Possible returned errors are:
*
* - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
@@ -475,6 +471,9 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
* %CAP_SYS_ADMIN in its namespace.
* - %E2BIG: The maximum number of stacked rulesets is reached for the current
* thread.
+ *
+ * .. kernel-doc:: include/uapi/linux/landlock.h
+ * :identifiers: landlock_restrict_self_flags
*/
SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
flags)
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index b649049b6a11..3a411064fa34 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -225,6 +225,7 @@ static bool is_rust_noreturn(const struct symbol *func)
str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
+ str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") ||
str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
strstr(func->name, "_4core9panicking13assert_failed") ||
strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
index 2c720e3ecad5..fdc7170639e6 100644
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -950,5 +950,5 @@ SCX_OPS_DEFINE(flatcg_ops,
.cgroup_move = (void *)fcg_cgroup_move,
.init = (void *)fcg_init,
.exit = (void *)fcg_exit,
- .flags = SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,
+ .flags = SCX_OPS_ENQ_EXITING,
.name = "flatcg");
diff --git a/tools/testing/kunit/qemu_configs/sh.py b/tools/testing/kunit/qemu_configs/sh.py
index 78a474a5b95f..f00cb89fdef6 100644
--- a/tools/testing/kunit/qemu_configs/sh.py
+++ b/tools/testing/kunit/qemu_configs/sh.py
@@ -7,7 +7,9 @@ CONFIG_CPU_SUBTYPE_SH7751R=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_SH_RTS7751R2D=y
CONFIG_RTS7751R2D_PLUS=y
-CONFIG_SERIAL_SH_SCI=y''',
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_CMDLINE_EXTEND=y
+''',
qemu_arch='sh4',
kernel_path='arch/sh/boot/zImage',
kernel_command_line='console=ttySC1',
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/dynevent_limitations.tc b/tools/testing/selftests/ftrace/test.d/dynevent/dynevent_limitations.tc
index 6b94b678741a..f656bccb1a14 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/dynevent_limitations.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/dynevent_limitations.tc
@@ -7,11 +7,32 @@
MAX_ARGS=128
EXCEED_ARGS=$((MAX_ARGS + 1))
+# bash and dash evaluate variables differently.
+# dash will evaluate '\\' every time it is read whereas bash does not.
+#
+# TEST_STRING="$TEST_STRING \\$i"
+# echo $TEST_STRING
+#
+# With i=123
+# On bash, that will print "\123"
+# but on dash, that will print the escape sequence of \123 as the \ will
+# be interpreted again in the echo.
+#
+# Set a variable "bs" to save a double backslash, then echo that
+# to "ts" to see if $ts changed or not. If it changed, it's dash,
+# if not, it's bash, and then bs can equal a single backslash.
+bs='\\'
+ts=`echo $bs`
+if [ "$ts" = '\\' ]; then
+ # this is bash
+ bs='\'
+fi
+
check_max_args() { # event_header
TEST_STRING=$1
# Acceptable
for i in `seq 1 $MAX_ARGS`; do
- TEST_STRING="$TEST_STRING \\$i"
+ TEST_STRING="$TEST_STRING $bs$i"
done
echo "$TEST_STRING" >> dynamic_events
echo > dynamic_events
diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
index 118247b8dd84..c62165fabd0c 100644
--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
@@ -80,6 +80,26 @@ if [ $misscnt -gt 0 ]; then
exit_fail
fi
+# Check strings too
+if [ -f events/syscalls/sys_enter_openat/filter ]; then
+ DIRNAME=`basename $TMPDIR`
+ echo "filename.ustring ~ \"*$DIRNAME*\"" > events/syscalls/sys_enter_openat/filter
+ echo 1 > events/syscalls/sys_enter_openat/enable
+ echo 1 > tracing_on
+ ls /bin/sh
+ nocnt=`grep openat trace | wc -l`
+ ls $TMPDIR
+ echo 0 > tracing_on
+ hitcnt=`grep openat trace | wc -l`;
+ echo 0 > events/syscalls/sys_enter_openat/enable
+ if [ $nocnt -gt 0 ]; then
+ exit_fail
+ fi
+ if [ $hitcnt -eq 0 ]; then
+ exit_fail
+ fi
+fi
+
reset_events_filter
exit 0
diff --git a/tools/testing/selftests/landlock/audit.h b/tools/testing/selftests/landlock/audit.h
index b9054086a0c9..18a6014920b5 100644
--- a/tools/testing/selftests/landlock/audit.h
+++ b/tools/testing/selftests/landlock/audit.h
@@ -300,15 +300,22 @@ out:
return err;
}
-static int __maybe_unused matches_log_domain_allocated(int audit_fd,
+static int __maybe_unused matches_log_domain_allocated(int audit_fd, pid_t pid,
__u64 *domain_id)
{
- return audit_match_record(
- audit_fd, AUDIT_LANDLOCK_DOMAIN,
- REGEX_LANDLOCK_PREFIX
- " status=allocated mode=enforcing pid=[0-9]\\+ uid=[0-9]\\+"
- " exe=\"[^\"]\\+\" comm=\".*_test\"$",
- domain_id);
+ static const char log_template[] = REGEX_LANDLOCK_PREFIX
+ " status=allocated mode=enforcing pid=%d uid=[0-9]\\+"
+ " exe=\"[^\"]\\+\" comm=\".*_test\"$";
+ char log_match[sizeof(log_template) + 10];
+ int log_match_len;
+
+ log_match_len =
+ snprintf(log_match, sizeof(log_match), log_template, pid);
+ if (log_match_len > sizeof(log_match))
+ return -E2BIG;
+
+ return audit_match_record(audit_fd, AUDIT_LANDLOCK_DOMAIN, log_match,
+ domain_id);
}
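One note on the snprintf() pattern above: per the C standard, snprintf() returns the length it would have written, and the output is truncated whenever that return is greater than or equal to the buffer size. A minimal demonstration of the conventional check:

#include <stdio.h>

int main(void)
{
        char buf[16];
        int n = snprintf(buf, sizeof(buf), "pid=%d uid=[0-9]\\+", 123456);

        /*
         * snprintf() returns the length it wanted to emit; truncation
         * happened whenever n >= sizeof(buf), not only when n exceeds it.
         */
        if (n < 0 || (size_t)n >= sizeof(buf))
                puts("pattern truncated");
        else
                printf("pattern: %s\n", buf);
        return 0;
}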
static int __maybe_unused matches_log_domain_deallocated(
diff --git a/tools/testing/selftests/landlock/audit_test.c b/tools/testing/selftests/landlock/audit_test.c
index a0643070c403..cfc571afd0eb 100644
--- a/tools/testing/selftests/landlock/audit_test.c
+++ b/tools/testing/selftests/landlock/audit_test.c
@@ -9,6 +9,7 @@
#include <errno.h>
#include <limits.h>
#include <linux/landlock.h>
+#include <pthread.h>
#include <stdlib.h>
#include <sys/mount.h>
#include <sys/prctl.h>
@@ -40,7 +41,6 @@ FIXTURE(audit)
{
struct audit_filter audit_filter;
int audit_fd;
- __u64(*domain_stack)[16];
};
FIXTURE_SETUP(audit)
@@ -60,18 +60,10 @@ FIXTURE_SETUP(audit)
TH_LOG("Failed to initialize audit: %s", error_msg);
}
clear_cap(_metadata, CAP_AUDIT_CONTROL);
-
- self->domain_stack = mmap(NULL, sizeof(*self->domain_stack),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- ASSERT_NE(MAP_FAILED, self->domain_stack);
- memset(self->domain_stack, 0, sizeof(*self->domain_stack));
}
FIXTURE_TEARDOWN(audit)
{
- EXPECT_EQ(0, munmap(self->domain_stack, sizeof(*self->domain_stack)));
-
set_cap(_metadata, CAP_AUDIT_CONTROL);
EXPECT_EQ(0, audit_cleanup(self->audit_fd, &self->audit_filter));
clear_cap(_metadata, CAP_AUDIT_CONTROL);
@@ -83,9 +75,15 @@ TEST_F(audit, layers)
.scoped = LANDLOCK_SCOPE_SIGNAL,
};
int status, ruleset_fd, i;
+ __u64(*domain_stack)[16];
__u64 prev_dom = 3;
pid_t child;
+ domain_stack = mmap(NULL, sizeof(*domain_stack), PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(MAP_FAILED, domain_stack);
+ memset(domain_stack, 0, sizeof(*domain_stack));
+
ruleset_fd =
landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
ASSERT_LE(0, ruleset_fd);
@@ -94,7 +92,7 @@ TEST_F(audit, layers)
child = fork();
ASSERT_LE(0, child);
if (child == 0) {
- for (i = 0; i < ARRAY_SIZE(*self->domain_stack); i++) {
+ for (i = 0; i < ARRAY_SIZE(*domain_stack); i++) {
__u64 denial_dom = 1;
__u64 allocated_dom = 2;
@@ -107,7 +105,8 @@ TEST_F(audit, layers)
matches_log_signal(_metadata, self->audit_fd,
getppid(), &denial_dom));
EXPECT_EQ(0, matches_log_domain_allocated(
- self->audit_fd, &allocated_dom));
+ self->audit_fd, getpid(),
+ &allocated_dom));
EXPECT_NE(denial_dom, 1);
EXPECT_NE(denial_dom, 0);
EXPECT_EQ(denial_dom, allocated_dom);
@@ -115,7 +114,7 @@ TEST_F(audit, layers)
/* Checks that the new domain is younger than the previous one. */
EXPECT_GT(allocated_dom, prev_dom);
prev_dom = allocated_dom;
- (*self->domain_stack)[i] = allocated_dom;
+ (*domain_stack)[i] = allocated_dom;
}
/* Checks that we reached the maximum number of layers. */
@@ -142,23 +141,143 @@ TEST_F(audit, layers)
/* Purges log from deallocated domains. */
EXPECT_EQ(0, setsockopt(self->audit_fd, SOL_SOCKET, SO_RCVTIMEO,
&audit_tv_dom_drop, sizeof(audit_tv_dom_drop)));
- for (i = ARRAY_SIZE(*self->domain_stack) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(*domain_stack) - 1; i >= 0; i--) {
__u64 deallocated_dom = 2;
EXPECT_EQ(0, matches_log_domain_deallocated(self->audit_fd, 1,
&deallocated_dom));
- EXPECT_EQ((*self->domain_stack)[i], deallocated_dom)
+ EXPECT_EQ((*domain_stack)[i], deallocated_dom)
{
TH_LOG("Failed to match domain %llx (#%d)",
- (*self->domain_stack)[i], i);
+ (*domain_stack)[i], i);
}
}
+ EXPECT_EQ(0, munmap(domain_stack, sizeof(*domain_stack)));
EXPECT_EQ(0, setsockopt(self->audit_fd, SOL_SOCKET, SO_RCVTIMEO,
&audit_tv_default, sizeof(audit_tv_default)));
-
EXPECT_EQ(0, close(ruleset_fd));
}
+struct thread_data {
+ pid_t parent_pid;
+ int ruleset_fd, pipe_child, pipe_parent;
+};
+
+static void *thread_audit_test(void *arg)
+{
+ const struct thread_data *data = (struct thread_data *)arg;
+ uintptr_t err = 0;
+ char buffer;
+
+ /* TGID and TID are different for a second thread. */
+ if (getpid() == gettid()) {
+ err = 1;
+ goto out;
+ }
+
+ if (landlock_restrict_self(data->ruleset_fd, 0)) {
+ err = 2;
+ goto out;
+ }
+
+ if (close(data->ruleset_fd)) {
+ err = 3;
+ goto out;
+ }
+
+ /* Creates a denial to get the domain ID. */
+ if (kill(data->parent_pid, 0) != -1) {
+ err = 4;
+ goto out;
+ }
+
+ if (EPERM != errno) {
+ err = 5;
+ goto out;
+ }
+
+ /* Signals the parent to read denial logs. */
+ if (write(data->pipe_child, ".", 1) != 1) {
+ err = 6;
+ goto out;
+ }
+
+ /* Waits for the parent to update audit filters. */
+ if (read(data->pipe_parent, &buffer, 1) != 1) {
+ err = 7;
+ goto out;
+ }
+
+out:
+ close(data->pipe_child);
+ close(data->pipe_parent);
+ return (void *)err;
+}
+
+/* Checks that the PID tied to a domain is not a TID but the TGID. */
+TEST_F(audit, thread)
+{
+ const struct landlock_ruleset_attr ruleset_attr = {
+ .scoped = LANDLOCK_SCOPE_SIGNAL,
+ };
+ __u64 denial_dom = 1;
+ __u64 allocated_dom = 2;
+ __u64 deallocated_dom = 3;
+ pthread_t thread;
+ int pipe_child[2], pipe_parent[2];
+ char buffer;
+ struct thread_data child_data;
+
+ child_data.parent_pid = getppid();
+ ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC));
+ child_data.pipe_child = pipe_child[1];
+ ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC));
+ child_data.pipe_parent = pipe_parent[0];
+ child_data.ruleset_fd =
+ landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), 0);
+ ASSERT_LE(0, child_data.ruleset_fd);
+
+ /* TGID and TID are the same for the initial thread. */
+ EXPECT_EQ(getpid(), gettid());
+ EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+ ASSERT_EQ(0, pthread_create(&thread, NULL, thread_audit_test,
+ &child_data));
+
+ /* Waits for the child to generate a denial. */
+ ASSERT_EQ(1, read(pipe_child[0], &buffer, 1));
+ EXPECT_EQ(0, close(pipe_child[0]));
+
+ /* Matches the signal log to get the domain ID. */
+ EXPECT_EQ(0, matches_log_signal(_metadata, self->audit_fd,
+ child_data.parent_pid, &denial_dom));
+ EXPECT_NE(denial_dom, 1);
+ EXPECT_NE(denial_dom, 0);
+
+ EXPECT_EQ(0, matches_log_domain_allocated(self->audit_fd, getpid(),
+ &allocated_dom));
+ EXPECT_EQ(denial_dom, allocated_dom);
+
+ /* Updates filter rules to match the drop record. */
+ set_cap(_metadata, CAP_AUDIT_CONTROL);
+ EXPECT_EQ(0, audit_filter_drop(self->audit_fd, AUDIT_ADD_RULE));
+ EXPECT_EQ(0, audit_filter_exe(self->audit_fd, &self->audit_filter,
+ AUDIT_DEL_RULE));
+ clear_cap(_metadata, CAP_AUDIT_CONTROL);
+
+ /* Signals the thread to exit, which will generate a domain deallocation. */
+ ASSERT_EQ(1, write(pipe_parent[1], ".", 1));
+ EXPECT_EQ(0, close(pipe_parent[1]));
+ ASSERT_EQ(0, pthread_join(thread, NULL));
+
+ EXPECT_EQ(0, setsockopt(self->audit_fd, SOL_SOCKET, SO_RCVTIMEO,
+ &audit_tv_dom_drop, sizeof(audit_tv_dom_drop)));
+ EXPECT_EQ(0, matches_log_domain_deallocated(self->audit_fd, 1,
+ &deallocated_dom));
+ EXPECT_EQ(denial_dom, deallocated_dom);
+ EXPECT_EQ(0, setsockopt(self->audit_fd, SOL_SOCKET, SO_RCVTIMEO,
+ &audit_tv_default, sizeof(audit_tv_default)));
+}
+
FIXTURE(audit_flags)
{
struct audit_filter audit_filter;
@@ -273,7 +392,8 @@ TEST_F(audit_flags, signal)
/* Checks domain information records. */
EXPECT_EQ(0, matches_log_domain_allocated(
- self->audit_fd, &allocated_dom));
+ self->audit_fd, getpid(),
+ &allocated_dom));
EXPECT_NE(*self->domain_id, 1);
EXPECT_NE(*self->domain_id, 0);
EXPECT_EQ(*self->domain_id, allocated_dom);
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index f819011a8798..73729382d40f 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -5964,7 +5964,8 @@ TEST_F(audit_layout1, refer_handled)
EXPECT_EQ(EXDEV, errno);
EXPECT_EQ(0, matches_log_fs(_metadata, self->audit_fd, "fs\\.refer",
dir_s1d1));
- EXPECT_EQ(0, matches_log_domain_allocated(self->audit_fd, NULL));
+ EXPECT_EQ(0,
+ matches_log_domain_allocated(self->audit_fd, getpid(), NULL));
EXPECT_EQ(0, matches_log_fs(_metadata, self->audit_fd, "fs\\.refer",
dir_s1d3));
diff --git a/tools/testing/selftests/lib/config b/tools/testing/selftests/lib/config
index 81a1f64a22e8..377b3699ff31 100644
--- a/tools/testing/selftests/lib/config
+++ b/tools/testing/selftests/lib/config
@@ -1,2 +1,3 @@
CONFIG_TEST_BITMAP=m
+CONFIG_PRIME_NUMBERS=m
CONFIG_TEST_BITOPS=m
diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile
index c7781efea0f3..ec4624a283bc 100644
--- a/tools/testing/selftests/ublk/Makefile
+++ b/tools/testing/selftests/ublk/Makefile
@@ -6,6 +6,9 @@ LDLIBS += -lpthread -lm -luring
TEST_PROGS := test_generic_01.sh
TEST_PROGS += test_generic_02.sh
TEST_PROGS += test_generic_03.sh
+TEST_PROGS += test_generic_04.sh
+TEST_PROGS += test_generic_05.sh
+TEST_PROGS += test_generic_06.sh
TEST_PROGS += test_null_01.sh
TEST_PROGS += test_null_02.sh
@@ -21,12 +24,16 @@ TEST_PROGS += test_stripe_04.sh
TEST_PROGS += test_stress_01.sh
TEST_PROGS += test_stress_02.sh
+TEST_PROGS += test_stress_03.sh
+TEST_PROGS += test_stress_04.sh
+TEST_PROGS += test_stress_05.sh
TEST_GEN_PROGS_EXTENDED = kublk
include ../lib.mk
-$(TEST_GEN_PROGS_EXTENDED): kublk.c null.c file_backed.c common.c stripe.c
+$(TEST_GEN_PROGS_EXTENDED): kublk.c null.c file_backed.c common.c stripe.c \
+ fault_inject.c
check:
shellcheck -x -f gcc *.sh
diff --git a/tools/testing/selftests/ublk/fault_inject.c b/tools/testing/selftests/ublk/fault_inject.c
new file mode 100644
index 000000000000..94a8e729ba4c
--- /dev/null
+++ b/tools/testing/selftests/ublk/fault_inject.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Fault injection ublk target. Hack this up however you like for
+ * testing specific behaviors of ublk_drv. Currently is a null target
+ * with a configurable delay before completing each I/O. This delay can
+ * be used to test ublk_drv's handling of I/O outstanding to the ublk
+ * server when it dies.
+ */
+
+#include "kublk.h"
+
+static int ublk_fault_inject_tgt_init(const struct dev_ctx *ctx,
+ struct ublk_dev *dev)
+{
+ const struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
+ unsigned long dev_size = 250UL << 30;
+
+ dev->tgt.dev_size = dev_size;
+ dev->tgt.params = (struct ublk_params) {
+ .types = UBLK_PARAM_TYPE_BASIC,
+ .basic = {
+ .logical_bs_shift = 9,
+ .physical_bs_shift = 12,
+ .io_opt_shift = 12,
+ .io_min_shift = 9,
+ .max_sectors = info->max_io_buf_bytes >> 9,
+ .dev_sectors = dev_size >> 9,
+ },
+ };
+
+ dev->private_data = (void *)(unsigned long)(ctx->fault_inject.delay_us * 1000);
+ return 0;
+}
+
+static int ublk_fault_inject_queue_io(struct ublk_queue *q, int tag)
+{
+ const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
+ struct io_uring_sqe *sqe;
+ struct __kernel_timespec ts = {
+ .tv_nsec = (long long)q->dev->private_data,
+ };
+
+ ublk_queue_alloc_sqes(q, &sqe, 1);
+ io_uring_prep_timeout(sqe, &ts, 1, 0);
+ sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, 1);
+
+ ublk_queued_tgt_io(q, tag, 1);
+
+ return 0;
+}
+
+static void ublk_fault_inject_tgt_io_done(struct ublk_queue *q, int tag,
+ const struct io_uring_cqe *cqe)
+{
+ const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
+
+ if (cqe->res != -ETIME)
+ ublk_err("%s: unexpected cqe res %d\n", __func__, cqe->res);
+
+ if (ublk_completed_tgt_io(q, tag))
+ ublk_complete_io(q, tag, iod->nr_sectors << 9);
+ else
+ ublk_err("%s: io not complete after 1 cqe\n", __func__);
+}
+
+static void ublk_fault_inject_cmd_line(struct dev_ctx *ctx, int argc, char *argv[])
+{
+ static const struct option longopts[] = {
+ { "delay_us", 1, NULL, 0 },
+ { 0, 0, 0, 0 }
+ };
+ int option_idx, opt;
+
+ ctx->fault_inject.delay_us = 0;
+ while ((opt = getopt_long(argc, argv, "",
+ longopts, &option_idx)) != -1) {
+ switch (opt) {
+ case 0:
+ if (!strcmp(longopts[option_idx].name, "delay_us"))
+ ctx->fault_inject.delay_us = strtoll(optarg, NULL, 10);
+ }
+ }
+}
+
+static void ublk_fault_inject_usage(const struct ublk_tgt_ops *ops)
+{
+ printf("\tfault_inject: [--delay_us us (default 0)]\n");
+}
+
+const struct ublk_tgt_ops fault_inject_tgt_ops = {
+ .name = "fault_inject",
+ .init_tgt = ublk_fault_inject_tgt_init,
+ .queue_io = ublk_fault_inject_queue_io,
+ .tgt_io_done = ublk_fault_inject_tgt_io_done,
+ .parse_cmd_line = ublk_fault_inject_cmd_line,
+ .usage = ublk_fault_inject_usage,
+};
diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c
index 91c282bc7674..759f06637146 100644
--- a/tools/testing/selftests/ublk/kublk.c
+++ b/tools/testing/selftests/ublk/kublk.c
@@ -5,22 +5,24 @@
#include "kublk.h"
+#define MAX_NR_TGT_ARG 64
+
unsigned int ublk_dbg_mask = UBLK_LOG;
static const struct ublk_tgt_ops *tgt_ops_list[] = {
&null_tgt_ops,
&loop_tgt_ops,
&stripe_tgt_ops,
+ &fault_inject_tgt_ops,
};
static const struct ublk_tgt_ops *ublk_find_tgt(const char *name)
{
- const struct ublk_tgt_ops *ops;
int i;
if (name == NULL)
return NULL;
- for (i = 0; sizeof(tgt_ops_list) / sizeof(ops); i++)
+ for (i = 0; i < ARRAY_SIZE(tgt_ops_list); i++)
if (strcmp(tgt_ops_list[i]->name, name) == 0)
return tgt_ops_list[i];
return NULL;
@@ -118,6 +120,27 @@ static int ublk_ctrl_start_dev(struct ublk_dev *dev,
return __ublk_ctrl_cmd(dev, &data);
}
+static int ublk_ctrl_start_user_recovery(struct ublk_dev *dev)
+{
+ struct ublk_ctrl_cmd_data data = {
+ .cmd_op = UBLK_U_CMD_START_USER_RECOVERY,
+ };
+
+ return __ublk_ctrl_cmd(dev, &data);
+}
+
+static int ublk_ctrl_end_user_recovery(struct ublk_dev *dev, int daemon_pid)
+{
+ struct ublk_ctrl_cmd_data data = {
+ .cmd_op = UBLK_U_CMD_END_USER_RECOVERY,
+ .flags = CTRL_CMD_HAS_DATA,
+ };
+
+ dev->dev_info.ublksrv_pid = data.data[0] = daemon_pid;
+
+ return __ublk_ctrl_cmd(dev, &data);
+}
+
static int ublk_ctrl_add_dev(struct ublk_dev *dev)
{
struct ublk_ctrl_cmd_data data = {
@@ -207,10 +230,73 @@ static const char *ublk_dev_state_desc(struct ublk_dev *dev)
};
}
+static void ublk_print_cpu_set(const cpu_set_t *set, char *buf, unsigned len)
+{
+ unsigned done = 0;
+ int i;
+
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, set))
+ done += snprintf(&buf[done], len - done, "%d ", i);
+ }
+}
+
+static void ublk_adjust_affinity(cpu_set_t *set)
+{
+ int j, updated = 0;
+
+ /*
+ * Just keep the 1st CPU now.
+ *
+ * In the future, automatic affinity selection can be tried.
+ */
+ for (j = 0; j < CPU_SETSIZE; j++) {
+ if (CPU_ISSET(j, set)) {
+ if (!updated) {
+ updated = 1;
+ continue;
+ }
+ CPU_CLR(j, set);
+ }
+ }
+}
+
+/* Caller must free the allocated buffer */
+static int ublk_ctrl_get_affinity(struct ublk_dev *ctrl_dev, cpu_set_t **ptr_buf)
+{
+ struct ublk_ctrl_cmd_data data = {
+ .cmd_op = UBLK_U_CMD_GET_QUEUE_AFFINITY,
+ .flags = CTRL_CMD_HAS_DATA | CTRL_CMD_HAS_BUF,
+ };
+ cpu_set_t *buf;
+ int i, ret;
+
+ buf = malloc(sizeof(cpu_set_t) * ctrl_dev->dev_info.nr_hw_queues);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < ctrl_dev->dev_info.nr_hw_queues; i++) {
+ data.data[0] = i;
+ data.len = sizeof(cpu_set_t);
+ data.addr = (__u64)&buf[i];
+
+ ret = __ublk_ctrl_cmd(ctrl_dev, &data);
+ if (ret < 0) {
+ free(buf);
+ return ret;
+ }
+ ublk_adjust_affinity(&buf[i]);
+ }
+
+ *ptr_buf = buf;
+ return 0;
+}
+
static void ublk_ctrl_dump(struct ublk_dev *dev)
{
struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
struct ublk_params p;
+ cpu_set_t *affinity;
int ret;
ret = ublk_ctrl_get_params(dev, &p);
@@ -219,12 +305,31 @@ static void ublk_ctrl_dump(struct ublk_dev *dev)
return;
}
+ ret = ublk_ctrl_get_affinity(dev, &affinity);
+ if (ret < 0) {
+ ublk_err("failed to get affinity %m\n");
+ return;
+ }
+
ublk_log("dev id %d: nr_hw_queues %d queue_depth %d block size %d dev_capacity %lld\n",
info->dev_id, info->nr_hw_queues, info->queue_depth,
1 << p.basic.logical_bs_shift, p.basic.dev_sectors);
ublk_log("\tmax rq size %d daemon pid %d flags 0x%llx state %s\n",
info->max_io_buf_bytes, info->ublksrv_pid, info->flags,
ublk_dev_state_desc(dev));
+
+ if (affinity) {
+ char buf[512];
+ int i;
+
+ for (i = 0; i < info->nr_hw_queues; i++) {
+ ublk_print_cpu_set(&affinity[i], buf, sizeof(buf));
+ printf("\tqueue %u: tid %d affinity(%s)\n",
+ i, dev->q[i].tid, buf);
+ }
+ free(affinity);
+ }
+
fflush(stdout);
}
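ublk_adjust_affinity() keeps only the first allowed CPU in each queue's mask before the worker pins itself. A self-contained sketch of the same mask surgery with the standard CPU_* macros; it pins the calling process rather than a queue thread, and needs _GNU_SOURCE on glibc.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t set;
        int kept = -1;

        CPU_ZERO(&set);
        if (sched_getaffinity(0, sizeof(set), &set))
                return 1;

        /* keep only the first allowed CPU, like ublk_adjust_affinity() */
        for (int i = 0; i < CPU_SETSIZE; i++) {
                if (!CPU_ISSET(i, &set))
                        continue;
                if (kept < 0)
                        kept = i;       /* first hit survives */
                else
                        CPU_CLR(i, &set);
        }
        if (kept >= 0 && sched_setaffinity(0, sizeof(set), &set) == 0)
                printf("pinned to CPU %d\n", kept);
        return 0;
}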
@@ -347,7 +452,9 @@ static int ublk_queue_init(struct ublk_queue *q)
}
ret = ublk_setup_ring(&q->ring, ring_depth, cq_depth,
- IORING_SETUP_COOP_TASKRUN);
+ IORING_SETUP_COOP_TASKRUN |
+ IORING_SETUP_SINGLE_ISSUER |
+ IORING_SETUP_DEFER_TASKRUN);
if (ret < 0) {
ublk_err("ublk dev %d queue %d setup io_uring failed %d\n",
q->dev->dev_info.dev_id, q->q_id, ret);
@@ -602,9 +709,24 @@ static int ublk_process_io(struct ublk_queue *q)
return reapped;
}
+static void ublk_queue_set_sched_affinity(const struct ublk_queue *q,
+ cpu_set_t *cpuset)
+{
+ if (sched_setaffinity(0, sizeof(*cpuset), cpuset) < 0)
+ ublk_err("ublk dev %u queue %u set affinity failed",
+ q->dev->dev_info.dev_id, q->q_id);
+}
+
+struct ublk_queue_info {
+ struct ublk_queue *q;
+ sem_t *queue_sem;
+ cpu_set_t *affinity;
+};
+
static void *ublk_io_handler_fn(void *data)
{
- struct ublk_queue *q = data;
+ struct ublk_queue_info *info = data;
+ struct ublk_queue *q = info->q;
int dev_id = q->dev->dev_info.dev_id;
int ret;
@@ -614,6 +736,10 @@ static void *ublk_io_handler_fn(void *data)
dev_id, q->q_id);
return NULL;
}
+ /* I/O performance is sensitive to queue pthread affinity on NUMA machines */
+ ublk_queue_set_sched_affinity(q, info->affinity);
+ sem_post(info->queue_sem);
+
ublk_dbg(UBLK_DBG_QUEUE, "tid %d: ublk dev %d queue %d started\n",
q->tid, dev_id, q->q_id);
@@ -639,7 +765,7 @@ static void ublk_set_parameters(struct ublk_dev *dev)
dev->dev_info.dev_id, ret);
}
-static int ublk_send_dev_event(const struct dev_ctx *ctx, int dev_id)
+static int ublk_send_dev_event(const struct dev_ctx *ctx, struct ublk_dev *dev, int dev_id)
{
uint64_t id;
int evtfd = ctx->_evtfd;
@@ -652,36 +778,68 @@ static int ublk_send_dev_event(const struct dev_ctx *ctx, int dev_id)
else
id = ERROR_EVTFD_DEVID;
+ if (dev && ctx->shadow_dev)
+ memcpy(&ctx->shadow_dev->q, &dev->q, sizeof(dev->q));
+
if (write(evtfd, &id, sizeof(id)) != sizeof(id))
return -EINVAL;
+ close(evtfd);
+ shmdt(ctx->shadow_dev);
+
return 0;
}
static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
- int ret, i;
- void *thread_ret;
const struct ublksrv_ctrl_dev_info *dinfo = &dev->dev_info;
+ struct ublk_queue_info *qinfo;
+ cpu_set_t *affinity_buf;
+ void *thread_ret;
+ sem_t queue_sem;
+ int ret, i;
ublk_dbg(UBLK_DBG_DEV, "%s enter\n", __func__);
+	qinfo = (struct ublk_queue_info *)calloc(dinfo->nr_hw_queues,
+			sizeof(struct ublk_queue_info));
+ if (!qinfo)
+ return -ENOMEM;
+
+ sem_init(&queue_sem, 0, 0);
ret = ublk_dev_prep(ctx, dev);
if (ret)
return ret;
+	ret = ublk_ctrl_get_affinity(dev, &affinity_buf);
+	if (ret) {
+		free(qinfo);
+		return ret;
+	}
+
for (i = 0; i < dinfo->nr_hw_queues; i++) {
dev->q[i].dev = dev;
dev->q[i].q_id = i;
+
+ qinfo[i].q = &dev->q[i];
+ qinfo[i].queue_sem = &queue_sem;
+ qinfo[i].affinity = &affinity_buf[i];
pthread_create(&dev->q[i].thread, NULL,
ublk_io_handler_fn,
- &dev->q[i]);
+ &qinfo[i]);
}
+ for (i = 0; i < dinfo->nr_hw_queues; i++)
+ sem_wait(&queue_sem);
+ free(qinfo);
+ free(affinity_buf);
+
/* everything is fine now, start us */
- ublk_set_parameters(dev);
- ret = ublk_ctrl_start_dev(dev, getpid());
+ if (ctx->recovery)
+ ret = ublk_ctrl_end_user_recovery(dev, getpid());
+ else {
+ ublk_set_parameters(dev);
+ ret = ublk_ctrl_start_dev(dev, getpid());
+ }
if (ret < 0) {
ublk_err("%s: ublk_ctrl_start_dev failed: %d\n", __func__, ret);
goto fail;
@@ -691,7 +849,7 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
if (ctx->fg)
ublk_ctrl_dump(dev);
else
- ublk_send_dev_event(ctx, dev->dev_info.dev_id);
+ ublk_send_dev_event(ctx, dev, dev->dev_info.dev_id);
/* wait until we are terminated */
for (i = 0; i < dinfo->nr_hw_queues; i++)
@@ -856,7 +1014,10 @@ static int __cmd_dev_add(const struct dev_ctx *ctx)
}
}
- ret = ublk_ctrl_add_dev(dev);
+ if (ctx->recovery)
+ ret = ublk_ctrl_start_user_recovery(dev);
+ else
+ ret = ublk_ctrl_add_dev(dev);
if (ret < 0) {
ublk_err("%s: can't add dev id %d, type %s ret %d\n",
__func__, dev_id, tgt_type, ret);
@@ -870,7 +1031,7 @@ static int __cmd_dev_add(const struct dev_ctx *ctx)
fail:
if (ret < 0)
- ublk_send_dev_event(ctx, -1);
+ ublk_send_dev_event(ctx, dev, -1);
ublk_ctrl_deinit(dev);
return ret;
}
@@ -884,30 +1045,58 @@ static int cmd_dev_add(struct dev_ctx *ctx)
if (ctx->fg)
goto run;
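+	/*
+	 * The server runs in a detached grandchild, so share a shadow copy of
+	 * struct ublk_dev over SysV shm; "list" in the parent reads the queue
+	 * state back from it.
+	 */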
+ ctx->_shmid = shmget(IPC_PRIVATE, sizeof(struct ublk_dev), IPC_CREAT | 0666);
+ if (ctx->_shmid < 0) {
+ ublk_err("%s: failed to shmget %s\n", __func__, strerror(errno));
+ exit(-1);
+ }
+ ctx->shadow_dev = (struct ublk_dev *)shmat(ctx->_shmid, NULL, 0);
+ if (ctx->shadow_dev == (struct ublk_dev *)-1) {
+ ublk_err("%s: failed to shmat %s\n", __func__, strerror(errno));
+ exit(-1);
+ }
ctx->_evtfd = eventfd(0, 0);
if (ctx->_evtfd < 0) {
ublk_err("%s: failed to create eventfd %s\n", __func__, strerror(errno));
exit(-1);
}
- setsid();
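+	/*
+	 * Double fork: the first child calls setsid() to drop the controlling
+	 * terminal, then forks again so the detached server can never
+	 * reacquire one; the intermediate child exits right away.
+	 */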
res = fork();
if (res == 0) {
+ int res2;
+
+ setsid();
+ res2 = fork();
+ if (res2 == 0) {
+ /* prepare for detaching */
+ close(STDIN_FILENO);
+ close(STDOUT_FILENO);
+ close(STDERR_FILENO);
run:
- res = __cmd_dev_add(ctx);
- return res;
+ res = __cmd_dev_add(ctx);
+ return res;
+ } else {
+ /* detached from the foreground task */
+ exit(EXIT_SUCCESS);
+ }
} else if (res > 0) {
uint64_t id;
+ int exit_code = EXIT_FAILURE;
res = read(ctx->_evtfd, &id, sizeof(id));
close(ctx->_evtfd);
if (res == sizeof(id) && id != ERROR_EVTFD_DEVID) {
ctx->dev_id = id - 1;
- return __cmd_dev_list(ctx);
+ if (__cmd_dev_list(ctx) >= 0)
+ exit_code = EXIT_SUCCESS;
}
- exit(EXIT_FAILURE);
+ shmdt(ctx->shadow_dev);
+ shmctl(ctx->_shmid, IPC_RMID, NULL);
+ /* wait for child and detach from it */
+ wait(NULL);
+ exit(exit_code);
} else {
- return res;
+ exit(EXIT_FAILURE);
}
}
@@ -969,6 +1158,9 @@ static int __cmd_dev_list(struct dev_ctx *ctx)
ublk_err("%s: can't get dev info from %d: %d\n",
__func__, ctx->dev_id, ret);
} else {
+ if (ctx->shadow_dev)
+ memcpy(&dev->q, ctx->shadow_dev->q, sizeof(dev->q));
+
ublk_ctrl_dump(dev);
}
@@ -1039,14 +1231,47 @@ static int cmd_dev_get_features(void)
return ret;
}
+static void __cmd_create_help(char *exe, bool recovery)
+{
+ int i;
+
+ printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n",
+ exe, recovery ? "recover" : "add");
+	printf("\t[--foreground] [--quiet] [-z] [--debug_mask mask] [-r 0|1] [-g 0|1]\n");
+	printf("\t[-e 0|1] [-i 0|1]\n");
+ printf("\t[target options] [backfile1] [backfile2] ...\n");
+ printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n");
+
+	for (i = 0; i < ARRAY_SIZE(tgt_ops_list); i++) {
+ const struct ublk_tgt_ops *ops = tgt_ops_list[i];
+
+ if (ops->usage)
+ ops->usage(ops);
+ }
+}
+
+static void cmd_add_help(char *exe)
+{
+ __cmd_create_help(exe, false);
+ printf("\n");
+}
+
+static void cmd_recover_help(char *exe)
+{
+ __cmd_create_help(exe, true);
+	printf("\tPlease provide the exact command line used to create this device, with the real dev_id\n");
+ printf("\n");
+}
+
static int cmd_dev_help(char *exe)
{
- printf("%s add -t [null|loop] [-q nr_queues] [-d depth] [-n dev_id] [backfile1] [backfile2] ...\n", exe);
- printf("\t default: nr_queues=2(max 4), depth=128(max 128), dev_id=-1(auto allocation)\n");
+ cmd_add_help(exe);
+ cmd_recover_help(exe);
+
printf("%s del [-n dev_id] -a \n", exe);
- printf("\t -a delete all devices -n delete specified device\n");
+ printf("\t -a delete all devices -n delete specified device\n\n");
printf("%s list [-n dev_id] -a \n", exe);
- printf("\t -a list all devices, -n list specified device, default -a \n");
+ printf("\t -a list all devices, -n list specified device, default -a \n\n");
printf("%s features\n", exe);
return 0;
}
@@ -1063,9 +1288,13 @@ int main(int argc, char *argv[])
{ "quiet", 0, NULL, 0 },
{ "zero_copy", 0, NULL, 'z' },
{ "foreground", 0, NULL, 0 },
- { "chunk_size", 1, NULL, 0 },
+ { "recovery", 1, NULL, 'r' },
+ { "recovery_fail_io", 1, NULL, 'e'},
+ { "recovery_reissue", 1, NULL, 'i'},
+ { "get_data", 1, NULL, 'g'},
{ 0, 0, 0, 0 }
};
+ const struct ublk_tgt_ops *ops = NULL;
int option_idx, opt;
const char *cmd = argv[1];
struct dev_ctx ctx = {
@@ -1073,15 +1302,18 @@ int main(int argc, char *argv[])
.nr_hw_queues = 2,
.dev_id = -1,
.tgt_type = "unknown",
- .chunk_size = 65536, /* def chunk size is 64K */
};
int ret = -EINVAL, i;
+ int tgt_argc = 1;
+ char *tgt_argv[MAX_NR_TGT_ARG] = { NULL };
+ int value;
if (argc == 1)
return ret;
+ opterr = 0;
optind = 2;
- while ((opt = getopt_long(argc, argv, "t:n:d:q:az",
+ while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:az",
longopts, &option_idx)) != -1) {
switch (opt) {
case 'a':
@@ -1103,6 +1335,25 @@ int main(int argc, char *argv[])
case 'z':
ctx.flags |= UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_USER_COPY;
break;
+ case 'r':
+ value = strtol(optarg, NULL, 10);
+ if (value)
+ ctx.flags |= UBLK_F_USER_RECOVERY;
+ break;
+ case 'e':
+ value = strtol(optarg, NULL, 10);
+ if (value)
+ ctx.flags |= UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_FAIL_IO;
+ break;
+ case 'i':
+ value = strtol(optarg, NULL, 10);
+ if (value)
+ ctx.flags |= UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE;
+ break;
+	case 'g':
+		value = strtol(optarg, NULL, 10);
+		if (value)
+			ctx.flags |= UBLK_F_NEED_GET_DATA;
+		break;
case 0:
if (!strcmp(longopts[option_idx].name, "debug_mask"))
ublk_dbg_mask = strtol(optarg, NULL, 16);
@@ -1110,8 +1361,26 @@ int main(int argc, char *argv[])
ublk_dbg_mask = 0;
if (!strcmp(longopts[option_idx].name, "foreground"))
ctx.fg = 1;
- if (!strcmp(longopts[option_idx].name, "chunk_size"))
- ctx.chunk_size = strtol(optarg, NULL, 10);
+ break;
+ case '?':
+		/*
+		 * every target option must take an argument
+		 */
+ if (argv[optind][0] == '-' || argv[optind - 1][0] != '-') {
+ fprintf(stderr, "every target option requires argument: %s %s\n",
+ argv[optind - 1], argv[optind]);
+ exit(EXIT_FAILURE);
+ }
+
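+			/* slot 0 is reserved for the target name; each option consumes two slots */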
+ if (tgt_argc < (MAX_NR_TGT_ARG - 1) / 2) {
+ tgt_argv[tgt_argc++] = argv[optind - 1];
+ tgt_argv[tgt_argc++] = argv[optind];
+ } else {
+ fprintf(stderr, "too many target options\n");
+ exit(EXIT_FAILURE);
+ }
+ optind += 1;
+ break;
}
}
@@ -1120,9 +1389,25 @@ int main(int argc, char *argv[])
ctx.files[ctx.nr_files++] = argv[i++];
}
+ ops = ublk_find_tgt(ctx.tgt_type);
+ if (ops && ops->parse_cmd_line) {
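+		/* reset getopt(3) state so the target parser rescans tgt_argv from the start */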
+ optind = 0;
+
+ tgt_argv[0] = ctx.tgt_type;
+ ops->parse_cmd_line(&ctx, tgt_argc, tgt_argv);
+ }
+
if (!strcmp(cmd, "add"))
ret = cmd_dev_add(&ctx);
- else if (!strcmp(cmd, "del"))
+ else if (!strcmp(cmd, "recover")) {
+ if (ctx.dev_id < 0) {
+			fprintf(stderr, "device id must be provided for recovery\n");
+ ret = -EINVAL;
+ } else {
+ ctx.recovery = 1;
+ ret = cmd_dev_add(&ctx);
+ }
+ } else if (!strcmp(cmd, "del"))
ret = cmd_dev_del(&ctx);
else if (!strcmp(cmd, "list")) {
ctx.all = 1;
diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h
index 760ff8ffb810..29571eb296f1 100644
--- a/tools/testing/selftests/ublk/kublk.h
+++ b/tools/testing/selftests/ublk/kublk.h
@@ -20,9 +20,15 @@
#include <sys/wait.h>
#include <sys/eventfd.h>
#include <sys/uio.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <linux/io_uring.h>
#include <liburing.h>
-#include <linux/ublk_cmd.h>
+#include <semaphore.h>
+
+/* allow ublk_dep.h to override ublk_cmd.h */
#include "ublk_dep.h"
+#include <linux/ublk_cmd.h>
#define __maybe_unused __attribute__((unused))
#define MAX_BACK_FILES 4
@@ -30,6 +36,8 @@
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
+
/****************** part 1: libublk ********************/
#define CTRL_DEV "/dev/ublk-control"
@@ -42,8 +50,8 @@
#define UBLKSRV_IO_IDLE_SECS 20
#define UBLK_IO_MAX_BYTES (1 << 20)
-#define UBLK_MAX_QUEUES 4
-#define UBLK_QUEUE_DEPTH 128
+#define UBLK_MAX_QUEUES 32
+#define UBLK_QUEUE_DEPTH 1024
#define UBLK_DBG_DEV (1U << 0)
#define UBLK_DBG_QUEUE (1U << 1)
@@ -55,6 +63,16 @@
struct ublk_dev;
struct ublk_queue;
+struct stripe_ctx {
+ /* stripe */
+ unsigned int chunk_size;
+};
+
+struct fault_inject_ctx {
+ /* fault_inject */
+ unsigned long delay_us;
+};
+
struct dev_ctx {
char tgt_type[16];
unsigned long flags;
@@ -66,11 +84,21 @@ struct dev_ctx {
unsigned int logging:1;
unsigned int all:1;
unsigned int fg:1;
+ unsigned int recovery:1;
- /* stripe */
- unsigned int chunk_size;
+ /* fault_inject */
+ long long delay_us;
int _evtfd;
+ int _shmid;
+
+	/* built from shmem, only for ublk_ctrl_dump() */
+ struct ublk_dev *shadow_dev;
+
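+	/* only one target is active per device, so target options share storage */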
+ union {
+ struct stripe_ctx stripe;
+ struct fault_inject_ctx fault_inject;
+ };
};
struct ublk_ctrl_cmd_data {
@@ -107,6 +135,14 @@ struct ublk_tgt_ops {
int (*queue_io)(struct ublk_queue *, int tag);
void (*tgt_io_done)(struct ublk_queue *,
int tag, const struct io_uring_cqe *);
+
+ /*
+ * Target specific command line handling
+ *
+	 * each target option must take an argument
+ */
+ void (*parse_cmd_line)(struct dev_ctx *ctx, int argc, char *argv[]);
+ void (*usage)(const struct ublk_tgt_ops *ops);
};
struct ublk_tgt {
@@ -357,6 +393,7 @@ static inline int ublk_queue_use_zc(const struct ublk_queue *q)
extern const struct ublk_tgt_ops null_tgt_ops;
extern const struct ublk_tgt_ops loop_tgt_ops;
extern const struct ublk_tgt_ops stripe_tgt_ops;
+extern const struct ublk_tgt_ops fault_inject_tgt_ops;
void backing_file_tgt_deinit(struct ublk_dev *dev);
int backing_file_tgt_init(struct ublk_dev *dev);
diff --git a/tools/testing/selftests/ublk/stripe.c b/tools/testing/selftests/ublk/stripe.c
index 179731c3dd6f..5dbd6392d83d 100644
--- a/tools/testing/selftests/ublk/stripe.c
+++ b/tools/testing/selftests/ublk/stripe.c
@@ -281,7 +281,7 @@ static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
.max_sectors = dev->dev_info.max_io_buf_bytes >> 9,
},
};
- unsigned chunk_size = ctx->chunk_size;
+ unsigned chunk_size = ctx->stripe.chunk_size;
struct stripe_conf *conf;
unsigned chunk_shift;
loff_t bytes = 0;
@@ -344,10 +344,36 @@ static void ublk_stripe_tgt_deinit(struct ublk_dev *dev)
backing_file_tgt_deinit(dev);
}
+static void ublk_stripe_cmd_line(struct dev_ctx *ctx, int argc, char *argv[])
+{
+ static const struct option longopts[] = {
+ { "chunk_size", 1, NULL, 0 },
+ { 0, 0, 0, 0 }
+ };
+ int option_idx, opt;
+
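+	/* default chunk size is 64K, matching the old global default */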
+ ctx->stripe.chunk_size = 65536;
+ while ((opt = getopt_long(argc, argv, "",
+ longopts, &option_idx)) != -1) {
+ switch (opt) {
+ case 0:
+ if (!strcmp(longopts[option_idx].name, "chunk_size"))
+ ctx->stripe.chunk_size = strtol(optarg, NULL, 10);
+ }
+ }
+}
+
+static void ublk_stripe_usage(const struct ublk_tgt_ops *ops)
+{
+ printf("\tstripe: [--chunk_size chunk_size (default 65536)]\n");
+}
+
const struct ublk_tgt_ops stripe_tgt_ops = {
.name = "stripe",
.init_tgt = ublk_stripe_tgt_init,
.deinit_tgt = ublk_stripe_tgt_deinit,
.queue_io = ublk_stripe_queue_io,
.tgt_io_done = ublk_stripe_io_done,
+ .parse_cmd_line = ublk_stripe_cmd_line,
+ .usage = ublk_stripe_usage,
};
diff --git a/tools/testing/selftests/ublk/test_common.sh b/tools/testing/selftests/ublk/test_common.sh
index a88b35943227..9fc111f64576 100755
--- a/tools/testing/selftests/ublk/test_common.sh
+++ b/tools/testing/selftests/ublk/test_common.sh
@@ -30,18 +30,26 @@ _run_fio_verify_io() {
}
_create_backfile() {
- local my_size=$1
- local my_file
+ local index=$1
+ local new_size=$2
+ local old_file
+ local new_file
- my_file=$(mktemp ublk_file_"${my_size}"_XXXXX)
- truncate -s "${my_size}" "${my_file}"
- echo "$my_file"
+ old_file="${UBLK_BACKFILES[$index]}"
+ [ -f "$old_file" ] && rm -f "$old_file"
+
+ new_file=$(mktemp ublk_file_"${new_size}"_XXXXX)
+ truncate -s "${new_size}" "${new_file}"
+ UBLK_BACKFILES["$index"]="$new_file"
}
-_remove_backfile() {
- local file=$1
+_remove_files() {
+ local file
- [ -f "$file" ] && rm -f "$file"
+ for file in "${UBLK_BACKFILES[@]}"; do
+ [ -f "$file" ] && rm -f "$file"
+ done
+ [ -f "$UBLK_TMP" ] && rm -f "$UBLK_TMP"
}
_create_tmp_dir() {
@@ -106,6 +114,7 @@ _prep_test() {
local type=$1
shift 1
modprobe ublk_drv > /dev/null 2>&1
+ UBLK_TMP=$(mktemp ublk_test_XXXXX)
[ "$UBLK_TEST_QUIET" -eq 0 ] && echo "ublk $type: $*"
}
@@ -129,7 +138,10 @@ _show_result()
echo "$1 : [FAIL]"
fi
fi
- [ "$2" -ne 0 ] && exit "$2"
+ if [ "$2" -ne 0 ]; then
+ _remove_files
+ exit "$2"
+ fi
return 0
}
@@ -138,16 +150,16 @@ _check_add_dev()
{
local tid=$1
local code=$2
- shift 2
+
if [ "${code}" -ne 0 ]; then
- _remove_test_files "$@"
_show_result "${tid}" "${code}"
fi
}
_cleanup_test() {
"${UBLK_PROG}" del -a
- rm -f "$UBLK_TMP"
+
+ _remove_files
}
_have_feature()
@@ -158,9 +170,11 @@ _have_feature()
return 1
}
-_add_ublk_dev() {
- local kublk_temp;
+_create_ublk_dev() {
local dev_id;
+ local cmd=$1
+
+ shift 1
if [ ! -c /dev/ublk-control ]; then
return ${UBLK_SKIP_CODE}
@@ -171,17 +185,34 @@ _add_ublk_dev() {
fi
fi
- kublk_temp=$(mktemp /tmp/kublk-XXXXXX)
- if ! "${UBLK_PROG}" add "$@" > "${kublk_temp}" 2>&1; then
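+	# parse the device id from the "dev id N: ..." line of the dump output;
+	# with -F '[ :]' the id is field 3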
+ if ! dev_id=$("${UBLK_PROG}" "$cmd" "$@" | grep "dev id" | awk -F '[ :]' '{print $3}'); then
echo "fail to add ublk dev $*"
- rm -f "${kublk_temp}"
return 255
fi
-
- dev_id=$(grep "dev id" "${kublk_temp}" | awk -F '[ :]' '{print $3}')
udevadm settle
- rm -f "${kublk_temp}"
- echo "${dev_id}"
+
+ if [[ "$dev_id" =~ ^[0-9]+$ ]]; then
+ echo "${dev_id}"
+ else
+ return 255
+ fi
+}
+
+_add_ublk_dev() {
+ _create_ublk_dev "add" "$@"
+}
+
+_recover_ublk_dev() {
+ local dev_id
+ local state
+
+ dev_id=$(_create_ublk_dev "recover" "$@")
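+	# recovery completes asynchronously: poll the device state until it
+	# turns LIVE, for at most 20 seconds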
+ for ((j=0;j<20;j++)); do
+ state=$(_get_ublk_dev_state "${dev_id}")
+ [ "$state" == "LIVE" ] && break
+ sleep 1
+ done
+ echo "$state"
}
# kill the ublk daemon and return ublk device state
@@ -220,7 +251,7 @@ __run_io_and_remove()
local kill_server=$3
fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio \
- --rw=readwrite --iodepth=64 --size="${size}" --numjobs=4 \
+ --rw=readwrite --iodepth=256 --size="${size}" --numjobs=4 \
--runtime=20 --time_based > /dev/null 2>&1 &
sleep 2
if [ "${kill_server}" = "yes" ]; then
@@ -238,15 +269,80 @@ __run_io_and_remove()
wait
}
+run_io_and_remove()
+{
+ local size=$1
+ local dev_id
+ shift 1
+
+ dev_id=$(_add_ublk_dev "$@")
+ _check_add_dev "$TID" $?
+
+ [ "$UBLK_TEST_QUIET" -eq 0 ] && echo "run ublk IO vs. remove device(ublk add $*)"
+ if ! __run_io_and_remove "$dev_id" "${size}" "no"; then
+ echo "/dev/ublkc$dev_id isn't removed"
+ exit 255
+ fi
+}
+
+run_io_and_kill_daemon()
+{
+ local size=$1
+ local dev_id
+ shift 1
+
+ dev_id=$(_add_ublk_dev "$@")
+ _check_add_dev "$TID" $?
+
+ [ "$UBLK_TEST_QUIET" -eq 0 ] && echo "run ublk IO vs kill ublk server(ublk add $*)"
+ if ! __run_io_and_remove "$dev_id" "${size}" "yes"; then
+		echo "/dev/ublkc$dev_id isn't removed"
+ exit 255
+ fi
+}
+
+run_io_and_recover()
+{
+ local state
+ local dev_id
+
+ dev_id=$(_add_ublk_dev "$@")
+ _check_add_dev "$TID" $?
+
+	fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio \
+		--rw=readwrite --iodepth=256 --numjobs=4 \
+		--runtime=20 --time_based > /dev/null 2>&1 &
+ sleep 4
+
+ state=$(__ublk_kill_daemon "${dev_id}" "QUIESCED")
+ if [ "$state" != "QUIESCED" ]; then
+ echo "device isn't quiesced($state) after killing daemon"
+ return 255
+ fi
+
+ state=$(_recover_ublk_dev -n "$dev_id" "$@")
+ if [ "$state" != "LIVE" ]; then
+		echo "failed to recover to LIVE($state)"
+ return 255
+ fi
+
+ if ! __remove_ublk_dev_return "${dev_id}"; then
+ echo "delete dev ${dev_id} failed"
+ return 255
+ fi
+ wait
+}
+
_ublk_test_top_dir()
{
cd "$(dirname "$0")" && pwd
}
-UBLK_TMP=$(mktemp ublk_test_XXXXX)
UBLK_PROG=$(_ublk_test_top_dir)/kublk
UBLK_TEST_QUIET=1
UBLK_TEST_SHOW_RESULT=1
+UBLK_BACKFILES=()
export UBLK_PROG
export UBLK_TEST_QUIET
export UBLK_TEST_SHOW_RESULT
diff --git a/tools/testing/selftests/ublk/test_generic_04.sh b/tools/testing/selftests/ublk/test_generic_04.sh
new file mode 100755
index 000000000000..8a3bc080c577
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_generic_04.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_04"
+ERR_CODE=0
+
+ublk_run_recover_test()
+{
+ run_io_and_recover "$@"
+ ERR_CODE=$?
+ if [ ${ERR_CODE} -ne 0 ]; then
+ echo "$TID failure: $*"
+ _show_result $TID $ERR_CODE
+ fi
+}
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "recover" "basic recover function verification"
+
+_create_backfile 0 256M
+_create_backfile 1 128M
+_create_backfile 2 128M
+
+ublk_run_recover_test -t null -q 2 -r 1 &
+ublk_run_recover_test -t loop -q 2 -r 1 "${UBLK_BACKFILES[0]}" &
+ublk_run_recover_test -t stripe -q 2 -r 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+wait
+
+ublk_run_recover_test -t null -q 2 -r 1 -i 1 &
+ublk_run_recover_test -t loop -q 2 -r 1 -i 1 "${UBLK_BACKFILES[0]}" &
+ublk_run_recover_test -t stripe -q 2 -r 1 -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+wait
+
+_cleanup_test "recover"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_05.sh b/tools/testing/selftests/ublk/test_generic_05.sh
new file mode 100755
index 000000000000..714630b4b329
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_generic_05.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_05"
+ERR_CODE=0
+
+ublk_run_recover_test()
+{
+ run_io_and_recover "$@"
+ ERR_CODE=$?
+ if [ ${ERR_CODE} -ne 0 ]; then
+ echo "$TID failure: $*"
+ _show_result $TID $ERR_CODE
+ fi
+}
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+if ! _have_feature "ZERO_COPY"; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "recover" "basic recover function verification (zero copy)"
+
+_create_backfile 0 256M
+_create_backfile 1 128M
+_create_backfile 2 128M
+
+ublk_run_recover_test -t null -q 2 -r 1 -z &
+ublk_run_recover_test -t loop -q 2 -r 1 -z "${UBLK_BACKFILES[0]}" &
+ublk_run_recover_test -t stripe -q 2 -r 1 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+wait
+
+ublk_run_recover_test -t null -q 2 -r 1 -z -i 1 &
+ublk_run_recover_test -t loop -q 2 -r 1 -z -i 1 "${UBLK_BACKFILES[0]}" &
+ublk_run_recover_test -t stripe -q 2 -r 1 -z -i 1 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+wait
+
+_cleanup_test "recover"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_06.sh b/tools/testing/selftests/ublk/test_generic_06.sh
new file mode 100755
index 000000000000..b67230c42c84
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_generic_06.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_06"
+ERR_CODE=0
+
+_prep_test "fault_inject" "fast cleanup when all I/Os of one hctx are in the server"
+
+# configure ublk server to sleep 2s before completing each I/O
+dev_id=$(_add_ublk_dev -t fault_inject -q 2 -d 1 --delay_us 2000000)
+_check_add_dev $TID $?
+
+STARTTIME=${SECONDS}
+
+dd if=/dev/urandom of=/dev/ublkb${dev_id} oflag=direct bs=4k count=1 status=none > /dev/null 2>&1 &
+dd_pid=$!
+
+__ublk_kill_daemon ${dev_id} "DEAD"
+
+wait $dd_pid
+dd_exitcode=$?
+
+ENDTIME=${SECONDS}
+ELAPSED=$(($ENDTIME - $STARTTIME))
+
+# assert that dd sees an error and exits quickly after ublk server is
+# killed. previously this relied on seeing an I/O timeout and so would
+# take ~30s
+if [ $dd_exitcode -eq 0 ]; then
+ echo "dd unexpectedly exited successfully!"
+ ERR_CODE=255
+fi
+if [ $ELAPSED -ge 5 ]; then
+ echo "dd took $ELAPSED seconds to exit (>= 5s tolerance)!"
+ ERR_CODE=255
+fi
+
+_cleanup_test "fault_inject"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_01.sh b/tools/testing/selftests/ublk/test_loop_01.sh
index 1ef8b6044777..833fa0dbc700 100755
--- a/tools/testing/selftests/ublk/test_loop_01.sh
+++ b/tools/testing/selftests/ublk/test_loop_01.sh
@@ -12,10 +12,10 @@ fi
_prep_test "loop" "write and verify test"
-backfile_0=$(_create_backfile 256M)
+_create_backfile 0 256M
-dev_id=$(_add_ublk_dev -t loop "$backfile_0")
-_check_add_dev $TID $? "${backfile_0}"
+dev_id=$(_add_ublk_dev -t loop "${UBLK_BACKFILES[0]}")
+_check_add_dev $TID $?
# run fio over the ublk disk
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
@@ -23,6 +23,4 @@ ERR_CODE=$?
_cleanup_test "loop"
-_remove_backfile "$backfile_0"
-
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_02.sh b/tools/testing/selftests/ublk/test_loop_02.sh
index 03863d825e07..874568b3646b 100755
--- a/tools/testing/selftests/ublk/test_loop_02.sh
+++ b/tools/testing/selftests/ublk/test_loop_02.sh
@@ -8,15 +8,13 @@ ERR_CODE=0
_prep_test "loop" "mkfs & mount & umount"
-backfile_0=$(_create_backfile 256M)
-dev_id=$(_add_ublk_dev -t loop "$backfile_0")
-_check_add_dev $TID $? "$backfile_0"
+_create_backfile 0 256M
+dev_id=$(_add_ublk_dev -t loop "${UBLK_BACKFILES[0]}")
+_check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
_cleanup_test "loop"
-_remove_backfile "$backfile_0"
-
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_03.sh b/tools/testing/selftests/ublk/test_loop_03.sh
index e9ca744de8b1..c30f797c6429 100755
--- a/tools/testing/selftests/ublk/test_loop_03.sh
+++ b/tools/testing/selftests/ublk/test_loop_03.sh
@@ -12,9 +12,9 @@ fi
_prep_test "loop" "write and verify over zero copy"
-backfile_0=$(_create_backfile 256M)
-dev_id=$(_add_ublk_dev -t loop -z "$backfile_0")
-_check_add_dev $TID $? "$backfile_0"
+_create_backfile 0 256M
+dev_id=$(_add_ublk_dev -t loop -z "${UBLK_BACKFILES[0]}")
+_check_add_dev $TID $?
# run fio over the ublk disk
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
@@ -22,6 +22,4 @@ ERR_CODE=$?
_cleanup_test "loop"
-_remove_backfile "$backfile_0"
-
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_04.sh b/tools/testing/selftests/ublk/test_loop_04.sh
index 1435422c38ec..b01d75b3214d 100755
--- a/tools/testing/selftests/ublk/test_loop_04.sh
+++ b/tools/testing/selftests/ublk/test_loop_04.sh
@@ -8,15 +8,14 @@ ERR_CODE=0
_prep_test "loop" "mkfs & mount & umount with zero copy"
-backfile_0=$(_create_backfile 256M)
-dev_id=$(_add_ublk_dev -t loop -z "$backfile_0")
-_check_add_dev $TID $? "$backfile_0"
+_create_backfile 0 256M
+
+dev_id=$(_add_ublk_dev -t loop -z "${UBLK_BACKFILES[0]}")
+_check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
_cleanup_test "loop"
-_remove_backfile "$backfile_0"
-
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_05.sh b/tools/testing/selftests/ublk/test_loop_05.sh
index 2e6e2e6978fc..de2141533074 100755
--- a/tools/testing/selftests/ublk/test_loop_05.sh
+++ b/tools/testing/selftests/ublk/test_loop_05.sh
@@ -12,10 +12,10 @@ fi
_prep_test "loop" "write and verify test"
-backfile_0=$(_create_backfile 256M)
+_create_backfile 0 256M
-dev_id=$(_add_ublk_dev -q 2 -t loop "$backfile_0")
-_check_add_dev $TID $? "${backfile_0}"
+dev_id=$(_add_ublk_dev -q 2 -t loop "${UBLK_BACKFILES[0]}")
+_check_add_dev $TID $?
# run fio over the ublk disk
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
@@ -23,6 +23,4 @@ ERR_CODE=$?
_cleanup_test "loop"
-_remove_backfile "$backfile_0"
-
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_01.sh b/tools/testing/selftests/ublk/test_stress_01.sh
index a8be24532b24..7d3150f057d4 100755
--- a/tools/testing/selftests/ublk/test_stress_01.sh
+++ b/tools/testing/selftests/ublk/test_stress_01.sh
@@ -4,44 +4,31 @@
. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
TID="stress_01"
ERR_CODE=0
-DEV_ID=-1
ublk_io_and_remove()
{
- local size=$1
- shift 1
- local backfile=""
- if echo "$@" | grep -q "loop"; then
- backfile=${*: -1}
- fi
- DEV_ID=$(_add_ublk_dev "$@")
- _check_add_dev $TID $? "${backfile}"
-
- [ "$UBLK_TEST_QUIET" -eq 0 ] && echo "run ublk IO vs. remove device(ublk add $*)"
- if ! __run_io_and_remove "${DEV_ID}" "${size}" "no"; then
- echo "/dev/ublkc${DEV_ID} isn't removed"
- _remove_backfile "${backfile}"
- exit 255
+ run_io_and_remove "$@"
+ ERR_CODE=$?
+ if [ ${ERR_CODE} -ne 0 ]; then
+ echo "$TID failure: $*"
+ _show_result $TID $ERR_CODE
fi
}
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
_prep_test "stress" "run IO and remove device"
-ublk_io_and_remove 8G -t null -q 4
-ERR_CODE=$?
-if [ ${ERR_CODE} -ne 0 ]; then
- _show_result $TID $ERR_CODE
-fi
+_create_backfile 0 256M
+_create_backfile 1 128M
+_create_backfile 2 128M
-BACK_FILE=$(_create_backfile 256M)
-ublk_io_and_remove 256M -t loop -q 4 "${BACK_FILE}"
-ERR_CODE=$?
-if [ ${ERR_CODE} -ne 0 ]; then
- _show_result $TID $ERR_CODE
-fi
+ublk_io_and_remove 8G -t null -q 4 &
+ublk_io_and_remove 256M -t loop -q 4 "${UBLK_BACKFILES[0]}" &
+ublk_io_and_remove 256M -t stripe -q 4 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+wait
-ublk_io_and_remove 256M -t loop -q 4 -z "${BACK_FILE}"
-ERR_CODE=$?
_cleanup_test "stress"
-_remove_backfile "${BACK_FILE}"
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_02.sh b/tools/testing/selftests/ublk/test_stress_02.sh
index 2159e4cc8140..1a9065125ae1 100755
--- a/tools/testing/selftests/ublk/test_stress_02.sh
+++ b/tools/testing/selftests/ublk/test_stress_02.sh
@@ -4,44 +4,31 @@
. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
TID="stress_02"
ERR_CODE=0
-DEV_ID=-1
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
ublk_io_and_kill_daemon()
{
- local size=$1
- shift 1
- local backfile=""
- if echo "$@" | grep -q "loop"; then
- backfile=${*: -1}
- fi
- DEV_ID=$(_add_ublk_dev "$@")
- _check_add_dev $TID $? "${backfile}"
-
- [ "$UBLK_TEST_QUIET" -eq 0 ] && echo "run ublk IO vs kill ublk server(ublk add $*)"
- if ! __run_io_and_remove "${DEV_ID}" "${size}" "yes"; then
- echo "/dev/ublkc${DEV_ID} isn't removed res ${res}"
- _remove_backfile "${backfile}"
- exit 255
+ run_io_and_kill_daemon "$@"
+ ERR_CODE=$?
+ if [ ${ERR_CODE} -ne 0 ]; then
+ echo "$TID failure: $*"
+ _show_result $TID $ERR_CODE
fi
}
_prep_test "stress" "run IO and kill ublk server"
-ublk_io_and_kill_daemon 8G -t null -q 4
-ERR_CODE=$?
-if [ ${ERR_CODE} -ne 0 ]; then
- _show_result $TID $ERR_CODE
-fi
+_create_backfile 0 256M
+_create_backfile 1 128M
+_create_backfile 2 128M
-BACK_FILE=$(_create_backfile 256M)
-ublk_io_and_kill_daemon 256M -t loop -q 4 "${BACK_FILE}"
-ERR_CODE=$?
-if [ ${ERR_CODE} -ne 0 ]; then
- _show_result $TID $ERR_CODE
-fi
+ublk_io_and_kill_daemon 8G -t null -q 4 &
+ublk_io_and_kill_daemon 256M -t loop -q 4 "${UBLK_BACKFILES[0]}" &
+ublk_io_and_kill_daemon 256M -t stripe -q 4 "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+wait
-ublk_io_and_kill_daemon 256M -t loop -q 4 -z "${BACK_FILE}"
-ERR_CODE=$?
_cleanup_test "stress"
-_remove_backfile "${BACK_FILE}"
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_03.sh b/tools/testing/selftests/ublk/test_stress_03.sh
new file mode 100755
index 000000000000..e0854f71d35b
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_stress_03.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+TID="stress_03"
+ERR_CODE=0
+
+ublk_io_and_remove()
+{
+ run_io_and_remove "$@"
+ ERR_CODE=$?
+ if [ ${ERR_CODE} -ne 0 ]; then
+ echo "$TID failure: $*"
+ _show_result $TID $ERR_CODE
+ fi
+}
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+if ! _have_feature "ZERO_COPY"; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "stress" "run IO and remove device(zero copy)"
+
+_create_backfile 0 256M
+_create_backfile 1 128M
+_create_backfile 2 128M
+
+ublk_io_and_remove 8G -t null -q 4 -z &
+ublk_io_and_remove 256M -t loop -q 4 -z "${UBLK_BACKFILES[0]}" &
+ublk_io_and_remove 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+wait
+
+_cleanup_test "stress"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_04.sh b/tools/testing/selftests/ublk/test_stress_04.sh
new file mode 100755
index 000000000000..1798a98387e8
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_stress_04.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+TID="stress_04"
+ERR_CODE=0
+
+ublk_io_and_kill_daemon()
+{
+ run_io_and_kill_daemon "$@"
+ ERR_CODE=$?
+ if [ ${ERR_CODE} -ne 0 ]; then
+ echo "$TID failure: $*"
+ _show_result $TID $ERR_CODE
+ fi
+}
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+if ! _have_feature "ZERO_COPY"; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "stress" "run IO and kill ublk server(zero copy)"
+
+_create_backfile 0 256M
+_create_backfile 1 128M
+_create_backfile 2 128M
+
+ublk_io_and_kill_daemon 8G -t null -q 4 -z &
+ublk_io_and_kill_daemon 256M -t loop -q 4 -z "${UBLK_BACKFILES[0]}" &
+ublk_io_and_kill_daemon 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+wait
+
+_cleanup_test "stress"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_05.sh b/tools/testing/selftests/ublk/test_stress_05.sh
new file mode 100755
index 000000000000..a7071b10224d
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_stress_05.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+TID="stress_05"
+ERR_CODE=0
+
+run_io_and_remove()
+{
+ local size=$1
+ local dev_id
+ local dev_pid
+ shift 1
+
+ dev_id=$(_add_ublk_dev "$@")
+ _check_add_dev $TID $?
+
+ [ "$UBLK_TEST_QUIET" -eq 0 ] && echo "run ublk IO vs. remove device(ublk add $*)"
+
+ fio --name=job1 --filename=/dev/ublkb"${dev_id}" --ioengine=libaio \
+ --rw=readwrite --iodepth=128 --size="${size}" --numjobs=4 \
+ --runtime=40 --time_based > /dev/null 2>&1 &
+ sleep 4
+
+ dev_pid=$(_get_ublk_daemon_pid "$dev_id")
+ kill -9 "$dev_pid"
+
+ if ! __remove_ublk_dev_return "${dev_id}"; then
+ echo "delete dev ${dev_id} failed"
+ return 255
+ fi
+}
+
+ublk_io_and_remove()
+{
+ run_io_and_remove "$@"
+ ERR_CODE=$?
+ if [ ${ERR_CODE} -ne 0 ]; then
+ echo "$TID failure: $*"
+ _show_result $TID $ERR_CODE
+ fi
+}
+
+_prep_test "stress" "run IO and remove device with recovery enabled"
+
+_create_backfile 0 256M
+_create_backfile 1 256M
+
+for reissue in $(seq 0 1); do
+ ublk_io_and_remove 8G -t null -q 4 -g 1 -r 1 -i "$reissue" &
+ ublk_io_and_remove 256M -t loop -q 4 -g 1 -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" &
+ wait
+done
+
+if _have_feature "ZERO_COPY"; then
+ for reissue in $(seq 0 1); do
+ ublk_io_and_remove 8G -t null -q 4 -g 1 -z -r 1 -i "$reissue" &
+ ublk_io_and_remove 256M -t loop -q 4 -g 1 -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" &
+ wait
+ done
+fi
+
+_cleanup_test "stress"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_01.sh b/tools/testing/selftests/ublk/test_stripe_01.sh
index 7e387ef656ea..4e4f0fdf3c9b 100755
--- a/tools/testing/selftests/ublk/test_stripe_01.sh
+++ b/tools/testing/selftests/ublk/test_stripe_01.sh
@@ -12,19 +12,15 @@ fi
_prep_test "stripe" "write and verify test"
-backfile_0=$(_create_backfile 256M)
-backfile_1=$(_create_backfile 256M)
+_create_backfile 0 256M
+_create_backfile 1 256M
-dev_id=$(_add_ublk_dev -t stripe "$backfile_0" "$backfile_1")
-_check_add_dev $TID $? "${backfile_0}"
+dev_id=$(_add_ublk_dev -t stripe "${UBLK_BACKFILES[0]}" "${UBLK_BACKFILES[1]}")
+_check_add_dev $TID $?
# run fio over the ublk disk
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M
ERR_CODE=$?
_cleanup_test "stripe"
-
-_remove_backfile "$backfile_0"
-_remove_backfile "$backfile_1"
-
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_02.sh b/tools/testing/selftests/ublk/test_stripe_02.sh
index e8a45fa82dde..5820ab2efba4 100755
--- a/tools/testing/selftests/ublk/test_stripe_02.sh
+++ b/tools/testing/selftests/ublk/test_stripe_02.sh
@@ -8,17 +8,14 @@ ERR_CODE=0
_prep_test "stripe" "mkfs & mount & umount"
-backfile_0=$(_create_backfile 256M)
-backfile_1=$(_create_backfile 256M)
-dev_id=$(_add_ublk_dev -t stripe "$backfile_0" "$backfile_1")
-_check_add_dev $TID $? "$backfile_0" "$backfile_1"
+_create_backfile 0 256M
+_create_backfile 1 256M
+
+dev_id=$(_add_ublk_dev -t stripe "${UBLK_BACKFILES[0]}" "${UBLK_BACKFILES[1]}")
+_check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
_cleanup_test "stripe"
-
-_remove_backfile "$backfile_0"
-_remove_backfile "$backfile_1"
-
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_03.sh b/tools/testing/selftests/ublk/test_stripe_03.sh
index c1b34af36145..20b977e27814 100755
--- a/tools/testing/selftests/ublk/test_stripe_03.sh
+++ b/tools/testing/selftests/ublk/test_stripe_03.sh
@@ -12,19 +12,15 @@ fi
_prep_test "stripe" "write and verify test"
-backfile_0=$(_create_backfile 256M)
-backfile_1=$(_create_backfile 256M)
+_create_backfile 0 256M
+_create_backfile 1 256M
-dev_id=$(_add_ublk_dev -q 2 -t stripe "$backfile_0" "$backfile_1")
-_check_add_dev $TID $? "${backfile_0}"
+dev_id=$(_add_ublk_dev -q 2 -t stripe "${UBLK_BACKFILES[0]}" "${UBLK_BACKFILES[1]}")
+_check_add_dev $TID $?
# run fio over the ublk disk
_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M
ERR_CODE=$?
_cleanup_test "stripe"
-
-_remove_backfile "$backfile_0"
-_remove_backfile "$backfile_1"
-
_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stripe_04.sh b/tools/testing/selftests/ublk/test_stripe_04.sh
index 1f2b642381d1..1b51ed2f1d84 100755
--- a/tools/testing/selftests/ublk/test_stripe_04.sh
+++ b/tools/testing/selftests/ublk/test_stripe_04.sh
@@ -8,17 +8,14 @@ ERR_CODE=0
_prep_test "stripe" "mkfs & mount & umount on zero copy"
-backfile_0=$(_create_backfile 256M)
-backfile_1=$(_create_backfile 256M)
-dev_id=$(_add_ublk_dev -t stripe -z -q 2 "$backfile_0" "$backfile_1")
-_check_add_dev $TID $? "$backfile_0" "$backfile_1"
+_create_backfile 0 256M
+_create_backfile 1 256M
+
+dev_id=$(_add_ublk_dev -t stripe -z -q 2 "${UBLK_BACKFILES[0]}" "${UBLK_BACKFILES[1]}")
+_check_add_dev $TID $?
_mkfs_mount_test /dev/ublkb"${dev_id}"
ERR_CODE=$?
_cleanup_test "stripe"
-
-_remove_backfile "$backfile_0"
-_remove_backfile "$backfile_1"
-
_show_result $TID $ERR_CODE