-rw-r--r--  Documentation/ABI/README  3
-rw-r--r--  Documentation/ABI/removed/sysfs-class-rfkill  2
-rw-r--r--  Documentation/ABI/stable/sysfs-class-rfkill  12
-rw-r--r--  Documentation/ABI/stable/sysfs-devices-system-cpu  10
-rw-r--r--  Documentation/ABI/stable/sysfs-driver-dma-idxd  4
-rw-r--r--  Documentation/ABI/testing/configfs-usb-gadget-midi2  2
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-coresight-devices-cti  78
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm  52
-rw-r--r--  Documentation/ABI/testing/sysfs-fs-f2fs  4
-rw-r--r--  Documentation/ABI/testing/sysfs-power  2
-rw-r--r--  Documentation/Makefile  2
-rw-r--r--  Documentation/admin-guide/README.rst  2
-rw-r--r--  Documentation/admin-guide/abi-obsolete-files.rst  7
-rw-r--r--  Documentation/admin-guide/abi-obsolete.rst  6
-rw-r--r--  Documentation/admin-guide/abi-removed-files.rst  7
-rw-r--r--  Documentation/admin-guide/abi-removed.rst  6
-rw-r--r--  Documentation/admin-guide/abi-stable-files.rst  7
-rw-r--r--  Documentation/admin-guide/abi-stable.rst  6
-rw-r--r--  Documentation/admin-guide/abi-testing-files.rst  7
-rw-r--r--  Documentation/admin-guide/abi-testing.rst  6
-rw-r--r--  Documentation/admin-guide/abi.rst  18
-rw-r--r--  Documentation/admin-guide/cgroup-v1/freezer-subsystem.rst  4
-rw-r--r--  Documentation/admin-guide/cgroup-v1/memory.rst  1
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst  23
-rw-r--r--  Documentation/admin-guide/gpio/gpio-sim.rst  2
-rw-r--r--  Documentation/admin-guide/gpio/gpio-virtuser.rst  2
-rw-r--r--  Documentation/admin-guide/highuid.rst  80
-rw-r--r--  Documentation/admin-guide/index.rst  1
-rw-r--r--  Documentation/admin-guide/iostats.rst  89
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt  6
-rw-r--r--  Documentation/admin-guide/thunderbolt.rst  2
-rw-r--r--  Documentation/admin-guide/workload-tracing.rst  2
-rw-r--r--  Documentation/arch/arm64/amu.rst  2
-rw-r--r--  Documentation/arch/arm64/asymmetric-32bit.rst  2
-rw-r--r--  Documentation/conf.py  2
-rw-r--r--  Documentation/core-api/min_heap.rst  4
-rw-r--r--  Documentation/core-api/printk-formats.rst  4
-rw-r--r--  Documentation/dev-tools/kcsan.rst  2
-rw-r--r--  Documentation/dev-tools/kselftest.rst  2
-rw-r--r--  Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml  2
-rw-r--r--  Documentation/driver-api/firmware/firmware-usage-guidelines.rst  5
-rw-r--r--  Documentation/driver-api/generic-counter.rst  4
-rw-r--r--  Documentation/driver-api/iio/core.rst  2
-rw-r--r--  Documentation/driver-api/infiniband.rst  16
-rw-r--r--  Documentation/driver-api/media/drivers/zoran.rst  2
-rw-r--r--  Documentation/driver-api/media/maintainer-entry-profile.rst  2
-rw-r--r--  Documentation/driver-api/nvdimm/nvdimm.rst  6
-rw-r--r--  Documentation/driver-api/pm/devices.rst  2
-rw-r--r--  Documentation/features/debug/kprobes-on-ftrace/arch-support.txt  2
-rwxr-xr-x  Documentation/features/list-arch.sh  2
-rw-r--r--  Documentation/filesystems/9p.rst  2
-rw-r--r--  Documentation/filesystems/bcachefs/SubmittingPatches.rst  4
-rw-r--r--  Documentation/filesystems/coda.rst  2
-rw-r--r--  Documentation/filesystems/debugfs.rst  2
-rw-r--r--  Documentation/filesystems/index.rst  1
-rw-r--r--  Documentation/filesystems/iomap/design.rst  9
-rw-r--r--  Documentation/filesystems/iomap/operations.rst  42
-rw-r--r--  Documentation/filesystems/locking.rst  2
-rw-r--r--  Documentation/filesystems/netfs_library.rst  2
-rw-r--r--  Documentation/filesystems/overlayfs.rst  24
-rw-r--r--  Documentation/filesystems/porting.rst  48
-rw-r--r--  Documentation/filesystems/sysv-fs.rst  264
-rw-r--r--  Documentation/filesystems/vfs.rst  23
-rw-r--r--  Documentation/filesystems/xfs/xfs-delayed-logging-design.rst  2
-rw-r--r--  Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst  2
-rw-r--r--  Documentation/filesystems/xfs/xfs-online-fsck-design.rst  4
-rw-r--r--  Documentation/iio/iio_devbuf.rst  2
-rw-r--r--  Documentation/input/devices/elantech.rst  2
-rw-r--r--  Documentation/input/input-programming.rst  19
-rw-r--r--  Documentation/mm/split_page_table_lock.rst  2
-rw-r--r--  Documentation/networking/statistics.rst  2
-rw-r--r--  Documentation/nvme/nvme-pci-endpoint-target.rst  2
-rw-r--r--  Documentation/process/5.Posting.rst  13
-rw-r--r--  Documentation/process/changes.rst  4
-rw-r--r--  Documentation/process/code-of-conduct-interpretation.rst  17
-rw-r--r--  Documentation/process/kernel-docs.rst  11
-rw-r--r--  Documentation/process/submit-checklist.rst  12
-rw-r--r--  Documentation/process/submitting-patches.rst  45
-rw-r--r--  Documentation/scheduler/sched-bwc.rst  2
-rw-r--r--  Documentation/scheduler/sched-ext.rst  36
-rw-r--r--  Documentation/sound/soc/machine.rst  2
-rw-r--r--  Documentation/sphinx/automarkup.py  82
-rw-r--r--  Documentation/sphinx/cdomain.py  7
-rw-r--r--  Documentation/sphinx/kernel_abi.py  162
-rw-r--r--  Documentation/sphinx/kernel_feat.py  4
-rwxr-xr-x  Documentation/sphinx/kernel_include.py  4
-rw-r--r--  Documentation/sphinx/kerneldoc.py  19
-rw-r--r--  Documentation/sphinx/kernellog.py  22
-rw-r--r--  Documentation/sphinx/kfigure.py  91
-rw-r--r--  Documentation/sphinx/load_config.py  2
-rwxr-xr-x  Documentation/sphinx/maintainers_include.py  4
-rwxr-xr-x  Documentation/sphinx/rstFlatTable.py  10
-rw-r--r--  Documentation/trace/postprocess/decode_msr.py  2
-rw-r--r--  Documentation/translations/it_IT/process/submit-checklist.rst  7
-rw-r--r--  Documentation/translations/ja_JP/SubmitChecklist  105
-rw-r--r--  Documentation/translations/ja_JP/disclaimer-ja_JP.rst  24
-rw-r--r--  Documentation/translations/ja_JP/index.rst  2
-rw-r--r--  Documentation/translations/ja_JP/process/howto.rst  37
-rw-r--r--  Documentation/translations/ja_JP/process/submit-checklist.rst  163
-rw-r--r--  Documentation/translations/sp_SP/process/submit-checklist.rst  7
-rw-r--r--  Documentation/translations/zh_CN/admin-guide/README.rst  2
-rw-r--r--  Documentation/translations/zh_CN/dev-tools/ubsan.rst  33
-rw-r--r--  Documentation/translations/zh_CN/disclaimer-zh_CN.rst  8
-rw-r--r--  Documentation/translations/zh_CN/index.rst  8
-rw-r--r--  Documentation/translations/zh_CN/mm/balance.rst  2
-rw-r--r--  Documentation/translations/zh_CN/process/submit-checklist.rst  4
-rw-r--r--  Documentation/translations/zh_CN/security/credentials.rst  479
-rw-r--r--  Documentation/translations/zh_CN/security/index.rst  8
-rw-r--r--  Documentation/translations/zh_CN/security/keys/index.rst  22
-rw-r--r--  Documentation/translations/zh_CN/security/secrets/index.rst  17
-rw-r--r--  Documentation/translations/zh_CN/security/self-protection.rst  271
-rw-r--r--  Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst  209
-rw-r--r--  Documentation/translations/zh_CN/security/tpm/index.rst  20
-rw-r--r--  Documentation/translations/zh_CN/security/tpm/tpm-security.rst  151
-rw-r--r--  Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst  49
-rw-r--r--  Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst  31
-rw-r--r--  Documentation/translations/zh_CN/security/tpm/tpm_tis.rst  43
-rw-r--r--  Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst  51
-rw-r--r--  Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst  114
-rw-r--r--  Documentation/translations/zh_TW/admin-guide/README.rst  2
-rw-r--r--  Documentation/translations/zh_TW/process/submit-checklist.rst  4
-rw-r--r--  Documentation/usb/gadget-testing.rst  2
-rw-r--r--  Documentation/userspace-api/accelerators/ocxl.rst  7
-rw-r--r--  Documentation/userspace-api/dma-buf-heaps.rst  25
-rw-r--r--  Documentation/userspace-api/index.rst  1
-rw-r--r--  Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst  2
-rw-r--r--  MAINTAINERS  41
-rw-r--r--  Makefile  2
-rw-r--r--  arch/alpha/kernel/syscalls/syscall.tbl  1
-rw-r--r--  arch/arm/tools/syscall.tbl  1
-rw-r--r--  arch/arm64/kernel/elfcore.c  3
-rw-r--r--  arch/arm64/tools/syscall_32.tbl  1
-rw-r--r--  arch/loongarch/configs/loongson3_defconfig  1
-rw-r--r--  arch/m68k/configs/amiga_defconfig  1
-rw-r--r--  arch/m68k/configs/apollo_defconfig  1
-rw-r--r--  arch/m68k/configs/atari_defconfig  1
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig  1
-rw-r--r--  arch/m68k/configs/hp300_defconfig  1
-rw-r--r--  arch/m68k/configs/mac_defconfig  1
-rw-r--r--  arch/m68k/configs/multi_defconfig  1
-rw-r--r--  arch/m68k/configs/mvme147_defconfig  1
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig  1
-rw-r--r--  arch/m68k/configs/q40_defconfig  1
-rw-r--r--  arch/m68k/configs/sun3_defconfig  1
-rw-r--r--  arch/m68k/configs/sun3x_defconfig  1
-rw-r--r--  arch/m68k/kernel/syscalls/syscall.tbl  1
-rw-r--r--  arch/microblaze/kernel/syscalls/syscall.tbl  1
-rw-r--r--  arch/mips/configs/malta_defconfig  1
-rw-r--r--  arch/mips/configs/malta_kvm_defconfig  1
-rw-r--r--  arch/mips/configs/maltaup_xpa_defconfig  1
-rw-r--r--  arch/mips/configs/rm200_defconfig  1
-rw-r--r--  arch/mips/kernel/ptrace.c  20
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n32.tbl  1
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n64.tbl  1
-rw-r--r--  arch/mips/kernel/syscalls/syscall_o32.tbl  1
-rw-r--r--  arch/parisc/configs/generic-64bit_defconfig  1
-rw-r--r--  arch/parisc/kernel/syscalls/syscall.tbl  1
-rw-r--r--  arch/powerpc/configs/fsl-emb-nonhw.config  1
-rw-r--r--  arch/powerpc/configs/ppc6xx_defconfig  1
-rw-r--r--  arch/powerpc/kernel/fadump.c  2
-rw-r--r--  arch/powerpc/kernel/ptrace/ptrace.c  2
-rw-r--r--  arch/powerpc/kernel/syscalls/syscall.tbl  1
-rw-r--r--  arch/powerpc/platforms/powernv/opal-core.c  8
-rw-r--r--  arch/powerpc/platforms/pseries/papr-vpd.c  7
-rw-r--r--  arch/s390/kernel/crash_dump.c  62
-rw-r--r--  arch/s390/kernel/syscalls/syscall.tbl  1
-rw-r--r--  arch/sh/kernel/syscalls/syscall.tbl  1
-rw-r--r--  arch/sparc/kernel/syscalls/syscall.tbl  1
-rw-r--r--  arch/x86/Makefile  4
-rw-r--r--  arch/x86/coco/tdx/tdx.c  4
-rw-r--r--  arch/x86/entry/syscalls/syscall_32.tbl  1
-rw-r--r--  arch/x86/entry/syscalls/syscall_64.tbl  1
-rw-r--r--  arch/x86/events/amd/ibs.c  76
-rw-r--r--  arch/x86/events/rapl.c  7
-rw-r--r--  arch/xtensa/kernel/syscalls/syscall.tbl  1
-rw-r--r--  block/bdev.c  13
-rw-r--r--  block/blk-cgroup.c  8
-rw-r--r--  block/blk-ioprio.c  23
-rw-r--r--  drivers/accel/qaic/qaic_data.c  9
-rw-r--r--  drivers/base/devtmpfs.c  153
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c  21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  43
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h  677
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm  82
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_queue.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c  8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c  11
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c  94
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c  2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c  11
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c  23
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.h  2
-rw-r--r--  drivers/gpu/drm/xe/xe_dma_buf.c  2
-rw-r--r--  drivers/gpu/host1x/dev.c  6
-rw-r--r--  drivers/i2c/busses/i2c-amd-mp2-pci.c  5
-rw-r--r--  drivers/message/fusion/mptsas.c  8
-rw-r--r--  drivers/pinctrl/spacemit/Kconfig  2
-rw-r--r--  drivers/regulator/core.c  88
-rw-r--r--  drivers/regulator/dummy.c  2
-rw-r--r--  drivers/regulator/rtq2208-regulator.c  216
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_transport.c  8
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h  2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c  8
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.h  4
-rw-r--r--  drivers/spi/spi.c  6
-rw-r--r--  drivers/staging/media/ipu3/include/uapi/intel-ipu3.h  3
-rw-r--r--  drivers/vfio/group.c  16
-rw-r--r--  fs/9p/vfs_inode.c  7
-rw-r--r--  fs/9p/vfs_inode_dotl.c  8
-rw-r--r--  fs/Kconfig  1
-rw-r--r--  fs/Makefile  1
-rw-r--r--  fs/affs/affs.h  2
-rw-r--r--  fs/affs/namei.c  8
-rw-r--r--  fs/afs/addr_list.c  50
-rw-r--r--  fs/afs/cell.c  437
-rw-r--r--  fs/afs/cmservice.c  82
-rw-r--r--  fs/afs/dir.c  17
-rw-r--r--  fs/afs/dynroot.c  486
-rw-r--r--  fs/afs/fs_probe.c  32
-rw-r--r--  fs/afs/fsclient.c  4
-rw-r--r--  fs/afs/internal.h  98
-rw-r--r--  fs/afs/main.c  16
-rw-r--r--  fs/afs/mntpt.c  5
-rw-r--r--  fs/afs/proc.c  15
-rw-r--r--  fs/afs/rxrpc.c  8
-rw-r--r--  fs/afs/server.c  601
-rw-r--r--  fs/afs/server_list.c  6
-rw-r--r--  fs/afs/super.c  25
-rw-r--r--  fs/afs/vl_alias.c  7
-rw-r--r--  fs/afs/vl_rotate.c  2
-rw-r--r--  fs/afs/volume.c  15
-rw-r--r--  fs/autofs/autofs_i.h  2
-rw-r--r--  fs/autofs/dev-ioctl.c  3
-rw-r--r--  fs/autofs/root.c  14
-rw-r--r--  fs/bad_inode.c  6
-rw-r--r--  fs/bcachefs/fs-ioctl.c  4
-rw-r--r--  fs/bcachefs/fs.c  8
-rw-r--r--  fs/binfmt_elf.c  21
-rw-r--r--  fs/binfmt_elf_fdpic.c  13
-rw-r--r--  fs/btrfs/inode.c  8
-rw-r--r--  fs/buffer.c  58
-rw-r--r--  fs/cachefiles/namei.c  16
-rw-r--r--  fs/cachefiles/ondemand.c  7
-rw-r--r--  fs/ceph/addr.c  1259
-rw-r--r--  fs/ceph/dir.c  45
-rw-r--r--  fs/ceph/inode.c  31
-rw-r--r--  fs/ceph/mds_client.c  2
-rw-r--r--  fs/ceph/mds_client.h  3
-rw-r--r--  fs/ceph/super.c  11
-rw-r--r--  fs/ceph/super.h  2
-rw-r--r--  fs/coda/dir.c  14
-rw-r--r--  fs/configfs/dir.c  6
-rw-r--r--  fs/coredump.c  38
-rw-r--r--  fs/crypto/crypto.c  22
-rw-r--r--  fs/dax.c  111
-rw-r--r--  fs/dcache.c  49
-rw-r--r--  fs/devpts/inode.c  251
-rw-r--r--  fs/ecryptfs/inode.c  20
-rw-r--r--  fs/ecryptfs/super.c  1
-rw-r--r--  fs/eventfd.c  5
-rw-r--r--  fs/eventpoll.c  95
-rw-r--r--  fs/exec.c  2
-rw-r--r--  fs/exfat/namei.c  8
-rw-r--r--  fs/exportfs/expfs.c  2
-rw-r--r--  fs/ext2/namei.c  9
-rw-r--r--  fs/ext4/inode.c  4
-rw-r--r--  fs/ext4/namei.c  10
-rw-r--r--  fs/ext4/page-io.c  2
-rw-r--r--  fs/f2fs/data.c  2
-rw-r--r--  fs/f2fs/namei.c  14
-rw-r--r--  fs/fat/namei_msdos.c  8
-rw-r--r--  fs/fat/namei_vfat.c  8
-rw-r--r--  fs/file.c  133
-rw-r--r--  fs/file_table.c  73
-rw-r--r--  fs/fsopen.c  2
-rw-r--r--  fs/fuse/dir.c  50
-rw-r--r--  fs/gfs2/bmap.c  3
-rw-r--r--  fs/gfs2/inode.c  9
-rw-r--r--  fs/hfs/dir.c  10
-rw-r--r--  fs/hfsplus/dir.c  6
-rw-r--r--  fs/hostfs/hostfs_kern.c  16
-rw-r--r--  fs/hpfs/namei.c  10
-rw-r--r--  fs/hugetlbfs/inode.c  6
-rw-r--r--  fs/init.c  7
-rw-r--r--  fs/inode.c  127
-rw-r--r--  fs/internal.h  11
-rw-r--r--  fs/ioctl.c  10
-rw-r--r--  fs/iomap/Makefile  1
-rw-r--r--  fs/iomap/buffered-io.c  356
-rw-r--r--  fs/iomap/direct-io.c  279
-rw-r--r--  fs/iomap/fiemap.c  21
-rw-r--r--  fs/iomap/internal.h  10
-rw-r--r--  fs/iomap/ioend.c  216
-rw-r--r--  fs/iomap/iter.c  97
-rw-r--r--  fs/iomap/seek.c  16
-rw-r--r--  fs/iomap/swapfile.c  7
-rw-r--r--  fs/iomap/trace.h  8
-rw-r--r--  fs/jffs2/dir.c  18
-rw-r--r--  fs/jfs/namei.c  8
-rw-r--r--  fs/kernfs/dir.c  12
-rw-r--r--  fs/libfs.c  4
-rw-r--r--  fs/minix/namei.c  8
-rw-r--r--  fs/mnt_idmapping.c  51
-rw-r--r--  fs/mount.h  37
-rw-r--r--  fs/mpage.c  49
-rw-r--r--  fs/namei.c  149
-rw-r--r--  fs/namespace.c  852
-rw-r--r--  fs/nfs/dir.c  20
-rw-r--r--  fs/nfs/internal.h  4
-rw-r--r--  fs/nfs/nfs3proc.c  29
-rw-r--r--  fs/nfs/nfs4proc.c  47
-rw-r--r--  fs/nfs/proc.c  12
-rw-r--r--  fs/nfsd/nfs4recover.c  7
-rw-r--r--  fs/nfsd/vfs.c  34
-rw-r--r--  fs/nilfs2/namei.c  8
-rw-r--r--  fs/notify/fanotify/fanotify.c  38
-rw-r--r--  fs/notify/fanotify/fanotify.h  18
-rw-r--r--  fs/notify/fanotify/fanotify_user.c  89
-rw-r--r--  fs/notify/fdinfo.c  5
-rw-r--r--  fs/notify/fsnotify.c  47
-rw-r--r--  fs/notify/fsnotify.h  11
-rw-r--r--  fs/notify/mark.c  14
-rw-r--r--  fs/nsfs.c  32
-rw-r--r--  fs/ntfs3/namei.c  8
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c  10
-rw-r--r--  fs/ocfs2/namei.c  10
-rw-r--r--  fs/omfs/dir.c  6
-rw-r--r--  fs/open.c  44
-rw-r--r--  fs/orangefs/file.c  4
-rw-r--r--  fs/orangefs/inode.c  149
-rw-r--r--  fs/orangefs/namei.c  8
-rw-r--r--  fs/orangefs/orangefs-debug.h  43
-rw-r--r--  fs/orangefs/orangefs-debugfs.c  43
-rw-r--r--  fs/overlayfs/dir.c  46
-rw-r--r--  fs/overlayfs/overlayfs.h  15
-rw-r--r--  fs/overlayfs/params.c  25
-rw-r--r--  fs/overlayfs/super.c  23
-rw-r--r--  fs/pidfs.c  247
-rw-r--r--  fs/pipe.c  181
-rw-r--r--  fs/pnode.c  14
-rw-r--r--  fs/pnode.h  2
-rw-r--r--  fs/proc/kcore.c  12
-rw-r--r--  fs/pstore/inode.c  111
-rw-r--r--  fs/pstore/internal.h  4
-rw-r--r--  fs/pstore/platform.c  11
-rw-r--r--  fs/ramfs/inode.c  6
-rw-r--r--  fs/read_write.c  13
-rw-r--r--  fs/signalfd.c  7
-rw-r--r--  fs/smb/client/cifsfs.h  4
-rw-r--r--  fs/smb/client/file.c  2
-rw-r--r--  fs/smb/client/inode.c  10
-rw-r--r--  fs/smb/server/vfs.c  58
-rw-r--r--  fs/splice.c  40
-rw-r--r--  fs/super.c  57
-rw-r--r--  fs/sysv/Kconfig  38
-rw-r--r--  fs/sysv/Makefile  9
-rw-r--r--  fs/sysv/balloc.c  240
-rw-r--r--  fs/sysv/dir.c  378
-rw-r--r--  fs/sysv/file.c  59
-rw-r--r--  fs/sysv/ialloc.c  235
-rw-r--r--  fs/sysv/inode.c  354
-rw-r--r--  fs/sysv/itree.c  511
-rw-r--r--  fs/sysv/namei.c  280
-rw-r--r--  fs/sysv/super.c  595
-rw-r--r--  fs/sysv/sysv.h  245
-rw-r--r--  fs/timerfd.c  6
-rw-r--r--  fs/tracefs/inode.c  10
-rw-r--r--  fs/ubifs/dir.c  10
-rw-r--r--  fs/udf/namei.c  12
-rw-r--r--  fs/ufs/namei.c  8
-rw-r--r--  fs/unicode/Kconfig  5
-rw-r--r--  fs/unicode/Makefile  2
-rw-r--r--  fs/unicode/tests/.kunitconfig  3
-rw-r--r--  fs/unicode/tests/utf8_kunit.c (renamed from fs/unicode/utf8-selftest.c)  153
-rw-r--r--  fs/unicode/utf8-norm.c  2
-rw-r--r--  fs/vboxsf/dir.c  8
-rw-r--r--  fs/xfs/scrub/orphanage.c  9
-rw-r--r--  fs/xfs/xfs_aops.c  25
-rw-r--r--  fs/xfs/xfs_file.c  6
-rw-r--r--  fs/xfs/xfs_iomap.c  8
-rw-r--r--  fs/xfs/xfs_iops.c  4
-rw-r--r--  fs/xfs/xfs_super.c  3
-rw-r--r--  fs/zonefs/file.c  2
-rw-r--r--  include/asm-generic/io.h  6
-rw-r--r--  include/linux/binfmts.h  2
-rw-r--r--  include/linux/blkdev.h  8
-rw-r--r--  include/linux/cgroup-defs.h  5
-rw-r--r--  include/linux/cgroup.h  3
-rw-r--r--  include/linux/compiler.h  18
-rw-r--r--  include/linux/compiler_types.h  23
-rw-r--r--  include/linux/dcache.h  39
-rw-r--r--  include/linux/eventpoll.h  4
-rw-r--r--  include/linux/fanotify.h  12
-rw-r--r--  include/linux/file_ref.h  48
-rw-r--r--  include/linux/fs.h  62
-rw-r--r--  include/linux/fs_context.h  2
-rw-r--r--  include/linux/fscrypt.h  12
-rw-r--r--  include/linux/fsnotify.h  20
-rw-r--r--  include/linux/fsnotify_backend.h  42
-rw-r--r--  include/linux/iomap.h  116
-rw-r--r--  include/linux/kexec.h  2
-rw-r--r--  include/linux/key.h  1
-rw-r--r--  include/linux/kstrtox.h  1
-rw-r--r--  include/linux/misc_cgroup.h  6
-rw-r--r--  include/linux/mm.h  2
-rw-r--r--  include/linux/mnt_idmapping.h  5
-rw-r--r--  include/linux/namei.h  45
-rw-r--r--  include/linux/nfs_xdr.h  2
-rw-r--r--  include/linux/nodemask.h  8
-rw-r--r--  include/linux/nodemask_types.h  11
-rw-r--r--  include/linux/numa.h  17
-rw-r--r--  include/linux/page-flags.h  18
-rw-r--r--  include/linux/pagemap.h  48
-rw-r--r--  include/linux/pid.h  7
-rw-r--r--  include/linux/pidfs.h  1
-rw-r--r--  include/linux/pipe_fs_i.h  2
-rw-r--r--  include/linux/rcupdate.h  33
-rw-r--r--  include/linux/rcutiny.h  36
-rw-r--r--  include/linux/rcutree.h  3
-rw-r--r--  include/linux/sched/ext.h  1
-rw-r--r--  include/linux/seccomp.h  12
-rw-r--r--  include/linux/slab.h  16
-rw-r--r--  include/linux/string.h  16
-rw-r--r--  include/linux/string_choices.h  24
-rw-r--r--  include/linux/syscalls.h  8
-rw-r--r--  include/linux/sysv_fs.h  214
-rw-r--r--  include/linux/thread_info.h  48
-rw-r--r--  include/linux/topology.h  30
-rw-r--r--  include/linux/uaccess.h  2
-rw-r--r--  include/linux/ucopysize.h  63
-rw-r--r--  include/linux/uidgid.h  6
-rw-r--r--  include/linux/uio.h  2
-rw-r--r--  include/linux/vfsdebug.h  45
-rw-r--r--  include/linux/vmcore_info.h  3
-rw-r--r--  include/linux/wait.h  3
-rw-r--r--  include/net/af_rxrpc.h  2
-rw-r--r--  include/trace/events/afs.h  83
-rw-r--r--  include/trace/events/rcu.h  34
-rw-r--r--  include/trace/events/sched_ext.h  19
-rw-r--r--  include/uapi/asm-generic/unistd.h  4
-rw-r--r--  include/uapi/linux/elf.h  99
-rw-r--r--  include/uapi/linux/fanotify.h  10
-rw-r--r--  include/uapi/linux/firewire-cdev.h  3
-rw-r--r--  include/uapi/linux/mount.h  10
-rw-r--r--  include/uapi/linux/nilfs2_ondisk.h  3
-rw-r--r--  include/uapi/linux/pidfd.h  31
-rw-r--r--  include/uapi/linux/stddef.h  6
-rw-r--r--  init/.kunitconfig  3
-rw-r--r--  init/Kconfig  15
-rw-r--r--  init/Makefile  1
-rw-r--r--  init/initramfs.c  66
-rw-r--r--  init/initramfs_internal.h  8
-rw-r--r--  init/initramfs_test.c  407
-rw-r--r--  io_uring/net.c  5
-rw-r--r--  kernel/audit_watch.c  12
-rw-r--r--  kernel/auditsc.c  12
-rw-r--r--  kernel/bpf/inode.c  8
-rw-r--r--  kernel/cgroup/cgroup-internal.h  1
-rw-r--r--  kernel/cgroup/cgroup-v1.c  7
-rw-r--r--  kernel/cgroup/cgroup.c  6
-rw-r--r--  kernel/cgroup/cpuset-v1.c  49
-rw-r--r--  kernel/cgroup/cpuset.c  45
-rw-r--r--  kernel/cgroup/legacy_freezer.c  6
-rw-r--r--  kernel/cgroup/misc.c  16
-rw-r--r--  kernel/cgroup/rstat.c  116
-rw-r--r--  kernel/configs/hardening.config  2
-rw-r--r--  kernel/crash_core.c  2
-rw-r--r--  kernel/entry/common.c  2
-rw-r--r--  kernel/exit.c  64
-rw-r--r--  kernel/fork.c  22
-rw-r--r--  kernel/kcmp.c  2
-rw-r--r--  kernel/pid.c  106
-rw-r--r--  kernel/rcu/tiny.c  25
-rw-r--r--  kernel/rcu/tree.c  9
-rw-r--r--  kernel/sched/build_policy.c  1
-rw-r--r--  kernel/sched/core.c  30
-rw-r--r--  kernel/sched/ext.c  1085
-rw-r--r--  kernel/sched/ext.h  10
-rw-r--r--  kernel/sched/ext_idle.c  1171
-rw-r--r--  kernel/sched/ext_idle.h  35
-rw-r--r--  kernel/seccomp.c  49
-rw-r--r--  kernel/signal.c  108
-rw-r--r--  kernel/stop_machine.c  1
-rw-r--r--  kernel/sys.c  14
-rw-r--r--  kernel/user_namespace.c  26
-rw-r--r--  kernel/watch_queue.c  16
-rw-r--r--  lib/Kconfig.debug  87
-rw-r--r--  lib/Kconfig.ubsan  25
-rw-r--r--  lib/Makefile  41
-rw-r--r--  lib/math/Makefile  5
-rw-r--r--  lib/math/prime_numbers.c  91
-rw-r--r--  lib/math/prime_numbers_private.h  16
-rw-r--r--  lib/math/tests/Makefile  8
-rw-r--r--  lib/math/tests/gcd_kunit.c  56
-rw-r--r--  lib/math/tests/int_log_kunit.c  74
-rw-r--r--  lib/math/tests/prime_numbers_kunit.c  59
-rw-r--r--  lib/math/tests/rational_kunit.c (renamed from lib/math/rational-test.c)  0
-rw-r--r--  lib/test_ubsan.c  18
-rw-r--r--  lib/tests/Makefile  43
-rw-r--r--  lib/tests/bitfield_kunit.c (renamed from lib/bitfield_kunit.c)  0
-rw-r--r--  lib/tests/checksum_kunit.c (renamed from lib/checksum_kunit.c)  0
-rw-r--r--  lib/tests/cmdline_kunit.c (renamed from lib/cmdline_kunit.c)  0
-rw-r--r--  lib/tests/cpumask_kunit.c (renamed from lib/cpumask_kunit.c)  0
-rw-r--r--  lib/tests/crc_kunit.c (renamed from lib/crc_kunit.c)  0
-rw-r--r--  lib/tests/fortify_kunit.c (renamed from lib/fortify_kunit.c)  156
-rw-r--r--  lib/tests/hashtable_test.c (renamed from lib/hashtable_test.c)  0
-rw-r--r--  lib/tests/is_signed_type_kunit.c (renamed from lib/is_signed_type_kunit.c)  0
-rw-r--r--  lib/tests/kfifo_kunit.c  224
-rw-r--r--  lib/tests/kunit_iov_iter.c (renamed from lib/kunit_iov_iter.c)  0
-rw-r--r--  lib/tests/list-test.c (renamed from lib/list-test.c)  0
-rw-r--r--  lib/tests/memcpy_kunit.c (renamed from lib/memcpy_kunit.c)  0
-rw-r--r--  lib/tests/overflow_kunit.c (renamed from lib/overflow_kunit.c)  38
-rw-r--r--  lib/tests/printf_kunit.c (renamed from lib/test_printf.c)  442
-rw-r--r--  lib/tests/scanf_kunit.c (renamed from lib/test_scanf.c)  295
-rw-r--r--  lib/tests/siphash_kunit.c (renamed from lib/siphash_kunit.c)  0
-rw-r--r--  lib/tests/slub_kunit.c (renamed from lib/slub_kunit.c)  59
-rw-r--r--  lib/tests/stackinit_kunit.c (renamed from lib/stackinit_kunit.c)  30
-rw-r--r--  lib/tests/string_helpers_kunit.c (renamed from lib/string_helpers_kunit.c)  0
-rw-r--r--  lib/tests/string_kunit.c (renamed from lib/string_kunit.c)  4
-rw-r--r--  lib/tests/test_bits.c (renamed from lib/test_bits.c)  0
-rw-r--r--  lib/tests/test_fprobe.c (renamed from lib/test_fprobe.c)  0
-rw-r--r--  lib/tests/test_hash.c (renamed from lib/test_hash.c)  0
-rw-r--r--  lib/tests/test_kprobes.c (renamed from lib/test_kprobes.c)  0
-rw-r--r--  lib/tests/test_linear_ranges.c (renamed from lib/test_linear_ranges.c)  0
-rw-r--r--  lib/tests/test_list_sort.c (renamed from lib/test_list_sort.c)  0
-rw-r--r--  lib/tests/test_sort.c (renamed from lib/test_sort.c)  0
-rw-r--r--  lib/tests/usercopy_kunit.c (renamed from lib/usercopy_kunit.c)  0
-rw-r--r--  lib/tests/util_macros_kunit.c (renamed from lib/util_macros_kunit.c)  0
-rw-r--r--  lib/ubsan.c  28
-rw-r--r--  lib/ubsan.h  8
-rw-r--r--  lib/vsprintf.c  7
-rw-r--r--  mm/Kconfig  4
-rw-r--r--  mm/gup.c  6
-rw-r--r--  mm/memcontrol-v1.c  6
-rw-r--r--  mm/mempolicy.c  31
-rw-r--r--  mm/shmem.c  8
-rw-r--r--  mm/slab.h  34
-rw-r--r--  mm/slab_common.c  44
-rw-r--r--  mm/slub.c  336
-rw-r--r--  mm/usercopy.c  18
-rw-r--r--  mm/util.c  162
-rw-r--r--  net/rxrpc/ar-internal.h  1
-rw-r--r--  net/rxrpc/peer_object.c  30
-rw-r--r--  rust/kernel/fs/file.rs  4
-rw-r--r--  rust/kernel/seq_file.rs  1
-rw-r--r--  rust/kernel/workqueue.rs  18
-rwxr-xr-x  samples/check-exec/run-script-ask.sh (renamed from samples/check-exec/run-script-ask.inc)  0
-rw-r--r--  samples/vfs/samples-vfs.h  14
-rw-r--r--  samples/vfs/test-list-all-mounts.c  35
-rw-r--r--  scripts/Makefile.clang  2
-rw-r--r--  scripts/Makefile.lib  4
-rw-r--r--  scripts/Makefile.ubsan  10
-rwxr-xr-x  scripts/documentation-file-ref-check  2
-rwxr-xr-x  scripts/get_abi.pl  1103
-rwxr-xr-x  scripts/get_abi.py  214
-rwxr-xr-x  scripts/get_feat.pl  4
-rw-r--r--  scripts/integer-wrap-ignore.scl  3
-rwxr-xr-x  scripts/kernel-doc  163
-rw-r--r--  scripts/lib/abi/abi_parser.py  628
-rw-r--r--  scripts/lib/abi/abi_regex.py  234
-rw-r--r--  scripts/lib/abi/helpers.py  38
-rw-r--r--  scripts/lib/abi/system_symbols.py  378
-rw-r--r--  scripts/syscall.tbl  1
-rw-r--r--  security/Kconfig  21
-rw-r--r--  security/Kconfig.hardening  33
-rw-r--r--  security/apparmor/apparmorfs.c  8
-rw-r--r--  security/keys/gc.c  4
-rw-r--r--  security/keys/key.c  2
-rw-r--r--  security/landlock/fs.c  2
-rw-r--r--  security/loadpin/Kconfig  2
-rw-r--r--  security/selinux/hooks.c  3
-rw-r--r--  security/selinux/include/classmap.h  2
-rw-r--r--  security/yama/yama_lsm.c  9
-rw-r--r--  tools/include/nolibc/Makefile  1
-rw-r--r--  tools/include/nolibc/arch-mips.h  1
-rw-r--r--  tools/include/nolibc/arch-s390.h  9
-rw-r--r--  tools/include/nolibc/arch.h  2
-rw-r--r--  tools/include/nolibc/crt.h  2
-rw-r--r--  tools/include/nolibc/dirent.h  98
-rw-r--r--  tools/include/nolibc/errno.h  2
-rw-r--r--  tools/include/nolibc/nolibc.h  4
-rw-r--r--  tools/include/nolibc/signal.h  1
-rw-r--r--  tools/include/nolibc/stackprotector.h  2
-rw-r--r--  tools/include/nolibc/stdio.h  98
-rw-r--r--  tools/include/nolibc/stdlib.h  1
-rw-r--r--  tools/include/nolibc/string.h  4
-rw-r--r--  tools/include/nolibc/sys.h  83
-rw-r--r--  tools/memory-model/Documentation/glossary.txt  32
-rw-r--r--  tools/memory-model/Documentation/herd-representation.txt  49
-rw-r--r--  tools/memory-model/README  4
-rw-r--r--  tools/memory-model/linux-kernel.bell  33
-rw-r--r--  tools/memory-model/linux-kernel.cat  10
-rw-r--r--  tools/memory-model/linux-kernel.cfg  1
-rw-r--r--  tools/memory-model/linux-kernel.def  169
-rw-r--r--  tools/sched_ext/include/scx/common.bpf.h  34
-rw-r--r--  tools/sched_ext/include/scx/common.h  1
-rw-r--r--  tools/sched_ext/include/scx/compat.bpf.h  95
-rw-r--r--  tools/sched_ext/include/scx/compat.h  16
-rw-r--r--  tools/sched_ext/include/scx/enum_defs.autogen.h  120
-rw-r--r--  tools/sched_ext/scx_central.c  15
-rw-r--r--  tools/sched_ext/scx_qmap.bpf.c  23
-rw-r--r--  tools/testing/selftests/Makefile  1
-rw-r--r--  tools/testing/selftests/filesystems/mount-notify/.gitignore  2
-rw-r--r--  tools/testing/selftests/filesystems/mount-notify/Makefile  6
-rw-r--r--  tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c  516
-rw-r--r--  tools/testing/selftests/filesystems/nsfs/iterate_mntns.c  14
-rw-r--r--  tools/testing/selftests/filesystems/overlayfs/Makefile  11
-rw-r--r--  tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c  507
-rw-r--r--  tools/testing/selftests/filesystems/overlayfs/wrappers.h  17
-rw-r--r--  tools/testing/selftests/filesystems/statmount/statmount.h  2
-rw-r--r--  tools/testing/selftests/filesystems/utils.c  501
-rw-r--r--  tools/testing/selftests/filesystems/utils.h  45
-rw-r--r--  tools/testing/selftests/kselftest.h  5
-rwxr-xr-x  tools/testing/selftests/kselftest/module.sh  2
-rw-r--r--  tools/testing/selftests/lib/Makefile  2
-rw-r--r--  tools/testing/selftests/lib/config  3
-rwxr-xr-x  tools/testing/selftests/lib/prime_numbers.sh  4
-rwxr-xr-x  tools/testing/selftests/lib/printf.sh  4
-rwxr-xr-x  tools/testing/selftests/lib/scanf.sh  4
-rw-r--r--  tools/testing/selftests/mm/guard-pages.c  16
-rw-r--r--  tools/testing/selftests/mount_setattr/mount_setattr_test.c  652
-rw-r--r--  tools/testing/selftests/nolibc/Makefile  30
-rw-r--r--  tools/testing/selftests/nolibc/nolibc-test-linkage.c  6
-rw-r--r--  tools/testing/selftests/nolibc/nolibc-test.c  138
-rwxr-xr-x  tools/testing/selftests/nolibc/run-tests.sh  26
-rw-r--r--  tools/testing/selftests/pidfd/.gitignore  2
-rw-r--r--  tools/testing/selftests/pidfd/Makefile  4
-rw-r--r--  tools/testing/selftests/pidfd/pidfd.h  109
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_exec_helper.c  12
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_fdinfo_test.c  1
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_info_test.c  692
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_open_test.c  30
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_setns_test.c  45
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_test.c  76
-rw-r--r--  tools/testing/selftests/sched_ext/Makefile  1
-rw-r--r--  tools/testing/selftests/sched_ext/numa.bpf.c  100
-rw-r--r--  tools/testing/selftests/sched_ext/numa.c  59
-rw-r--r--  tools/testing/selftests/seccomp/seccomp_bpf.c  6
-rw-r--r--  virt/kvm/kvm_main.c  11
645 files changed, 19267 insertions, 12751 deletions
diff --git a/Documentation/ABI/README b/Documentation/ABI/README
index 8bac9cb09a6d..ef0e6d11e919 100644
--- a/Documentation/ABI/README
+++ b/Documentation/ABI/README
@@ -1,4 +1,5 @@
-This directory attempts to document the ABI between the Linux kernel and
+This part of the documentation inside Documentation/ABI directory
+attempts to document the ABI between the Linux kernel and
userspace, and the relative stability of these interfaces. Due to the
everchanging nature of Linux, and the differing maturity levels, these
interfaces should be used by userspace programs in different ways.
diff --git a/Documentation/ABI/removed/sysfs-class-rfkill b/Documentation/ABI/removed/sysfs-class-rfkill
index f25174eafd55..20cb688af173 100644
--- a/Documentation/ABI/removed/sysfs-class-rfkill
+++ b/Documentation/ABI/removed/sysfs-class-rfkill
@@ -4,7 +4,7 @@ For details to this subsystem look at Documentation/driver-api/rfkill.rst.
What: /sys/class/rfkill/rfkill[0-9]+/claim
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: This file was deprecated because there no longer was a way to
claim just control over a single rfkill instance.
diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill
index 037979f7dc4b..67b605e3dd16 100644
--- a/Documentation/ABI/stable/sysfs-class-rfkill
+++ b/Documentation/ABI/stable/sysfs-class-rfkill
@@ -16,7 +16,7 @@ Description: The rfkill class subsystem folder.
What: /sys/class/rfkill/rfkill[0-9]+/name
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: Name assigned by driver to this key (interface or driver name).
Values: arbitrary string.
@@ -24,7 +24,7 @@ Values: arbitrary string.
What: /sys/class/rfkill/rfkill[0-9]+/type
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: Driver type string ("wlan", "bluetooth", etc).
Values: See include/linux/rfkill.h.
@@ -32,7 +32,7 @@ Values: See include/linux/rfkill.h.
What: /sys/class/rfkill/rfkill[0-9]+/persistent
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: Whether the soft blocked state is initialised from non-volatile
storage at startup.
@@ -44,7 +44,7 @@ Values: A numeric value:
What: /sys/class/rfkill/rfkill[0-9]+/state
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: Current state of the transmitter.
This file was scheduled to be removed in 2014, but due to its
@@ -67,7 +67,7 @@ Values: A numeric value.
What: /sys/class/rfkill/rfkill[0-9]+/hard
Date: 12-March-2010
-KernelVersion v2.6.34
+KernelVersion: v2.6.34
Contact: linux-wireless@vger.kernel.org
Description: Current hardblock state. This file is read only.
Values: A numeric value.
@@ -81,7 +81,7 @@ Values: A numeric value.
What: /sys/class/rfkill/rfkill[0-9]+/soft
Date: 12-March-2010
-KernelVersion v2.6.34
+KernelVersion: v2.6.34
Contact: linux-wireless@vger.kernel.org
Description: Current softblock state. This file is read and write.
Values: A numeric value.
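The rfkill attributes documented in the two hunks above are ordinary sysfs files. A minimal sketch of how userspace might dump them; the rfkill0 instance number is an assumption, substitute any rfkill[0-9]+ directory present on the running system:

    from pathlib import Path

    # Sketch: read the rfkill sysfs attributes documented above.
    # /sys/class/rfkill/rfkill0 is an assumed instance.
    rfkill = Path("/sys/class/rfkill/rfkill0")
    for attr in ("name", "type", "persistent", "state", "hard", "soft"):
        node = rfkill / attr
        if node.exists():
            print(f"{attr}: {node.read_text().strip()}")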
diff --git a/Documentation/ABI/stable/sysfs-devices-system-cpu b/Documentation/ABI/stable/sysfs-devices-system-cpu
index 902392d7eddf..cf78bd99f6c8 100644
--- a/Documentation/ABI/stable/sysfs-devices-system-cpu
+++ b/Documentation/ABI/stable/sysfs-devices-system-cpu
@@ -24,12 +24,6 @@ Description: Default value for the Data Stream Control Register (DSCR) on
If set by a process it will be inherited by child processes.
Values: 64 bit unsigned integer (bit field)
-What: /sys/devices/system/cpu/cpuX/topology/physical_package_id
-Description: physical package id of cpuX. Typically corresponds to a physical
- socket number, but the actual value is architecture and platform
- dependent.
-Values: integer
-
What: /sys/devices/system/cpu/cpuX/topology/die_id
Description: the CPU die ID of cpuX. Typically it is the hardware platform's
identifier (rather than the kernel's). The actual value is
@@ -86,10 +80,6 @@ What: /sys/devices/system/cpu/cpuX/topology/die_cpus
Description: internal kernel map of CPUs within the same die.
Values: hexadecimal bitmask.
-What: /sys/devices/system/cpu/cpuX/topology/ppin
-Description: per-socket protected processor inventory number
-Values: hexadecimal.
-
What: /sys/devices/system/cpu/cpuX/topology/die_cpus_list
Description: human-readable list of CPUs within the same die.
The format is like 0-3, 8-11, 14,17.
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index f2ec42949a54..4a355e6747ae 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -246,14 +246,14 @@ Description: Controls whether PRS disable is turned on for the workqueue.
capability.
What: /sys/bus/dsa/devices/wq<m>.<n>/occupancy
-Date May 25, 2021
+Date: May 25, 2021
KernelVersion: 5.14.0
Contact: dmaengine@vger.kernel.org
Description: Show the current number of entries in this WQ if WQ Occupancy
Support bit WQ capabilities is 1.
What: /sys/bus/dsa/devices/wq<m>.<n>/enqcmds_retries
-Date Oct 29, 2021
+Date: Oct 29, 2021
KernelVersion: 5.17.0
Contact: dmaengine@vger.kernel.org
Description: Indicate the number of retires for an enqcmds submission on a sharedwq.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-midi2 b/Documentation/ABI/testing/configfs-usb-gadget-midi2
index 0eac3aaba137..d76a52e2ca7f 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-midi2
+++ b/Documentation/ABI/testing/configfs-usb-gadget-midi2
@@ -47,7 +47,7 @@ Description:
midi1_first_group The first UMP Group number for MIDI 1.0 (0-15)
midi1_num_groups The number of groups for MIDI 1.0 (0-16)
ui_hint 0: unknown, 1: receiver, 2: sender, 3: both
- midi_ci_verison Supported MIDI-CI version number (8 bit)
+ midi_ci_version Supported MIDI-CI version number (8 bit)
is_midi1 Legacy MIDI 1.0 device (0, 1 or 2)
sysex8_streams Max number of SysEx8 streams (8 bit)
active Active FB flag (0 or 1)
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti b/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti
index bf2869c413e7..a97b70f588da 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti
@@ -1,241 +1,241 @@
What: /sys/bus/coresight/devices/<cti-name>/enable
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Enable/Disable the CTI hardware.
What: /sys/bus/coresight/devices/<cti-name>/powered
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Indicate if the CTI hardware is powered.
What: /sys/bus/coresight/devices/<cti-name>/ctmid
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Display the associated CTM ID
What: /sys/bus/coresight/devices/<cti-name>/nr_trigger_cons
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Number of devices connected to triggers on this CTI
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/name
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Name of connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/in_signals
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Input trigger signals from connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/in_types
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Functional types for the input trigger signals
from connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/out_signals
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Output trigger signals to connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/out_types
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Functional types for the output trigger signals
to connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/regs/inout_sel
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Select the index for inen and outen registers.
What: /sys/bus/coresight/devices/<cti-name>/regs/inen
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Read or write the CTIINEN register selected by inout_sel.
What: /sys/bus/coresight/devices/<cti-name>/regs/outen
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Read or write the CTIOUTEN register selected by inout_sel.
What: /sys/bus/coresight/devices/<cti-name>/regs/gate
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Read or write CTIGATE register.
What: /sys/bus/coresight/devices/<cti-name>/regs/asicctl
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Read or write ASICCTL register.
What: /sys/bus/coresight/devices/<cti-name>/regs/intack
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Write the INTACK register.
What: /sys/bus/coresight/devices/<cti-name>/regs/appset
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Set CTIAPPSET register to activate channel. Read back to
determine current value of register.
What: /sys/bus/coresight/devices/<cti-name>/regs/appclear
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Write APPCLEAR register to deactivate channel.
What: /sys/bus/coresight/devices/<cti-name>/regs/apppulse
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Write APPPULSE to pulse a channel active for one clock
cycle.
What: /sys/bus/coresight/devices/<cti-name>/regs/chinstatus
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Read current status of channel inputs.
What: /sys/bus/coresight/devices/<cti-name>/regs/choutstatus
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) read current status of channel outputs.
What: /sys/bus/coresight/devices/<cti-name>/regs/triginstatus
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) read current status of input trigger signals
What: /sys/bus/coresight/devices/<cti-name>/regs/trigoutstatus
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) read current status of output trigger signals.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigin_attach
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Attach a CTI input trigger to a CTM channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigin_detach
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Detach a CTI input trigger from a CTM channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_attach
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Attach a CTI output trigger to a CTM channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_detach
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Detach a CTI output trigger from a CTM channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_gate_enable
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Enable CTIGATE for single channel (Write) or list enabled
channels through the gate (R).
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_gate_disable
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Disable CTIGATE for single channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_set
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Activate a single channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_clear
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Deactivate a single channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_pulse
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Pulse a single channel - activate for a single clock cycle.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_filtered
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) List of output triggers filtered across all connections.
What: /sys/bus/coresight/devices/<cti-name>/channels/trig_filter_enable
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Enable or disable trigger output signal filtering.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_inuse
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) show channels with at least one attached trigger signal.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_free
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) show channels with no attached trigger signals.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_sel
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Write channel number to select a channel to view, read to
see selected channel number.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_in
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Read to see input triggers connected to selected view
channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_out
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Read to see output triggers connected to selected view
channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_reset
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Clear all channel / trigger programming.
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
index bf710ea6e0ef..53cb454b60d0 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
@@ -1,6 +1,6 @@
What: /sys/bus/coresight/devices/<tpdm-name>/integration_test
Date: January 2023
-KernelVersion 6.2
+KernelVersion: 6.2
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Run integration test for tpdm. Integration test
@@ -14,7 +14,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/reset_dataset
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Reset the dataset of the tpdm.
@@ -24,7 +24,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_type
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the trigger type of the DSB for tpdm.
@@ -35,7 +35,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_ts
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the trigger timestamp of the DSB for tpdm.
@@ -46,7 +46,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_mode
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the programming mode of the DSB for tpdm.
@@ -60,7 +60,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_idx
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the index number of the edge detection for the DSB
@@ -69,7 +69,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_val
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
Write a data to control the edge detection corresponding to
@@ -85,7 +85,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_mask
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
Write a data to mask the edge detection corresponding to the index
@@ -97,21 +97,21 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcr[0:15]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
Read a set of the edge control value of the DSB in TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcmr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
Read a set of the edge control mask of the DSB in TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the value of the trigger pattern for the DSB
@@ -119,7 +119,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpmr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the mask of the trigger pattern for the DSB
@@ -127,21 +127,21 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the value of the pattern for the DSB subunit TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpmr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the mask of the pattern for the DSB subunit TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/enable_ts
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Set the pattern timestamp of DSB tpdm. Read
@@ -153,7 +153,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/set_type
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Set the pattern type of DSB tpdm. Read
@@ -165,7 +165,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_msr/msr[0:31]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the MSR(mux select register) for the DSB subunit
@@ -173,7 +173,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_mode
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description: (Write) Set the data collection mode of CMB tpdm. Continuous
change creates CMB data set elements on every CMBCLK edge.
@@ -187,7 +187,7 @@ Description: (Write) Set the data collection mode of CMB tpdm. Continuous
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_trig_patt/xpr[0:1]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the value of the trigger pattern for the CMB
@@ -195,7 +195,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_trig_patt/xpmr[0:1]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the mask of the trigger pattern for the CMB
@@ -203,21 +203,21 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpr[0:1]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the value of the pattern for the CMB subunit TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpmr[0:1]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the mask of the pattern for the CMB subunit TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_patt/enable_ts
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Set the pattern timestamp of CMB tpdm. Read
@@ -229,7 +229,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_trig_ts
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the trigger timestamp of the CMB for tpdm.
@@ -240,7 +240,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_ts_all
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Read or write the status of timestamp upon all interface.
@@ -252,7 +252,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_msr/msr[0:31]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the MSR(mux select register) for the CMB subunit
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 3e1630c70d8a..e44bb614964b 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -347,7 +347,7 @@ Description: Used to control configure extension list:
- [c] means add/del cold file extension
What: /sys/fs/f2fs/<disk>/unusable
-Date April 2019
+Date: April 2019
Contact: "Daniel Rosenberg" <drosen@google.com>
Description: If checkpoint=disable, it displays the number of blocks that
are unusable.
@@ -355,7 +355,7 @@ Description: If checkpoint=disable, it displays the number of blocks that
would be unusable if checkpoint=disable were to be set.
What: /sys/fs/f2fs/<disk>/encoding
-Date July 2019
+Date: July 2019
Contact: "Daniel Rosenberg" <drosen@google.com>
Description: Displays name and version of the encoding set for the filesystem.
If no encoding is set, displays (none)
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index a3942b1036e2..2192478e83cf 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -131,7 +131,7 @@ Description:
CAUTION: Using it will cause your machine's real-time (CMOS)
clock to be set to a random invalid time after a resume.
-What; /sys/power/pm_trace_dev_match
+What: /sys/power/pm_trace_dev_match
Date: October 2010
Contact: James Hogan <jhogan@kernel.org>
Description:
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 52c6c5a3efa9..63094646df28 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -12,7 +12,7 @@ endif
# Check for broken ABI files
ifeq ($(CONFIG_WARN_ABI_ERRORS),y)
-$(shell $(srctree)/scripts/get_abi.pl validate --dir $(srctree)/Documentation/ABI)
+$(shell $(srctree)/scripts/get_abi.py --dir $(srctree)/Documentation/ABI validate)
endif
# You can set these variables from the command line.
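The hunk above swaps the Perl ABI checker for the new Python implementation; note that --dir now comes before the validate subcommand. A sketch of running the same check standalone, with the arguments taken verbatim from the Makefile rule above (the wrapper itself is hypothetical and assumes the current directory is the top of the kernel source tree):

    import subprocess

    # Sketch: invoke the new ABI validator the same way the Makefile rule does.
    subprocess.run(
        ["./scripts/get_abi.py", "--dir", "Documentation/ABI", "validate"],
        check=True,
    )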
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index b557cf1c820d..70b02f30013a 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -165,7 +165,7 @@ Configuring the kernel
"make xconfig" Qt based configuration tool.
- "make gconfig" GTK+ based configuration tool.
+ "make gconfig" GTK based configuration tool.
"make oldconfig" Default all questions based on the contents of
your existing ./.config file and asking about
diff --git a/Documentation/admin-guide/abi-obsolete-files.rst b/Documentation/admin-guide/abi-obsolete-files.rst
new file mode 100644
index 000000000000..3061a916b4b5
--- /dev/null
+++ b/Documentation/admin-guide/abi-obsolete-files.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Obsolete ABI Files
+==================
+
+.. kernel-abi:: obsolete
+ :no-symbols:
diff --git a/Documentation/admin-guide/abi-obsolete.rst b/Documentation/admin-guide/abi-obsolete.rst
index 594e697aa1b2..640f3903e847 100644
--- a/Documentation/admin-guide/abi-obsolete.rst
+++ b/Documentation/admin-guide/abi-obsolete.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
ABI obsolete symbols
====================
@@ -7,5 +9,5 @@ marked to be removed at some later point in time.
The description of the interface will document the reason why it is
obsolete and when it can be expected to be removed.
-.. kernel-abi:: ABI/obsolete
- :rst:
+.. kernel-abi:: obsolete
+ :no-files:
diff --git a/Documentation/admin-guide/abi-removed-files.rst b/Documentation/admin-guide/abi-removed-files.rst
new file mode 100644
index 000000000000..f1bdfadd2ec4
--- /dev/null
+++ b/Documentation/admin-guide/abi-removed-files.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Removed ABI Files
+=================
+
+.. kernel-abi:: removed
+ :no-symbols:
diff --git a/Documentation/admin-guide/abi-removed.rst b/Documentation/admin-guide/abi-removed.rst
index f9e000c81828..88832d3eacd6 100644
--- a/Documentation/admin-guide/abi-removed.rst
+++ b/Documentation/admin-guide/abi-removed.rst
@@ -1,5 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
ABI removed symbols
===================
-.. kernel-abi:: ABI/removed
- :rst:
+.. kernel-abi:: removed
+ :no-files:
diff --git a/Documentation/admin-guide/abi-stable-files.rst b/Documentation/admin-guide/abi-stable-files.rst
new file mode 100644
index 000000000000..f867738fc178
--- /dev/null
+++ b/Documentation/admin-guide/abi-stable-files.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Stable ABI Files
+================
+
+.. kernel-abi:: stable
+ :no-symbols:
diff --git a/Documentation/admin-guide/abi-stable.rst b/Documentation/admin-guide/abi-stable.rst
index fc3361d847b1..528c68401f4b 100644
--- a/Documentation/admin-guide/abi-stable.rst
+++ b/Documentation/admin-guide/abi-stable.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
ABI stable symbols
==================
@@ -10,5 +12,5 @@ for at least 2 years.
Most interfaces (like syscalls) are expected to never change and always
be available.
-.. kernel-abi:: ABI/stable
- :rst:
+.. kernel-abi:: stable
+ :no-files:
diff --git a/Documentation/admin-guide/abi-testing-files.rst b/Documentation/admin-guide/abi-testing-files.rst
new file mode 100644
index 000000000000..1da868e42fdb
--- /dev/null
+++ b/Documentation/admin-guide/abi-testing-files.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Testing ABI Files
+=================
+
+.. kernel-abi:: testing
+ :no-symbols:
diff --git a/Documentation/admin-guide/abi-testing.rst b/Documentation/admin-guide/abi-testing.rst
index 19767926b344..6153ebd38e2d 100644
--- a/Documentation/admin-guide/abi-testing.rst
+++ b/Documentation/admin-guide/abi-testing.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
ABI testing symbols
===================
@@ -16,5 +18,5 @@ Programs that use these interfaces are strongly encouraged to add their
name to the description of these interfaces, so that the kernel
developers can easily notify them if any changes occur.
-.. kernel-abi:: ABI/testing
- :rst:
+.. kernel-abi:: testing
+ :no-files:
diff --git a/Documentation/admin-guide/abi.rst b/Documentation/admin-guide/abi.rst
index bcab3ef2597c..c6039359e585 100644
--- a/Documentation/admin-guide/abi.rst
+++ b/Documentation/admin-guide/abi.rst
@@ -1,7 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+
=====================
Linux ABI description
=====================
+.. kernel-abi:: README
+
+ABI symbols
+-----------
+
.. toctree::
:maxdepth: 2
@@ -9,3 +16,14 @@ Linux ABI description
abi-testing
abi-obsolete
abi-removed
+
+ABI files
+---------
+
+.. toctree::
+ :maxdepth: 2
+
+ abi-stable-files
+ abi-testing-files
+ abi-obsolete-files
+ abi-removed-files
diff --git a/Documentation/admin-guide/cgroup-v1/freezer-subsystem.rst b/Documentation/admin-guide/cgroup-v1/freezer-subsystem.rst
index 582d3427de3f..a964aff373b1 100644
--- a/Documentation/admin-guide/cgroup-v1/freezer-subsystem.rst
+++ b/Documentation/admin-guide/cgroup-v1/freezer-subsystem.rst
@@ -125,3 +125,7 @@ to unfreeze all tasks in the container::
This is the basic mechanism which should do the right thing for user space task
in a simple scenario.
+
+This freezer implementation has known shortcomings (see commit
+76f969e8948d8 ("cgroup: cgroup v2 freezer")); the cgroup v2 freezer is
+recommended instead.
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 286d16fc22eb..02b8206a3594 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -90,6 +90,7 @@ Brief summary of control files.
used.
memory.swappiness set/show swappiness parameter of vmscan
(See sysctl's vm.swappiness)
+ Per memcg knob does not exist in cgroup v2.
memory.move_charge_at_immigrate This knob is deprecated.
memory.oom_control set/show oom controls.
This knob is deprecated and shouldn't be
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index cb1b4e759b7e..f293a13b42ed 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1076,15 +1076,20 @@ cpufreq governor about the minimum desired frequency which should always be
provided by a CPU, as well as the maximum desired frequency, which should not
be exceeded by a CPU.
-WARNING: cgroup2 doesn't yet support control of realtime processes. For
-a kernel built with the CONFIG_RT_GROUP_SCHED option enabled for group
-scheduling of realtime processes, the cpu controller can only be enabled
-when all RT processes are in the root cgroup. This limitation does
-not apply if CONFIG_RT_GROUP_SCHED is disabled. Be aware that system
-management software may already have placed RT processes into nonroot
-cgroups during the system boot process, and these processes may need
-to be moved to the root cgroup before the cpu controller can be enabled
-with a CONFIG_RT_GROUP_SCHED enabled kernel.
+WARNING: cgroup2 cpu controller doesn't yet fully support the control of
+realtime processes. For a kernel built with the CONFIG_RT_GROUP_SCHED option
+enabled for group scheduling of realtime processes, the cpu controller can only
+be enabled when all RT processes are in the root cgroup. Be aware that system
+management software may already have placed RT processes into non-root cgroups
+during the system boot process, and these processes may need to be moved to the
+root cgroup before the cpu controller can be enabled with a
+CONFIG_RT_GROUP_SCHED enabled kernel.
+
+With CONFIG_RT_GROUP_SCHED disabled, this limitation does not apply and some of
+the interface files either affect realtime processes or account for them. See
+the following section for details. Only the cpu controller is affected by
+CONFIG_RT_GROUP_SCHED. Other controllers can be used for the resource control of
+realtime processes irrespective of CONFIG_RT_GROUP_SCHED.
CPU Interface Files
diff --git a/Documentation/admin-guide/gpio/gpio-sim.rst b/Documentation/admin-guide/gpio/gpio-sim.rst
index 1cc5567a4bbe..35d49ccd49e0 100644
--- a/Documentation/admin-guide/gpio/gpio-sim.rst
+++ b/Documentation/admin-guide/gpio/gpio-sim.rst
@@ -71,7 +71,7 @@ specific lines. The name of those subdirectories must take the form of:
``'line<offset>'`` (e.g. ``'line0'``, ``'line20'``, etc.) as the name will be
used by the module to assign the config to the specific line at given offset.
-Once the confiuration is complete, the ``'live'`` attribute must be set to 1 in
+Once the configuration is complete, the ``'live'`` attribute must be set to 1 in
order to instantiate the chip. It can be set back to 0 to destroy the simulated
chip. The module will synchronously wait for the new simulated device to be
successfully probed and if this doesn't happen, writing to ``'live'`` will
diff --git a/Documentation/admin-guide/gpio/gpio-virtuser.rst b/Documentation/admin-guide/gpio/gpio-virtuser.rst
index 2aca70db9f3b..7e7c0df51640 100644
--- a/Documentation/admin-guide/gpio/gpio-virtuser.rst
+++ b/Documentation/admin-guide/gpio/gpio-virtuser.rst
@@ -92,7 +92,7 @@ struct. The first two take string values as arguments:
Activating GPIO consumers
-------------------------
-Once the confiuration is complete, the ``'live'`` attribute must be set to 1 in
+Once the configuration is complete, the ``'live'`` attribute must be set to 1 in
order to instantiate the consumer. It can be set back to 0 to destroy the
virtual device. The module will synchronously wait for the new simulated device
to be successfully probed and if this doesn't happen, writing to ``'live'`` will
diff --git a/Documentation/admin-guide/highuid.rst b/Documentation/admin-guide/highuid.rst
deleted file mode 100644
index 6ee70465c0ea..000000000000
--- a/Documentation/admin-guide/highuid.rst
+++ /dev/null
@@ -1,80 +0,0 @@
-===================================================
-Notes on the change from 16-bit UIDs to 32-bit UIDs
-===================================================
-
-:Author: Chris Wing <wingc@umich.edu>
-:Last updated: January 11, 2000
-
-- kernel code MUST take into account __kernel_uid_t and __kernel_uid32_t
- when communicating between user and kernel space in an ioctl or data
- structure.
-
-- kernel code should use uid_t and gid_t in kernel-private structures and
- code.
-
-What's left to be done for 32-bit UIDs on all Linux architectures:
-
-- Disk quotas have an interesting limitation that is not related to the
- maximum UID/GID. They are limited by the maximum file size on the
- underlying filesystem, because quota records are written at offsets
- corresponding to the UID in question.
- Further investigation is needed to see if the quota system can cope
- properly with huge UIDs. If it can deal with 64-bit file offsets on all
- architectures, this should not be a problem.
-
-- Decide whether or not to keep backwards compatibility with the system
- accounting file, or if we should break it as the comments suggest
- (currently, the old 16-bit UID and GID are still written to disk, and
- part of the former pad space is used to store separate 32-bit UID and
- GID)
-
-- Need to validate that OS emulation calls the 16-bit UID
- compatibility syscalls, if the OS being emulated used 16-bit UIDs, or
- uses the 32-bit UID system calls properly otherwise.
-
- This affects at least:
-
- - iBCS on Intel
-
- - sparc32 emulation on sparc64
- (need to support whatever new 32-bit UID system calls are added to
- sparc32)
-
-- Validate that all filesystems behave properly.
-
- At present, 32-bit UIDs _should_ work for:
-
- - ext2
- - ufs
- - isofs
- - nfs
- - coda
- - udf
-
- Ioctl() fixups have been made for:
-
- - ncpfs
- - smbfs
-
- Filesystems with simple fixups to prevent 16-bit UID wraparound:
-
- - minix
- - sysv
- - qnx4
-
- Other filesystems have not been checked yet.
-
-- The ncpfs and smpfs filesystems cannot presently use 32-bit UIDs in
- all ioctl()s. Some new ioctl()s have been added with 32-bit UIDs, but
- more are needed. (as well as new user<->kernel data structures)
-
-- The ELF core dump format only supports 16-bit UIDs on arm, i386, m68k,
- sh, and sparc32. Fixing this is probably not that important, but would
- require adding a new ELF section.
-
-- The ioctl()s used to control the in-kernel NFS server only support
- 16-bit UIDs on arm, i386, m68k, sh, and sparc32.
-
-- make sure that the UID mapping feature of AX25 networking works properly
- (it should be safe because it's always used a 32-bit integer to
- communicate between user and kernel)
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index c8af32a8f800..259d79fbeb94 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -187,7 +187,6 @@ A few hard-to-categorize and generally obsolete documents.
.. toctree::
:maxdepth: 1
- highuid
ldm
unicode
diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst
index 609a3201fd4e..9453196ade51 100644
--- a/Documentation/admin-guide/iostats.rst
+++ b/Documentation/admin-guide/iostats.rst
@@ -2,62 +2,39 @@
I/O statistics fields
=====================
-Since 2.4.20 (and some versions before, with patches), and 2.5.45,
-more extensive disk statistics have been introduced to help measure disk
-activity. Tools such as ``sar`` and ``iostat`` typically interpret these and do
-the work for you, but in case you are interested in creating your own
-tools, the fields are explained here.
-
-In 2.4 now, the information is found as additional fields in
-``/proc/partitions``. In 2.6 and upper, the same information is found in two
-places: one is in the file ``/proc/diskstats``, and the other is within
-the sysfs file system, which must be mounted in order to obtain
-the information. Throughout this document we'll assume that sysfs
-is mounted on ``/sys``, although of course it may be mounted anywhere.
-Both ``/proc/diskstats`` and sysfs use the same source for the information
-and so should not differ.
-
-Here are examples of these different formats::
-
- 2.4:
- 3 0 39082680 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
- 3 1 9221278 hda1 35486 0 35496 38030 0 0 0 0 0 38030 38030
-
- 2.6+ sysfs:
- 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
- 35486 38030 38030 38030
-
- 2.6+ diskstats:
- 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
- 3 1 hda1 35486 38030 38030 38030
-
- 4.18+ diskstats:
- 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160 0 0 0 0
-
-On 2.4 you might execute ``grep 'hda ' /proc/partitions``. On 2.6+, you have
-a choice of ``cat /sys/block/hda/stat`` or ``grep 'hda ' /proc/diskstats``.
-
-The advantage of one over the other is that the sysfs choice works well
-if you are watching a known, small set of disks. ``/proc/diskstats`` may
-be a better choice if you are watching a large number of disks because
-you'll avoid the overhead of 50, 100, or 500 or more opens/closes with
-each snapshot of your disk statistics.
-
-In 2.4, the statistics fields are those after the device name. In
-the above example, the first field of statistics would be 446216.
-By contrast, in 2.6+ if you look at ``/sys/block/hda/stat``, you'll
-find just the 15 fields, beginning with 446216. If you look at
-``/proc/diskstats``, the 15 fields will be preceded by the major and
-minor device numbers, and device name. Each of these formats provides
-15 fields of statistics, each meaning exactly the same things.
-All fields except field 9 are cumulative since boot. Field 9 should
-go to zero as I/Os complete; all others only increase (unless they
-overflow and wrap). Wrapping might eventually occur on a very busy
-or long-lived system; so applications should be prepared to deal with
-it. Regarding wrapping, the types of the fields are either unsigned
-int (32 bit) or unsigned long (32-bit or 64-bit, depending on your
-machine) as noted per-field below. Unless your observations are very
-spread in time, these fields should not wrap twice before you notice it.
+The kernel exposes disk statistics via ``/proc/diskstats`` and
+``/sys/block/<device>/stat``. These stats are usually accessed via tools
+such as ``sar`` and ``iostat``.
+
+Here are examples using a disk with two partitions::
+
+ /proc/diskstats:
+ 259 0 nvme0n1 255999 814 12369153 47919 996852 81 36123024 425995 0 301795 580470 0 0 0 0 60602 106555
+ 259 1 nvme0n1p1 492 813 17572 96 848 81 108288 210 0 76 307 0 0 0 0 0 0
+ 259 2 nvme0n1p2 255401 1 12343477 47799 996004 0 36014736 425784 0 344336 473584 0 0 0 0 0 0
+
+ /sys/block/nvme0n1/stat:
+ 255999 814 12369153 47919 996858 81 36123056 426009 0 301809 580491 0 0 0 0 60605 106562
+
+ /sys/block/nvme0n1/nvme0n1p1/stat:
+ 492 813 17572 96 848 81 108288 210 0 76 307 0 0 0 0 0 0
+
+Both files contain the same 17 statistics. ``/sys/block/<device>/stat``
+contains the fields for ``<device>``. In ``/proc/diskstats`` the fields
+are prefixed with the major and minor device numbers and the device
+name. In the example above, the first stat value for ``nvme0n1`` is
+255999 in both files.
+
+The sysfs ``stat`` file is efficient for monitoring a small, known set
+of disks. If you're tracking a large number of devices,
+``/proc/diskstats`` is often the better choice since it avoids the
+overhead of opening and closing multiple files for each snapshot.
+
+All fields are cumulative, monotonic counters, except for field 9, which
+resets to zero as I/Os complete. The remaining fields reset at boot, on
+device reattachment or reinitialization, or when the underlying counter
+overflows. Applications reading these counters should detect and handle
+resets when comparing stat snapshots.
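+
+As an illustration (a hedged userspace sketch, not kernel code; the
+device path is taken from the example above), a monitor might discard
+an interval whenever a counter other than field 9 goes backwards::
+
+	#include <stdio.h>
+
+	/* Read the 17 stat fields from e.g. /sys/block/nvme0n1/stat. */
+	static int read_stats(const char *path, unsigned long long s[17])
+	{
+		FILE *f = fopen(path, "r");
+		int i;
+
+		if (!f)
+			return -1;
+		for (i = 0; i < 17; i++) {
+			if (fscanf(f, "%llu", &s[i]) != 1) {
+				fclose(f);
+				return -1;
+			}
+		}
+		fclose(f);
+		return 0;
+	}
+
+	/*
+	 * Field 9 (index 8, I/Os in flight) legitimately decreases; any
+	 * other field decreasing between snapshots means a reset or wrap.
+	 */
+	static int snapshot_reset(const unsigned long long prev[17],
+				  const unsigned long long cur[17])
+	{
+		int i;
+
+		for (i = 0; i < 17; i++) {
+			if (i != 8 && cur[i] < prev[i])
+				return 1;
+		}
+		return 0;
+	}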
Each set of stats only applies to the indicated device; if you want
system-wide stats you'll have to find all the devices and sum them all up.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fb8752b42ec8..7042fbe26a60 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1785,7 +1785,9 @@
allocation boundaries as a proactive defense
against bounds-checking flaws in the kernel's
copy_to_user()/copy_from_user() interface.
- on Perform hardened usercopy checks (default).
+ The default is determined by
+ CONFIG_HARDENED_USERCOPY_DEFAULT_ON.
+ on Perform hardened usercopy checks.
off Disable hardened usercopy checks.
hardlockup_all_cpu_backtrace=
@@ -6082,7 +6084,7 @@
is assumed to be I/O ports; otherwise it is memory.
reserve_mem= [RAM]
- Format: nn[KNG]:<align>:<label>
+ Format: nn[KMG]:<align>:<label>
Reserve physical memory and label it with a name that
other subsystems can use to access it. This is typically
used for systems that do not wipe the RAM, and this command
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
index 2ed79f41a411..d0502691dfa1 100644
--- a/Documentation/admin-guide/thunderbolt.rst
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -28,7 +28,7 @@ should be a userspace tool that handles all the low-level details, keeps
a database of the authorized devices and prompts users for new connections.
More details about the sysfs interface for Thunderbolt devices can be
-found in ``Documentation/ABI/testing/sysfs-bus-thunderbolt``.
+found in Documentation/ABI/testing/sysfs-bus-thunderbolt.
Those users who just want to connect any device without any sort of
manual work can add following line to
diff --git a/Documentation/admin-guide/workload-tracing.rst b/Documentation/admin-guide/workload-tracing.rst
index 6be38c1b9c5b..d6313890ee41 100644
--- a/Documentation/admin-guide/workload-tracing.rst
+++ b/Documentation/admin-guide/workload-tracing.rst
@@ -82,7 +82,7 @@ Install tools to build Linux kernel and tools in kernel repository.
scripts/ver_linux is a good way to check if your system already has
the necessary tools::
- sudo apt-get build-essentials flex bison yacc
+ sudo apt-get install build-essential flex bison yacc
sudo apt install libelf-dev systemtap-sdt-dev libslang2-dev libperl-dev libdw-dev
cscope is a good tool to browse kernel sources. Let's install it now::
diff --git a/Documentation/arch/arm64/amu.rst b/Documentation/arch/arm64/amu.rst
index 01f2de2b0450..ac1b3f0e211d 100644
--- a/Documentation/arch/arm64/amu.rst
+++ b/Documentation/arch/arm64/amu.rst
@@ -80,7 +80,7 @@ bypass the setting of AMUSERENR_EL0 to trap accesses from EL0 (userspace) to
EL1 (kernel). Therefore, firmware should still ensure accesses to AMU registers
are not trapped in EL2/EL3.
-The fixed counters of AMUv1 are accessible though the following system
+The fixed counters of AMUv1 are accessible through the following system
register definitions:
- SYS_AMEVCNTR0_CORE_EL0
diff --git a/Documentation/arch/arm64/asymmetric-32bit.rst b/Documentation/arch/arm64/asymmetric-32bit.rst
index 1ca2b359a907..57b8d7476f71 100644
--- a/Documentation/arch/arm64/asymmetric-32bit.rst
+++ b/Documentation/arch/arm64/asymmetric-32bit.rst
@@ -55,7 +55,7 @@ sysfs
The subset of CPUs capable of running 32-bit tasks is described in
``/sys/devices/system/cpu/aarch32_el0`` and is documented further in
-``Documentation/ABI/testing/sysfs-devices-system-cpu``.
+Documentation/ABI/testing/sysfs-devices-system-cpu.
**Note:** CPUs are advertised by this file as they are detected and so
late-onlining of 32-bit-capable CPUs can result in the file contents
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 0c2205d536b3..3dad1f90b098 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -47,7 +47,7 @@ from load_config import loadConfig
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '2.4.4'
+needs_sphinx = '3.4.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
diff --git a/Documentation/core-api/min_heap.rst b/Documentation/core-api/min_heap.rst
index 683bc6d09f00..9f57766581df 100644
--- a/Documentation/core-api/min_heap.rst
+++ b/Documentation/core-api/min_heap.rst
@@ -47,8 +47,8 @@ Example:
#define MIN_HEAP_PREALLOCATED(_type, _name, _nr)
struct _name {
- int nr; /* Number of elements in the heap */
- int size; /* Maximum number of elements that can be held */
+ size_t nr; /* Number of elements in the heap */
+ size_t size; /* Maximum number of elements that can be held */
_type *data; /* Pointer to the heap data */
_type preallocated[_nr]; /* Static preallocated array */
}
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index ecccc0473da9..4bdc394e86af 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -661,7 +661,7 @@ Do *not* use it from C.
Thanks
======
-If you add other %p extensions, please extend <lib/test_printf.c> with
-one or more test cases, if at all feasible.
+If you add other %p extensions, please extend <lib/tests/printf_kunit.c>
+with one or more test cases, if at all feasible.
Thank you for your cooperation and attention.
diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index d81c42d1063e..8575178aa87f 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -203,7 +203,7 @@ they happen concurrently in different threads, and at least one of them is a
least one is a write. For a more thorough discussion and definition, see `"Plain
Accesses and Data Races" in the LKMM`_.
-.. _"Plain Accesses and Data Races" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt#n1922
+.. _"Plain Accesses and Data Races" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt?id=8f6629c004b193d23612641c3607e785819e97ab#n2164
Relationship with the Linux-Kernel Memory Consistency Model (LKMM)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index fdb1df86783a..18c2da67fae4 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -347,7 +347,7 @@ kselftest. We use kselftests for lib/ as an example.
1. Create the test module
2. Create the test script that will run (load/unload) the module
- e.g. ``tools/testing/selftests/lib/printf.sh``
+ e.g. ``tools/testing/selftests/lib/bitmap.sh``
3. Add line to config file e.g. ``tools/testing/selftests/lib/config``
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
index 2a98b26630cb..c155c9c6db39 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
@@ -40,7 +40,7 @@ properties:
microchip,rx-int-gpios:
description:
- GPIO phandle of GPIO connected to to INT1 pin of the MCP251XFD, which
+ GPIO phandle of GPIO connected to INT1 pin of the MCP251XFD, which
signals a pending RX interrupt.
maxItems: 1
diff --git a/Documentation/driver-api/firmware/firmware-usage-guidelines.rst b/Documentation/driver-api/firmware/firmware-usage-guidelines.rst
index fdcfce42c6d2..336e912281c3 100644
--- a/Documentation/driver-api/firmware/firmware-usage-guidelines.rst
+++ b/Documentation/driver-api/firmware/firmware-usage-guidelines.rst
@@ -42,3 +42,8 @@ then of course these rules will not apply strictly.)
deprecating old major versions, then this should only be done as a
last option, and be stated clearly in all communications.
+* Firmware files that affect the User API (UAPI) shall not introduce
+ changes that break existing userspace programs. Updates to such firmware
+ must ensure backward compatibility with existing userspace applications.
+ This includes maintaining consistent interfaces and behaviors that
+ userspace programs rely on.
diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst
index 71ccc30e586b..e826f16ea43d 100644
--- a/Documentation/driver-api/generic-counter.rst
+++ b/Documentation/driver-api/generic-counter.rst
@@ -467,7 +467,7 @@ Counter sysfs
Translates counter data to the standard Counter sysfs interface format
and vice versa.
-Please refer to the ``Documentation/ABI/testing/sysfs-bus-counter`` file
+Please refer to the Documentation/ABI/testing/sysfs-bus-counter file
for a detailed breakdown of the available Generic Counter interface
sysfs attributes.
@@ -483,7 +483,7 @@ Sysfs Interface
Several sysfs attributes are generated by the Generic Counter interface,
and reside under the ``/sys/bus/counter/devices/counterX`` directory,
where ``X`` is to the respective counter device id. Please see
-``Documentation/ABI/testing/sysfs-bus-counter`` for detailed information
+Documentation/ABI/testing/sysfs-bus-counter for detailed information
on each Generic Counter interface sysfs attribute.
Through these sysfs attributes, programs and scripts may interact with
diff --git a/Documentation/driver-api/iio/core.rst b/Documentation/driver-api/iio/core.rst
index dfe438dc91a7..42b580fb2989 100644
--- a/Documentation/driver-api/iio/core.rst
+++ b/Documentation/driver-api/iio/core.rst
@@ -60,7 +60,7 @@ directory. Common attributes are:
* :file:`sampling_frequency_available`, available discrete set of sampling
frequency values for device.
* Available standard attributes for IIO devices are described in the
- :file:`Documentation/ABI/testing/sysfs-bus-iio` file in the Linux kernel
+ Documentation/ABI/testing/sysfs-bus-iio file in the Linux kernel
sources.
IIO device channels
diff --git a/Documentation/driver-api/infiniband.rst b/Documentation/driver-api/infiniband.rst
index 30e142ccbee9..10d8be9e74fe 100644
--- a/Documentation/driver-api/infiniband.rst
+++ b/Documentation/driver-api/infiniband.rst
@@ -77,14 +77,14 @@ iSCSI Extensions for RDMA (iSER)
:internal:
.. kernel-doc:: drivers/infiniband/ulp/iser/iscsi_iser.c
- :functions: iscsi_iser_pdu_alloc iser_initialize_task_headers \
- iscsi_iser_task_init iscsi_iser_mtask_xmit iscsi_iser_task_xmit \
- iscsi_iser_cleanup_task iscsi_iser_check_protection \
- iscsi_iser_conn_create iscsi_iser_conn_bind \
- iscsi_iser_conn_start iscsi_iser_conn_stop \
- iscsi_iser_session_destroy iscsi_iser_session_create \
- iscsi_iser_set_param iscsi_iser_ep_connect iscsi_iser_ep_poll \
- iscsi_iser_ep_disconnect
+ :functions: iscsi_iser_pdu_alloc iser_initialize_task_headers
+ iscsi_iser_task_init iscsi_iser_mtask_xmit iscsi_iser_task_xmit
+ iscsi_iser_cleanup_task iscsi_iser_check_protection
+ iscsi_iser_conn_create iscsi_iser_conn_bind
+ iscsi_iser_conn_start iscsi_iser_conn_stop
+ iscsi_iser_session_destroy iscsi_iser_session_create
+ iscsi_iser_set_param iscsi_iser_ep_connect iscsi_iser_ep_poll
+ iscsi_iser_ep_disconnect
.. kernel-doc:: drivers/infiniband/ulp/iser/iser_initiator.c
:internal:
diff --git a/Documentation/driver-api/media/drivers/zoran.rst b/Documentation/driver-api/media/drivers/zoran.rst
index b205e10c3154..3e05b7f0442a 100644
--- a/Documentation/driver-api/media/drivers/zoran.rst
+++ b/Documentation/driver-api/media/drivers/zoran.rst
@@ -222,7 +222,7 @@ The CCIR - I uses the PAL colorsystem, and is used in Great Britain, Hong Kong,
Ireland, Nigeria, South Africa.
The CCIR - N uses the PAL colorsystem and PAL frame size but the NTSC framerate,
-and is used in Argentinia, Uruguay, an a few others
+and is used in Argentina, Uruguay, and a few others
We do not talk about how the audio is broadcast !
diff --git a/Documentation/driver-api/media/maintainer-entry-profile.rst b/Documentation/driver-api/media/maintainer-entry-profile.rst
index ffc712a5f632..ad96a89ee916 100644
--- a/Documentation/driver-api/media/maintainer-entry-profile.rst
+++ b/Documentation/driver-api/media/maintainer-entry-profile.rst
@@ -116,7 +116,7 @@ CEC drivers ``cec-compliance``
.. [3] The ``v4l2-compliance`` also covers the media controller usage inside
V4L2 drivers.
-Other compilance tools are under development to check other parts of the
+Other compliance tools are under development to check other parts of the
subsystem.
Those tests need to pass before the patches go upstream.
diff --git a/Documentation/driver-api/nvdimm/nvdimm.rst b/Documentation/driver-api/nvdimm/nvdimm.rst
index ca16b5acbf30..c205efa4d45b 100644
--- a/Documentation/driver-api/nvdimm/nvdimm.rst
+++ b/Documentation/driver-api/nvdimm/nvdimm.rst
@@ -535,12 +535,12 @@ internally with a static identifier::
char devname[50];
snprintf(devname, sizeof(devname), "namespace%d.%d",
- ndctl_region_get_id(region), paramaters->id);
+ ndctl_region_get_id(region), parameters->id);
ndctl_namespace_set_alt_name(ndns, devname);
/* 'uuid' must be set prior to setting size! */
- ndctl_namespace_set_uuid(ndns, paramaters->uuid);
- ndctl_namespace_set_size(ndns, paramaters->size);
+ ndctl_namespace_set_uuid(ndns, parameters->uuid);
+ ndctl_namespace_set_size(ndns, parameters->size);
/* unlike pmem namespaces, blk namespaces have a sector size */
if (parameters->lbasize)
ndctl_namespace_set_sector_size(ndns, parameters->lbasize);
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index d448cb57df86..8d86d5da4023 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -358,7 +358,7 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``.
is probed against the device in question by passing them to the
:c:func:`dev_pm_set_driver_flags` helper function.] If the first of
these flags is set, the PM core will not apply the direct-complete
- procedure described above to the given device and, consequenty, to any
+ procedure described above to the given device and, consequently, to any
of its ancestors. The second flag, when set, informs the middle layer
code (bus types, device types, PM domains, classes) that it should take
the return value of the ``->prepare`` callback provided by the driver
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 02febc883588..d937b7a03575 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -20,7 +20,7 @@
| openrisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
- | riscv: | ok |
+ | riscv: | TODO |
| s390: | ok |
| sh: | TODO |
| sparc: | TODO |
diff --git a/Documentation/features/list-arch.sh b/Documentation/features/list-arch.sh
index e73aa35848f0..ac8ff7f6f859 100755
--- a/Documentation/features/list-arch.sh
+++ b/Documentation/features/list-arch.sh
@@ -6,6 +6,6 @@
# (If no arguments are given then it will print the host architecture's status.)
#
-ARCH=${1:-$(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/')}
+ARCH=${1:-$(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/')}
$(dirname $0)/../../scripts/get_feat.pl list --arch $ARCH
diff --git a/Documentation/filesystems/9p.rst b/Documentation/filesystems/9p.rst
index 2bbf68b56b0d..3078f3c9256a 100644
--- a/Documentation/filesystems/9p.rst
+++ b/Documentation/filesystems/9p.rst
@@ -90,7 +90,7 @@ Just start the 9pfs capable network server like diod/nfs-ganesha e.g.::
$ diod -f -n -d 0 -S -l 0.0.0.0:9999 -e $PWD
-Optionaly scan your bus if there are more then one usbg gadgets to find their path::
+Optionally, if there is more than one usbg gadget, scan your bus to find their paths::
$ python $kernel_dir/tools/usb/p9_fwd.py list
diff --git a/Documentation/filesystems/bcachefs/SubmittingPatches.rst b/Documentation/filesystems/bcachefs/SubmittingPatches.rst
index 026b12ae0d6a..4b79ca58faf2 100644
--- a/Documentation/filesystems/bcachefs/SubmittingPatches.rst
+++ b/Documentation/filesystems/bcachefs/SubmittingPatches.rst
@@ -30,7 +30,7 @@ CI:
===
Instead of running your tests locally, when running the full test suite it's
-prefereable to let a server farm do it in parallel, and then have the results
+preferable to let a server farm do it in parallel, and then have the results
in a nice test dashboard (which can tell you which failures are new, and
presents results in a git log view, avoiding the need for most bisecting).
@@ -68,7 +68,7 @@ Other things to think about:
land - use them. Use them judiciously, and not as a replacement for proper
error handling, but use them.
-- Does it need to be performance tested? Should we add new peformance counters?
+- Does it need to be performance tested? Should we add new performance counters?
bcachefs has a set of persistent runtime counters which can be viewed with
the 'bcachefs fs top' command; this should give users a basic idea of what
diff --git a/Documentation/filesystems/coda.rst b/Documentation/filesystems/coda.rst
index bdde7e4e010b..0db3c83a50e5 100644
--- a/Documentation/filesystems/coda.rst
+++ b/Documentation/filesystems/coda.rst
@@ -141,7 +141,7 @@ kernel support.
a process P which accessing a Coda file. It makes a system call which
traps to the OS kernel. Examples of such calls trapping to the kernel
are ``read``, ``write``, ``open``, ``close``, ``create``, ``mkdir``,
- ``rmdir``, ``chmod`` in a Unix ontext. Similar calls exist in the Win32
+ ``rmdir``, ``chmod`` in a Unix context. Similar calls exist in the Win32
environment, and are named ``CreateFile``.
Generally the operating system handles the request in a virtual
diff --git a/Documentation/filesystems/debugfs.rst b/Documentation/filesystems/debugfs.rst
index f7f977ffbf8d..610f718ef8b5 100644
--- a/Documentation/filesystems/debugfs.rst
+++ b/Documentation/filesystems/debugfs.rst
@@ -220,7 +220,7 @@ There are a couple of other directory-oriented helper functions::
A call to debugfs_change_name() will give a new name to an existing debugfs
file, always in the same directory. The new_name must not exist prior
-to the call; the return value is 0 on success and -E... on failuer.
+to the call; the return value is 0 on success and -E... on failure.
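+
+A hedged usage sketch (the format string and ``new_id`` are
+illustrative only)::
+
+	int err = debugfs_change_name(dentry, "stats-%d", new_id);
+
+	if (err)
+		pr_warn("debugfs rename failed: %d\n", err);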
Symbolic links can be created with debugfs_create_symlink().
There is one important thing that all debugfs users must take into account:
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 2636f2a41bd3..a9cf8e950b15 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -118,7 +118,6 @@ Documentation for filesystem implementations.
spufs/index
squashfs
sysfs
- sysv-fs
tmpfs
ubifs
ubifs-authentication
diff --git a/Documentation/filesystems/iomap/design.rst b/Documentation/filesystems/iomap/design.rst
index b0d0188a095e..e29651a42eec 100644
--- a/Documentation/filesystems/iomap/design.rst
+++ b/Documentation/filesystems/iomap/design.rst
@@ -246,6 +246,10 @@ The fields are as follows:
* **IOMAP_F_PRIVATE**: Starting with this value, the upper bits can
be set by the filesystem for its own purposes.
+ * **IOMAP_F_ANON_WRITE**: Indicates that (write) I/O does not have a target
+ block assigned to it yet and the file system will do that in the bio
+ submission handler, splitting the I/O as needed.
+
These flags can be set by iomap itself during file operations.
The filesystem should supply an ``->iomap_end`` function if it needs
to observe these flags:
@@ -352,6 +356,11 @@ operations:
``IOMAP_NOWAIT`` is often set on behalf of ``IOCB_NOWAIT`` or
``RWF_NOWAIT``.
+ * ``IOMAP_DONTCACHE`` is set when the caller wishes to perform a
+ buffered file I/O and would like the kernel to drop the pagecache
+ after the I/O completes, if it isn't already being used by another
+ thread.
+
If it is necessary to read existing file contents from a `different
<https://lore.kernel.org/all/20191008071527.29304-9-hch@lst.de/>`_
device or address range on a device, the filesystem should return that
diff --git a/Documentation/filesystems/iomap/operations.rst b/Documentation/filesystems/iomap/operations.rst
index 2c7f5df9d8b0..3b628e370d88 100644
--- a/Documentation/filesystems/iomap/operations.rst
+++ b/Documentation/filesystems/iomap/operations.rst
@@ -131,6 +131,8 @@ These ``struct kiocb`` flags are significant for buffered I/O with iomap:
* ``IOCB_NOWAIT``: Turns on ``IOMAP_NOWAIT``.
+ * ``IOCB_DONTCACHE``: Turns on ``IOMAP_DONTCACHE``.
+
Internal per-Folio State
------------------------
@@ -283,7 +285,7 @@ The ``ops`` structure must be specified and is as follows:
struct iomap_writeback_ops {
int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
loff_t offset, unsigned len);
- int (*prepare_ioend)(struct iomap_ioend *ioend, int status);
+ int (*submit_ioend)(struct iomap_writepage_ctx *wpc, int status);
void (*discard_folio)(struct folio *folio, loff_t pos);
};
@@ -306,13 +308,12 @@ The fields are as follows:
purpose.
This function must be supplied by the filesystem.
- - ``prepare_ioend``: Enables filesystems to transform the writeback
- ioend or perform any other preparatory work before the writeback I/O
- is submitted.
+ - ``submit_ioend``: Allows the filesystem to hook into writeback bio
+ submission.
This might include pre-write space accounting updates, or installing
a custom ``->bi_end_io`` function for internal purposes, such as
deferring the ioend completion to a workqueue to run metadata update
- transactions from process context.
+ transactions from process context before submitting the bio.
This function is optional.
- ``discard_folio``: iomap calls this function after ``->map_blocks``
@@ -341,7 +342,7 @@ This can happen in interrupt or process context, depending on the
storage device.
Filesystems that need to update internal bookkeeping (e.g. unwritten
-extent conversions) should provide a ``->prepare_ioend`` function to
+extent conversions) should provide a ``->submit_ioend`` function to
set ``struct iomap_ioend::io_bio::bi_end_io`` to its own function.
This function should call ``iomap_finish_ioends`` after finishing its
own work (e.g. unwritten extent conversion).
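+
+A hedged sketch of such a ``->submit_ioend`` hook for a hypothetical
+"foofs" (modelled on the pattern described here; the ``io_bio`` member
+access reflects the current in-kernel layout and may change)::
+
+	static void foofs_writeback_end_io(struct bio *bio)
+	{
+		struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
+
+		/* filesystem bookkeeping, e.g. unwritten extent
+		 * conversion, possibly deferred to a workqueue, then: */
+		iomap_finish_ioends(ioend, blk_status_to_errno(bio->bi_status));
+	}
+
+	static int foofs_submit_ioend(struct iomap_writepage_ctx *wpc, int status)
+	{
+		struct iomap_ioend *ioend = wpc->ioend;
+
+		ioend->io_bio.bi_end_io = foofs_writeback_end_io;
+		if (status)
+			return status;
+		submit_bio(&ioend->io_bio);
+		return 0;
+	}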
@@ -515,18 +516,33 @@ IOMAP_WRITE`` with any combination of the following enhancements:
* ``IOMAP_ATOMIC``: This write is being issued with torn-write
protection.
- Only a single bio can be created for the write, and the write must
- not be split into multiple I/O requests, i.e. flag REQ_ATOMIC must be
- set.
+ Torn-write protection may be provided by HW-offload or by a software
+ mechanism implemented by the filesystem.
+
+ For HW-offload based support, only a single bio can be created for the
+ write, and the write must not be split into multiple I/O requests, i.e.
+ flag REQ_ATOMIC must be set.
The file range to write must be aligned to satisfy the requirements
of both the filesystem and the underlying block device's atomic
commit capabilities.
If filesystem metadata updates are required (e.g. unwritten extent
- conversion or copy on write), all updates for the entire file range
+ conversion or copy-on-write), all updates for the entire file range
must be committed atomically as well.
- Only one space mapping is allowed per untorn write.
- Untorn writes must be aligned to, and must not be longer than, a
- single file block.
+ Untorn-writes may be longer than a single file block. In all cases,
+ the mapping start disk block must have at least the same alignment as
+ the write offset.
+ The filesystem must set IOMAP_F_ATOMIC_BIO to inform the iomap core of an
+ untorn-write based on HW-offload.
+
+ For untorn-writes based on a software mechanism provided by the
+ filesystem, all the disk block alignment and single bio restrictions
+ which apply for HW-offload based untorn-writes do not apply.
+ The mechanism would typically be used as a fallback when HW-offload
+ based untorn-writes cannot be issued, e.g. when the range of the
+ write covers multiple extents, meaning that it is not possible to issue
+ a single bio.
+ All filesystem metadata updates for the entire file range must be
+ committed atomically as well.
Callers commonly hold ``i_rwsem`` in shared or exclusive mode before
calling this function.
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index d20a32b77b60..0ec0bb6eb0fb 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -66,7 +66,7 @@ prototypes::
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t);
+ struct dentry *(*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t);
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t,dev_t);
int (*rename) (struct mnt_idmap *, struct inode *, struct dentry *,
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index 73f0bfd7e903..3886c14f89f4 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -515,7 +515,7 @@ The methods defined in the table are:
the cache to expand a request in either direction. This allows the cache to
size the request appropriately for the cache granularity.
- The function is passed poiners to the start and length in its parameters,
+ The function is passed pointers to the start and length in its parameters,
plus the size of the file for reference, and adjusts the start and length
appropriately. It should return one of:
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index 6245b67ae9e0..2db379b4b31e 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -292,13 +292,27 @@ rename or unlink will of course be noticed and handled).
Permission model
----------------
+An overlay filesystem stashes credentials that will be used when
+accessing lower or upper filesystems.
+
+In the old mount api the credentials of the task calling mount(2) are
+stashed. In the new mount api the credentials of the task creating the
+superblock through the FSCONFIG_CMD_CREATE command of fsconfig(2) are
+stashed.
+
+Starting with kernel v6.15 it is possible to use the "override_creds"
+mount option which will cause the credentials of the calling task to be
+recorded. Note that "override_creds" is only meaningful when used with
+the new mount api as the old mount api combines setting options and
+superblock creation in a single mount(2) syscall.
+
Permission checking in the overlay filesystem follows these principles:
1) permission check SHOULD return the same result before and after copy up
2) task creating the overlay mount MUST NOT gain additional privileges
- 3) non-mounting task MAY gain additional privileges through the overlay,
+ 3) task[*] MAY gain additional privileges through the overlay,
compared to direct access on underlying lower or upper filesystems
This is achieved by performing two permission checks on each access:
@@ -306,7 +320,7 @@ This is achieved by performing two permission checks on each access:
a) check if current task is allowed access based on local DAC (owner,
group, mode and posix acl), as well as MAC checks
- b) check if mounting task would be allowed real operation on lower or
+ b) check if stashed credentials would be allowed real operation on lower or
upper layer based on underlying filesystem permissions, again including
MAC checks
@@ -315,10 +329,10 @@ are copied up. On the other hand it can result in server enforced
permissions (used by NFS, for example) being ignored (3).
Check (b) ensures that no task gains permissions to underlying layers that
-the mounting task does not have (2). This also means that it is possible
+the stashed credentials do not have (2). This also means that it is possible
to create setups where the consistency rule (1) does not hold; normally,
-however, the mounting task will have sufficient privileges to perform all
-operations.
+however, the stashed credentials will have sufficient privileges to
+perform all operations.
Another way to demonstrate this model is drawing parallels between::
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 1639e78e3146..767b2927c762 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -1144,7 +1144,7 @@ and it *must* be opened exclusive.
---
-** mandatory**
+**mandatory**
->d_revalidate() gets two extra arguments - inode of parent directory and
name our dentry is expected to have. Both are stable (dir is pinned in
@@ -1157,3 +1157,49 @@ in normal case it points into the pathname being looked up.
NOTE: if you need something like full path from the root of filesystem,
you are still on your own - this assists with simple cases, but it's not
magic.
+
+---
+
+**recommended**
+
+kern_path_locked() and user_path_locked() no longer return a negative
+dentry so this doesn't need to be checked. If the name cannot be found,
+ERR_PTR(-ENOENT) is returned.
+
+---
+
+**recommended**
+
+lookup_one_qstr_excl() is changed to return errors in more cases, so
+these conditions don't require explicit checks:
+
+ - if LOOKUP_CREATE is NOT given, then the dentry won't be negative,
+ ERR_PTR(-ENOENT) is returned instead
+ - if LOOKUP_EXCL IS given, then the dentry won't be positive,
+ ERR_PTR(-EEXIST) is returned instead
+
+LOOKUP_EXCL now means "target must not exist". It can be combined with
+LOOKUP_CREATE or LOOKUP_RENAME_TARGET.
+
+---
+
+**mandatory**
+
+invalidate_inodes() is gone; use evict_inodes() instead.
+
+---
+
+**mandatory**
+
+->mkdir() now returns a dentry. If the created inode is found to
+already be in cache and have a dentry (often IS_ROOT()), it will need to
+be spliced into the given name in place of the given dentry. That dentry
+now needs to be returned. If the original dentry is used, NULL should
+be returned. Any error should be returned with ERR_PTR().
+
+In general, filesystems which use d_instantiate_new() to install the new
+inode can safely return NULL. Filesystems which may not have an I_NEW inode
+should use d_drop() followed by d_splice_alias() and return the result
+of the latter.
+
+If a positive dentry cannot be returned for some reason, in-kernel
+clients such as cachefiles, nfsd, smb/server may not perform ideally but
+will fail-safe.
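+
+For illustration, the common d_instantiate_new() pattern under the new
+contract might look like this (``foofs_new_dir_inode()`` is a made-up
+helper)::
+
+	static struct dentry *foofs_mkdir(struct mnt_idmap *idmap,
+					  struct inode *dir,
+					  struct dentry *dentry, umode_t mode)
+	{
+		struct inode *inode = foofs_new_dir_inode(idmap, dir, mode);
+
+		if (IS_ERR(inode))
+			return ERR_CAST(inode);
+		d_instantiate_new(dentry, inode);	/* inode is still I_NEW */
+		return NULL;	/* the dentry passed in was used unchanged */
+	}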
diff --git a/Documentation/filesystems/sysv-fs.rst b/Documentation/filesystems/sysv-fs.rst
deleted file mode 100644
index 89e40911ad7c..000000000000
--- a/Documentation/filesystems/sysv-fs.rst
+++ /dev/null
@@ -1,264 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-==================
-SystemV Filesystem
-==================
-
-It implements all of
- - Xenix FS,
- - SystemV/386 FS,
- - Coherent FS.
-
-To install:
-
-* Answer the 'System V and Coherent filesystem support' question with 'y'
- when configuring the kernel.
-* To mount a disk or a partition, use::
-
- mount [-r] -t sysv device mountpoint
-
- The file system type names::
-
- -t sysv
- -t xenix
- -t coherent
-
- may be used interchangeably, but the last two will eventually disappear.
-
-Bugs in the present implementation:
-
-- Coherent FS:
-
- - The "free list interleave" n:m is currently ignored.
- - Only file systems with no filesystem name and no pack name are recognized.
- (See Coherent "man mkfs" for a description of these features.)
-
-- SystemV Release 2 FS:
-
- The superblock is only searched in the blocks 9, 15, 18, which
- corresponds to the beginning of track 1 on floppy disks. No support
- for this FS on hard disk yet.
-
-
-These filesystems are rather similar. Here is a comparison with Minix FS:
-
-* Linux fdisk reports on partitions
-
- - Minix FS 0x81 Linux/Minix
- - Xenix FS ??
- - SystemV FS ??
- - Coherent FS 0x08 AIX bootable
-
-* Size of a block or zone (data allocation unit on disk)
-
- - Minix FS 1024
- - Xenix FS 1024 (also 512 ??)
- - SystemV FS 1024 (also 512 and 2048)
- - Coherent FS 512
-
-* General layout: all have one boot block, one super block and
- separate areas for inodes and for directories/data.
- On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
- all the block numbers (including the super block) are offset by one track.
-
-* Byte ordering of "short" (16 bit entities) on disk:
-
- - Minix FS little endian 0 1
- - Xenix FS little endian 0 1
- - SystemV FS little endian 0 1
- - Coherent FS little endian 0 1
-
- Of course, this affects only the file system, not the data of files on it!
-
-* Byte ordering of "long" (32 bit entities) on disk:
-
- - Minix FS little endian 0 1 2 3
- - Xenix FS little endian 0 1 2 3
- - SystemV FS little endian 0 1 2 3
- - Coherent FS PDP-11 2 3 0 1
-
- Of course, this affects only the file system, not the data of files on it!
-
-* Inode on disk: "short", 0 means non-existent, the root dir ino is:
-
- ================================= ==
- Minix FS 1
- Xenix FS, SystemV FS, Coherent FS 2
- ================================= ==
-
-* Maximum number of hard links to a file:
-
- =========== =========
- Minix FS 250
- Xenix FS ??
- SystemV FS ??
- Coherent FS >=10000
- =========== =========
-
-* Free inode management:
-
- - Minix FS
- a bitmap
- - Xenix FS, SystemV FS, Coherent FS
- There is a cache of a certain number of free inodes in the super-block.
- When it is exhausted, new free inodes are found using a linear search.
-
-* Free block management:
-
- - Minix FS
- a bitmap
- - Xenix FS, SystemV FS, Coherent FS
- Free blocks are organized in a "free list". Maybe a misleading term,
- since it is not true that every free block contains a pointer to
- the next free block. Rather, the free blocks are organized in chunks
- of limited size, and every now and then a free block contains pointers
- to the free blocks pertaining to the next chunk; the first of these
- contains pointers and so on. The list terminates with a "block number"
- 0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
-
-* Super-block location:
-
- =========== ==========================
- Minix FS block 1 = bytes 1024..2047
- Xenix FS block 1 = bytes 1024..2047
- SystemV FS bytes 512..1023
- Coherent FS block 1 = bytes 512..1023
- =========== ==========================
-
-* Super-block layout:
-
- - Minix FS::
-
- unsigned short s_ninodes;
- unsigned short s_nzones;
- unsigned short s_imap_blocks;
- unsigned short s_zmap_blocks;
- unsigned short s_firstdatazone;
- unsigned short s_log_zone_size;
- unsigned long s_max_size;
- unsigned short s_magic;
-
- - Xenix FS, SystemV FS, Coherent FS::
-
- unsigned short s_firstdatazone;
- unsigned long s_nzones;
- unsigned short s_fzone_count;
- unsigned long s_fzones[NICFREE];
- unsigned short s_finode_count;
- unsigned short s_finodes[NICINOD];
- char s_flock;
- char s_ilock;
- char s_modified;
- char s_rdonly;
- unsigned long s_time;
- short s_dinfo[4]; -- SystemV FS only
- unsigned long s_free_zones;
- unsigned short s_free_inodes;
- short s_dinfo[4]; -- Xenix FS only
- unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
- char s_fname[6];
- char s_fpack[6];
-
- then they differ considerably:
-
- Xenix FS::
-
- char s_clean;
- char s_fill[371];
- long s_magic;
- long s_type;
-
- SystemV FS::
-
- long s_fill[12 or 14];
- long s_state;
- long s_magic;
- long s_type;
-
- Coherent FS::
-
- unsigned long s_unique;
-
- Note that Coherent FS has no magic.
-
-* Inode layout:
-
- - Minix FS::
-
- unsigned short i_mode;
- unsigned short i_uid;
- unsigned long i_size;
- unsigned long i_time;
- unsigned char i_gid;
- unsigned char i_nlinks;
- unsigned short i_zone[7+1+1];
-
- - Xenix FS, SystemV FS, Coherent FS::
-
- unsigned short i_mode;
- unsigned short i_nlink;
- unsigned short i_uid;
- unsigned short i_gid;
- unsigned long i_size;
- unsigned char i_zone[3*(10+1+1+1)];
- unsigned long i_atime;
- unsigned long i_mtime;
- unsigned long i_ctime;
-
-
-* Regular file data blocks are organized as
-
- - Minix FS:
-
- - 7 direct blocks
- - 1 indirect block (pointers to blocks)
- - 1 double-indirect block (pointer to pointers to blocks)
-
- - Xenix FS, SystemV FS, Coherent FS:
-
- - 10 direct blocks
- - 1 indirect block (pointers to blocks)
- - 1 double-indirect block (pointer to pointers to blocks)
- - 1 triple-indirect block (pointer to pointers to pointers to blocks)
-
-
- =========== ========== ================
- Inode size inodes per block
- =========== ========== ================
- Minix FS 32 32
- Xenix FS 64 16
- SystemV FS 64 16
- Coherent FS 64 8
- =========== ========== ================
-
-* Directory entry on disk
-
- - Minix FS::
-
- unsigned short inode;
- char name[14/30];
-
- - Xenix FS, SystemV FS, Coherent FS::
-
- unsigned short inode;
- char name[14];
-
- =========== ============== =====================
- Dir entry size dir entries per block
- =========== ============== =====================
- Minix FS 16/32 64/32
- Xenix FS 16 64
- SystemV FS 16 64
- Coherent FS 16 32
- =========== ============== =====================
-
-* How to implement symbolic links such that the host fsck doesn't scream:
-
- - Minix FS normal
- - Xenix FS kludge: as regular files with chmod 1000
- - SystemV FS ??
- - Coherent FS kludge: as regular files with chmod 1000
-
-
-Notation: We often speak of a "block" but mean a zone (the allocation unit)
-and not the disk driver's notion of "block".
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 31eea688609a..ae79c30b6c0c 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -495,7 +495,7 @@ As of kernel 2.6.22, the following members are defined:
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t);
+ struct dentry *(*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t);
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t,dev_t);
int (*rename) (struct mnt_idmap *, struct inode *, struct dentry *,
@@ -562,7 +562,26 @@ otherwise noted.
``mkdir``
called by the mkdir(2) system call. Only required if you want
to support creating subdirectories. You will probably need to
- call d_instantiate() just as you would in the create() method
+ call d_instantiate_new() just as you would in the create() method.
+
+ If d_instantiate_new() is not used and if the fh_to_dentry()
+ export operation is provided, or if the storage might be
+ accessible by another path (e.g. with a network filesystem)
+ then more care may be needed. Importantly, d_instantiate()
+ should not be used with an inode that is no longer I_NEW if there is
+ any chance that the inode could already be attached to a dentry.
+ This is because of a hard rule in the VFS that a directory must
+ only ever have one dentry.
+
+ For example, if an NFS filesystem is mounted twice the new directory
+ could be visible on the other mount before it is on the original
+ mount, and a pair of name_to_handle_at(), open_by_handle_at()
+ calls could instantiate the directory inode with an IS_ROOT()
+ dentry before the first mkdir returns.
+
+ If there is any chance this could happen, then the new inode
+ should be d_drop()ed and attached with d_splice_alias(). The
+ returned dentry (if any) should be returned by ->mkdir().
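+
+ A hedged sketch of that splice pattern (``foofs_get_dir_inode()`` is
+ an assumed helper)::
+
+	static struct dentry *foofs_mkdir(struct mnt_idmap *idmap,
+					  struct inode *dir,
+					  struct dentry *dentry, umode_t mode)
+	{
+		struct inode *inode = foofs_get_dir_inode(dir, mode);
+
+		if (IS_ERR(inode))
+			return ERR_CAST(inode);
+		d_drop(dentry);
+		/* may return a pre-existing (e.g. IS_ROOT()) dentry */
+		return d_splice_alias(inode, dentry);
+	}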
``rmdir``
called by the rmdir(2) system call. Only required if you want
diff --git a/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst b/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst
index 6402ab8e370c..2a2705e975e8 100644
--- a/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst
+++ b/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst
@@ -219,7 +219,7 @@ The log is circular, so the positions in the log are defined by the combination
of a cycle number - the number of times the log has been overwritten - and the
offset into the log. A LSN carries the cycle in the upper 32 bits and the
offset in the lower 32 bits. The offset is in units of "basic blocks" (512
-bytes). Hence we can do realtively simple LSN based math to keep track of
+bytes). Hence we can do relatively simple LSN based math to keep track of
available space in the log.
Log space accounting is done via a pair of constructs called "grant heads". The
diff --git a/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst b/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst
index 32b6ac4ca9d6..ce4584fb3103 100644
--- a/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst
+++ b/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst
@@ -93,7 +93,7 @@ others on a regular basis about burnout.
sponsoring work on any part of XFS.
- **LTS Maintainer**: Someone who backports and tests bug fixes from
- uptream to the LTS kernels.
+ upstream to the LTS kernels.
There tend to be six separate LTS trees at any given time.
The maintainer for a given LTS release should identify themselves with an
diff --git a/Documentation/filesystems/xfs/xfs-online-fsck-design.rst b/Documentation/filesystems/xfs/xfs-online-fsck-design.rst
index 12aa63840830..e231d127cd40 100644
--- a/Documentation/filesystems/xfs/xfs-online-fsck-design.rst
+++ b/Documentation/filesystems/xfs/xfs-online-fsck-design.rst
@@ -4521,8 +4521,8 @@ Both online and offline repair can use this strategy.
| For this second effort, the ondisk parent pointer format as originally |
| proposed was ``(parent_inum, parent_gen, dirent_pos) → (dirent_name)``. |
| The format was changed during development to eliminate the requirement |
-| of repair tools needing to to ensure that the ``dirent_pos`` field |
-| always matched when reconstructing a directory. |
+| of repair tools needing to ensure that the ``dirent_pos`` field always |
+| matched when reconstructing a directory. |
| |
| There were a few other ways to have solved that problem: |
| |
diff --git a/Documentation/iio/iio_devbuf.rst b/Documentation/iio/iio_devbuf.rst
index 9919e4792d0e..dca1f0200b0d 100644
--- a/Documentation/iio/iio_devbuf.rst
+++ b/Documentation/iio/iio_devbuf.rst
@@ -148,5 +148,5 @@ applied), however there are corner cases in which the buffered data may be found
in a processed form. Please note that these corner cases are not addressed by
this documentation.
-Please see ``Documentation/ABI/testing/sysfs-bus-iio`` for a complete
+Please see Documentation/ABI/testing/sysfs-bus-iio for a complete
description of the attributes.
diff --git a/Documentation/input/devices/elantech.rst b/Documentation/input/devices/elantech.rst
index c3374a7ce7af..98163a258b83 100644
--- a/Documentation/input/devices/elantech.rst
+++ b/Documentation/input/devices/elantech.rst
@@ -556,7 +556,7 @@ Note on debounce:
In case the box has an unstable power supply or other electrical issues, or
when the number of fingers changes, the F/W sends a "debounce packet" to
inform the driver that the hardware is in debounce status.
-The debouce packet has the following signature::
+The debounce packet has the following signature::
byte 0: 0xc4
byte 1: 0xff
diff --git a/Documentation/input/input-programming.rst b/Documentation/input/input-programming.rst
index c9264814c7aa..2b3e6a34e34b 100644
--- a/Documentation/input/input-programming.rst
+++ b/Documentation/input/input-programming.rst
@@ -346,3 +346,22 @@ driver can handle these events, it has to set the respective bits in evbit,
This callback routine can be called from an interrupt or a BH (although that
isn't a rule), and thus must not sleep, and must not take too long to finish.
+
+Polled input devices
+~~~~~~~~~~~~~~~~~~~~
+
+Input polling is set up by passing an input device struct and a callback to
+the function::
+
+ int input_setup_polling(struct input_dev *dev,
+ void (*poll_fn)(struct input_dev *dev))
+
+Within the callback, the driver should use the regular input_report_*
+functions and input_sync(), just as any other input device does.
+
+There is also the function::
+
+ void input_set_poll_interval(struct input_dev *dev, unsigned int interval)
+
+which configures the interval, in milliseconds, at which the device will
+be polled.
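+
+As a hedged sketch (example_poll() and read_button_state() are made-up
+names for illustration), a driver might combine the two like this::
+
+	static void example_poll(struct input_dev *dev)
+	{
+		/* Sample the hardware and report its state as usual. */
+		input_report_key(dev, BTN_0, read_button_state());
+		input_sync(dev);
+	}
+
+	/* ... in the probe routine, after allocating dev ... */
+	error = input_setup_polling(dev, example_poll);
+	if (error)
+		return error;
+
+	/* Poll the device every 50 milliseconds. */
+	input_set_poll_interval(dev, 50);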
diff --git a/Documentation/mm/split_page_table_lock.rst b/Documentation/mm/split_page_table_lock.rst
index 8e1ceb0a6619..cc3cd46abd1b 100644
--- a/Documentation/mm/split_page_table_lock.rst
+++ b/Documentation/mm/split_page_table_lock.rst
@@ -4,7 +4,7 @@ Split page table lock
Originally, mm->page_table_lock spinlock protected all page tables of the
mm_struct. But this approach leads to poor page fault scalability of
-multi-threaded applications due high contention on the lock. To improve
+multi-threaded applications due to high contention on the lock. To improve
scalability, split page table lock was introduced.
With split page table lock we have separate per-table lock to serialize
diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst
index 75e017dfa825..518284e287b0 100644
--- a/Documentation/networking/statistics.rst
+++ b/Documentation/networking/statistics.rst
@@ -143,7 +143,7 @@ reading multiple stats as it internally performs a full dump of
and reports only the stat corresponding to the accessed file.
Sysfs files are documented in
-`Documentation/ABI/testing/sysfs-class-net-statistics`.
+Documentation/ABI/testing/sysfs-class-net-statistics.
netlink
diff --git a/Documentation/nvme/nvme-pci-endpoint-target.rst b/Documentation/nvme/nvme-pci-endpoint-target.rst
index 66e7b7d869b4..b699595d1762 100644
--- a/Documentation/nvme/nvme-pci-endpoint-target.rst
+++ b/Documentation/nvme/nvme-pci-endpoint-target.rst
@@ -86,7 +86,7 @@ configurable through configfs before starting the controller. To avoid issues
with excessive local memory usage for executing commands, MDTS defaults to 512
KB and is limited to a maximum of 2 MB (arbitrary limit).
-Mimimum number of PCI Address Mapping Windows Required
+Minimum number of PCI Address Mapping Windows Required
------------------------------------------------------
Most PCI endpoint controllers provide a limited number of mapping windows for
diff --git a/Documentation/process/5.Posting.rst b/Documentation/process/5.Posting.rst
index dbb763a8de90..22fa925353cf 100644
--- a/Documentation/process/5.Posting.rst
+++ b/Documentation/process/5.Posting.rst
@@ -268,10 +268,15 @@ The tags in common use are:
- Cc: the named person received a copy of the patch and had the
opportunity to comment on it.
-Be careful in the addition of tags to your patches, as only Cc: is appropriate
-for addition without the explicit permission of the person named; using
-Reported-by: is fine most of the time as well, but ask for permission if
-the bug was reported in private.
+Be careful in the addition of the aforementioned tags to your patches, as all
+except for Cc:, Reported-by:, and Suggested-by: need explicit permission of the
+person named. For those three, implicit permission is sufficient if the person
+contributed to the Linux kernel using that name and email address according
+to the lore archives or the commit history -- and, in the case of Reported-by:
+and Suggested-by:, did the reporting or suggestion in public. Note,
+bugzilla.kernel.org is a public place in this sense, but email addresses
+used there are private; so do not expose them in tags, unless the person
+used them in earlier contributions.
Sending the patch
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index a0beca805362..d564362773b5 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -58,11 +58,11 @@ mcelog 0.6 mcelog --version
iptables 1.4.2 iptables -V
openssl & libcrypto 1.0.0 openssl version
bc 1.06.95 bc --version
-Sphinx\ [#f1]_ 2.4.4 sphinx-build --version
+Sphinx\ [#f1]_ 3.4.3 sphinx-build --version
GNU tar 1.28 tar --version
gtags (optional) 6.6.5 gtags --version
mkimage (optional) 2017.01 mkimage --version
-Python (optional) 3.5.x python3 --version
+Python (optional) 3.9.x python3 --version
GNU AWK (optional) 5.1.0 gawk --version
====================== =============== ========================================
diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst
index 1d1150954be3..4cdef8360698 100644
--- a/Documentation/process/code-of-conduct-interpretation.rst
+++ b/Documentation/process/code-of-conduct-interpretation.rst
@@ -145,13 +145,16 @@ kernel community.
Any decisions regarding enforcement recommendations will be brought to
the TAB for implementation of enforcement with the relevant maintainers
-if needed. A decision by the Code of Conduct Committee can be overturned
-by the TAB by a two-thirds vote.
+if needed. Once the TAB approves one or more of the measures outlined
+in the scope of the ban by two-thirds of the members voting for the
+measures, the Code of Conduct Committee will enforce the TAB approved
+measures. Any Code of Conduct Committee members serving on the TAB will
+not vote on the measures.
At quarterly intervals, the Code of Conduct Committee and TAB will
provide a report summarizing the anonymised reports that the Code of
Conduct committee has received and their status, as well details of any
-overridden decisions including complete and identifiable voting details.
+TAB approved decisions including complete and identifiable voting details.
Because how we interpret and enforce the Code of Conduct will evolve over
time, this document will be updated when necessary to reflect any
@@ -227,9 +230,11 @@ The scope of the ban for a period of time could include:
such as mailing lists and social media sites
Once the TAB approves one or more of the measures outlined in the scope of
-the ban by a two-thirds vote, the Code of Conduct Committee will enforce
-the TAB approved measure(s) in collaboration with the community, maintainers,
-sub-maintainers, and kernel.org administrators.
+the ban by two-thirds of the members voting for the measures, the Code of
+Conduct Committee will enforce the TAB approved measure(s) in collaboration
+with the community, maintainers, sub-maintainers, and kernel.org
+administrators. Any Code of Conduct Committee members serving on the TAB
+will not vote on the measures.
The Code of Conduct Committee is mindful of the negative impact of seeking
public apology and instituting ban could have on individuals. It is also
diff --git a/Documentation/process/kernel-docs.rst b/Documentation/process/kernel-docs.rst
index 3b5b5983fea8..c67ac12cf789 100644
--- a/Documentation/process/kernel-docs.rst
+++ b/Documentation/process/kernel-docs.rst
@@ -75,6 +75,17 @@ On-line docs
Published books
---------------
+ * Title: **The Linux Memory Manager**
+
+ :Author: Lorenzo Stoakes
+ :Publisher: No Starch Press
+ :Date: February 2025
+ :Pages: 1300
+ :ISBN: 978-1718504462
+ :Notes: Memory management. Full draft available as early access for
+ pre-order, full release scheduled for Fall 2025. See
+ https://nostarch.com/linux-memory-manager for further info.
+
* Title: **Practical Linux System Administration: A Guide to Installation, Configuration, and Management, 1st Edition**
:Author: Kenneth Hess
diff --git a/Documentation/process/submit-checklist.rst b/Documentation/process/submit-checklist.rst
index e531dd504b6c..beb7f94279fd 100644
--- a/Documentation/process/submit-checklist.rst
+++ b/Documentation/process/submit-checklist.rst
@@ -52,7 +52,8 @@ Provide documentation
4) All new module parameters are documented with ``MODULE_PARM_DESC()``
5) All new userspace interfaces are documented in ``Documentation/ABI/``.
- See ``Documentation/ABI/README`` for more information.
+ See Documentation/admin-guide/abi.rst (or ``Documentation/ABI/README``)
+ for more information.
Patches that change userspace interfaces should be CCed to
linux-api@vger.kernel.org.
@@ -91,9 +92,12 @@ Build your code
fix any issues.
2) Builds on multiple CPU architectures by using local cross-compile tools
- or some other build farm. Note that ppc64 is a good architecture for
- cross-compilation checking because it tends to use ``unsigned long`` for
- 64-bit quantities.
+ or some other build farm.
+ Note that testing against architectures of different word sizes
+ (32- and 64-bit) and different endianness (big- and little-) is effective
+ in catching various portability issues due to false assumptions on
+ representable quantity range, data alignment, or endianness, among
+ others.
3) Newly-added code has been compiled with ``gcc -W`` (use
``make KCFLAGS=-W``). This will generate lots of noise, but is good
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index 8fdc0ef3e604..cede4e7b29af 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -495,10 +495,10 @@ list archives. A "# Suffix" may also be used in this case to clarify.
If a person has had the opportunity to comment on a patch, but has not
provided such comments, you may optionally add a ``Cc:`` tag to the patch.
-This is the only tag which might be added without an explicit action by the
-person it names - but it should indicate that this person was copied on the
-patch. This tag documents that potentially interested parties
-have been included in the discussion.
+This tag documents that potentially interested parties have been included in
+the discussion. Note, this is one of only three tags you might be able to use
+without explicit permission of the person named (see 'Tagging people requires
+permission' below for details).
Co-developed-by: states that the patch was co-created by multiple developers;
it is used to give attribution to co-authors (in addition to the author
@@ -544,9 +544,9 @@ hopefully inspires them to help us again in the future. The tag is intended for
bugs; please do not use it to credit feature requests. The tag should be
followed by a Closes: tag pointing to the report, unless the report is not
available on the web. The Link: tag can be used instead of Closes: if the patch
-fixes a part of the issue(s) being reported. Please note that if the bug was
-reported in private, then ask for permission first before using the Reported-by
-tag.
+fixes a part of the issue(s) being reported. Note, the Reported-by tag is one
+of only three tags you might be able to use without explicit permission of the
+person named (see 'Tagging people requires permission' below for details).
A Tested-by: tag indicates that the patch has been successfully tested (in
some environment) by the person named. This tag informs maintainers that
@@ -596,11 +596,11 @@ Usually removal of someone's Tested-by or Reviewed-by tags should be mentioned
in the patch changelog (after the '---' separator).
A Suggested-by: tag indicates that the patch idea is suggested by the person
-named and ensures credit to the person for the idea. Please note that this
-tag should not be added without the reporter's permission, especially if the
-idea was not posted in a public forum. That said, if we diligently credit our
-idea reporters, they will, hopefully, be inspired to help us again in the
-future.
+named and ensures credit to the person for the idea: if we diligently credit
+our idea reporters, they will, hopefully, be inspired to help us again in the
+future. Note, this is one of only three tags you might be able to use without
+explicit permission of the person named (see 'Tagging people requires
+permission' below for details).
A Fixes: tag indicates that the patch fixes an issue in a previous commit. It
is used to make it easy to determine where a bug originated, which can help
@@ -618,6 +618,21 @@ Finally, while providing tags is welcome and typically very appreciated, please
note that signers (i.e. submitters and maintainers) may use their discretion in
applying offered tags.
+.. _tagging_people:
+
+Tagging people requires permission
+----------------------------------
+
+Be careful in the addition of the aforementioned tags to your patches, as all
+except for Cc:, Reported-by:, and Suggested-by: need explicit permission of the
+person named. For those three, implicit permission is sufficient if the person
+contributed to the Linux kernel using that name and email address according
+to the lore archives or the commit history -- and, in the case of Reported-by:
+and Suggested-by:, did the reporting or suggestion in public. Note,
+bugzilla.kernel.org is a public place in this sense, but email addresses
+used there are private; so do not expose them in tags, unless the person
+used them in earlier contributions.
+
.. _the_canonical_patch_format:
The canonical patch format
@@ -717,6 +732,12 @@ patch in the permanent changelog. If the ``from`` line is missing,
then the ``From:`` line from the email header will be used to determine
the patch author in the changelog.
+The author may indicate their affiliation or the sponsor of the work
+by adding the name of an organization to the ``from`` and ``SoB`` lines,
+e.g.:
+
+ From: Patch Author (Company) <author@example.com>
+
Explanation Body
^^^^^^^^^^^^^^^^
diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst
index 41ed2ceafc92..e881a945c188 100644
--- a/Documentation/scheduler/sched-bwc.rst
+++ b/Documentation/scheduler/sched-bwc.rst
@@ -59,7 +59,7 @@ At the same time, we can say that the worst case deadline miss, will be
\Sum e_i; that is, there is a bounded tardiness (under the assumption
that x+e is indeed WCET).
-The interferenece when using burst is valued by the possibilities for
+The interference when using burst is valued by the possibilities for
missing the deadline and the average WCET. Test results showed that when
there are many cgroups or the CPU is underutilized, the interference is
limited. More details are shown in:
diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst
index c4672d7df2f7..0993e41353db 100644
--- a/Documentation/scheduler/sched-ext.rst
+++ b/Documentation/scheduler/sched-ext.rst
@@ -294,6 +294,42 @@ dispatching, and must be dispatched to with ``scx_bpf_dsq_insert()``. See
the function documentation and usage in ``tools/sched_ext/scx_simple.bpf.c``
for more information.
+Task Lifecycle
+--------------
+
+The following pseudo-code summarizes the entire lifecycle of a task managed
+by a sched_ext scheduler:
+
+.. code-block:: c
+
+ ops.init_task(); /* A new task is created */
+ ops.enable(); /* Enable BPF scheduling for the task */
+
+ while (task in SCHED_EXT) {
+ if (task can migrate)
+ ops.select_cpu(); /* Called on wakeup (optimization) */
+
+ ops.runnable(); /* Task becomes ready to run */
+
+ while (task is runnable) {
+ if (task is not in a DSQ) {
+ ops.enqueue(); /* Task can be added to a DSQ */
+
+ /* A CPU becomes available */
+
+ ops.dispatch(); /* Task is moved to a local DSQ */
+ }
+ ops.running(); /* Task starts running on its assigned CPU */
+ ops.tick(); /* Called every 1/HZ seconds */
+ ops.stopping(); /* Task stops running (time slice expires or wait) */
+ }
+
+ ops.quiescent(); /* Task releases its assigned CPU (wait) */
+ }
+
+ ops.disable(); /* Disable BPF scheduling for the task */
+ ops.exit_task(); /* Task is destroyed */
+
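+As a minimal sketch (modeled loosely on ``tools/sched_ext/scx_simple.bpf.c``,
+with details elided), an ``ops.enqueue()`` callback that sends every task to
+the global DSQ with the default slice could look like:
+
+.. code-block:: c
+
+    void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
+    {
+            /* Insert into the global DSQ; dispatch happens later. */
+            scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+    }
+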
Where to Look
=============
diff --git a/Documentation/sound/soc/machine.rst b/Documentation/sound/soc/machine.rst
index 9db132bc0070..1828f5edca3e 100644
--- a/Documentation/sound/soc/machine.rst
+++ b/Documentation/sound/soc/machine.rst
In the above struct, dais are registered using names, but you can pass
either dai name or device tree node but not both. Also, names used here
for cpu/codec/platform dais should be globally unique.
-Additionaly below example macro can be used to register cpu, codec and
+Additionally, the example macro below can be used to register cpu, codec and
platform dai::
SND_SOC_DAILINK_DEFS(wm2200_cpu_dsp,
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
index a413f8dd5115..ecf54d22e9dc 100644
--- a/Documentation/sphinx/automarkup.py
+++ b/Documentation/sphinx/automarkup.py
@@ -11,13 +11,7 @@ from sphinx.errors import NoUri
import re
from itertools import chain
-#
-# Python 2 lacks re.ASCII...
-#
-try:
- ascii_p3 = re.ASCII
-except AttributeError:
- ascii_p3 = 0
+from kernel_abi import get_kernel_abi
#
# Regex nastiness. Of course.
@@ -26,28 +20,30 @@ except AttributeError:
# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
# bit tries to restrict matches to things that won't create trouble.
#
-RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
+RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII)
#
# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
#
RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
- flags=ascii_p3)
+ flags=re.ASCII)
#
# Sphinx 3 uses a different C role for each one of struct, union, enum and
# typedef
#
-RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
-RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
-RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
-RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
#
# Detects a reference to a documentation page of the form Documentation/... with
# an optional extension
#
RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)')
+RE_abi_file = re.compile(r'(\bDocumentation/ABI/[\w\-/]+)')
+RE_abi_symbol = re.compile(r'(\b/(sys|config|proc)/[\w\-/]+)')
RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
@@ -83,11 +79,10 @@ def markup_refs(docname, app, node):
#
# Associate each regex with the function that will markup its matches
#
- markup_func_sphinx2 = {RE_doc: markup_doc_ref,
- RE_function: markup_c_ref,
- RE_generic_type: markup_c_ref}
- markup_func_sphinx3 = {RE_doc: markup_doc_ref,
+ markup_func = {RE_doc: markup_doc_ref,
+ RE_abi_file: markup_abi_file_ref,
+ RE_abi_symbol: markup_abi_ref,
RE_function: markup_func_ref_sphinx3,
RE_struct: markup_c_ref,
RE_union: markup_c_ref,
@@ -95,11 +90,6 @@ def markup_refs(docname, app, node):
RE_typedef: markup_c_ref,
RE_git: markup_git}
- if sphinx.version_info[0] >= 3:
- markup_func = markup_func_sphinx3
- else:
- markup_func = markup_func_sphinx2
-
match_iterators = [regex.finditer(t) for regex in markup_func]
#
# Sort all references by the starting position in text
@@ -270,6 +260,54 @@ def markup_doc_ref(docname, app, match):
else:
return nodes.Text(match.group(0))
+#
+# Try to replace a documentation reference for ABI symbols and files
+# with a cross reference to that page
+#
+def markup_abi_ref(docname, app, match, warning=False):
+ stddom = app.env.domains['std']
+ #
+ # Go through the dance of getting an xref out of the std domain
+ #
+ kernel_abi = get_kernel_abi()
+
+ fname = match.group(1)
+ target = kernel_abi.xref(fname)
+
+ # Kernel ABI doesn't describe such file or symbol
+ if not target:
+ if warning:
+ kernel_abi.log.warning("%s not found", fname)
+ return nodes.Text(match.group(0))
+
+ pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'ref',
+ reftarget = target, modname = None,
+ classname = None, refexplicit = False)
+
+ #
+ # XXX The Latex builder will throw NoUri exceptions here,
+ # work around that by ignoring them.
+ #
+ try:
+ xref = stddom.resolve_xref(app.env, docname, app.builder, 'ref',
+ target, pxref, None)
+ except NoUri:
+ xref = None
+ #
+ # Return the xref if we got it; otherwise just return the plain text.
+ #
+ if xref:
+ return xref
+ else:
+ return nodes.Text(match.group(0))
+
+#
+# Variant of markup_abi_ref() that warns when a reference is not found
+#
+def markup_abi_file_ref(docname, app, match):
+ return markup_abi_ref(docname, app, match, warning=True)
+
+
def get_c_namespace(app, docname):
source = app.env.doc2path(docname)
with open(source) as f:
diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py
index e6959af25402..e8ea80d4324c 100644
--- a/Documentation/sphinx/cdomain.py
+++ b/Documentation/sphinx/cdomain.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=W0141,C0113,C0103,C0325
-u"""
+"""
cdomain
~~~~~~~
@@ -45,9 +45,6 @@ import re
__version__ = '1.1'
-# Get Sphinx version
-major, minor, patch = sphinx.version_info[:3]
-
# Namespace to be prepended to the full name
namespace = None
@@ -145,7 +142,7 @@ class CObject(Base_CObject):
}
def handle_func_like_macro(self, sig, signode):
- u"""Handles signatures of function-like macros.
+ """Handles signatures of function-like macros.
If the objtype is 'function' and the signature ``sig`` is a
function-like macro, the name of the macro is returned. Otherwise
diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
index 5911bd0d7965..db6f0380de94 100644
--- a/Documentation/sphinx/kernel_abi.py
+++ b/Documentation/sphinx/kernel_abi.py
@@ -2,7 +2,7 @@
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
-u"""
+"""
kernel-abi
~~~~~~~~~~
@@ -14,7 +14,7 @@ u"""
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the
- scripts/get_abi.pl script to parse the Kernel ABI files.
+ scripts/get_abi.py script to parse the Kernel ABI files.
Overview of directive's argument and options.
@@ -32,107 +32,137 @@ u"""
"""
-import codecs
import os
-import subprocess
-import sys
import re
-import kernellog
+import sys
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
-from docutils.utils.error_reporting import ErrorString
from sphinx.util.docutils import switch_source_input
+from sphinx.util import logging
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "scripts/lib/abi"))
+
+from abi_parser import AbiParser
+
+__version__ = "1.0"
+
+logger = logging.getLogger('kernel_abi')
+path = os.path.join(srctree, "Documentation/ABI")
-__version__ = '1.0'
+_kernel_abi = None
+
+def get_kernel_abi():
+ """
+ Initialize kernel_abi global var, if not initialized yet.
+
+ This is needed to avoid warnings during Sphinx module initialization.
+ """
+ global _kernel_abi
+
+ if not _kernel_abi:
+ # Parse ABI symbols only once
+ _kernel_abi = AbiParser(path, logger=logger)
+ _kernel_abi.parse_abi()
+ _kernel_abi.check_issues()
+
+ return _kernel_abi
def setup(app):
app.add_directive("kernel-abi", KernelCmd)
- return dict(
- version = __version__
- , parallel_read_safe = True
- , parallel_write_safe = True
- )
+ return {
+ "version": __version__,
+ "parallel_read_safe": True,
+ "parallel_write_safe": True
+ }
-class KernelCmd(Directive):
- u"""KernelABI (``kernel-abi``) directive"""
+class KernelCmd(Directive):
+ """KernelABI (``kernel-abi``) directive"""
required_arguments = 1
- optional_arguments = 2
+ optional_arguments = 3
has_content = False
final_argument_whitespace = True
+ parser = None
option_spec = {
- "debug" : directives.flag,
- "rst" : directives.unchanged
+ "debug": directives.flag,
+ "no-symbols": directives.flag,
+ "no-files": directives.flag,
}
def run(self):
+ kernel_abi = get_kernel_abi()
+
doc = self.state.document
if not doc.settings.file_insertion_enabled:
raise self.warning("docutils: file insertion disabled")
- srctree = os.path.abspath(os.environ["srctree"])
-
- args = [
- os.path.join(srctree, 'scripts/get_abi.pl'),
- 'rest',
- '--enable-lineno',
- '--dir', os.path.join(srctree, 'Documentation', self.arguments[0]),
- ]
-
- if 'rst' in self.options:
- args.append('--rst-source')
-
- lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8')
- nodeList = self.nestedParse(lines, self.arguments[0])
- return nodeList
-
- def nestedParse(self, lines, fname):
env = self.state.document.settings.env
content = ViewList()
node = nodes.section()
- if "debug" in self.options:
- code_block = "\n\n.. code-block:: rst\n :linenos:\n"
- for l in lines.split("\n"):
- code_block += "\n " + l
- lines = code_block + "\n\n"
+ abi_type = self.arguments[0]
- line_regex = re.compile(r"^\.\. LINENO (\S+)\#([0-9]+)$")
- ln = 0
- n = 0
- f = fname
+ if "no-symbols" in self.options:
+ show_symbols = False
+ else:
+ show_symbols = True
- for line in lines.split("\n"):
- n = n + 1
- match = line_regex.search(line)
- if match:
- new_f = match.group(1)
+ if "no-files" in self.options:
+ show_file = False
+ else:
+ show_file = True
- # Sphinx parser is lazy: it stops parsing contents in the
- # middle, if it is too big. So, handle it per input file
- if new_f != f and content:
- self.do_parse(content, node)
- content = ViewList()
+ tab_width = self.options.get('tab-width',
+ self.state.document.settings.tab_width)
- # Add the file to Sphinx build dependencies
- env.note_dependency(os.path.abspath(f))
-
- f = new_f
-
- # sphinx counts lines from 0
- ln = int(match.group(2)) - 1
+ old_f = None
+ n = 0
+ n_sym = 0
+ for msg, f, ln in kernel_abi.doc(show_file=show_file,
+ show_symbols=show_symbols,
+ filter_path=abi_type):
+ n_sym += 1
+ msg_list = statemachine.string2lines(msg, tab_width,
+ convert_whitespace=True)
+ if "debug" in self.options:
+ lines = [
+ "", "", ".. code-block:: rst",
+ " :linenos:", ""
+ ]
+ for m in msg_list:
+ lines.append(" " + m)
else:
- content.append(line, f, ln)
-
- kernellog.info(self.state.document.settings.env.app, "%s: parsed %i lines" % (fname, n))
+ lines = msg_list
- if content:
- self.do_parse(content, node)
+ for line in lines:
+ # sphinx counts lines from 0
+ content.append(line, f, ln - 1)
+ n += 1
+
+ if f != old_f:
+ # Add the file to Sphinx build dependencies
+ env.note_dependency(os.path.abspath(f))
+
+ old_f = f
+
+ # Sphinx doesn't like to parse big messages. So, let's
+ # add content symbol by symbol
+ if content:
+ self.do_parse(content, node)
+ content = ViewList()
+
+ if show_symbols and not show_file:
+ logger.verbose("%s ABI: %i symbols (%i ReST lines)" % (abi_type, n_sym, n))
+ elif not show_symbols and show_file:
+ logger.verbose("%s ABI: %i files (%i ReST lines)" % (abi_type, n_sym, n))
+ else:
+ logger.verbose("%s ABI: %i data (%i ReST lines)" % (abi_type, n_sym, n))
return node.children
diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py
index 03ace5f01b5c..e3a51867f27b 100644
--- a/Documentation/sphinx/kernel_feat.py
+++ b/Documentation/sphinx/kernel_feat.py
@@ -1,7 +1,7 @@
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
-u"""
+"""
kernel-feat
~~~~~~~~~~~
@@ -56,7 +56,7 @@ def setup(app):
class KernelFeat(Directive):
- u"""KernelFeat (``kernel-feat``) directive"""
+ """KernelFeat (``kernel-feat``) directive"""
required_arguments = 1
optional_arguments = 2
diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
index 638762442336..8db176045bc5 100755
--- a/Documentation/sphinx/kernel_include.py
+++ b/Documentation/sphinx/kernel_include.py
@@ -2,7 +2,7 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
-u"""
+"""
kernel-include
~~~~~~~~~~~~~~
@@ -56,7 +56,7 @@ def setup(app):
class KernelInclude(Include):
# ==============================================================================
- u"""KernelInclude (``kernel-include``) directive"""
+ """KernelInclude (``kernel-include``) directive"""
def run(self):
env = self.state.document.settings.env
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index ec1ddfff1863..39ddae6ae7dd 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -39,7 +39,7 @@ from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
import sphinx
from sphinx.util.docutils import switch_source_input
-import kernellog
+from sphinx.util import logging
__version__ = '1.0'
@@ -56,16 +56,12 @@ class KernelDocDirective(Directive):
'functions': directives.unchanged,
}
has_content = False
+ logger = logging.getLogger('kerneldoc')
def run(self):
env = self.state.document.settings.env
cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
- # Pass the version string to kernel-doc, as it needs to use a different
- # dialect, depending what the C domain supports for each specific
- # Sphinx versions
- cmd += ['-sphinx-version', sphinx.__version__]
-
filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
export_file_patterns = []
@@ -109,8 +105,7 @@ class KernelDocDirective(Directive):
cmd += [filename]
try:
- kernellog.verbose(env.app,
- 'calling kernel-doc \'%s\'' % (" ".join(cmd)))
+ self.logger.verbose("calling kernel-doc '%s'" % (" ".join(cmd)))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
@@ -120,8 +115,8 @@ class KernelDocDirective(Directive):
if p.returncode != 0:
sys.stderr.write(err)
- kernellog.warn(env.app,
- 'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
+ self.logger.warning("kernel-doc '%s' failed with return code %d"
+ % (" ".join(cmd), p.returncode))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
elif env.config.kerneldoc_verbosity > 0:
sys.stderr.write(err)
@@ -148,8 +143,8 @@ class KernelDocDirective(Directive):
return node.children
except Exception as e: # pylint: disable=W0703
- kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' %
- (" ".join(cmd), str(e)))
+ self.logger.warning("kernel-doc '%s' processing failed with: %s" %
+ (" ".join(cmd), str(e)))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
def do_parse(self, result, node):
diff --git a/Documentation/sphinx/kernellog.py b/Documentation/sphinx/kernellog.py
deleted file mode 100644
index 0bc00c138cad..000000000000
--- a/Documentation/sphinx/kernellog.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Sphinx has deprecated its older logging interface, but the replacement
-# only goes back to 1.6. So here's a wrapper layer to keep around for
-# as long as we support 1.4.
-#
-# We don't support 1.4 anymore, but we'll keep the wrappers around until
-# we change all the code to not use them anymore :)
-#
-import sphinx
-from sphinx.util import logging
-
-logger = logging.getLogger('kerneldoc')
-
-def warn(app, message):
- logger.warning(message)
-
-def verbose(app, message):
- logger.verbose(message)
-
-def info(app, message):
- logger.info(message)
diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
index 97166333b727..f1a7f13c9c60 100644
--- a/Documentation/sphinx/kfigure.py
+++ b/Documentation/sphinx/kfigure.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0103, R0903, R0912, R0915
-u"""
+"""
scalable figure and image handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -59,12 +59,14 @@ from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
import sphinx
from sphinx.util.nodes import clean_astext
-import kernellog
+from sphinx.util import logging
Figure = images.Figure
__version__ = '1.0.0'
+logger = logging.getLogger('kfigure')
+
# simple helper
# -------------
@@ -163,14 +165,14 @@ def setup(app):
def setupTools(app):
- u"""
+ """
Check available build tools and log some *verbose* messages.
This function is called once, when the builder is initiated.
"""
global dot_cmd, dot_Tpdf, convert_cmd, rsvg_convert_cmd # pylint: disable=W0603
global inkscape_cmd, inkscape_ver_one # pylint: disable=W0603
- kernellog.verbose(app, "kfigure: check installed tools ...")
+ logger.verbose("kfigure: check installed tools ...")
dot_cmd = which('dot')
convert_cmd = which('convert')
@@ -178,7 +180,7 @@ def setupTools(app):
inkscape_cmd = which('inkscape')
if dot_cmd:
- kernellog.verbose(app, "use dot(1) from: " + dot_cmd)
+ logger.verbose("use dot(1) from: " + dot_cmd)
try:
dot_Thelp_list = subprocess.check_output([dot_cmd, '-Thelp'],
@@ -190,10 +192,11 @@ def setupTools(app):
dot_Tpdf_ptn = b'pdf'
dot_Tpdf = re.search(dot_Tpdf_ptn, dot_Thelp_list)
else:
- kernellog.warn(app, "dot(1) not found, for better output quality install "
- "graphviz from https://www.graphviz.org")
+ logger.warning(
+ "dot(1) not found, for better output quality install graphviz from https://www.graphviz.org"
+ )
if inkscape_cmd:
- kernellog.verbose(app, "use inkscape(1) from: " + inkscape_cmd)
+ logger.verbose("use inkscape(1) from: " + inkscape_cmd)
inkscape_ver = subprocess.check_output([inkscape_cmd, '--version'],
stderr=subprocess.DEVNULL)
ver_one_ptn = b'Inkscape 1'
@@ -204,26 +207,27 @@ def setupTools(app):
else:
if convert_cmd:
- kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
+ logger.verbose("use convert(1) from: " + convert_cmd)
else:
- kernellog.verbose(app,
+ logger.verbose(
"Neither inkscape(1) nor convert(1) found.\n"
- "For SVG to PDF conversion, "
- "install either Inkscape (https://inkscape.org/) (preferred) or\n"
- "ImageMagick (https://www.imagemagick.org)")
+ "For SVG to PDF conversion, install either Inkscape (https://inkscape.org/) (preferred) or\n"
+ "ImageMagick (https://www.imagemagick.org)"
+ )
if rsvg_convert_cmd:
- kernellog.verbose(app, "use rsvg-convert(1) from: " + rsvg_convert_cmd)
- kernellog.verbose(app, "use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion")
+ logger.verbose("use rsvg-convert(1) from: " + rsvg_convert_cmd)
+ logger.verbose("use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion")
dot_Tpdf = False
else:
- kernellog.verbose(app,
+ logger.verbose(
"rsvg-convert(1) not found.\n"
- " SVG rendering of convert(1) is done by ImageMagick-native renderer.")
+ " SVG rendering of convert(1) is done by ImageMagick-native renderer."
+ )
if dot_Tpdf:
- kernellog.verbose(app, "use 'dot -Tpdf' for DOT -> PDF conversion")
+ logger.verbose("use 'dot -Tpdf' for DOT -> PDF conversion")
else:
- kernellog.verbose(app, "use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion")
+ logger.verbose("use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion")
# integrate conversion tools
@@ -257,13 +261,12 @@ def convert_image(img_node, translator, src_fname=None):
# in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages
- kernellog.verbose(app, 'assert best format for: ' + img_node['uri'])
+ logger.verbose('assert best format for: ' + img_node['uri'])
if in_ext == '.dot':
if not dot_cmd:
- kernellog.verbose(app,
- "dot from graphviz not available / include DOT raw.")
+ logger.verbose("dot from graphviz not available / include DOT raw.")
img_node.replace_self(file2literal(src_fname))
elif translator.builder.format == 'latex':
@@ -290,10 +293,11 @@ def convert_image(img_node, translator, src_fname=None):
if translator.builder.format == 'latex':
if not inkscape_cmd and convert_cmd is None:
- kernellog.warn(app,
- "no SVG to PDF conversion available / include SVG raw."
- "\nIncluding large raw SVGs can cause xelatex error."
- "\nInstall Inkscape (preferred) or ImageMagick.")
+ logger.warning(
+ "no SVG to PDF conversion available / include SVG raw.\n"
+ "Including large raw SVGs can cause xelatex error.\n"
+ "Install Inkscape (preferred) or ImageMagick."
+ )
img_node.replace_self(file2literal(src_fname))
else:
dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
@@ -306,15 +310,14 @@ def convert_image(img_node, translator, src_fname=None):
_name = dst_fname[len(str(translator.builder.outdir)) + 1:]
if isNewer(dst_fname, src_fname):
- kernellog.verbose(app,
- "convert: {out}/%s already exists and is newer" % _name)
+ logger.verbose("convert: {out}/%s already exists and is newer" % _name)
else:
ok = False
mkdir(path.dirname(dst_fname))
if in_ext == '.dot':
- kernellog.verbose(app, 'convert DOT to: {out}/' + _name)
+ logger.verbose('convert DOT to: {out}/' + _name)
if translator.builder.format == 'latex' and not dot_Tpdf:
svg_fname = path.join(translator.builder.outdir, fname + '.svg')
ok1 = dot2format(app, src_fname, svg_fname)
@@ -325,7 +328,7 @@ def convert_image(img_node, translator, src_fname=None):
ok = dot2format(app, src_fname, dst_fname)
elif in_ext == '.svg':
- kernellog.verbose(app, 'convert SVG to: {out}/' + _name)
+ logger.verbose('convert SVG to: {out}/' + _name)
ok = svg2pdf(app, src_fname, dst_fname)
if not ok:
@@ -354,7 +357,7 @@ def dot2format(app, dot_fname, out_fname):
with open(out_fname, "w") as out:
exit_code = subprocess.call(cmd, stdout = out)
if exit_code != 0:
- kernellog.warn(app,
+ logger.warning(
"Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
return bool(exit_code == 0)
@@ -388,13 +391,14 @@ def svg2pdf(app, svg_fname, pdf_fname):
pass
if exit_code != 0:
- kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
+ logger.warning("Error #%d when calling: %s" %
+ (exit_code, " ".join(cmd)))
if warning_msg:
- kernellog.warn(app, "Warning msg from %s: %s"
- % (cmd_name, str(warning_msg, 'utf-8')))
+ logger.warning( "Warning msg from %s: %s" %
+ (cmd_name, str(warning_msg, 'utf-8')))
elif warning_msg:
- kernellog.verbose(app, "Warning msg from %s (likely harmless):\n%s"
- % (cmd_name, str(warning_msg, 'utf-8')))
+ logger.verbose("Warning msg from %s (likely harmless):\n%s" %
+ (cmd_name, str(warning_msg, 'utf-8')))
return bool(exit_code == 0)
@@ -418,7 +422,8 @@ def svg2pdf_by_rsvg(app, svg_fname, pdf_fname):
# use stdout and stderr from parent
exit_code = subprocess.call(cmd)
if exit_code != 0:
- kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
+ logger.warning("Error #%d when calling: %s" %
+ (exit_code, " ".join(cmd)))
ok = bool(exit_code == 0)
return ok
@@ -440,7 +445,7 @@ class kernel_image(nodes.image):
pass
class KernelImage(images.Image):
- u"""KernelImage directive
+ """KernelImage directive
Earns everything from ``.. image::`` directive, except *remote URI* and
*glob* pattern. The KernelImage wraps a image node into a
@@ -476,7 +481,7 @@ class kernel_figure(nodes.figure):
"""Node for ``kernel-figure`` directive."""
class KernelFigure(Figure):
- u"""KernelImage directive
+ """KernelImage directive
Earns everything from ``.. figure::`` directive, except *remote URI* and
*glob* pattern. The KernelFigure wraps a figure node into a kernel_figure
@@ -513,15 +518,15 @@ def visit_kernel_render(self, node):
app = self.builder.app
srclang = node.get('srclang')
- kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang))
+ logger.verbose('visit kernel-render node lang: "%s"' % srclang)
tmp_ext = RENDER_MARKUP_EXT.get(srclang, None)
if tmp_ext is None:
- kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang))
+ logger.warning('kernel-render: "%s" unknown / include raw.' % srclang)
return
if not dot_cmd and tmp_ext == '.dot':
- kernellog.verbose(app, "dot from graphviz not available / include raw.")
+ logger.verbose("dot from graphviz not available / include raw.")
return
literal_block = node[0]
@@ -552,7 +557,7 @@ class kernel_render(nodes.General, nodes.Inline, nodes.Element):
pass
class KernelRender(Figure):
- u"""KernelRender directive
+ """KernelRender directive
Render content by external tool. Has all the options known from the
*figure* directive, plus option ``caption``. If ``caption`` has a
diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
index 8b416bfd75ac..ec50e1ee5223 100644
--- a/Documentation/sphinx/load_config.py
+++ b/Documentation/sphinx/load_config.py
@@ -9,7 +9,7 @@ from sphinx.util.osutil import fs_encoding
def loadConfig(namespace):
# ------------------------------------------------------------------------------
- u"""Load an additional configuration file into *namespace*.
+ """Load an additional configuration file into *namespace*.
The name of the configuration file is taken from the environment
``SPHINX_CONF``. The external configuration file extends (or overwrites) the
diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py
index dcad0fff4723..d31cff867436 100755
--- a/Documentation/sphinx/maintainers_include.py
+++ b/Documentation/sphinx/maintainers_include.py
@@ -3,7 +3,7 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
-u"""
+"""
maintainers-include
~~~~~~~~~~~~~~~~~~~
@@ -37,7 +37,7 @@ def setup(app):
)
class MaintainersInclude(Include):
- u"""MaintainersInclude (``maintainers-include``) directive"""
+ """MaintainersInclude (``maintainers-include``) directive"""
required_arguments = 0
def parse_maintainers(self, path):
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
index 16bea0632555..180fbb50c337 100755
--- a/Documentation/sphinx/rstFlatTable.py
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -2,7 +2,7 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0330, R0903, R0912
-u"""
+"""
flat-table
~~~~~~~~~~
@@ -99,7 +99,7 @@ class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
class FlatTable(Table):
# ==============================================================================
- u"""FlatTable (``flat-table``) directive"""
+ """FlatTable (``flat-table``) directive"""
option_spec = {
'name': directives.unchanged
@@ -135,7 +135,7 @@ class FlatTable(Table):
class ListTableBuilder(object):
# ==============================================================================
- u"""Builds a table from a double-stage list"""
+ """Builds a table from a double-stage list"""
def __init__(self, directive):
self.directive = directive
@@ -212,7 +212,7 @@ class ListTableBuilder(object):
raise SystemMessagePropagation(error)
def parseFlatTableNode(self, node):
- u"""parses the node from a :py:class:`FlatTable` directive's body"""
+ """parses the node from a :py:class:`FlatTable` directive's body"""
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
self.raiseError(
@@ -225,7 +225,7 @@ class ListTableBuilder(object):
self.roundOffTableDefinition()
def roundOffTableDefinition(self):
- u"""Round off the table definition.
+ """Round off the table definition.
This method rounds off the table definition in :py:member:`rows`.
diff --git a/Documentation/trace/postprocess/decode_msr.py b/Documentation/trace/postprocess/decode_msr.py
index aa9cc7abd5c2..f5609b16f589 100644
--- a/Documentation/trace/postprocess/decode_msr.py
+++ b/Documentation/trace/postprocess/decode_msr.py
@@ -32,6 +32,6 @@ for j in sys.stdin:
break
if r:
j = j.replace(" " + m.group(2), " " + r + "(" + m.group(2) + ")")
- print j,
+ print(j)
diff --git a/Documentation/translations/it_IT/process/submit-checklist.rst b/Documentation/translations/it_IT/process/submit-checklist.rst
index 692be4af9c9b..5bf1b4adebc1 100644
--- a/Documentation/translations/it_IT/process/submit-checklist.rst
+++ b/Documentation/translations/it_IT/process/submit-checklist.rst
@@ -58,9 +58,10 @@ Provide documentation
4) All new module parameters are documented with ``MODULE_PARM_DESC()``.
5) All new userspace interfaces are documented in
-   ``Documentation/ABI/``. Read ``Documentation/ABI/README`` for more
-   information. Patches that change userspace interfaces should
-   also be CCed to linux-api@vger.kernel.org.
+   ``Documentation/ABI/``. Read Documentation/admin-guide/abi.rst
+   (or ``Documentation/ABI/README``) for more information.
+   Patches that change userspace interfaces should also be
+   CCed to linux-api@vger.kernel.org.
6) If the patch adds new ioctl calls, then update
``Documentation/userspace-api/ioctl/ioctl-number.rst``.
diff --git a/Documentation/translations/ja_JP/SubmitChecklist b/Documentation/translations/ja_JP/SubmitChecklist
deleted file mode 100644
index 1759c6b452d6..000000000000
--- a/Documentation/translations/ja_JP/SubmitChecklist
+++ /dev/null
@@ -1,105 +0,0 @@
-NOTE:
-This is a version of Documentation/process/submit-checklist.rst into Japanese.
-This document is maintained by Takenori Nagano <t-nagano@ah.jp.nec.com>
-and the JF Project team <http://www.linux.or.jp/JF/>.
-If you find any difference between this document and the original file
-or a problem with the translation,
-please contact the maintainer of this file or JF project.
-
-Please also note that the purpose of this file is to be easier to read
-for non English (read: Japanese) speakers and is not intended as a
-fork. So if you have any comments or updates of this file, please try
-to update the original English file first.
-
-Last Updated: 2008/07/14
-==================================
-This is a Japanese translation of
-linux-2.6.26/Documentation/process/submit-checklist.rst.
-
-Translation group: JF Project < http://www.linux.or.jp/JF/ >
-Translation date: 2008/07/14
-Translator: Takenori Nagano <t-nagano at ah dot jp dot nec dot com>
-Proofreader: Masanori Kobayashi <zap03216 at nifty dot ne dot jp>
-==================================
-
-
-Checklist for Linux kernel patch submitters
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This document introduces some basic practices that developers who want
-their patches picked up more quickly should follow. Everything here
-supplements the advice on submitting Linux kernel patches found in
-Documentation/process/submitting-patches.rst and elsewhere.
-
- 1: Make sure the patch builds correctly with all applicable and
-    modified CONFIG options, i.e. with =y, =m and =n. Also check
-    that gcc and the linker emit no warnings or errors.
-
- 2: Make sure the patch builds correctly with the allnoconfig and
-    allmodconfig options.
-
- 3: Make sure the patch builds correctly on multiple architectures,
-    using cross-compile tools at hand or something like OSDL's PLM.
-
- 4: ppc64, which uses a 64-bit 'unsigned long', is a suitable
-    architecture for cross-compile checking.
-
- 5: Check whether the patch conforms to the kernel coding style (!)
-
- 6: When adding or changing CONFIG options, make sure the CONFIG menu
-    is not broken.
-
- 7: When adding new Kconfig options, always write their help text too.
-
- 8: Check carefully, considering the proper Kconfig dependencies.
-    This is very difficult to do properly by machine testing, though;
-    the way to do it well is to think it through yourself.
-
- 9: Do a proper code check using sparse.
-
-10: Use 'make checkstack', and fix any problems it reveals.
-    'make checkstack' does not point out problems explicitly, but any
-    single function using a stack larger than 512 bytes is a
-    candidate for fixing.
-
-11: Include kernel-doc describing global kernel APIs in the source.
-    (It is not required for static functions, but is welcome there
-    too.) Then check the added documentation using 'make htmldocs'
-    or 'make mandocs', and fix any problems found.
-
-12: Verify operation with CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT,
-    CONFIG_DEBUG_SLAB, CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES,
-    CONFIG_DEBUG_SPINLOCK and CONFIG_DEBUG_ATOMIC_SLEEP all enabled
-    at the same time.
-
-13: Build and verify operation both with and without CONFIG_SMP and
-    CONFIG_PREEMPT.
-
-14: Exercise all code paths while running with all lockdep features
-    enabled.
-
-15: When adding new /proc entries, always add documentation under
-    Documentation/.
-
-16: When adding new boot parameters, always add descriptions to
-    Documentation/admin-guide/kernel-parameters.rst.
-
-17: When adding parameters to a module, always describe them using
-    MODULE_PARM_DESC().
-
-18: When creating new userspace interfaces, always add documentation
-    to Documentation/ABI/, with Documentation/ABI/README as a
-    reference.
-
-19: Check the behaviour when at least slab and page allocations fail,
-    using fault-injection. See Documentation/fault-injection/.
-
-    If the added code is of any substantial amount, adding
-    subsystem-specific fault-injection may be a good idea.
-
-20: Compile newly added code with `gcc -W'. This option emits a large
-    amount of unneeded messages, but messages such as
-    "warning: comparison between signed and unsigned" are useful for
-    finding bugs.
-
-21: After the submitted patch has been merged into the -mm patch set,
-    test that it can still coexist with all the existing patches and
-    the various changes in VM, VFS and the other subsystems.
diff --git a/Documentation/translations/ja_JP/disclaimer-ja_JP.rst b/Documentation/translations/ja_JP/disclaimer-ja_JP.rst
new file mode 100644
index 000000000000..46a026000407
--- /dev/null
+++ b/Documentation/translations/ja_JP/disclaimer-ja_JP.rst
@@ -0,0 +1,24 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _translations_ja_JP_disclaimer:
+
+=================================
+Disclaimer (abridged translation)
+=================================
+
+.. note:: [Translator's note]
+   This document is a translation of part of the
+   :ref:`Disclaimer (English version) <translations_disclaimer>`.
+   Please refer to the English version for the full text.
+
+The files under Documentation/translations/ja_JP/ are Japanese
+translations of the corresponding files under Documentation/ (the
+originals). If you find a difference between a translation and its
+original, or a problem in a translation, please notify the maintainer
+listed in MAINTAINERS. If a translation has not caught up with updates
+to its original, patches reflecting the updates in the Japanese version
+are also welcome.
+
+Note that the purpose of these translations is the convenience of
+non-English (here: Japanese) readers, and they are not intended as a
+fork. So if you have any comments on, or updates to, the contents of
+this file, please consider updating the original file first.
diff --git a/Documentation/translations/ja_JP/index.rst b/Documentation/translations/ja_JP/index.rst
index 0b476b429e3b..4159b417bfdd 100644
--- a/Documentation/translations/ja_JP/index.rst
+++ b/Documentation/translations/ja_JP/index.rst
@@ -11,7 +11,9 @@
.. toctree::
:maxdepth: 1
+ disclaimer-ja_JP
process/howto
+ process/submit-checklist
.. raw:: latex
diff --git a/Documentation/translations/ja_JP/process/howto.rst b/Documentation/translations/ja_JP/process/howto.rst
index d9ba40588e46..5e307f90982c 100644
--- a/Documentation/translations/ja_JP/process/howto.rst
+++ b/Documentation/translations/ja_JP/process/howto.rst
@@ -1,35 +1,18 @@
-.. raw:: latex
+.. SPDX-License-Identifier: GPL-2.0
- \kerneldocCJKoff
-
-NOTE:
-This is a version of Documentation/process/howto.rst translated into Japanese.
-This document is maintained by Tsugikazu Shibata <tshibata@ab.jp.nec.com>
-If you find any difference between this document and the original file or
-a problem with the translation, please contact the maintainer of this file.
-
-Please also note that the purpose of this file is to be easier to
-read for non English (read: Japanese) speakers and is not intended as
-a fork. So if you have any comments or updates for this file, please
-try to update the original English file first.
-
-----------------------------------
-
-.. raw:: latex
-
- \kerneldocCJKon
-
-This document is a Japanese translation of
-Documentation/process/howto.rst.
-
-Translator: Tsugikazu Shibata <tshibata@ab.jp.nec.com>
-
-----------------------------------
+.. Originally contributed by Tsugikazu Shibata
How to do Linux kernel development
==================================
+.. note:: [Translator's note]
+   This document is a translation of
+   Documentation/process/howto.rst.
+   For the disclaimer, see the
+   :ref:`abridged disclaimer <translations_ja_JP_disclaimer>` and the
+   :ref:`Disclaimer (English version) <translations_disclaimer>`.
+
This document covers the important points of the topic above (how to do
Linux kernel development). It includes how to become a Linux kernel
developer and how to learn to work with the Linux kernel development
community.
diff --git a/Documentation/translations/ja_JP/process/submit-checklist.rst b/Documentation/translations/ja_JP/process/submit-checklist.rst
new file mode 100644
index 000000000000..fb3b9e3bd8ee
--- /dev/null
+++ b/Documentation/translations/ja_JP/process/submit-checklist.rst
@@ -0,0 +1,163 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. Translated by Akira Yokosawa <akiyks@gmail.com>
+
+.. An old translation of this document of a different origin was at
+ Documentation/translations/ja_JP/SubmitChecklist, which can be found
+ in the pre-v6.14 tree if you are interested.
+ Please note that this translation is independent of the previous one.
+
+=======================================
+Linux kernel patch submission checklist
+=======================================
+
+.. note:: [Translator's note]
+   This document is a translation of
+   Documentation/process/submit-checklist.rst.
+   For the disclaimer, see the
+   :ref:`abridged disclaimer <translations_ja_JP_disclaimer>` and the
+   :ref:`Disclaimer (English version) <translations_disclaimer>`.
+
+The following are basic points to keep in mind when submitting a kernel
+patch, to help it be accepted smoothly.
+
+This builds upon, and supplements, Documentation/process/submitting-patches.rst
+and the other documents on submitting Linux kernel patches.
+
+.. note:: [Translator's note]
+   Where possible, each item treats the patch (or the update within it)
+   as its implicit subject and states the desired condition. Elsewhere,
+   the wording is adjusted without changing the original meaning, aiming
+   for concise, easy-to-grasp bullet points.
+
+
+Review your code
+================
+
+1) For each facility used, ``#include`` the file that defines/declares
+   it; do not depend on it being pulled in via other header files.
+
+2) Checked for general style as detailed in
+   Documentation/process/coding-style.rst.
+
+3) All memory barriers (e.g. ``barrier()``, ``rmb()``, ``wmb()``) carry
+   a comment in the source code that explains what they do, their
+   purpose, and why they are needed.
+
+
+Review Kconfig changes
+======================
+
+1) Any new or modified ``CONFIG`` option does not muck up the config
+   menu and, except where it meets the exception criteria noted under
+   "Menu attributes: default value" in
+   Documentation/kbuild/kconfig-language.rst, defaults to off.
+
+2) New ``Kconfig`` options have help text.
+
+3) Carefully reviewed with respect to the relevant ``Kconfig``
+   combinations. Covering this with testing is hard; brainpower is the
+   key here.
+
+Provide documentation
+=====================
+
+1) Global kernel APIs are documented in :ref:`kernel-doc <kernel_doc>`
+   format (not required for static functions, but OK there too).
+
+2) All new ``/proc`` entries are documented under ``Documentation/``.
+
+3) All new kernel boot parameters are documented in
+   ``Documentation/admin-guide/kernel-parameters.rst``.
+
+4) All new module parameters are documented with ``MODULE_PARM_DESC()``.
+
+5) All new userspace interfaces are documented in ``Documentation/ABI/``.
+   See Documentation/admin-guide/abi.rst (or ``Documentation/ABI/README``)
+   for more information.
+   Patches that change userspace interfaces should also be CCed to
+   linux-api@vger.kernel.org.
+
+6) Any patch that adds new ioctls also updates
+   ``Documentation/userspace-api/ioctl/ioctl-number.rst``.
+
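+A minimal sketch of item 4 above (illustrative only; the parameter
+name, default, and wording are hypothetical, not from the original
+checklist)::
+
+	static int queue_depth = 32;
+	module_param(queue_depth, int, 0444);
+	MODULE_PARM_DESC(queue_depth,
+			 "Number of in-flight requests per device (default: 32)");
+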
+Check your code with tools
+==========================
+
+1) Checked with the patch style checker (``scripts/checkpatch.pl``)
+   for common patch-style violations. Be able to justify any
+   violations you choose to leave in.
+
+2) Checked carefully with sparse.
+
+3) Any problems reported by ``make checkstack`` have been fixed.
+   ``checkstack`` does not point out problems explicitly, but any
+   function that uses more than 512 bytes of stack is a candidate for
+   rework.
+
+Build your code
+===============
+
+1) Builds cleanly:
+
+  a) with applicable and modified (``=y``, ``=m``, and ``=n``)
+     ``CONFIG`` options. No ``gcc`` or linker warnings/errors.
+
+  b) with ``allnoconfig`` and ``allmodconfig`` passing
+
+  c) when built with ``O=builddir``
+
+  d) for changes under Documentation/, the documentation build raises
+     no new warnings or errors. Build with ``make htmldocs`` or
+     ``make pdfdocs`` and fix any problems found.
+
+2) Builds for multiple CPU architectures using local cross-compile
+   tools or some other build environment (translator's note: a build
+   farm). Testing on architectures of different word sizes (32-bit and
+   64-bit) and different endiannesses (big- and little-endian) is
+   especially effective at catching various portability problems
+   caused by wrong assumptions about representable number ranges, data
+   alignment, or endianness.
+
+3) Newly added code compiles with ``gcc -W`` (use
+   ``make KCFLAGS=-W``). This generates lots of noise but is effective
+   for smoking out bugs like
+   ``warning: comparison between signed and unsigned``.
+
+4) If the changed source code depends on (or uses) kernel APIs or
+   features associated with any of the following ``Kconfig`` symbols,
+   tested with multiple builds combining those ``Kconfig`` symbols
+   disabled and/or (if possible) ``=m`` (not all at once; various
+   random combinations):
+
+   ``CONFIG_SMP``, ``CONFIG_SYSFS``, ``CONFIG_PROC_FS``, ``CONFIG_INPUT``,
+   ``CONFIG_PCI``, ``CONFIG_BLOCK``, ``CONFIG_PM``, ``CONFIG_MAGIC_SYSRQ``,
+   ``CONFIG_NET``, ``CONFIG_INET=n`` (the latter combined with
+   ``CONFIG_NET=y``).
+
+Test your code
+==============
+
+1) Tested with ``CONFIG_PREEMPT``, ``CONFIG_DEBUG_PREEMPT``,
+   ``CONFIG_SLUB_DEBUG``, ``CONFIG_DEBUG_PAGEALLOC``, ``CONFIG_DEBUG_MUTEXES``,
+   ``CONFIG_DEBUG_SPINLOCK``, ``CONFIG_DEBUG_ATOMIC_SLEEP``,
+   ``CONFIG_PROVE_RCU`` and ``CONFIG_DEBUG_OBJECTS_RCU_HEAD`` all
+   simultaneously enabled.
+
+2) Build- and runtime-tested with ``CONFIG_SMP`` and ``CONFIG_PREEMPT``
+   both enabled and disabled.
+
+3) All code paths have been exercised with all lockdep features
+   enabled.
+
+4) Checked, at a minimum, with fault injection (translator's note:
+   "fault injection") on slab and page allocation failures. See
+   Documentation/fault-injection/index.rst for details. If the new
+   code is substantial, adding subsystem-targeted fault injection may
+   be desirable.
+
+5) Tested against the latest linux-next tag to make sure it works
+   together with all the other queued patches and the various changes
+   in the VM, VFS, and other subsystems.
diff --git a/Documentation/translations/sp_SP/process/submit-checklist.rst b/Documentation/translations/sp_SP/process/submit-checklist.rst
index 0d6651f9d871..e7107cc97001 100644
--- a/Documentation/translations/sp_SP/process/submit-checklist.rst
+++ b/Documentation/translations/sp_SP/process/submit-checklist.rst
@@ -97,9 +97,10 @@ and elsewhere regarding submitting Linux kernel patches.
``MODULE_PARM_DESC()``.
18) All new userspace interfaces are documented
-    in ``Documentation/ABI/``. See ``Documentation/ABI/README`` for
-    more information. Patches that change userspace
-    interfaces should be CCed to linux-api@vger.kernel.org.
+    in ``Documentation/ABI/``. See Documentation/admin-guide/abi.rst
+    (or ``Documentation/ABI/README``) for more information.
+    Patches that change
+    userspace interfaces should be CCed to linux-api@vger.kernel.org.
19) Has been checked with injection of at least slab and page-allocation
    failures. See ``Documentation/fault-injection/``.
diff --git a/Documentation/translations/zh_CN/admin-guide/README.rst b/Documentation/translations/zh_CN/admin-guide/README.rst
index e679cbc3c89d..1bdafdc4c8e2 100644
--- a/Documentation/translations/zh_CN/admin-guide/README.rst
+++ b/Documentation/translations/zh_CN/admin-guide/README.rst
@@ -146,7 +146,7 @@ Linux kernel release 6.x <http://kernel.org/>
"make xconfig" Qt-based configuration tool.
- "make gconfig" GTK+-based configuration tool.
+ "make gconfig" GTK-based configuration tool.
"make oldconfig" Select all options based on the existing ./.config file and ask about
new configuration options.
diff --git a/Documentation/translations/zh_CN/dev-tools/ubsan.rst b/Documentation/translations/zh_CN/dev-tools/ubsan.rst
index 2487696b3772..81ef6f77caeb 100644
--- a/Documentation/translations/zh_CN/dev-tools/ubsan.rst
+++ b/Documentation/translations/zh_CN/dev-tools/ubsan.rst
@@ -3,7 +3,14 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/dev-tools/ubsan.rst
-:Translator: Dongliang Mu <dzm91@hust.edu.cn>
+
+:Translator:
+
+ Dongliang Mu <dzm91@hust.edu.cn>
+
+:Proofreader:
+
+ WangYuli <wangyuli@uniontech.com>
The Undefined Behavior Sanitizer - UBSAN
====================================
@@ -55,30 +62,20 @@ GCC since 4.9.x [1_] (see the ``-fsanitize=undefined`` option and its suboptions)
Enable UBSAN with the following kernel configuration::
-	CONFIG_UBSAN=y
-
-Use the following kernel configuration to check the entire kernel::
-
-	CONFIG_UBSAN_SANITIZE_ALL=y
-
-To enable instrumentation for specific files or directories, add a line
-similar to the following to the respective kernel Makefile:
+	CONFIG_UBSAN=y
-- For a single file (e.g. main.o)::
-
-	UBSAN_SANITIZE_main.o := y
-
-- For all files in one directory::
-
-	UBSAN_SANITIZE := y
-
-To exclude files from being instrumented even if
-``CONFIG_UBSAN_SANITIZE_ALL=y`` is set, use::
+To exclude a file from being checked::
 	UBSAN_SANITIZE_main.o := n
-and::
+To exclude all files in one directory::
 	UBSAN_SANITIZE := n
+When all files are disabled, specific files can be enabled with::
+
+	UBSAN_SANITIZE_main.o := y
+
Detection of unaligned memory accesses is controlled by the separate
option CONFIG_UBSAN_ALIGNMENT. That option is off by default on
architectures that support unaligned accesses
(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y). It can still be enabled in
the kernel configuration, but it will produce a lot of UBSAN reports.
diff --git a/Documentation/translations/zh_CN/disclaimer-zh_CN.rst b/Documentation/translations/zh_CN/disclaimer-zh_CN.rst
index 3c6db094a63c..37681c0b2a01 100644
--- a/Documentation/translations/zh_CN/disclaimer-zh_CN.rst
+++ b/Documentation/translations/zh_CN/disclaimer-zh_CN.rst
@@ -1,9 +1,7 @@
:orphan:
-.. warning::
+.. note::
The purpose of this file is to make it easier for Chinese readers to
read and understand, rather than to serve as a fork. So, if you have
any comments or updates for this file, please try to update the
original English file first.
-
-.. note::
-    If you find any difference between this document and the original
-    file, or any problem with the translation, please contact the
-    translator of this file, or ask Alex Shi for help: <alexs@kernel.org>.
+    If you find any difference between this document and the original
+    file, or any problem with the translation, please send suggestions
+    or patches to the translator of this file, or ask the Chinese
+    documentation maintainers and reviewers for help.
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index 7574e1673180..cc512ca54172 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -26,7 +26,13 @@
By the way, Chinese documentation must also follow the kernel coding
style. The main difference between Chinese and English in the style is
that Chinese punctuation characters take the width of two English
characters, so where English is limited to 100 characters per line,
Chinese must not exceed 50. Also pay attention to keeping '-', '=' and
similar symbols aligned with the titles they belong to. Before
-submitting patches to the community, always run the necessary
-``checkpatch.pl`` checks and build tests.
+submitting patches to the community, always run the necessary
+``checkpatch.pl`` checks and build tests, make sure that
+``make htmldocs/pdfdocs`` adds no new warnings, and finally inspect the
+generated html/pdf files to confirm that they look right.
+
+Before submitting, please confirm that your patch applies cleanly to
+the Chinese documentation maintenance tree:
+https://git.kernel.org/pub/scm/linux/kernel/git/alexs/linux.git/
+If your patch depends on someone else's patch, you may agree with the
+others that one person merges and submits them together.
Working with the Linux kernel community
------------------------
diff --git a/Documentation/translations/zh_CN/mm/balance.rst b/Documentation/translations/zh_CN/mm/balance.rst
index 6fd79209c307..f877c0cfa39a 100644
--- a/Documentation/translations/zh_CN/mm/balance.rst
+++ b/Documentation/translations/zh_CN/mm/balance.rst
@@ -64,7 +64,7 @@ kswapd does not really need to balance the highmem zone, since interrupt context does not request highmem
Stealing happens if stealing pages from process memory and shm relieves
memory pressure on any zone in the page's node that has fallen below its
watermark.
-watemark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd:
+watermark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd:
These are per-zone fields, used to determine when a zone needs to be
balanced. When the number of pages falls below watermark[WMARK_MIN], the
hysteric field low_on_memory gets set. This stays set till the number of
free pages becomes watermark[WMARK_HIGH]. When low_on_memory is set, page
allocation requests will try to free some pages in the zone (if
diff --git a/Documentation/translations/zh_CN/process/submit-checklist.rst b/Documentation/translations/zh_CN/process/submit-checklist.rst
index 10536b74aeec..0e524f1c1af5 100644
--- a/Documentation/translations/zh_CN/process/submit-checklist.rst
+++ b/Documentation/translations/zh_CN/process/submit-checklist.rst
@@ -82,8 +82,8 @@ Linux kernel patch submission checklist
17) All new module parameters are documented with ``MODULE_PARM_DESC()``
18) All new userspace interfaces are documented in ``Documentation/ABI/``.
-    For details, see ``Documentation/ABI/README``. Patches that change
-    userspace interfaces should be CCed to linux-api@vger.kernel.org.
+    For details, see Documentation/admin-guide/abi.rst (or ``Documentation/ABI/README``).
+    Patches that change userspace interfaces should be CCed to linux-api@vger.kernel.org\ .
19) Has been checked with injection of at least slab and page-allocation failures. See ``Documentation/fault-injection/``.
    If the new code is substantial, adding subsystem-specific fault injection might be appropriate.
diff --git a/Documentation/translations/zh_CN/security/credentials.rst b/Documentation/translations/zh_CN/security/credentials.rst
new file mode 100644
index 000000000000..91c353dfb622
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/credentials.rst
@@ -0,0 +1,479 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/credentials.rst
+
+:Translator:
+ Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+====================
+Credentials in Linux
+====================
+
+By: David Howells <dhowells@redhat.com>
+
+.. contents:: :local:
+
+Overview
+========
+
+The security checks Linux performs when one object acts upon another
+are made up of several parts:
+
+ 1. Objects.
+
+    Objects are entities in the system that can be acted upon directly
+    by userspace programs. Linux has a variety of actionable objects,
+    including:
+
+     - Tasks
+     - Files/inodes
+     - Sockets
+     - Message queues
+     - Shared memory segments
+     - Semaphores
+     - Keys
+
+    Part of the description of all these objects is a set of
+    credentials. What is in the set depends on the type of object.
+
+ 2. Object ownership.
+
+    Amongst the credentials of most objects there is a subset that
+    indicates the ownership of that object. This is used for resource
+    accounting and limitation (disk quotas and task rlimits, for
+    example).
+
+    In a standard UNIX filesystem, for instance, this is defined by the
+    UID marked on the inode.
+
+ 3. The object context.
+
+    Also amongst the credentials of those objects is a subset that
+    indicates the "object context" of that object. This may or may not
+    be the same as (2) - in a standard UNIX file, for instance, it is
+    defined by the UID and GID marked on the inode.
+
+    The object context is part of the security calculation made when
+    the object is acted upon.
+
+ 4. Subjects.
+
+    A subject is an object that is acting upon another object.
+
+    Most of the objects in the system are inactive: they don't act on
+    other objects within the system. Processes/tasks are the obvious
+    exception: they can access and manipulate other objects.
+
+    Objects other than tasks can, under some circumstances, also be
+    subjects. For instance, an open file can send the SIGIO signal to a
+    task, using the UID and EUID given to it by a task that called
+    ``fcntl(F_SETOWN)`` upon it. In this case, the file struct will
+    have a subjective context too.
+
+ 5. The subjective context.
+
+    A subject has an additional interpretation of its credentials: a
+    subset of them forms the "subjective context". The subjective
+    context is used as part of the security calculation made when the
+    subject acts.
+
+    A Linux task, for instance, has the FSUID, FSGID and supplementary
+    group list used when it is acting upon a file - which are quite
+    separate from the real UID and GID that normally form the task's
+    object context.
+
+ 6. Actions.
+
+    Linux provides a number of actions that a subject may perform upon
+    an object. The set of actions available depends on the nature of
+    the subject and the object.
+
+    Actions include reading, writing, creating and deleting files, and
+    forking or signalling and tracing tasks.
+
+ 7. Rules, access control lists and security calculations.
+
+    When a subject acts upon an object, a security calculation is made.
+    This involves taking the subjective context, the objective context
+    and the action, and searching one or more sets of rules to see
+    whether the subject is granted or denied permission to act in the
+    desired manner on the object, given those contexts.
+
+    There are two main sources of rules:
+
+    a. Discretionary access control (DAC):
+
+       Sometimes the object's description includes sets of rules. This
+       is the "Access Control List" or 'ACL'. A Linux file may supply
+       more than one ACL.
+
+       A traditional UNIX file, for instance, includes a permissions
+       mask that is a simplified ACL with three fixed classes of
+       subject ("user", "group" and "other"), each of which may be
+       granted certain privileges ("read", "write" and "execute" -
+       whatever those map to for the object in question). UNIX file
+       permissions do not allow the arbitrary specification of
+       subjects, however, and so are of limited use.
+
+       A Linux file might also support POSIX ACLs. These are lists of
+       rules that grant various permissions to arbitrary subjects.
+
+    b. Mandatory access control (MAC):
+
+       The system as a whole may have one or more sets of rules that
+       are applied to all subjects and objects, regardless of their
+       source. SELinux and Smack are examples of this.
+
+       In the case of SELinux and Smack, each object is given a label
+       as part of its credentials. When an action is requested, they
+       take the subject label, the object label and the action, and
+       look for a rule that says whether this action is granted or
+       denied.
+
+
+Types of Credentials
+====================
+
+The Linux kernel supports the following types of credentials:
+
+ 1. Traditional UNIX credentials.
+
+     - Real User ID
+     - Real Group ID
+
+    The UID and GID are carried by most, if not all, Linux objects,
+    even if sometimes they have to be invented (FAT or CIFS files, for
+    example, which are derived from Windows). These (mostly) define
+    the object context of the object, with tasks differing slightly in
+    some cases.
+
+     - Effective, Saved and FS User ID
+     - Effective, Saved and FS Group ID
+     - Supplementary groups
+
+    These are additional credentials used by tasks only. Usually an
+    EUID/EGID/GROUPS set is used as the subjective context and the real
+    UID/GID as the objective context. For tasks, this is not always
+    true.
+
+ 2. Capabilities.
+
+     - Set of permitted capabilities
+     - Set of inheritable capabilities
+     - Set of effective capabilities
+     - Capability bounding set
+
+    These are only carried by tasks. They indicate superior
+    capabilities granted to a task beyond those an ordinary task would
+    have. These can be manipulated implicitly by changes to the
+    traditional UNIX credentials, but can also be manipulated directly
+    by the ``capset()`` system call.
+
+    The permitted capabilities are those the process might grant to
+    its effective or permitted sets through ``capset()``. The
+    inheritable set might also be so constrained.
+
+    The effective capabilities are the ones a task is actually allowed
+    to make use of itself.
+
+    The inheritable capabilities are the ones that may get passed
+    across ``execve()``.
+
+    The bounding set limits the capabilities that may be inherited
+    across ``execve()``, especially when a binary is executed that will
+    run as UID 0.
+
+ 3. Secure management flags (securebits).
+
+    These govern the way the above credentials are manipulated and
+    inherited over certain operations such as execve(). They aren't
+    used directly as objective or subjective credentials.
+
+ 4. Keys and keyrings.
+
+    These are only carried by tasks. They carry and cache security
+    tokens that don't fit into the other standard UNIX credentials.
+    They are used for things like making network filesystem keys
+    available at the time the process accesses a file, without
+    requiring ordinary programs to know about the security details
+    involved.
+
+    Keyrings are a special type of key. They carry a set of other keys
+    and can be searched to find the desired key. Each process may
+    subscribe to a number of keyrings:
+
+        Per-thread keying
+        Per-process keyring
+        Per-session keyring
+
+    When a process accesses a key, if not already present, it will
+    normally be cached on one of these keyrings for future accesses to
+    find it.
+
+    For more information on keys, see
+    ``Documentation/translations/zh_CN/security/keys/*``.
+
+ 5. LSM.
+
+    The Linux Security Module allows extra controls to be placed over
+    the operations a task may perform. Currently Linux supports
+    several LSM options.
+
+    Some of these work by labelling the objects in the system and then
+    applying sets of rules (policies) that say which operations a task
+    with one label may do to an object with another label.
+
+ 6. AF_KEY.
+
+    This is a socket-based credential management approach within the
+    networking stack [RFC 2367]. It isn't discussed in this document
+    because it doesn't interact directly with task and file
+    credentials; rather it keeps system-level credentials.
+
+
+When a file is opened, part of the opening task's subjective context is
+recorded in the file struct created. This allows operations using that
+file struct to use those credentials instead of the subjective context
+of the task that issued the operation. An example is a file opened on a
+network filesystem, where the credentials of the opened file should be
+presented to the server, regardless of who actually does a read or
+write on it.
+
+
+File Markings
+=============
+
+Files on disk or obtained over the network may have annotations that
+form the objective security context of the file. Depending on the type
+of filesystem, these may include one or more of the following:
+
+ * UNIX UID, GID, mode;
+ * Windows user ID;
+ * Access control list;
+ * LSM security label;
+ * UNIX exec privilege escalation bits (SUID/SGID);
+ * File capabilities exec privilege escalation bits.
+
+These are compared against the task's subjective security context, and
+certain operations are allowed or disallowed as a result. In the case
+of execve(), the privilege escalation bits come into play and may allow
+the resulting process extra privileges, as determined by the
+annotations on the executable file.
+
+
+Task Credentials
+================
+
+In Linux, all of a task's credentials are held in a refcounted
+structure of type 'struct cred', accessed through (uid, gid) or
+(groups, keys, LSM security). Each task points to its credentials by a
+pointer called 'cred' in its task_struct.
+
+Once a set of credentials has been prepared and committed, it may not
+be changed, barring the following exceptions:
+
+ 1. its reference count may be changed;
+
+ 2. the reference count on the group_info struct it points to may be
+    changed;
+
+ 3. the reference count on the security data it points to may be
+    changed;
+
+ 4. the reference count on any keyrings it points to may be changed;
+
+ 5. any keyrings it points to may be revoked, expired or have their
+    security attributes changed;
+
+ 6. the contents of any keyrings it points to may be changed (the
+    whole point of keyrings being a shared set of credentials,
+    modifiable by anyone with appropriate access).
+
+To alter anything in the cred struct, the copy-and-replace principle
+must be followed: first take a copy, then alter the copy, and finally
+use RCU (read-copy-update) to change the task pointer to point to the
+new copy. There are wrappers to aid with this (see below).
+
+A task may only alter its own credentials; it is no longer permitted
+for a task to alter another task's credentials. This means the
+``capset()`` system call no longer accepts any PID other than that of
+the current process. Also, ``keyctl_instantiate()`` and
+``keyctl_negate()`` no longer permit attachment to process-specific
+keyrings in the requesting process, as the instantiating process may
+need to create them.
+
+
+Immutable Credentials
+---------------------
+
+Once a set of credentials has been made public (by calling
+``commit_creds()``, for example), it must be considered immutable,
+barring two exceptions:
+
+ 1. The reference count may be altered.
+
+ 2. While the keyring subscriptions of a set of credentials may not be
+    changed, the contents of the subscribed keyrings may be altered.
+
+To catch accidental credential alteration at compile time, struct
+task_struct has _const_ pointers to its credential sets, as does
+struct file. Furthermore, certain functions such as ``get_cred()`` and
+``put_cred()`` operate on const pointers, making casts unnecessary,
+but the const qualifier must be discarded temporarily so that the
+reference count can be altered.
+
+
+Accessing Task Credentials
+--------------------------
+
+Because a task may only alter its own credentials, the current process
+is allowed to read or replace its own credentials without any form of
+locking - which simplifies things greatly. It can just call::
+
+	const struct cred *current_cred()
+
+to get a pointer to its credentials structure, and it doesn't have to
+release it afterwards.
+
+There are convenience wrappers for retrieving specific aspects of a
+task's credentials (each simply returns the value)::
+
+	uid_t current_uid(void)		Current's real UID
+	gid_t current_gid(void)		Current's real GID
+	uid_t current_euid(void)	Current's effective UID
+	gid_t current_egid(void)	Current's effective GID
+	uid_t current_fsuid(void)	Current's file access UID
+	gid_t current_fsgid(void)	Current's file access GID
+	kernel_cap_t current_cap(void)	Current's effective capabilities
+	struct user_struct *current_user(void)  Current's user account
+
+There are also convenience wrappers for retrieving specific associated
+pairs of a task's credentials::
+
+	void current_uid_gid(uid_t *, gid_t *);
+	void current_euid_egid(uid_t *, gid_t *);
+	void current_fsuid_fsgid(uid_t *, gid_t *);
+
+These return the pairs of values through their arguments after
+retrieving them from the current task's credentials.
+
+
+In addition, there is a function for obtaining a reference on the
+current process's current set of credentials::
+
+	const struct cred *get_current_cred(void);
+
+and functions for getting references to credentials that don't
+actually live in struct cred::
+
+	struct user_struct *get_current_user(void);
+	struct group_info *get_current_groups(void);
+
+These get references to the current process's user accounting
+structure and supplementary groups list respectively.
+
+Once a reference has been obtained, it must be released with
+``put_cred()``, ``free_uid()`` or ``put_group_info()`` as appropriate.
+
+
+Accessing Another Task's Credentials
+------------------------------------
+
+While a task may access its own credentials without locking, the same
+is not true of a task wanting to access another task's credentials. It
+must use the RCU read lock and ``rcu_dereference()``.
+
+The ``rcu_dereference()`` is wrapped by::
+
+	const struct cred *__task_cred(struct task_struct *task);
+
+This should be used inside the RCU read lock, as in the following
+example::
+
+	void foo(struct task_struct *t, struct foo_data *f)
+	{
+		const struct cred *tcred;
+		...
+		rcu_read_lock();
+		tcred = __task_cred(t);
+		f->uid = tcred->uid;
+		f->gid = tcred->gid;
+		f->groups = get_group_info(tcred->groups);
+		rcu_read_unlock();
+		...
+	}
+
+Should it be necessary to hold another task's credentials for a long
+period of time, and possibly to sleep while doing so, the caller
+should get a reference on them using::
+
+	const struct cred *get_task_cred(struct task_struct *task);
+
+This does all the RCU magic internally. The caller must call
+put_cred() on the credentials so obtained when they're finished with.
+
+.. note::
+   The result of ``__task_cred()`` should not be passed directly to
+   ``get_cred()`` as this may race with ``commit_cred()``.
+
+There are also convenience functions to access specific bits of
+another task's credentials, hiding the RCU magic from the caller::
+
+	uid_t task_uid(task)		Task's real UID
+	uid_t task_euid(task)		Task's effective UID
+
+If the caller is holding the RCU read lock at the time anyway, then::
+
+	__task_cred(task)->uid
+	__task_cred(task)->euid
+
+should be used instead. Similarly, if multiple aspects of a task's
+credentials need to be accessed, the RCU read lock should be taken,
+``__task_cred()`` called, the result stored in a temporary pointer,
+the various aspects read through the temporary pointer, and then the
+lock released. This prevents the potentially expensive RCU magic from
+being invoked multiple times.
+
+Should some other single aspect of another task's credentials be
+needed, this can be used::
+
+	task_cred_xxx(task, member)
+
+where 'member' is a non-pointer member of the cred struct. For
+instance::
+
+	uid_t task_cred_xxx(task, suid);
+
+will retrieve 'struct cred::suid' from the task, doing the appropriate
+RCU magic. This may not be used for pointer members, as what they
+point to may disappear the moment the RCU read lock is released.
+
+
+Altering Credentials
+--------------------
+
+As previously mentioned, a task may only alter its own credentials,
+and may not alter those of another task. This means it doesn't need to
+use any locking to alter its own credentials.
+
+To alter the current process's credentials, a function should first
+call::
+
+	struct cred *prepare_creds(void);
+
+This locks current->cred_replace_mutex and then allocates and
+constructs a duplicate of the current process's credentials. If
+successful, the function returns with the mutex still held. It returns
+NULL if not successful (out of memory).
+
+The mutex prevents ``ptrace()`` from altering the process's ptrace
+state while security checks on credentials construction and changing
+are taking place, as the ptrace state may alter the outcome,
+particularly in the case of ``execve()``.
+
+The new credentials set should be altered appropriately, and any
+security checks and hooks done. Both the current and the proposed sets
+of credentials are available at this point, as current_cred() will
+still return the current set.
+
+When replacing the group list, the new list must be sorted before it
+is added to the credentials, as a binary search is used to test for
+membership. In practice, this means groups_sort() should be called
+before set_groups() or set_current_groups(). groups_sort() must not be
+called on a shared ``struct group_list``, since it can permute
+elements as part of the sorting process even if the array is already
+sorted.
+
+When the credential set is ready, it should be committed to the
+current process by calling::
+
+	int commit_creds(struct cred *new);
+
+This will alter various aspects of the credentials and the process,
+giving the LSM the opportunity to do likewise, then it will use
+``rcu_assign_pointer()`` to actually commit the new credentials to
+``current->cred``, release ``current->cred_replace_mutex`` to allow
+``ptrace()`` to take place, and notify the scheduler and other
+components of the changes.
+
+This function is guaranteed to return 0, so that it can be tail-called
+at the end of such functions as ``sys_setresuid()``.
+
+Note that this function consumes the caller's reference to the new
+credentials. The caller should not call ``put_cred()`` on the new
+credentials afterwards.
+
+Furthermore, once this function has been called on a new set of
+credentials, those credentials may not be changed further.
+
+
+Should the security checks fail, or some other error occur, after
+``prepare_creds()`` has been called, the following function should be
+invoked::
+
+	void abort_creds(struct cred *new);
+
+This releases the lock on ``current->cred_replace_mutex`` that
+``prepare_creds()`` acquired, and then releases the new credentials.
+
+A typical credentials alteration function looks something like this::
+
+	int alter_suid(uid_t suid)
+	{
+		struct cred *new;
+		int ret;
+
+		new = prepare_creds();
+		if (!new)
+			return -ENOMEM;
+
+		new->suid = suid;
+		ret = security_alter_suid(new);
+		if (ret < 0) {
+			abort_creds(new);
+			return ret;
+		}
+
+		return commit_creds(new);
+	}
+
+
+Managing Credentials
+--------------------
+
+There are some functions to help manage credentials:
+
+ - ``void put_cred(const struct cred *cred);``
+
+   This releases a reference to the given set of credentials. If the
+   reference count reaches zero, the credentials will be scheduled for
+   destruction by the RCU system.
+
+ - ``const struct cred *get_cred(const struct cred *cred);``
+
+   This gets a reference on a live set of credentials, returning a
+   pointer to that set of credentials.
+
+ - ``struct cred *get_new_cred(struct cred *cred);``
+
+   This gets a reference on a set of credentials that is still under
+   construction and is thus still mutable, returning a pointer to that
+   set of credentials.
+
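+As an illustrative sketch only (not part of the original document), a
+common pattern is to pin the current credentials into a longer-lived
+context object and drop the reference when that object dies;
+``struct foo_ctx`` and its helpers here are hypothetical::
+
+	struct foo_ctx {
+		const struct cred *cred;
+	};
+
+	static void foo_ctx_init(struct foo_ctx *ctx)
+	{
+		/* Take a reference so the creds outlive the syscall. */
+		ctx->cred = get_current_cred();
+	}
+
+	static void foo_ctx_destroy(struct foo_ctx *ctx)
+	{
+		/* Drop the reference; destruction is deferred via RCU. */
+		put_cred(ctx->cred);
+	}
+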
+Open File Credentials
+=====================
+
+When a new file is opened, a reference is obtained on the opening
+task's credentials, and this is attached to the file struct as
+``f_cred``, in place of the original ``f_uid`` and ``f_gid``. Code
+that used to access ``file->f_uid`` and ``file->f_gid`` should now
+access ``file->f_cred->fsuid`` and ``file->f_cred->fsgid``.
+
+It is safe to access ``f_cred`` without the use of RCU or locking,
+because the pointer will not change over the lifetime of the file
+struct, and nor will the contents of the cred struct pointed to,
+barring the exceptions listed above (see the Task Credentials
+section).
+
+To avoid "confused deputy" privilege escalation attacks, access
+control checks during subsequent operations on an opened file should
+use these credentials instead of "current"'s credentials, as the file
+may have been passed to a more privileged process.
+
+Overriding the VFS's Use of Credentials
+=======================================
+
+In some cases the credentials used by the VFS need to be overridden;
+this can be done by calling into such as ``vfs_mkdir()`` with a
+different set of credentials. This is done in the following places:
+
+ * ``sys_faccessat()``.
+ * ``do_coredump()``.
+ * nfs4recover.c.
diff --git a/Documentation/translations/zh_CN/security/index.rst b/Documentation/translations/zh_CN/security/index.rst
index d8aacd1930d9..78d9d4b36dca 100644
--- a/Documentation/translations/zh_CN/security/index.rst
+++ b/Documentation/translations/zh_CN/security/index.rst
@@ -15,20 +15,20 @@
.. toctree::
:maxdepth: 1
+ credentials
+ snp-tdx-threat-model
lsm
sak
+ self-protection
siphash
+ tpm/index
digsig
landlock
TODOLIST:
-* credentials
-* snp-tdx-threat-model
* IMA-templates
* keys/index
* lsm-development
* SCTP
-* self-protection
-* tpm/index
* secrets/index
* ipe
diff --git a/Documentation/translations/zh_CN/security/keys/index.rst b/Documentation/translations/zh_CN/security/keys/index.rst
new file mode 100644
index 000000000000..7c28d003fb0a
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/keys/index.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/keys/index.rst
+
+:Translator:
+
+
+===========
+Kernel Keys
+===========
+
+.. toctree::
+ :maxdepth: 1
+
+
+TODOLIST:
+* core
+* ecryptfs
+* request-key
+* trusted-encrypted
diff --git a/Documentation/translations/zh_CN/security/secrets/index.rst b/Documentation/translations/zh_CN/security/secrets/index.rst
new file mode 100644
index 000000000000..5ea78713f10e
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/secrets/index.rst
@@ -0,0 +1,17 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/secrets/index.rst
+
+:Translator:
+
+=====================
+Secrets documentation
+=====================
+
+.. toctree::
+
+
+TODOLIST:
+
+* coco
diff --git a/Documentation/translations/zh_CN/security/self-protection.rst b/Documentation/translations/zh_CN/security/self-protection.rst
new file mode 100644
index 000000000000..3c8a68b1e1be
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/self-protection.rst
@@ -0,0 +1,271 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+:Original: Documentation/security/self-protection.rst
+
+:Translator:
+
+ Zhang Wei <zhangwei@cqsoftware.com.cn>
+
+======================
+Kernel Self-Protection
+======================
+
+Kernel self-protection is the design and implementation of systems and
+structures within the Linux kernel to protect against security flaws
+in the kernel itself. This covers a wide range of issues, including
+removing entire classes of bugs, blocking security flaw exploitation
+methods, and actively detecting attack attempts. Not all topics are
+explored in this document, but it should serve as a reasonable
+starting point for understanding kernel self-protection and answering
+common questions. (Patches welcome, of course!)
+
+In the worst case, we assume an unprivileged local attacker has
+arbitrary read and write access to the kernel's memory. In many cases,
+the bugs being exploited will not provide this level of access, but if
+we can defend against the worst case, the lesser-privileged attacks
+are handled as well. A higher bar, and one worth keeping in mind, is
+protecting the kernel against a privileged local attacker, since the
+root user has many more privileges. (Especially when they can load
+arbitrary kernel modules.)
+
+The goals of successful self-protection are: effective, on by default,
+requiring no opt-in by developers, having no performance impact, not
+impeding kernel debugging, and coming with tests. While it is hard to
+meet all of these goals, it is important to mention them explicitly,
+since these aspects need to be explored, dealt with, or accepted.
+
+========================
+Attack Surface Reduction
+========================
+
+The most fundamental defense against security exploits is to reduce
+the areas of the kernel that can be used to redirect execution. This
+includes limiting the APIs exposed to userspace, making in-kernel APIs
+harder to misuse, minimizing writable kernel memory areas, etc.
+
+Strict kernel memory permissions
+--------------------------------
+
+When all of kernel memory is writable, it becomes trivial for attacks
+to redirect execution flow. To reduce the availability of such attack
+targets, the kernel needs to protect its memory with a stricter set of
+permissions.
+
+Executable code and read-only data must not be writable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Any areas of the kernel with executable memory must not be writable;
+obviously this includes the kernel text itself. We must also consider
+other places: kernel modules, JIT memory, etc. (In some cases, these
+areas are temporarily made writable to support features like
+instruction alternatives, breakpoints, kprobes, and so on. If these
+features must exist in the kernel, they are implemented by making the
+memory temporarily writable during the update, and then restoring the
+original permissions.)
+
+In support of this, CONFIG_STRICT_KERNEL_RWX and
+CONFIG_STRICT_MODULE_RWX are designed to make sure that code is not
+writable, data is not executable, and read-only data is neither
+writable nor executable.
+
+Most architectures have these options on by default and not user
+selectable. For architectures like arm that wish to make these
+options selectable, the architecture Kconfig can select
+ARCH_OPTIONAL_KERNEL_RWX to enable a Kconfig prompt.
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT determines the default setting
+when ARCH_OPTIONAL_KERNEL_RWX is enabled.
+
+Function pointers and sensitive variables must not be writable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Vast areas of kernel memory contain function pointers that are looked
+up by the kernel and used to continue execution (e.g.
+descriptor/vector tables, file/network/etc. operation structures, and
+so on). The number of these variables must be reduced to an absolute
+minimum.
+
+Many such variables can be made read-only by declaring them "const",
+so that they live in the kernel's .rodata section instead of .data,
+gaining the protection of the kernel's strict memory permissions.
+
+Variables that are initialized once at __init time can be marked with
+the __ro_after_init attribute.
+
+What remains are variables that are updated rarely (e.g. the GDT).
+These need another mechanism (similar to the temporary exceptions made
+for kernel code mentioned above) that allows them to spend the rest of
+their lifetime read-only. (For example, when being updated, only the
+CPU thread performing the update would be given uninterruptible write
+access to the memory.)
+
+Segregation of kernel memory from userspace memory
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The kernel must never execute userspace memory, and it must also never
+access userspace memory without explicit expectation to do so. These
+rules can be enforced either by hardware-based restrictions (x86's
+SMEP/SMAP, ARM's PXN/PAN) or via emulation (ARM's Memory Domains). By
+blocking access to userspace memory in this way, an attacker cannot
+shift execution and data parsing into easily-controlled userspace
+memory, forcing attacks to operate entirely within the kernel.
+
+Reduced access to syscalls
+--------------------------
+
+One trivial way to eliminate many syscalls on 64-bit systems is to
+build without CONFIG_COMPAT. However, this is rarely feasible.
+
+The "seccomp" system provides an opt-in feature to userspace that
+offers a way to reduce the number of kernel entry points available to
+a running process. This limits the breadth of kernel code that can be
+reached, possibly reducing the availability of a given bug to an
+attack.
+
+An area of improvement would be creating viable ways to keep access to
+things like compat mode, user namespaces, BPF creation, and perf
+limited only to trusted processes. This would restrict the scope of
+kernel entry points to the more regular set normally available to
+unprivileged userspace.
+
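+As a hedged userspace illustration (not part of the original
+document), the simplest opt-in is seccomp's strict mode, which leaves
+only read(), write(), _exit() and sigreturn() available to the
+process::
+
+	#include <stdio.h>
+	#include <unistd.h>
+	#include <sys/prctl.h>
+	#include <linux/seccomp.h>
+
+	int main(void)
+	{
+		/* From here on, nearly any other syscall kills the task. */
+		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
+			perror("prctl");
+			return 1;
+		}
+		write(1, "sandboxed\n", 10);
+		_exit(0);
+	}
+
+Real sandboxes typically use SECCOMP_MODE_FILTER with a BPF program
+instead, allowing a chosen set of syscalls rather than this fixed one.
+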
+Restricting access to kernel modules
+------------------------------------
+
+The kernel should never allow an unprivileged user to load specific
+kernel modules, since that would provide a way to unexpectedly extend
+the available attack surface. (On-demand loading of modules via
+predefined subsystems, e.g. MODULE_ALIAS_*, is considered "expected"
+here, though even these deserve additional scrutiny.) For example,
+loading a filesystem module via an unprivileged socket API is
+nonsense: only the root or physically-local user should trigger
+filesystem module loading. (And even this can be up for debate in
+some scenarios.)
+
+To protect against even privileged attackers, systems may need to
+either disable module loading entirely (e.g. a monolithic kernel
+build or the modules_disabled sysctl), or use signed modules (e.g.
+CONFIG_MODULE_SIG_FORCE, or dm-crypt with LoadPin), to keep root from
+being able to load arbitrary kernel code via the module loader.
+
+Memory integrity
+----------------
+
+There are many memory structures in the kernel that are regularly
+abused to gain execution control during an attack. By far the most
+commonly understood is the stack buffer overflow, in which the return
+address stored on the stack is overwritten. Many other kinds of
+attack exist, and protections exist to defend against them.
+
+Stack buffer overflow
+---------------------
+
+The classic stack buffer overflow involves writing past the expected
+size of a variable allocated on the stack, ultimately writing a
+controlled value over the stack frame's stored return address. The
+most widely deployed defense is the stack protector
+(CONFIG_STACKPROTECTOR), which verifies a "stack canary" just before
+the function returns. Other defenses include things like shadow
+stacks.
+
+Stack depth overflow
+--------------------
+
+A less well understood attack uses a bug to trigger the kernel to
+consume stack memory through deep function calls or large stack
+allocations. With this attack it is possible to write data beyond the
+kernel's preallocated stack space into sensitive structures. Two
+important changes are needed for better protection against this:
+moving the sensitive thread_info structure elsewhere, and adding a
+faulting memory hole at the bottom of the stack to catch these
+overflows.
+
+Heap memory integrity
+---------------------
+
+The structures used to track heap free lists can be integrity-checked
+during allocation and freeing to make sure they aren't being used to
+manipulate other memory areas.
+
+Counter integrity
+-----------------
+
+Many places in the kernel use atomic counters to track object
+references or perform similar lifetime management. When these
+counters can be made to wrap (over or under), this typically exposes
+a use-after-free flaw. By trapping atomic wrapping, this class of bug
+vanishes.
+
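+A hedged sketch of the mechanism the kernel provides for this,
+``refcount_t``, which saturates instead of wrapping (``struct obj`` is
+a hypothetical example)::
+
+	#include <linux/refcount.h>
+	#include <linux/slab.h>
+
+	struct obj {
+		refcount_t refs;
+	};
+
+	static void obj_get(struct obj *o)
+	{
+		/* WARNs and saturates rather than wrapping to zero. */
+		refcount_inc(&o->refs);
+	}
+
+	static void obj_put(struct obj *o)
+	{
+		/* Frees exactly once, when the last reference drops. */
+		if (refcount_dec_and_test(&o->refs))
+			kfree(o);
+	}
+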
+Size calculation overflow detection
+-----------------------------------
+
+Similar to counter overflow, integer overflows (usually in size
+calculations) need to be detected at runtime to kill this class of
+bug, which traditionally leads to being able to write past the end of
+a kernel buffer.
+
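+A hedged sketch using the kernel's helpers from ``<linux/overflow.h>``
+(the allocation function itself is a hypothetical example)::
+
+	#include <linux/overflow.h>
+	#include <linux/slab.h>
+
+	static void *alloc_table(size_t nmemb, size_t elem_size)
+	{
+		size_t bytes;
+
+		/* Fails instead of silently wrapping the size. */
+		if (check_mul_overflow(nmemb, elem_size, &bytes))
+			return NULL;
+
+		return kmalloc(bytes, GFP_KERNEL);
+	}
+
+In practice, ``kmalloc_array()`` already wraps exactly this pattern.
+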
+Probabilistic defenses
+----------------------
+
+While many protections can be considered deterministic (e.g.
+read-only memory cannot be written to), some protections provide only
+a statistical defense, in that an attacker must gather enough
+information about a running system to overcome the defense. While not
+perfect, these do provide meaningful protection.
+
+Canaries, blinding, and other secrets
+-------------------------------------
+
+It should be noted that defenses like the stack canary discussed
+earlier are technically statistical defenses, since they rely on a
+secret value, and such values may become discoverable through an
+information exposure flaw.
+
+For things like JIT, where the executable contents may be partially
+under the control of userspace, similar secret values are needed for
+blinding.
+
+It is critical that the secret values used be independent (e.g. a
+different canary per stack) and high entropy (e.g. is the RNG
+actually working?) in order to maximize their success.
+
+Kernel Address Space Layout Randomization (KASLR)
+-------------------------------------------------
+
+Since the location of kernel memory is almost always instrumental in
+a successful attack, making that location non-deterministic raises
+the difficulty of an exploit. (Note that this in turn raises the
+value of information exposures, since leaked information can be used
+to discover target memory locations.)
+
+Text and module base
+--------------------
+
+By relocating the physical and virtual base address of the kernel at
+boot time (CONFIG_RANDOMIZE_BASE), attacks that need kernel code are
+frustrated. Additionally, offsetting the module loading base address
+means that even a system that loads the same set of modules in the
+same order on every boot will not share a base address with the rest
+of the kernel text.
+
+Stack base
+----------
+
+If the base address of the kernel stack differs between processes, or
+even between syscalls, targets on or beyond the stack become more
+difficult to locate.
+
+Dynamic memory base
+-------------------
+
+Much of the kernel's dynamic memory (e.g. kmalloc, vmalloc, etc.)
+ends up with a relatively deterministic layout due to the order of
+early-boot initializations. If the base addresses of these regions
+differ between boots, attackers cannot easily locate them and must
+rely on an information exposure specific to the region.
+
+Structure layout
+----------------
+
+By randomizing the layout of sensitive structures at build time,
+attacks must either be tuned to known kernel builds, or leak enough
+kernel memory to determine structure layouts, before they can be
+manipulated.
+
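+A hedged sketch of the opt-in mechanism for this
+(``CONFIG_RANDSTRUCT``; the structure shown is a hypothetical
+example)::
+
+	struct session {
+		void (*handler)(struct session *);
+		unsigned long token;
+		int state;
+	} __randomize_layout;	/* field order is shuffled per build */
+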
+Preventing information exposures
+--------------------------------
+
+Since the locations of sensitive structures are the primary target of
+attacks, it is important to defend against exposure of both kernel
+memory addresses and kernel memory contents (since they may contain
+kernel addresses or other sensitive things like canary values).
+
+Kernel addresses
+----------------
+
+Printing kernel addresses to userspace leaks sensitive information
+about the kernel memory layout. Care should be exercised when using
+any printk specifier that prints a raw address, currently %px and
+%p[ad] (and %p[sSb] in certain circumstances [*]). Files written
+using these format specifiers need to be readable only by privileged
+processes.
+
+In kernels up to and including 4.14, the %p format specifier printed
+the raw address. Starting with 4.15-rc1, addresses printed with %p
+are hashed before being printed.
+
+[*] If KALLSYMS is enabled and the symbol lookup fails, the raw
+address is printed; if KALLSYMS is not enabled, the raw address is
+printed directly.
+
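+A hedged sketch of the distinction (illustrative only)::
+
+	/* Hashed before printing; safe for general logs. */
+	pr_info("ptr=%p\n", ptr);
+
+	/* Raw address; output must be restricted to privileged readers. */
+	pr_debug("raw ptr=%px\n", ptr);
+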
+Unique identifiers
+------------------
+
+Kernel memory addresses must never be used as identifiers exposed to
+userspace. Instead, use an atomic counter, an IDR, or a similar
+unique identifier.
+
+Memory initialization
+---------------------
+
+Memory copied to userspace must always be fully initialized. If it is
+not explicitly initialized with memset(), compiler changes are needed
+to make sure that structure holes are cleared.
+
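+A hedged sketch (``struct stat_reply`` and the surrounding code are
+hypothetical)::
+
+	struct stat_reply r;
+
+	/* Zero first, so padding holes never leak old stack contents. */
+	memset(&r, 0, sizeof(r));
+	r.count = count;
+	r.flags = flags;
+
+	if (copy_to_user(ubuf, &r, sizeof(r)))
+		return -EFAULT;
+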
+Memory poisoning
+----------------
+
+When releasing memory, it is best to poison the contents, to prevent
+attackers from reusing the previous contents of that memory. For
+example, clear the stack on syscall return
+(CONFIG_GCC_PLUGIN_STACKLEAK), and wipe heap memory on free. This
+frustrates many uninitialized-variable attacks, stack content
+exposures, heap content exposures, and use-after-free attacks.
+
+Destination tracking
+--------------------
+
+To help kill classes of bugs that result in kernel addresses being
+written to userspace, the destination of writes needs to be tracked.
+If the buffer is destined for userspace (e.g. seq_file backed /proc
+files), sensitive values should automatically be censored.
diff --git a/Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst b/Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst
new file mode 100644
index 000000000000..b51eeaebab67
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst
@@ -0,0 +1,209 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/snp-tdx-threat-model.rst
+
+:Translator:
+
+ Yuxian Mao <maoyuxian@cqsoftware.com.cn>
+
+======================================================
+Confidential Computing in Linux for x86 virtualization
+======================================================
+
+.. contents:: :local:
+
+By: Elena Reshetova <elena.reshetova@intel.com> and Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+
+Motivation
+==========
+
+Kernel developers working on confidential computing for virtualized
+environments in x86 operate under a set of assumptions regarding the
+Linux kernel threat model that differ from the traditional view.
+Traditionally, the Linux threat model acknowledges attackers residing
+in userspace, as well as a limited set of external attackers that are
+able to interact with the kernel through various networking or limited
+hardware-specific exposed interfaces (such as USB or Thunderbolt). The
+goal of this document is to explain the additional attack vectors that
+arise in the confidential computing space and to discuss the proposed
+protection mechanisms for the Linux kernel.
+
+Overview and terminology
+========================
+
+Confidential Computing (CoCo) is a broad term covering a wide range of
+security technologies that aim to protect the confidentiality and
+integrity of data in use (as opposed to data at rest or data in
+transit). At their core, CoCo solutions provide a Trusted Execution
+Environment (TEE) in which secure data processing can be performed,
+and as a result they are typically further divided into subtypes based
+on the software that is intended to run in such a TEE. This document
+focuses on the class of CoCo technologies that target virtualized
+environments and allow running virtual machines (VMs) inside a TEE.
+From now on, this document will refer to this class of CoCo
+technologies as 'CoCo in virtualized environments (VE)'.
+
+CoCo in virtualized environments refers to a set of hardware and/or
+software technologies that allow for stronger security guarantees for
+software running inside a CoCo VM. Namely, confidential computing
+allows its users to confirm the trustworthiness of all software
+components, so as to include them in a reduced Trusted Computing Base
+(TCB), based on CoCo's ability to attest the state of these trusted
+components.
+
+While the concrete implementation details differ between technologies,
+all existing mechanisms aim to provide increased confidentiality and
+integrity for the VM's guest memory and execution state (vCPU
+registers), more tightly controlled guest interrupt injection, and
+some additional mechanisms to control guest-host page mappings. More
+details on the x86-specific solutions can be found in
+:doc:`Intel Trust Domain Extensions (TDX) </arch/x86/tdx>` and
+`AMD Memory Encryption <https://www.amd.com/system/files/techdocs/sev-snp-strengthening-vm-isolation-with-integrity-protection-and-more.pdf>`_.
+
+The basic CoCo guest layout includes the host, the guest, the
+interfaces that communicate between guest and host, a platform capable
+of supporting CoCo VMs, and a trusted intermediary that acts as a
+security manager between the guest VM and the underlying platform. The
+host-side virtual machine monitor (VMM) usually consists of a subset
+of traditional VMM features and is still in charge of the guest
+lifecycle, i.e. creating or destroying a CoCo VM, managing its access
+to system resources, etc. However, since it typically stays outside of
+the CoCo VM's TCB, its access is limited in order to preserve the
+security objectives.
+
+In the following diagram, the "<--->" lines represent bi-directional
+communication channels or interfaces between the CoCo security manager
+and the rest of the components, which include the guest, the host and
+the hardware (data flow)::
+
+ +-------------------+ +-----------------------+
+ | CoCo guest VM |<---->| |
+ +-------------------+ | |
+ | Interfaces | | CoCo security manager |
+ +-------------------+ | |
+ | Host VMM |<---->| |
+ +-------------------+ | |
+ | |
+ +--------------------+ | |
+ | CoCo platform |<--->| |
+ +--------------------+ +-----------------------+
+
+The specific details of the CoCo security manager differ significantly
+between technologies. For example, in some cases it may be implemented
+in hardware, while in others it may be a pure software implementation.
+
+Existing Linux kernel threat model
+==================================
+
+The overall components of the current Linux kernel threat model are::
+
+ +-----------------------+ +-------------------+
+ | |<---->| Userspace |
+ | | +-------------------+
+ | External attack | | Interfaces |
+ | vectors | +-------------------+
+ | |<---->| Linux Kernel |
+ | | +-------------------+
+ +-----------------------+ +-------------------+
+ | Bootloader/BIOS |
+ +-------------------+
+ +-------------------+
+ | HW platform |
+ +-------------------+
+
+There is also communication between the bootloader and the kernel
+during the boot process, but this diagram does not represent it
+explicitly. The "Interfaces" box represents the various interfaces
+that allow communication between the kernel and userspace. This
+includes system calls, kernel APIs, device drivers, etc.
+
+The existing Linux kernel threat model typically assumes execution on
+a trusted hardware platform, with all of the firmware and bootloaders
+included in the platform's TCB. The primary attacker resides in
+userspace, and all of the data coming from there is generally
+considered untrusted, unless userspace is privileged enough to
+perform trusted actions. In addition, external attackers are
+typically considered, including those with access to enabled external
+networks (e.g. Ethernet, wireless, Bluetooth), exposed hardware
+interfaces (e.g. USB, Thunderbolt), and the ability to modify the
+contents of disks offline.
+
+Regarding external attack vectors, it is worth noting that in most
+cases external attackers will first try to exploit userspace
+vulnerabilities, but an attacker may also target the kernel directly,
+particularly when the host has physical access. Examples of direct
+kernel attacks include the vulnerabilities CVE-2019-19524,
+CVE-2022-0435 and CVE-2020-24490.
+
+The Confidential Computing threat model and its security objectives
+===================================================================
+
+Confidential Computing adds a new type of attacker to the above list:
+a potentially misbehaving host (which may include parts of a
+traditional VMM, or all of it), which is usually placed outside of
+the CoCo VM's TCB due to its large software attack surface. It is
+important to note that this doesn't imply that the host or the VMM is
+intentionally malicious, but rather that there is security value in
+keeping the CoCo VM's TCB small. This new type of adversary may be
+viewed as a more powerful external attacker, since it is local to the
+same physical machine (in contrast to a remote network attacker) and
+has control over the guest kernel's communication with most of the
+hardware::
+
+ +------------------------+
+ | CoCo guest VM |
+ +-----------------------+ | +-------------------+ |
+ | |<--->| | Userspace | |
+ | | | +-------------------+ |
+ | External attack | | | Interfaces | |
+ | vectors | | +-------------------+ |
+ | |<--->| | Linux Kernel | |
+ | | | +-------------------+ |
+ +-----------------------+ | +-------------------+ |
+ | | Bootloader/BIOS | |
+ +-----------------------+ | +-------------------+ |
+ | |<--->+------------------------+
+ | | | Interfaces |
+ | | +------------------------+
+ | CoCo security |<--->| Host/Host-side VMM |
+ | manager | +------------------------+
+ | | +------------------------+
+ | |<--->| CoCo platform |
+ +-----------------------+ +------------------------+
+
+While the host traditionally has unlimited access to guest data and
+can leverage this access to attack the guest, CoCo systems mitigate
+such attacks by adding security features such as guest data
+confidentiality and integrity protection. This threat model assumes
+those security features are available and intact.
+
+The **Linux kernel CoCo VM security objectives** can be summarized as
+follows:
+
+1. Preserve the confidentiality and integrity of the CoCo guest's
+   private memory and registers.
+
+2. Prevent privilege escalation from the host into the CoCo guest
+   Linux kernel. While the host (and the host-side VMM) does require
+   a certain level of privilege to create, destroy, or pause the
+   guest, part of the goal of preventing privilege escalation is to
+   ensure that these operations do not provide attackers with a path
+   to gain access to the guest's kernel.
+
+The above security objectives result in two primary **Linux kernel
+CoCo VM assets**:
+
+1. Guest kernel execution context.
+2. Guest kernel private memory.
+
+The host retains full control over the CoCo guest's resources, and
+can deny access to them at any time. Examples of resources include
+CPU time, the memory the guest can consume, network bandwidth, etc.
+Because of this, host Denial of Service (DoS) attacks against CoCo
+guests are outside the scope of this threat model.
+
+The Linux CoCo VM attack surface is any interface exposed from a CoCo
+guest Linux kernel towards an untrusted host that is not covered by
+the CoCo technology's software/hardware protections. This includes
+all possible side channels, as well as transient execution side
+channels. Examples of explicit (non-side-channel) interfaces include
+accesses to port I/O, MMIO and DMA interfaces, access to PCI
+configuration space, VMM-specific hypercalls (towards the host-side
+VMM), access to shared memory pages, interrupts that the host is
+allowed to inject into the guest kernel, and
+CoCo-technology-specific hypercalls, if present. Additionally, in a
+CoCo system the host typically controls the process of creating the
+CoCo guest: it has methods to load the firmware and bootloader
+images, the kernel image, and the kernel command line into the guest.
+All of this data should be considered untrusted until its integrity
+and authenticity have been established via attestation.
+
+The table below shows a threat matrix for the CoCo guest Linux
+kernel, without discussing potential mitigation strategies. The
+matrix refers to the CoCo-specific versions of the guest, host and
+platform.
+
+.. list-table:: CoCo Linux guest kernel threat matrix
+   :widths: auto
+   :align: center
+   :header-rows: 1
+
+   * - Threat name
+     - Threat description
+
+   * - Guest malicious configuration
+     - A misbehaving host modifies one of the following guest
+       configuration items:
+
+       1. Guest firmware or bootloader
+
+       2. Guest kernel or module binaries
+
+       3. Guest command line parameters
+
+       This allows the host to break the integrity of the code
+       running inside the CoCo guest VM, violating the CoCo security
+       objectives.
+
+   * - CoCo guest data attacks
+     - A misbehaving host has full control over the data in transit
+       between the CoCo guest VM and the host-managed physical or
+       virtual devices. This allows any attack against the
+       confidentiality, integrity or freshness of such data.
+
+   * - Malformed runtime input
+     - A misbehaving host injects malformed input via any
+       communication interface used by the guest kernel code. If the
+       code does not handle this input correctly, this can result in
+       a host-to-guest kernel privilege escalation. This includes
+       traditional side-channel and/or transient execution attack
+       vectors.
+
+   * - Malicious runtime input
+     - A misbehaving host injects a specific input value via any
+       communication interface used by the guest kernel code. Unlike
+       the previous attack vector (malformed runtime input), this
+       input is not malformed; rather, its value is crafted to affect
+       the guest kernel's security. Examples of such inputs include
+       providing a malicious time to the guest, or feeding entropy to
+       the guest's random number generator. Additionally, the timing
+       of such events can itself become an attack vector if it causes
+       the guest kernel to take a particular action (for example,
+       processing a host-injected interrupt). Hardening the guest
+       against supplied host input is one way to resist such attacks.
diff --git a/Documentation/translations/zh_CN/security/tpm/index.rst b/Documentation/translations/zh_CN/security/tpm/index.rst
new file mode 100644
index 000000000000..707646590647
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/index.rst
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/index.rst
+
+:Translator:
+ Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+=====================================
+Trusted Platform Module documentation
+=====================================
+
+.. toctree::
+
+ tpm_event_log
+ tpm-security
+ tpm_tis
+ tpm_vtpm_proxy
+ xen-tpmfront
+ tpm_ftpm_tee
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm-security.rst b/Documentation/translations/zh_CN/security/tpm/tpm-security.rst
new file mode 100644
index 000000000000..26818d28c98f
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm-security.rst
@@ -0,0 +1,151 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm-security.rst
+
+:Translator:
+ Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+TPM Security
+============
+
+The object of this document is to describe how we make the kernel's
+use of the TPM reasonably robust in the face of external snooping and
+packet-alteration attacks (called passive and active interposer
+attacks in the literature). The current security document applies to
+TPM 2.0.
+
+Introduction
+------------
+
+The TPM is usually a discrete chip attached to a PC via some type of
+low-bandwidth bus. There are exceptions to this, such as the Intel
+PTT, which is a software TPM running in a software environment close
+to the CPU and subject to a different set of attacks, but right now
+most hardened security environments require a discrete hardware TPM,
+which is the use case discussed here.
+
+Snooping and alteration attacks against the bus
+-----------------------------------------------
+
+The current state of the art allows the `TPM Genie`_ hardware
+man-in-the-middle, a simple external device that can be installed on
+any system or laptop in a matter of seconds. Attacks were recently
+demonstrated successfully against the `Windows Bitlocker TPM`_
+system, and lately the same attacks have been mounted against
+`TPM based Linux disk encryption`_ schemes. The next phase of
+research seems to be hacking existing devices on the bus to act as
+the man-in-the-middle, so the requirement for the attacker to have a
+few seconds of physical access may disappear. However, the goal of
+this document is to protect TPM confidentiality and integrity as far
+as possible in this environment, and to try to ensure that even if we
+cannot prevent the attack, we can at least detect it.
+
+Unfortunately, most of the TPM functionality, including the hardware
+reset capability, can be controlled by an attacker who has access to
+the bus, so below we discuss some of the disruption possibilities.
+
+Measurement (PCR) integrity
+---------------------------
+
+Since the attacker can send their own commands to the TPM, they can
+send arbitrary PCR extends and thus disrupt the measurement system,
+which would be an annoying denial-of-service attack. However, there
+are two more serious classes of attack aimed at entities sealed to
+trust measurements.
+
+1. The attacker could intercept all PCR extends coming from the
+   system and completely substitute their own values, producing a
+   replay of an untampered state that would cause the PCR
+   measurements to attest to a trusted state and release secrets.
+
+2. The attacker could reset the TPM at some point, clearing the PCRs,
+   and then send down their own measurements, effectively overwriting
+   the boot-time measurements the TPM has already done.
+
+The first attack can be thwarted by always doing HMAC protection of
+the PCR extend and read commands, meaning the measurement values
+cannot be substituted without producing a detectable HMAC failure in
+the response. The second attack can only really be detected by
+relying on some sort of mechanism that changes whenever the TPM is
+reset.
+
+Secrets protection
+------------------
+
+Certain information passing in and out of the TPM, such as key
+sealing, private key import and random number generation, is
+vulnerable to interception, which HMAC protection alone cannot
+protect against. For these types of command we must also use request
+and response encryption to prevent the loss of secret information.
+
+Establishing initial trust with the TPM
+---------------------------------------
+
+In order to provide security from the beginning, an initial shared or
+asymmetric secret must be established, and it must be unknown to the
+attacker. The most obvious avenues for this are the endorsement and
+storage seeds, which can be used to derive asymmetric keys. However,
+using these keys is difficult because the only way to pass them into
+the kernel would be on the command line, which requires extensive
+support in the boot system, and there is no guarantee that either
+hierarchy is free of any form of authorization.
+
+The mechanism chosen for the Linux kernel is to derive the primary
+elliptic curve key from the null seed using the standard storage seed
+parameters. The null seed has two advantages: firstly, the hierarchy
+physically cannot have an authorization, so we are always able to use
+it; and secondly, the null seed changes across TPM resets, meaning
+that if we establish trust on the null seed at the start of day, all
+sessions salted with keys derived from it will fail if the TPM is
+reset and the seed changes.
+
+Obviously, using the null seed without any other shared secret, we
+have to create and read the initial public key, which could, of
+course, be intercepted and substituted by the bus interposer.
+However, the TPM has a key certification mechanism (using the EK
+endorsement certificate, creating an attestation identity key and
+certifying the null seed primary with that key), which is too complex
+to run inside the kernel, so we keep a copy of the null primary key
+name, exported via sysfs, so that userspace can run the full
+certification when it boots. The definitive guarantee here is that if
+the null primary key certifies correctly, all TPM transactions since
+the start of day have been secure; if the certification fails, there
+is a man-in-the-middle on the system (and any secrets used during
+boot may have been leaked).
+
+Stacking trust
+--------------
+
+In the current null primary scenario, the TPM must be completely
+cleared before being handed on to the next consumer. However, the
+kernel hands to userspace the name of the derived null seed key,
+which userspace can verify by certification. This chain of name
+handoffs can therefore also be used between the various boot
+components (via an unspecified mechanism). For instance, grub could
+use the null seed scheme for security and hand the name off to the
+kernel. The kernel then derives the key and the name, and knows that
+if they differ, tampering has occurred. Thus, arbitrary boot
+components (from UEFI to grub to the kernel) can be chained together
+via the name handoff, as long as each successive component knows how
+to collect the name and verify it against its derived key.
+
+Session properties
+------------------
+
+All TPM commands the kernel uses allow sessions. HMAC sessions may be
+used to check the integrity of requests and responses, and the
+decrypt and encrypt flags may be used to shield parameters and
+responses. The HMAC and encryption keys are usually derived from the
+shared authorization secret, but for many kernel operations that
+secret is well known (and usually empty). Thus the kernel uses the
+null primary key as the salt key for each HMAC session, which
+provides a cryptographic input into the session key derivation. The
+kernel therefore only needs to create the null primary key once (as a
+volatile TPM handle) and keeps it in the tpm_chip for every in-kernel
+use of the TPM. Currently, because of the lack of de-gapping in the
+in-kernel resource manager, a session is created and destroyed for
+each operation, but in the future a single session may be reused for
+the in-kernel HMAC, encryption and decryption sessions.
+
+Protection types
+----------------
+
+For every in-kernel operation we use the null-primary-salted HMAC to
+protect integrity. Additionally, we use parameter encryption to
+protect key sealing, and parameter decryption to protect key
+unsealing and random number generation.
+
+Null primary key certification in userspace
+===========================================
+
+Every TPM ships with several X.509 certificates, usually for the
+primary endorsement key. This document assumes that the elliptic
+curve version of the certificate exists at 01C00002, but the same
+works equally well with the RSA certificate (at 01C00001).
+
+The first step in certification is to create the primary key using
+the template from the `TCG EK Credential Profile`_, which allows the
+generated primary key to be compared against the one in the
+certificate (the public keys must match). Note that generating the EK
+primary requires the EK hierarchy password, but a pre-generated
+version of the EC primary should exist at 81010002, and a
+TPM2_ReadPublic() can be performed on it without any key
+authorization. Next, the certificate itself must be verified to chain
+back to the manufacturer root (which should be published on the
+manufacturer's website). Once this step is done, an attestation key
+(AK) is generated inside the TPM, and a secret is encrypted using
+TPM2_MakeCredential with the name of the AK and the EK public key.
+The TPM then runs TPM2_ActivateCredential, which recovers the secret
+only if the binding between the TPM, the EK and the AK holds. The
+generated AK may now be used to certify the null primary key exported
+by the kernel. Since the TPM2_MakeCredential/ActivateCredential
+operations are somewhat complicated, a simplified process involving
+an externally generated private key is described below.
+
+This process is a simplified abbreviation of the usual
+privacy-CA-based attestation process, under the assumption that the
+attestation is done by the TPM owner, who has access only to the
+owner hierarchy. The owner creates an external public/private key
+pair (assume elliptic curve in this case) and wraps the private key
+for import using an inner wrapping process, parented to the
+EC-derived storage primary. The TPM2_Import() is done using a
+parameter-decryption HMAC session salted to the EK primary (which
+likewise requires no EK key authorization), meaning that the inner
+wrapping key is an encrypted parameter, so the TPM cannot perform the
+import unless it possesses the certified EK. If the command succeeds
+and the HMAC verifies on return, we know we have a copy of the
+private key loaded only into the certified TPM. The key is now loaded
+into the TPM and the storage primary is flushed (to free up space for
+the null key generation).
+
+The null EC primary is now generated using the storage profile from
+the `TCG TPM v2.0 Provisioning Guidance`_; the name of this key (the
+hash of the public area) is computed and compared to the null seed
+name presented by the kernel in /sys/class/tpm/tpm0/null_name. If the
+names do not match, the TPM is considered compromised. If the names
+match, the user performs a TPM2_Certify() using the null primary as
+the object handle and the loaded private key as the sign handle,
+providing randomized qualifying data. The signature of the returned
+certifyInfo is verified against the public part of the loaded key,
+and the qualifying data is checked to prevent replay. If all of these
+tests pass, the user is assured that TPM integrity and privacy were
+preserved across the entire boot sequence of this kernel.
+
+.. _TPM Genie: https://www.nccgroup.trust/globalassets/about-us/us/documents/tpm-genie.pdf
+.. _Windows Bitlocker TPM: https://dolosgroup.io/blog/2021/7/9/from-stolen-laptop-to-inside-the-company-network
+.. _基于TPMçš„Linuxç£ç›˜åŠ å¯†: https://www.secura.com/blog/tpm-sniffing-attacks-against-non-bitlocker-targets
+.. _TCG EK Credential Profile: https://trustedcomputinggroup.org/resource/tcg-ek-credential-profile-for-tpm-family-2-0/
+.. _TCG TPM v2.0 Provisioning Guidance: https://trustedcomputinggroup.org/resource/tcg-tpm-v2-0-provisioning-guidance/
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst b/Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst
new file mode 100644
index 000000000000..9c173291ac3e
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm_event_log.rst
+
+:Translator:
+ Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+=============
+TPM Event Log
+=============
+
+This document briefly describes what a TPM event log is, and how it
+is handed over from the preboot firmware to the operating system.
+
+Introduction
+============
+
+The preboot firmware maintains an event log that gets new entries
+every time something gets hashed by it into any of the PCR registers.
+The events are segregated by their type and contain the value of the
+hashed PCR register. Typically, the preboot firmware hashes the
+components to which execution is about to be handed over, or actions
+relevant to the boot process.
+
+The main application for this is remote attestation, and the reason
+why it is useful is nicely put in the first section of [1]:
+
+Attestation is used to provide information about the platform's state
+to a challenger. However, PCR contents are difficult to interpret;
+therefore, attestation is typically more useful when the PCR contents
+are accompanied by a measurement log. While not trusted on their own,
+the measurement log contains a richer set of information than the PCR
+contents do. The PCR contents are used to validate the measurement
+log.
+
+UEFI event log
+==============
+
+The UEFI-provided event log has a few somewhat weird quirks.
+
+Before calling ExitBootServices(), the Linux EFI stub copies the
+event log into a custom configuration table defined by the stub
+itself. Unfortunately, the events generated by ExitBootServices()
+don't end up in that table.
+
+The firmware provides a so-called final events configuration table to
+sort out this issue. Events get mirrored to this table after the
+first time EFI_TCG2_PROTOCOL.GetEventLog() is called.
+
+This introduces another problem: nothing guarantees that it is not
+called before the Linux EFI stub gets to run. Therefore, while the
+stub is running, it needs to calculate the size of the final events
+table and save it into the custom configuration table, so that the
+TPM driver can later skip those events when concatenating the two
+halves of the event log, from the custom configuration table and the
+final events table.
+
+References
+==========
+
+- [1] https://trustedcomputinggroup.org/resource/pc-client-specific-platform-firmware-profile-specification/
+- [2] The final concatenation is done in drivers/char/tpm/eventlog/efi.c
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst b/Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst
new file mode 100644
index 000000000000..5901eee32563
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst
@@ -0,0 +1,31 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm_ftpm_tee.rst
+
+:Translator:
+ Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+===================
+Firmware TPM Driver
+===================
+
+This document describes the firmware Trusted Platform Module (fTPM)
+device driver.
+
+Introduction
+============
+
+The driver is an adapter for a firmware TPM implemented in ARM's
+TrustZone environment. The driver allows programs to interact with
+the TPM in the same way they would interact with a hardware TPM.
+
+Design
+======
+
+The driver acts as a thin layer that passes commands to the
+firmware-implemented TPM and receives its responses. The driver
+itself doesn't contain much logic and is used more like a pipe
+between the firmware and kernel/userspace.
+
+The firmware itself is based on the following paper:
+https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/ftpm1.pdf
+
+When the driver is loaded, it exposes ``/dev/tpmX`` character devices
+to userspace, which allow userspace to communicate with the firmware
+TPM through the device.
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_tis.rst b/Documentation/translations/zh_CN/security/tpm/tpm_tis.rst
new file mode 100644
index 000000000000..0fb009f93e10
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm_tis.rst
@@ -0,0 +1,43 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm_tis.rst
+
+:Translator:
+ Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+TPM FIFO interface driver
+=========================
+
+The TCG PTP specification defines two interface types: FIFO and CRB.
+The former is based on sequenced read and write operations, and the
+latter is based on a buffer containing the full command or response.
+
+The FIFO (First-In-First-Out) interface is used by the drivers that
+depend on tpm_tis_core. Originally Linux had only one driver, called
+tpm_tis, which covered the memory-mapped (i.e. MMIO) interface, but
+it was later extended to support the other physical interfaces
+supported by the TCG standard.
+
+For historical reasons, the original MMIO driver is called tpm_tis,
+and the framework for FIFO drivers is named tpm_tis_core. The postfix
+"tis" in tpm_tis comes from the TPM Interface Specification, which is
+the hardware interface specification for TPM 1.x chips.
+
+Communication is based on a 20 KiB buffer shared by the TPM chip
+through a hardware bus or memory map, depending on the physical
+wiring. The buffer is further split into five equal-size 4 KiB
+buffers, which provide equivalent sets of registers for communication
+between the CPU and the TPM. These communication endpoints are called
+localities in the TCG terminology.
+
+When the kernel wants to send commands to the TPM chip, it first
+reserves locality 0 by setting the requestUse bit in the TPM_ACCESS
+register. The bit is cleared by the chip when the access is granted.
+Once the communication is finished, the kernel writes the
+TPM_ACCESS.activeLocality bit. This tells the chip that the locality
+has been relinquished.
+
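+An illustrative sketch only (the register offsets and bit values here
+follow the TCG PTP specification's FIFO register layout, and the MMIO
+accessors assume an ioremap()ed base)::
+
+	#define TPM_ACCESS(l)			(0x0000 | ((l) << 12))
+	#define TPM_ACCESS_REQUEST_USE		0x02
+	#define TPM_ACCESS_ACTIVE_LOCALITY	0x20
+
+	/* Request locality 0; the chip clears requestUse when granted. */
+	iowrite8(TPM_ACCESS_REQUEST_USE, base + TPM_ACCESS(0));
+
+	/* ... FIFO command/response exchange happens here ... */
+
+	/* Relinquish the locality when the exchange is complete. */
+	iowrite8(TPM_ACCESS_ACTIVE_LOCALITY, base + TPM_ACCESS(0));
+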
+Pending localities are served by the chip one at a time, in
+descending priority order:
+
+- Locality 0 has the lowest priority.
+- Locality 5 has the highest priority.
+
+Further information on the purpose and meaning of localities can be
+found in section 3.2 of the TCG PC Client Platform TPM Profile
+Specification.
+
+References
+==========
+
+TCG PC Client Platform TPM Profile (PTP) Specification
+https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst b/Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst
new file mode 100644
index 000000000000..bc92cfb684c3
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst
@@ -0,0 +1,51 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm_vtpm_proxy.rst
+
+:Translator:
+ Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+=============================================
+Virtual TPM Proxy Driver for Linux Containers
+=============================================
+
+| Authors:
+| Stefan Berger <stefanb@linux.vnet.ibm.com>
+
+This document describes the virtual Trusted Platform Module (vTPM)
+proxy device driver for Linux containers.
+
+Introduction
+============
+
+The goal of this work is to provide TPM functionality to each Linux
+container. This allows programs to interact with a TPM in a container
+in the same way they interact with a TPM on the physical system. Each
+container gets its own unique, emulated software TPM.
+
+Design
+======
+
+To make an emulated software TPM available to each container, the
+container management stack needs to create a device pair consisting
+of a client TPM character device ``/dev/tpmX`` (with X=0,1,2...) and
+a 'server side' file descriptor. The former is moved into the
+container by creating a character device with the appropriate major
+and minor numbers, while the file descriptor is passed to the TPM
+emulator. Software inside the container can then send TPM commands
+using the character device, and the emulator will receive the
+commands via the file descriptor and use it to send back its
+responses.
+
+To support this, the virtual TPM proxy driver provides a device
+``/dev/vtpmx`` that is used to create device pairs via an ioctl. The
+ioctl takes as input flags for configuring the device, for example
+flags indicating whether the TPM emulator supports TPM 1.2 or TPM 2
+functionality. The result of the ioctl is the file descriptor for
+the 'server side', as well as the major and minor numbers of the
+character device that was created. The number of the TPM character
+device is also returned; if, for example, ``/dev/tpm10`` was created,
+the number (``dev_num``) 10 is returned.
+
+Once the device has been created, the driver will immediately try to
+talk to the TPM. All commands from the driver can be read from the
+file descriptor returned by the ioctl. The commands should be
+answered immediately.
+
+UAPI
+====
+
+The API is in the following kernel code:
+
+include/uapi/linux/vtpm_proxy.h
+drivers/char/tpm/tpm_vtpm_proxy.c
+
+Function: vtpmx_ioc_new_dev
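+
+As a hedged userspace sketch of creating a device pair (error
+handling trimmed; the field names follow
+include/uapi/linux/vtpm_proxy.h)::
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <sys/ioctl.h>
+	#include <linux/vtpm_proxy.h>
+
+	int create_vtpm_pair(void)
+	{
+		struct vtpm_proxy_new_dev dev;
+		int vtpmx = open("/dev/vtpmx", O_RDWR);
+
+		memset(&dev, 0, sizeof(dev));
+		dev.flags = VTPM_PROXY_FLAG_TPM2;	/* emulator speaks TPM 2 */
+
+		if (ioctl(vtpmx, VTPM_PROXY_IOC_NEW_DEV, &dev) < 0)
+			return -1;
+
+		/*
+		 * dev.fd is the 'server side'; /dev/tpm<dev.tpm_num> is the
+		 * client character device (major/minor in dev.major/dev.minor).
+		 */
+		return dev.fd;
+	}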
diff --git a/Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst b/Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst
new file mode 100644
index 000000000000..fa085d98a99b
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst
@@ -0,0 +1,114 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/xen-tpmfront.rst
+
+:Translator:
+ Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+=============================
+Virtual TPM interface for Xen
+=============================
+
+Authors: Matthew Fioravante (JHUAPL), Daniel De Graaf (NSA)
+
+This document describes the virtual Trusted Platform Module (vTPM)
+subsystem for Xen. The reader is assumed to be familiar with building
+and installing Xen and Linux, and to have a basic understanding of
+the TPM and vTPM concepts.
+
+Introduction
+------------
+
+The goal of this work is to provide TPM functionality to virtual
+guest operating systems (called DomU in Xen). This allows programs to
+interact with a TPM in a virtual system the same way they interact
+with a TPM on the physical system. Each guest OS gets its own unique,
+emulated software TPM. However, all of the vTPM's secrets (keys,
+NVRAM, etc.) are managed by a vTPM Manager domain, which seals those
+secrets to the physical TPM. If the process of creating each of these
+domains (the manager domain, the vTPM domain, and the guest domain)
+is trusted, the vTPM subsystem extends the chain of trust rooted in
+the hardware TPM to the virtual machines in Xen. Each major component
+of the vTPM is implemented as a separate domain, providing secure
+isolation guaranteed by the hypervisor.
+
+This mini-os vTPM subsystem was built on top of the previous vTPM
+work done by IBM and Intel corporation.
+
+
+Design overview
+---------------
+
+The architecture of the vTPM is described below::
+
+ +------------------+
+ | Linux DomU | ...
+ | | ^ |
+ | v | |
+ | xen-tpmfront |
+ +------------------+
+ | ^
+ v |
+ +------------------+
+ | mini-os/tpmback |
+ | | ^ |
+ | v | |
+ | vtpm-stubdom | ...
+ | | ^ |
+ | v | |
+ | mini-os/tpmfront |
+ +------------------+
+ | ^
+ v |
+ +------------------+
+ | mini-os/tpmback |
+ | | ^ |
+ | v | |
+ | vtpmmgr-stubdom |
+ | | ^ |
+ | v | |
+ | mini-os/tpm_tis |
+ +------------------+
+ | ^
+ v |
+ +------------------+
+ | Hardware TPM |
+ +------------------+
+
+* Linux DomU:
+ 希望使用vTPM的基于Linux的客户机。å¯èƒ½æœ‰å¤šä¸ªè¿™æ ·çš„实例。
+
+* xen-tpmfront.ko:
+ Linux内核虚拟TPMå‰ç«¯é©±åŠ¨ç¨‹åºã€‚该驱动程åºä¸ºåŸºäºŽLinuxçš„DomUæä¾›
+ vTPM访问。
+
+* mini-os/tpmback:
+ Mini-os TPMåŽç«¯é©±åŠ¨ç¨‹åºã€‚Linuxå‰ç«¯é©±åŠ¨ç¨‹åºé€šè¿‡è¯¥åŽç«¯é©±åŠ¨ç¨‹åºè¿ž
+ 接,以便在Linux DomU和其vTPM之间进行通信。该驱动程åºè¿˜è¢«
+ vtpmmgr-stubdom用于与vtpm-stubdom通信。
+
+* vtpm-stubdom:
+ 一个实现vTPMçš„mini-os存根域。æ¯ä¸ªæ­£åœ¨è¿è¡Œçš„vtpm-stubdom实例与系统
+ 上的逻辑vTPM之间有一一对应的关系。vTPMå¹³å°é…置寄存器(PCRs)通常都
+ åˆå§‹åŒ–为零。
+
+* mini-os/tpmfront:
+ Mini-os TPMå‰ç«¯é©±åŠ¨ç¨‹åºã€‚vTPM mini-os域vtpm-stubdom使用该驱动程åº
+ 与vtpmmgr-stubdom通信。此驱动程åºè¿˜ç”¨äºŽä¸ŽvTPM域通信的mini-os域,例
+ 如 pv-grub。
+
+* vtpmmgr-stubdom:
+ 一个实现vTPM管ç†å™¨çš„mini-os域。系统中åªæœ‰ä¸€ä¸ªvTPM管ç†å™¨ï¼Œå¹¶ä¸”在整个
+ 机器生命周期内应一直è¿è¡Œã€‚此域调节对系统中物ç†TPM的访问,并确ä¿æ¯ä¸ª
+ vTPMçš„æŒä¹…状æ€ã€‚
+
+* mini-os/tpm_tis:
+ Mini-osTPM1.2版本TPM 接å£è§„范(TIS)驱动程åºã€‚该驱动程åºç”±vtpmmgr-stubdom
+ 用于直接与硬件TPM通信。通信通过将硬件内存页映射到vtpmmgr-stubdomæ¥å®žçŽ°ã€‚
+
+* 硬件TPM:
+ 固定在主板上的物理 TPM。
+
+与Xen的集成
+-----------
+
+vTPM驱动程序的支持已在Xen 4.3中通过libxl工具堆栈添加。有关设置vTPM和vTPM
+管理器存根域的详细信息,请参见Xen文档(docs/misc/vtpm.txt)。一旦存根域
+运行,vTPM设备就会像磁盘或网络设备一样,在域的配置文件中进行设置。
+
+为了使用诸如IMA(完整性测量架构)等需要在initrd之前加载TPM的功能,必须将
+xen-tpmfront驱动程序编译到内核中。如果不使用这些功能,驱动程序可以作为
+模块编译,并像往常一样加载。
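
As an illustration of that last point, a domain configuration fragment in xl
syntax might look as follows. This is a sketch only: the names, paths, and UUID
are placeholders, and the authoritative syntax is the Xen documentation
(docs/misc/vtpm.txt) referenced above::

    # vtpm-stubdom instance (vtpm0.cfg), backed by the vTPM manager domain
    name   = "vtpm0"
    kernel = "/usr/lib/xen/boot/vtpm-stubdom.gz"
    memory = 8
    disk   = [ "file:/var/lib/xen/vtpm0.img,hda,w" ]
    vtpm   = [ "backend=vtpmmgr,uuid=00000000-0000-0000-0000-000000000001" ]

    # Linux guest (domu.cfg): its vTPM is attached like a disk or nic
    vtpm   = [ "backend=vtpm0" ]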
diff --git a/Documentation/translations/zh_TW/admin-guide/README.rst b/Documentation/translations/zh_TW/admin-guide/README.rst
index a6e34c200ea3..0b038074d9d1 100644
--- a/Documentation/translations/zh_TW/admin-guide/README.rst
+++ b/Documentation/translations/zh_TW/admin-guide/README.rst
@@ -149,7 +149,7 @@ Linux內核6.x版本 <http://kernel.org/>
"make xconfig" 基於Qtçš„é…置工具。
- "make gconfig" 基於GTK+çš„é…置工具。
+ "make gconfig" 基於GTKçš„é…置工具。
"make oldconfig" 基於ç¾æœ‰çš„ ./.config 文件é¸æ“‡æ‰€æœ‰é¸é …,並詢å•
æ–°é…ç½®é¸é …。
diff --git a/Documentation/translations/zh_TW/process/submit-checklist.rst b/Documentation/translations/zh_TW/process/submit-checklist.rst
index 0ecb187753e4..a0cb91a6945f 100644
--- a/Documentation/translations/zh_TW/process/submit-checklist.rst
+++ b/Documentation/translations/zh_TW/process/submit-checklist.rst
@@ -85,8 +85,8 @@ Linux內核補丁提交檢查單
 17) 所有新的模塊參數都記錄在 ``MODULE_PARM_DESC()``
 18) 所有新的用戶空間接口都記錄在 ``Documentation/ABI/`` 中。有關詳細信息,
- 請參閱 ``Documentation/ABI/README`` 。更改用戶空間接口的補丁應該抄送
- linux-api@vger.kernel.org。
+ 請參閱 Documentation/admin-guide/abi.rst (或 ``Documentation/ABI/README``)。
+ 更改用戶空間接口的補丁應該抄送 linux-api@vger.kernel.org\ 。
 19) 已通過至少注入slab和page分配失敗進行檢查。請參閱 ``Documentation/fault-injection/`` 。
 如果新代碼是實質性的,那麼添加子系統特定的故障注入可能是合適的。
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index bf555c2270f5..1998dc146c56 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -1050,7 +1050,7 @@ Its attributes are:
midi1_num_groups The number of groups for MIDI 1.0 (0-16)
ui_hint UI-hint of this FB
0: unknown, 1: receiver, 2: sender, 3: both
- midi_ci_verison Supported MIDI-CI version number (8 bit)
+ midi_ci_version Supported MIDI-CI version number (8 bit)
is_midi1 Legacy MIDI 1.0 device (0-2)
0: MIDI 2.0 device,
1: MIDI 1.0 without restriction, or
diff --git a/Documentation/userspace-api/accelerators/ocxl.rst b/Documentation/userspace-api/accelerators/ocxl.rst
index db7570d5e50d..4e213af70237 100644
--- a/Documentation/userspace-api/accelerators/ocxl.rst
+++ b/Documentation/userspace-api/accelerators/ocxl.rst
@@ -3,8 +3,11 @@ OpenCAPI (Open Coherent Accelerator Processor Interface)
========================================================
OpenCAPI is an interface between processors and accelerators. It aims
-at being low-latency and high-bandwidth. The specification is
-developed by the `OpenCAPI Consortium <http://opencapi.org/>`_.
+at being low-latency and high-bandwidth.
+
+The specification was developed by the OpenCAPI Consortium, and is now
+available from the `Compute Express Link Consortium
+<https://computeexpresslink.org/resource/opencapi-specification-archive/>`_.
It allows an accelerator (which could be an FPGA, ASICs, ...) to access
the host memory coherently, using virtual addresses. An OpenCAPI
diff --git a/Documentation/userspace-api/dma-buf-heaps.rst b/Documentation/userspace-api/dma-buf-heaps.rst
new file mode 100644
index 000000000000..535f49047ce6
--- /dev/null
+++ b/Documentation/userspace-api/dma-buf-heaps.rst
@@ -0,0 +1,25 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Allocating dma-buf using heaps
+==============================
+
+Dma-buf Heaps are a way for userspace to allocate dma-buf objects. They are
+typically used to allocate buffers from a specific allocation pool, or to share
+buffers across frameworks.
+
+Heaps
+=====
+
+A heap represents a specific allocator. The Linux kernel currently supports the
+following heaps:
+
+ - The ``system`` heap allocates virtually contiguous, cacheable buffers.
+
+ - The ``cma`` heap allocates physically contiguous, cacheable
+ buffers. Only present if a CMA region is present. Such a region is
+ usually created either through the kernel command line via the
+ `cma` parameter, a memory region Device-Tree node with the
+ `linux,cma-default` property set, or through the `CMA_SIZE_MBYTES` or
+ `CMA_SIZE_PERCENTAGE` Kconfig options. Depending on the platform, it
+ might be called ``reserved``, ``linux,cma``, or ``default-pool``.
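
To make the allocation flow concrete, here is a minimal user-space sketch. It
assumes the ``DMA_HEAP_IOCTL_ALLOC`` ioctl and
``struct dma_heap_allocation_data`` from ``include/uapi/linux/dma-heap.h``, and
a kernel exposing ``/dev/dma_heap/system``; error handling is trimmed::

    /* Minimal sketch: allocate a 4 MiB dma-buf from the system heap. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    int main(void)
    {
            struct dma_heap_allocation_data alloc;
            int heap = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);

            if (heap < 0)
                    return 1;

            memset(&alloc, 0, sizeof(alloc));
            alloc.len = 4 * 1024 * 1024;            /* buffer size in bytes */
            alloc.fd_flags = O_RDWR | O_CLOEXEC;    /* flags for the new dma-buf fd */

            if (ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0)
                    return 1;

            /*
             * alloc.fd is a dma-buf that can be mmap()ed here or shared
             * with another device or framework.
             */
            printf("dma-buf fd %u, %llu bytes\n", alloc.fd,
                   (unsigned long long)alloc.len);

            close(alloc.fd);
            close(heap);
            return 0;
    }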
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index b1395d94b3fd..9cbe4390c872 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -44,6 +44,7 @@ Devices and I/O
:maxdepth: 1
accelerators/ocxl
+ dma-buf-heaps
dma-buf-alloc-exchange
gpio/index
iommufd
diff --git a/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst b/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst
index 34d6a0a1f4d3..70b5966aaff8 100644
--- a/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst
+++ b/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst
@@ -6,7 +6,7 @@
Remote Controller's sysfs nodes
*******************************
-As defined at ``Documentation/ABI/testing/sysfs-class-rc``, those are
+As defined at Documentation/ABI/testing/sysfs-class-rc, those are
the sysfs nodes that control the Remote Controllers:
diff --git a/MAINTAINERS b/MAINTAINERS
index 00e94bec401e..c0d0ecde9a7e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4014,10 +4014,10 @@ F: include/vdso/bits.h
F: lib/bitmap-str.c
F: lib/bitmap.c
F: lib/cpumask.c
-F: lib/cpumask_kunit.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
F: lib/test_bitmap.c
+F: lib/tests/cpumask_kunit.c
F: tools/include/linux/bitfield.h
F: tools/include/linux/bitmap.h
F: tools/include/linux/bits.h
@@ -5402,6 +5402,8 @@ F: Documentation/dev-tools/checkpatch.rst
CHINESE DOCUMENTATION
M: Alex Shi <alexs@kernel.org>
M: Yanteng Si <siyanteng@loongson.cn>
+R: Dongliang Mu <dzm91@hust.edu.cn>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/alexs/linux.git
S: Maintained
F: Documentation/translations/zh_CN/
@@ -6918,6 +6920,7 @@ L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: Documentation/userspace-api/dma-buf-heaps.rst
F: drivers/dma-buf/dma-heap.c
F: drivers/dma-buf/heaps/*
F: include/linux/dma-heap.h
@@ -8903,6 +8906,9 @@ F: include/linux/fs.h
F: include/linux/fs_types.h
F: include/uapi/linux/fs.h
F: include/uapi/linux/openat2.h
+F: Documentation/driver-api/early-userspace/buffer-format.rst
+F: init/do_mounts*
+F: init/*initramfs*
FILESYSTEMS [EXPORTFS]
M: Chuck Lever <chuck.lever@oracle.com>
@@ -9072,9 +9078,9 @@ L: linux-hardening@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: include/linux/fortify-string.h
-F: lib/fortify_kunit.c
-F: lib/memcpy_kunit.c
F: lib/test_fortify/*
+F: lib/tests/fortify_kunit.c
+F: lib/tests/memcpy_kunit.c
K: \bunsafe_memcpy\b
K: \b__NO_FORTIFY\b
@@ -9759,9 +9765,9 @@ F: include/linux/string.h
F: include/linux/string_choices.h
F: include/linux/string_helpers.h
F: lib/string.c
-F: lib/string_kunit.c
F: lib/string_helpers.c
-F: lib/string_helpers_kunit.c
+F: lib/tests/string_helpers_kunit.c
+F: lib/tests/string_kunit.c
F: scripts/coccinelle/api/string_choices.cocci
GENERIC UIO DRIVER FOR PCI DEVICES
@@ -12588,8 +12594,9 @@ F: Documentation/ABI/testing/sysfs-kernel-warn_count
F: arch/*/configs/hardening.config
F: include/linux/overflow.h
F: include/linux/randomize_kstack.h
+F: include/linux/ucopysize.h
F: kernel/configs/hardening.config
-F: lib/usercopy_kunit.c
+F: lib/tests/usercopy_kunit.c
F: mm/usercopy.c
F: security/Kconfig.hardening
K: \b(add|choose)_random_kstack_offset\b
@@ -12987,7 +12994,7 @@ F: Documentation/trace/kprobes.rst
F: include/asm-generic/kprobes.h
F: include/linux/kprobes.h
F: kernel/kprobes.c
-F: lib/test_kprobes.c
+F: lib/tests/test_kprobes.c
F: samples/kprobes
KS0108 LCD CONTROLLER DRIVER
@@ -13317,7 +13324,7 @@ M: Mark Brown <broonie@kernel.org>
R: Matti Vaittinen <mazziesaccount@gmail.com>
F: include/linux/linear_range.h
F: lib/linear_ranges.c
-F: lib/test_linear_ranges.c
+F: lib/tests/test_linear_ranges.c
LINUX FOR POWER MACINTOSH
L: linuxppc-dev@lists.ozlabs.org
@@ -13445,7 +13452,7 @@ M: David Gow <davidgow@google.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained
-F: lib/list-test.c
+F: lib/tests/list-test.c
LITEX PLATFORM
M: Karol Gugala <kgugala@antmicro.com>
@@ -21192,8 +21199,7 @@ S: Maintained
W: https://github.com/sched-ext/scx
T: git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git
F: include/linux/sched/ext.h
-F: kernel/sched/ext.h
-F: kernel/sched/ext.c
+F: kernel/sched/ext*
F: tools/sched_ext/
F: tools/testing/selftests/sched_ext
@@ -21775,7 +21781,7 @@ M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
F: include/linux/siphash.h
F: lib/siphash.c
-F: lib/siphash_kunit.c
+F: lib/tests/siphash_kunit.c
SIS 190 ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
@@ -23119,12 +23125,6 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/system76_acpi.c
-SYSV FILESYSTEM
-S: Orphan
-F: Documentation/filesystems/sysv-fs.rst
-F: fs/sysv/
-F: include/linux/sysv_fs.h
-
TASKSTATS STATISTICS INTERFACE
M: Balbir Singh <bsingharora@gmail.com>
S: Maintained
@@ -25456,8 +25456,8 @@ R: Sergey Senozhatsky <senozhatsky@chromium.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git
F: Documentation/core-api/printk-formats.rst
-F: lib/test_printf.c
-F: lib/test_scanf.c
+F: lib/tests/printf_kunit.c
+F: lib/tests/scanf_kunit.c
F: lib/vsprintf.c
VT1211 HARDWARE MONITOR DRIVER
@@ -25990,7 +25990,6 @@ F: include/xen/swiotlb-xen.h
XFS FILESYSTEM
M: Carlos Maiolino <cem@kernel.org>
-R: Darrick J. Wong <djwong@kernel.org>
L: linux-xfs@vger.kernel.org
S: Supported
W: http://xfs.org/
diff --git a/Makefile b/Makefile
index 50694bbbf828..8b6764d44a61 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index c59d53d6d3f3..2dd6340de6b4 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -506,3 +506,4 @@
574 common getxattrat sys_getxattrat
575 common listxattrat sys_listxattrat
576 common removexattrat sys_removexattrat
+577 common open_tree_attr sys_open_tree_attr
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 49eeb2ad8dbd..27c1d5ebcd91 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -481,3 +481,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
index 2e94d20c4ac7..b735f4c2fe5e 100644
--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -27,9 +27,10 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
int ret = 1;
unsigned long addr;
void *tags = NULL;
+ int locked = 0;
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
- struct page *page = get_dump_page(addr);
+ struct page *page = get_dump_page(addr, &locked);
/*
* get_dump_page() returns NULL when encountering an empty
diff --git a/arch/arm64/tools/syscall_32.tbl b/arch/arm64/tools/syscall_32.tbl
index 69a829912a05..0765b3a8d6d6 100644
--- a/arch/arm64/tools/syscall_32.tbl
+++ b/arch/arm64/tools/syscall_32.tbl
@@ -478,3 +478,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 73c77500ac46..3c240afe5aed 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -981,7 +981,6 @@ CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
CONFIG_PSTORE=m
CONFIG_PSTORE_COMPRESS=y
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_EROFS_FS_ZIP_LZMA=y
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index dbf2ea561c85..68a2e299ea71 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -486,7 +486,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index b0fd199cc0a4..3696d0c19579 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -443,7 +443,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index bb5b2d3b6c10..dbd4b58b724c 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -463,7 +463,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 8315a13bab73..95178e66703f 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -435,7 +435,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 350370657e5f..68372a0b05ac 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -445,7 +445,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index f942b4755702..a9beb4ec8a15 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -462,7 +462,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index b1eaad02efab..45a898e9c1d1 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -549,7 +549,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 6309a4442bb3..350dc6d08461 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -435,7 +435,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 3feb0731f814..7ea283ba86b7 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -436,7 +436,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index ea04b1b0da7d..eb0de405e6a9 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -452,7 +452,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index f52d9af92153..7026a139ccf8 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -433,7 +433,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index f348447824da..c91abb80b081 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -433,7 +433,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index f5ed71f1910d..9fe47112c586 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -466,3 +466,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 680f568b77f2..7b6e97828e55 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -472,3 +472,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 4390d30206d9..e308b82c094a 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -347,7 +347,6 @@ CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index d63d8be8cb50..fa5b04063ddb 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -354,7 +354,6 @@ CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 338bb6544a93..40283171af68 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -353,7 +353,6 @@ CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 08e1c1f2f4de..3a6d0384b774 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -336,7 +336,6 @@ CONFIG_MINIX_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFSD=m
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 61503a36067e..f7107479c7fa 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -1326,24 +1326,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs)
return -1;
}
-#ifdef CONFIG_SECCOMP
- if (unlikely(test_thread_flag(TIF_SECCOMP))) {
- int ret, i;
- struct seccomp_data sd;
- unsigned long args[6];
-
- sd.nr = current_thread_info()->syscall;
- sd.arch = syscall_get_arch(current);
- syscall_get_arguments(current, regs, args);
- for (i = 0; i < 6; i++)
- sd.args[i] = args[i];
- sd.instruction_pointer = KSTK_EIP(current);
-
- ret = __secure_computing(&sd);
- if (ret == -1)
- return ret;
- }
-#endif
+ if (secure_computing())
+ return -1;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 0b9b7e25b69a..aa70e371bb54 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -405,3 +405,4 @@
464 n32 getxattrat sys_getxattrat
465 n32 listxattrat sys_listxattrat
466 n32 removexattrat sys_removexattrat
+467 n32 open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index c844cd5cda62..1e8c44c7b614 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -381,3 +381,4 @@
464 n64 getxattrat sys_getxattrat
465 n64 listxattrat sys_listxattrat
466 n64 removexattrat sys_removexattrat
+467 n64 open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 349b8aad1159..114a5a1a6230 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -454,3 +454,4 @@
464 o32 getxattrat sys_getxattrat
465 o32 listxattrat sys_listxattrat
466 o32 removexattrat sys_removexattrat
+467 o32 open_tree_attr sys_open_tree_attr
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 19a804860ed5..ea623f910748 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -268,7 +268,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_XATTR=y
CONFIG_CONFIGFS_FS=y
-CONFIG_SYSV_FS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V4=m
CONFIG_NFS_V4_1=y
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index d9fc94c86965..94df3cb957e9 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -465,3 +465,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index 3009b0efaf34..d6d2a458847b 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -112,7 +112,6 @@ CONFIG_QNX4FS_FS=m
CONFIG_RCU_TRACE=y
CONFIG_RESET_CONTROLLER=y
CONFIG_ROOT_NFS=y
-CONFIG_SYSV_FS=m
CONFIG_SYSVIPC=y
CONFIG_TMPFS=y
CONFIG_UBIFS_FS=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index ca0c90e95837..364d1a78bc12 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -986,7 +986,6 @@ CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3_ACL=y
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 4b371c738213..d44349fe8e2b 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -751,7 +751,7 @@ u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
* prstatus.pr_pid = ????
*/
elf_core_copy_regs(&prstatus.pr_reg, regs);
- buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
+ buf = append_elf_note(buf, NN_PRSTATUS, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
return buf;
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 727ed4a14545..c6997df63287 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -215,7 +215,7 @@ static int do_seccomp(struct pt_regs *regs)
* have already loaded -ENOSYS into r3, or seccomp has put
* something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
*/
- if (__secure_computing(NULL))
+ if (__secure_computing())
return -1;
/*
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index d8b4ab78bef0..9a084bdb8926 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -557,3 +557,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index c9a9b759cc92..a379ff86c120 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -149,7 +149,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf,
/* end of vector */
bufp[idx++] = cpu_to_be64(AT_NULL);
- buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_AUXV,
+ buf = append_elf64_note(buf, NN_AUXV, NT_AUXV,
oc_conf->auxv_buf, AUXV_DESC_SZ);
return buf;
}
@@ -252,7 +252,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf)
* crashing CPU's prstatus.
*/
first_cpu_note = buf;
- buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
+ buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
for (i = 0; i < oc_conf->num_cpus; i++, bufp += size_per_thread) {
@@ -279,7 +279,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf)
fill_prstatus(&prstatus, thread_pir, &regs);
if (thread_pir != oc_conf->crashing_cpu) {
- buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME,
+ buf = append_elf64_note(buf, NN_PRSTATUS,
NT_PRSTATUS, &prstatus,
sizeof(prstatus));
} else {
@@ -287,7 +287,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf)
* Add crashing CPU as the first NT_PRSTATUS note for
* GDB to process the core file appropriately.
*/
- append_elf64_note(first_cpu_note, CRASH_CORE_NOTE_NAME,
+ append_elf64_note(first_cpu_note, NN_PRSTATUS,
NT_PRSTATUS, &prstatus,
sizeof(prstatus));
}
diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c
index 1574176e3ffc..c86950d7105a 100644
--- a/arch/powerpc/platforms/pseries/papr-vpd.c
+++ b/arch/powerpc/platforms/pseries/papr-vpd.c
@@ -482,14 +482,13 @@ static long papr_vpd_create_handle(struct papr_location_code __user *ulc)
goto free_blob;
}
- file = anon_inode_getfile("[papr-vpd]", &papr_vpd_handle_ops,
- (void *)blob, O_RDONLY);
+ file = anon_inode_getfile_fmode("[papr-vpd]", &papr_vpd_handle_ops,
+ (void *)blob, O_RDONLY,
+ FMODE_LSEEK | FMODE_PREAD);
if (IS_ERR(file)) {
err = PTR_ERR(file);
goto put_fd;
}
-
- file->f_mode |= FMODE_LSEEK | FMODE_PREAD;
fd_install(fd, file);
return fd;
put_fd:
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 276cb4c1e11b..4a981266b483 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -246,15 +246,6 @@ bool is_kdump_kernel(void)
}
EXPORT_SYMBOL_GPL(is_kdump_kernel);
-static const char *nt_name(Elf64_Word type)
-{
- const char *name = "LINUX";
-
- if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
- name = KEXEC_CORE_NOTE_NAME;
- return name;
-}
-
/*
* Initialize ELF note
*/
@@ -279,10 +270,8 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
return PTR_ADD(buf, len);
}
-static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
-{
- return nt_init_name(buf, type, desc, d_len, nt_name(type));
-}
+#define nt_init(buf, type, desc) \
+ nt_init_name(buf, NT_ ## type, &(desc), sizeof(desc), NN_ ## type)
/*
* Calculate the size of ELF note
@@ -298,10 +287,7 @@ static size_t nt_size_name(int d_len, const char *name)
return size;
}
-static inline size_t nt_size(Elf64_Word type, int d_len)
-{
- return nt_size_name(d_len, nt_name(type));
-}
+#define nt_size(type, desc) nt_size_name(sizeof(desc), NN_ ## type)
/*
* Fill ELF notes for one CPU with save area registers
@@ -322,18 +308,16 @@ static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
memcpy(&nt_fpregset.fpc, &sa->fpc, sizeof(sa->fpc));
memcpy(&nt_fpregset.fprs, &sa->fprs, sizeof(sa->fprs));
/* Create ELF notes for the CPU */
- ptr = nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus));
- ptr = nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset));
- ptr = nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer));
- ptr = nt_init(ptr, NT_S390_TODCMP, &sa->todcmp, sizeof(sa->todcmp));
- ptr = nt_init(ptr, NT_S390_TODPREG, &sa->todpreg, sizeof(sa->todpreg));
- ptr = nt_init(ptr, NT_S390_CTRS, &sa->ctrs, sizeof(sa->ctrs));
- ptr = nt_init(ptr, NT_S390_PREFIX, &sa->prefix, sizeof(sa->prefix));
+ ptr = nt_init(ptr, PRSTATUS, nt_prstatus);
+ ptr = nt_init(ptr, PRFPREG, nt_fpregset);
+ ptr = nt_init(ptr, S390_TIMER, sa->timer);
+ ptr = nt_init(ptr, S390_TODCMP, sa->todcmp);
+ ptr = nt_init(ptr, S390_TODPREG, sa->todpreg);
+ ptr = nt_init(ptr, S390_CTRS, sa->ctrs);
+ ptr = nt_init(ptr, S390_PREFIX, sa->prefix);
if (cpu_has_vx()) {
- ptr = nt_init(ptr, NT_S390_VXRS_HIGH,
- &sa->vxrs_high, sizeof(sa->vxrs_high));
- ptr = nt_init(ptr, NT_S390_VXRS_LOW,
- &sa->vxrs_low, sizeof(sa->vxrs_low));
+ ptr = nt_init(ptr, S390_VXRS_HIGH, sa->vxrs_high);
+ ptr = nt_init(ptr, S390_VXRS_LOW, sa->vxrs_low);
}
return ptr;
}
@@ -346,16 +330,16 @@ static size_t get_cpu_elf_notes_size(void)
struct save_area *sa = NULL;
size_t size;
- size = nt_size(NT_PRSTATUS, sizeof(struct elf_prstatus));
- size += nt_size(NT_PRFPREG, sizeof(elf_fpregset_t));
- size += nt_size(NT_S390_TIMER, sizeof(sa->timer));
- size += nt_size(NT_S390_TODCMP, sizeof(sa->todcmp));
- size += nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
- size += nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
- size += nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
+ size = nt_size(PRSTATUS, struct elf_prstatus);
+ size += nt_size(PRFPREG, elf_fpregset_t);
+ size += nt_size(S390_TIMER, sa->timer);
+ size += nt_size(S390_TODCMP, sa->todcmp);
+ size += nt_size(S390_TODPREG, sa->todpreg);
+ size += nt_size(S390_CTRS, sa->ctrs);
+ size += nt_size(S390_PREFIX, sa->prefix);
if (cpu_has_vx()) {
- size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
- size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
+ size += nt_size(S390_VXRS_HIGH, sa->vxrs_high);
+ size += nt_size(S390_VXRS_LOW, sa->vxrs_low);
}
return size;
@@ -371,7 +355,7 @@ static void *nt_prpsinfo(void *ptr)
memset(&prpsinfo, 0, sizeof(prpsinfo));
prpsinfo.pr_sname = 'R';
strcpy(prpsinfo.pr_fname, "vmlinux");
- return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo));
+ return nt_init(ptr, PRPSINFO, prpsinfo);
}
/*
@@ -610,7 +594,7 @@ static size_t get_elfcorehdr_size(int phdr_count)
/* PT_NOTES */
size += sizeof(Elf64_Phdr);
/* nt_prpsinfo */
- size += nt_size(NT_PRPSINFO, sizeof(struct elf_prpsinfo));
+ size += nt_size(PRPSINFO, struct elf_prpsinfo);
/* regsets */
size += get_cpu_cnt() * get_cpu_elf_notes_size();
/* nt_vmcoreinfo */
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index e9115b4d8b63..a4569b96ef06 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -469,3 +469,4 @@
464 common getxattrat sys_getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index c8cad33bf250..52a7652fcff6 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -470,3 +470,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 727f99d333b3..83e45eb6c095 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -512,3 +512,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5b773b34768d..3ba7e185924e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -137,8 +137,10 @@ ifeq ($(CONFIG_X86_32),y)
include $(srctree)/arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
- # temporary until string.h is fixed
+ ifneq ($(call clang-min-version, 160000),y)
+ # https://github.com/llvm/llvm-project/issues/53645
KBUILD_CFLAGS += -ffreestanding
+ endif
ifeq ($(CONFIG_STACKPROTECTOR),y)
ifeq ($(CONFIG_SMP),y)
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 32809a06dab4..7772b01ab738 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -167,11 +167,11 @@ static void __noreturn tdx_panic(const char *msg)
/* Define register order according to the GHCI */
struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };
- char str[64];
+ char bytes[64] __nonstring;
} message;
/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
- strtomem_pad(message.str, msg, '\0');
+ strtomem_pad(message.bytes, msg, '\0');
args.r8 = message.r8;
args.r9 = message.r9;
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 4d0fb2fba7e2..3f0ec87d5db4 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -472,3 +472,4 @@
464 i386 getxattrat sys_getxattrat
465 i386 listxattrat sys_listxattrat
466 i386 removexattrat sys_removexattrat
+467 i386 open_tree_attr sys_open_tree_attr
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 5eb708bff1c7..cfb5ca41e30d 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -390,6 +390,7 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index e7a8b8758e08..e36c9c63c97c 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -941,6 +941,8 @@ static void perf_ibs_get_mem_lock(union ibs_op_data3 *op_data3,
data_src->mem_lock = PERF_MEM_LOCK_LOCKED;
}
+/* Be careful. Works only for contiguous MSRs. */
+#define ibs_fetch_msr_idx(msr) (msr - MSR_AMD64_IBSFETCHCTL)
#define ibs_op_msr_idx(msr) (msr - MSR_AMD64_IBSOPCTL)
static void perf_ibs_get_data_src(struct perf_ibs_data *ibs_data,
@@ -1036,6 +1038,67 @@ static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs, u64 sample_type,
return 1;
}
+static bool perf_ibs_is_kernel_data_addr(struct perf_event *event,
+ struct perf_ibs_data *ibs_data)
+{
+ u64 sample_type_mask = PERF_SAMPLE_ADDR | PERF_SAMPLE_RAW;
+ union ibs_op_data3 op_data3;
+ u64 dc_lin_addr;
+
+ op_data3.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)];
+ dc_lin_addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCLINAD)];
+
+ return unlikely((event->attr.sample_type & sample_type_mask) &&
+ op_data3.dc_lin_addr_valid && kernel_ip(dc_lin_addr));
+}
+
+static bool perf_ibs_is_kernel_br_target(struct perf_event *event,
+ struct perf_ibs_data *ibs_data,
+ int br_target_idx)
+{
+ union ibs_op_data op_data;
+ u64 br_target;
+
+ op_data.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA)];
+ br_target = ibs_data->regs[br_target_idx];
+
+ return unlikely((event->attr.sample_type & PERF_SAMPLE_RAW) &&
+ op_data.op_brn_ret && kernel_ip(br_target));
+}
+
+static bool perf_ibs_swfilt_discard(struct perf_ibs *perf_ibs, struct perf_event *event,
+ struct pt_regs *regs, struct perf_ibs_data *ibs_data,
+ int br_target_idx)
+{
+ if (perf_exclude_event(event, regs))
+ return true;
+
+ if (perf_ibs != &perf_ibs_op || !event->attr.exclude_kernel)
+ return false;
+
+ if (perf_ibs_is_kernel_data_addr(event, ibs_data))
+ return true;
+
+ if (br_target_idx != -1 &&
+ perf_ibs_is_kernel_br_target(event, ibs_data, br_target_idx))
+ return true;
+
+ return false;
+}
+
+static void perf_ibs_phyaddr_clear(struct perf_ibs *perf_ibs,
+ struct perf_ibs_data *ibs_data)
+{
+ if (perf_ibs == &perf_ibs_op) {
+ ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)] &= ~(1ULL << 18);
+ ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCPHYSAD)] = 0;
+ return;
+ }
+
+ ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHCTL)] &= ~(1ULL << 52);
+ ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHPHYSAD)] = 0;
+}
+
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
@@ -1048,6 +1111,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
int offset, size, check_rip, offset_max, throttle = 0;
unsigned int msr;
u64 *buf, *config, period, new_config = 0;
+ int br_target_idx = -1;
if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
@@ -1102,6 +1166,7 @@ fail:
if (perf_ibs == &perf_ibs_op) {
if (ibs_caps & IBS_CAPS_BRNTRGT) {
rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+ br_target_idx = size;
size++;
}
if (ibs_caps & IBS_CAPS_OPDATA4) {
@@ -1129,10 +1194,19 @@ fail:
}
if ((event->attr.config2 & IBS_SW_FILTER_MASK) &&
- perf_exclude_event(event, &regs)) {
+ perf_ibs_swfilt_discard(perf_ibs, event, &regs, &ibs_data, br_target_idx)) {
throttle = perf_event_account_interrupt(event);
goto out;
}
+ /*
+ * Prevent leaking physical addresses to unprivileged users. Skip
+ * PERF_SAMPLE_PHYS_ADDR check since generic code prevents it for
+ * unprivileged users.
+ */
+ if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
+ perf_allow_kernel(&event->attr)) {
+ perf_ibs_phyaddr_clear(perf_ibs, &ibs_data);
+ }
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
raw = (struct perf_raw_record){
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 6941f4811bec..043f0a0b1e00 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -730,6 +730,7 @@ static int __init init_rapl_pmus(struct rapl_pmus **rapl_pmus_ptr, int rapl_pmu_
{
int nr_rapl_pmu = topology_max_packages();
struct rapl_pmus *rapl_pmus;
+ int ret;
/*
* rapl_pmu_scope must be either PKG, DIE or CORE
@@ -761,7 +762,11 @@ static int __init init_rapl_pmus(struct rapl_pmus **rapl_pmus_ptr, int rapl_pmu_
rapl_pmus->pmu.module = THIS_MODULE;
rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
- return init_rapl_pmu(rapl_pmus);
+ ret = init_rapl_pmu(rapl_pmus);
+ if (ret)
+ kfree(rapl_pmus);
+
+ return ret;
}
static struct rapl_model model_snb = {
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 37effc1b134e..f657a77314f8 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -437,3 +437,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/block/bdev.c b/block/bdev.c
index 9d73a8fbf7f9..4844d1e27b6f 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -148,6 +148,8 @@ static void set_init_blocksize(struct block_device *bdev)
bsize <<= 1;
}
BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
+ mapping_set_folio_min_order(BD_INODE(bdev)->i_mapping,
+ get_order(bsize));
}
int set_blocksize(struct file *file, int size)
@@ -169,6 +171,7 @@ int set_blocksize(struct file *file, int size)
if (inode->i_blkbits != blksize_bits(size)) {
sync_blockdev(bdev);
inode->i_blkbits = blksize_bits(size);
+ mapping_set_folio_min_order(inode->i_mapping, get_order(size));
kill_bdev(bdev);
}
return 0;
@@ -178,10 +181,11 @@ EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
+ if (!(sb->s_type->fs_flags & FS_LBS) && size > PAGE_SIZE)
+ return 0;
if (set_blocksize(sb->s_bdev_file, size))
return 0;
- /* If we get here, we know size is power of two
- * and it's value is between 512 and PAGE_SIZE */
+ /* If we get here, we know size is validated */
sb->s_blocksize = size;
sb->s_blocksize_bits = blksize_bits(size);
return sb->s_blocksize;
@@ -1274,9 +1278,6 @@ void bdev_statx(struct path *path, struct kstat *stat,
struct inode *backing_inode;
struct block_device *bdev;
- if (!(request_mask & (STATX_DIOALIGN | STATX_WRITE_ATOMIC)))
- return;
-
backing_inode = d_backing_inode(path->dentry);
/*
@@ -1303,6 +1304,8 @@ void bdev_statx(struct path *path, struct kstat *stat,
queue_atomic_write_unit_max_bytes(bd_queue));
}
+ stat->blksize = bdev_io_min(bdev);
+
blkdev_put_no_open(bdev);
}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9ed93d91d754..1994a11ff903 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -659,6 +659,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct blkcg_gq *blkg;
int i;
+ pr_info_once("blkio.%s is deprecated\n", cftype->name);
mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
@@ -1770,12 +1771,15 @@ int blkcg_policy_register(struct blkcg_policy *pol)
mutex_unlock(&blkcg_pol_mutex);
/* everything is in place, add intf files for the new policy */
- if (pol->dfl_cftypes)
+ if (pol->dfl_cftypes == pol->legacy_cftypes) {
+ WARN_ON(cgroup_add_cftypes(&io_cgrp_subsys,
+ pol->dfl_cftypes));
+ } else {
WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
pol->dfl_cftypes));
- if (pol->legacy_cftypes)
WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
pol->legacy_cftypes));
+ }
mutex_unlock(&blkcg_pol_register_mutex);
return 0;
diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c
index 8fff7ccc0ac7..13659dc15c3f 100644
--- a/block/blk-ioprio.c
+++ b/block/blk-ioprio.c
@@ -113,27 +113,18 @@ static void ioprio_free_cpd(struct blkcg_policy_data *cpd)
kfree(blkcg);
}
-#define IOPRIO_ATTRS \
- { \
- .name = "prio.class", \
- .seq_show = ioprio_show_prio_policy, \
- .write = ioprio_set_prio_policy, \
- }, \
- { } /* sentinel */
-
-/* cgroup v2 attributes */
static struct cftype ioprio_files[] = {
- IOPRIO_ATTRS
-};
-
-/* cgroup v1 attributes */
-static struct cftype ioprio_legacy_files[] = {
- IOPRIO_ATTRS
+ {
+ .name = "prio.class",
+ .seq_show = ioprio_show_prio_policy,
+ .write = ioprio_set_prio_policy,
+ },
+ { } /* sentinel */
};
static struct blkcg_policy ioprio_policy = {
.dfl_cftypes = ioprio_files,
- .legacy_cftypes = ioprio_legacy_files,
+ .legacy_cftypes = ioprio_files,
.cpd_alloc_fn = ioprio_alloc_cpd,
.cpd_free_fn = ioprio_free_cpd,
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index c20eb63750f5..43aba57b48f0 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -172,9 +172,10 @@ static void free_slice(struct kref *kref)
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
- int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
+ unsigned int len, nents, offf, offl;
struct sg_table *sgt;
+ size_t total_len;
int ret, j;
/* find out number of relevant nents needed for this mem */
@@ -182,6 +183,8 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
sgf = NULL;
sgl = NULL;
nents = 0;
+ offf = 0;
+ offl = 0;
size = size ? size : PAGE_SIZE;
for_each_sgtable_dma_sg(sgt_in, sg, j) {
@@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
+ u64 total;
int i;
for (i = 0; i < count; i++) {
@@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;
- if (slice_ent[i].offset + slice_ent[i].size > total_size)
+ if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
+ total > total_size)
return -EINVAL;
}
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b848764ef018..6dd1a8860f1c 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -63,22 +63,6 @@ __setup("devtmpfs.mount=", mount_param);
static struct vfsmount *mnt;
-static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct super_block *s = mnt->mnt_sb;
- int err;
-
- atomic_inc(&s->s_active);
- down_write(&s->s_umount);
- err = reconfigure_single(s, flags, data);
- if (err < 0) {
- deactivate_locked_super(s);
- return ERR_PTR(err);
- }
- return dget(s->s_root);
-}
-
static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
@@ -89,9 +73,40 @@ static struct file_system_type internal_fs_type = {
.kill_sb = kill_litter_super,
};
+/* Simply take a ref on the existing mount */
+static int devtmpfs_get_tree(struct fs_context *fc)
+{
+ struct super_block *sb = mnt->mnt_sb;
+
+ atomic_inc(&sb->s_active);
+ down_write(&sb->s_umount);
+ fc->root = dget(sb->s_root);
+ return 0;
+}
+
+/* Ops are filled in during init depending on underlying shmem or ramfs type */
+struct fs_context_operations devtmpfs_context_ops = {};
+
+/* Call the underlying initialization and set to our ops */
+static int devtmpfs_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+#ifdef CONFIG_TMPFS
+ ret = shmem_init_fs_context(fc);
+#else
+ ret = ramfs_init_fs_context(fc);
+#endif
+ if (ret < 0)
+ return ret;
+
+ fc->ops = &devtmpfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
- .mount = public_dev_mount,
+ .init_fs_context = devtmpfs_init_fs_context,
};
static int devtmpfs_submit_req(struct req *req, const char *tmp)
@@ -160,18 +175,17 @@ static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
- int err;
dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
- if (!err)
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
+ if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
done_path_create(&path, dentry);
- return err;
+ return PTR_ERR_OR_ZERO(dentry);
}
static int create_path(const char *nodepath)
@@ -245,15 +259,12 @@ static int dev_rmdir(const char *name)
dentry = kern_path_locked(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry);
- else
- err = -EPERM;
- } else {
- err = -ENOENT;
- }
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry);
+ else
+ err = -EPERM;
+
dput(dentry);
inode_unlock(d_inode(parent.dentry));
path_put(&parent);
@@ -310,6 +321,8 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
+ struct kstat stat;
+ struct path p;
int deleted = 0;
int err;
@@ -317,32 +330,28 @@ static int handle_remove(const char *nodename, struct device *dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- struct kstat stat;
- struct path p = {.mnt = parent.mnt, .dentry = dentry};
- err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
- AT_STATX_SYNC_AS_STAT);
- if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
- struct iattr newattrs;
- /*
- * before unlinking this node, reset permissions
- * of possible references like hardlinks
- */
- newattrs.ia_uid = GLOBAL_ROOT_UID;
- newattrs.ia_gid = GLOBAL_ROOT_GID;
- newattrs.ia_mode = stat.mode & ~0777;
- newattrs.ia_valid =
- ATTR_UID|ATTR_GID|ATTR_MODE;
- inode_lock(d_inode(dentry));
- notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
- inode_unlock(d_inode(dentry));
- err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry, NULL);
- if (!err || err == -ENOENT)
- deleted = 1;
- }
- } else {
- err = -ENOENT;
+ p.mnt = parent.mnt;
+ p.dentry = dentry;
+ err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
+ AT_STATX_SYNC_AS_STAT);
+ if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ inode_lock(d_inode(dentry));
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
+ inode_unlock(d_inode(dentry));
+ err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry, NULL);
+ if (!err || err == -ENOENT)
+ deleted = 1;
}
dput(dentry);
inode_unlock(d_inode(parent.dentry));
@@ -443,6 +452,31 @@ static int __ref devtmpfsd(void *p)
}
/*
+ * Get the underlying (shmem/ramfs) context ops to build ours
+ */
+static int devtmpfs_configure_context(void)
+{
+ struct fs_context *fc;
+
+ fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
+ MS_RMT_MASK);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ /* Set up devtmpfs_context_ops based on underlying type */
+ devtmpfs_context_ops.free = fc->ops->free;
+ devtmpfs_context_ops.dup = fc->ops->dup;
+ devtmpfs_context_ops.parse_param = fc->ops->parse_param;
+ devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
+ devtmpfs_context_ops.get_tree = &devtmpfs_get_tree;
+ devtmpfs_context_ops.reconfigure = fc->ops->reconfigure;
+
+ put_fs_context(fc);
+
+ return 0;
+}
+
+/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
@@ -456,6 +490,13 @@ int __init devtmpfs_init(void)
pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
+
+ err = devtmpfs_configure_context();
+ if (err) {
+ pr_err("unable to configure devtmpfs type %d\n", err);
+ return err;
+ }
+
err = register_filesystem(&dev_fs_type);
if (err) {
pr_err("unable to register devtmpfs type %d\n", err);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 2523221a2519..48ff00427882 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -2437,7 +2437,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
(void **)&adev->gfx.me.me_fw_data_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
- gfx_v12_0_pfp_fini(adev);
+ gfx_v12_0_me_fini(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index 0fb88e6d5d54..c3c144a4f45e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -501,9 +501,6 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
uint64_t *flags)
{
struct amdgpu_bo *bo = mapping->bo_va->base.bo;
- struct amdgpu_device *bo_adev;
- bool coherent, is_system;
-
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@@ -519,26 +516,11 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (!bo)
- return;
-
- if (bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
- AMDGPU_GEM_CREATE_UNCACHED))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
-
- bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
- coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
- is_system = bo->tbo.resource &&
- (bo->tbo.resource->mem_type == TTM_PL_TT ||
- bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
-
if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
*flags |= AMDGPU_PTE_DCC;
- /* WA for HW bug */
- if (is_system || ((bo_adev != adev) && coherent))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
-
+ if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
}
static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 47db483c3516..95c609317a8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -78,12 +78,12 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = {
/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -104,10 +104,10 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = {
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -115,10 +115,10 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a59b4c36cad7..e98fb3fa36a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -103,12 +103,11 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -120,12 +119,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
@@ -138,10 +137,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 06615f160331..0c9c4d8b7b71 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -167,16 +167,16 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -188,9 +188,9 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
};
@@ -206,16 +206,16 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -227,9 +227,9 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
{
@@ -239,13 +239,6 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
};
static const struct amdgpu_video_codecs cz_video_codecs_decode =
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 651660958e5b..0320163b6e74 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -3644,7 +3644,7 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
};
static const uint32_t cwsr_trap_gfx12_hex[] = {
- 0xbfa00001, 0xbfa0024b,
+ 0xbfa00001, 0xbfa002a2,
0xb0804009, 0xb8f8f804,
0x9178ff78, 0x00008c00,
0xb8fbf811, 0x8b6eff78,
@@ -3718,7 +3718,15 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00011677, 0xd7610000,
0x00011a79, 0xd7610000,
0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
+ 0x00011e7f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x00003fff, 0xbeff0080,
0xee0a407a, 0x000c0000,
0x00004000, 0xd760007a,
@@ -3755,38 +3763,46 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00000200, 0xbef600ff,
0x01000000, 0x7e000280,
0x7e020280, 0x7e040280,
- 0xbefd0080, 0xbe804ec2,
- 0xbf94fffe, 0xb8faf804,
- 0x8b7a847a, 0x91788478,
- 0x8c787a78, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xd7610002, 0x0000fa6c,
- 0x807d817d, 0x917aff6d,
- 0x80000000, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa6e,
- 0x807d817d, 0xd7610002,
- 0x0000fa6f, 0x807d817d,
- 0xd7610002, 0x0000fa78,
- 0x807d817d, 0xb8faf811,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa7b, 0x807d817d,
- 0xb8f1f801, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f814, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f815, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f812, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f813, 0xd7610002,
- 0x0000fa71, 0x807d817d,
+ 0xbe804ec2, 0xbf94fffe,
+ 0xb8faf804, 0x8b7a847a,
+ 0x91788478, 0x8c787a78,
+ 0x917aff6d, 0x80000000,
+ 0xd7610002, 0x00010071,
+ 0xd7610002, 0x0001026c,
+ 0xd7610002, 0x0001047a,
+ 0xd7610002, 0x0001066e,
+ 0xd7610002, 0x0001086f,
+ 0xd7610002, 0x00010a78,
+ 0xd7610002, 0x00010e7b,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xb8faf811, 0xd7610002,
+ 0x00010c7a, 0xb8faf801,
+ 0xd7610002, 0x0001107a,
+ 0xb8faf814, 0xd7610002,
+ 0x0001127a, 0xb8faf815,
+ 0xd7610002, 0x0001147a,
+ 0xb8faf812, 0xd7610002,
+ 0x0001167a, 0xb8faf813,
+ 0xd7610002, 0x0001187a,
0xb8faf802, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xbefa50c1, 0xbfc70000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xbefe00ff,
+ 0x00011a7a, 0xbefa50c1,
+ 0xbfc70000, 0xd7610002,
+ 0x00011c7a, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x0000ffff, 0xbeff0080,
0xc4068070, 0x008ce802,
0x00000000, 0xbefe00c1,
@@ -3801,331 +3817,358 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbe824102, 0xbe844104,
0xbe864106, 0xbe884108,
0xbe8a410a, 0xbe8c410c,
- 0xbe8e410e, 0xd7610002,
- 0x0000f200, 0x80798179,
- 0xd7610002, 0x0000f201,
- 0x80798179, 0xd7610002,
- 0x0000f202, 0x80798179,
- 0xd7610002, 0x0000f203,
- 0x80798179, 0xd7610002,
- 0x0000f204, 0x80798179,
- 0xd7610002, 0x0000f205,
- 0x80798179, 0xd7610002,
- 0x0000f206, 0x80798179,
- 0xd7610002, 0x0000f207,
- 0x80798179, 0xd7610002,
- 0x0000f208, 0x80798179,
- 0xd7610002, 0x0000f209,
- 0x80798179, 0xd7610002,
- 0x0000f20a, 0x80798179,
- 0xd7610002, 0x0000f20b,
- 0x80798179, 0xd7610002,
- 0x0000f20c, 0x80798179,
- 0xd7610002, 0x0000f20d,
- 0x80798179, 0xd7610002,
- 0x0000f20e, 0x80798179,
- 0xd7610002, 0x0000f20f,
- 0x80798179, 0xbf06a079,
- 0xbfa10007, 0xc4068070,
+ 0xbe8e410e, 0xbf068079,
+ 0xbfa10032, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd7610002,
+ 0x0001180c, 0xd7610002,
+ 0x00011a0d, 0xd7610002,
+ 0x00011c0e, 0xd7610002,
+ 0x00011e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xbfa00038, 0xd7610002,
+ 0x00012000, 0xd7610002,
+ 0x00012201, 0xd7610002,
+ 0x00012402, 0xd7610002,
+ 0x00012603, 0xd7610002,
+ 0x00012804, 0xd7610002,
+ 0x00012a05, 0xd7610002,
+ 0x00012c06, 0xd7610002,
+ 0x00012e07, 0xd7610002,
+ 0x00013008, 0xd7610002,
+ 0x00013209, 0xd7610002,
+ 0x0001340a, 0xd7610002,
+ 0x0001360b, 0xd7610002,
+ 0x0001380c, 0xd7610002,
+ 0x00013a0d, 0xd7610002,
+ 0x00013c0e, 0xd7610002,
+ 0x00013e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xc4068070, 0x008ce802,
+ 0x00000000, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ff88, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xc4068070,
0x008ce802, 0x00000000,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8fb4306, 0x8b7bc17b,
+ 0xbfa10044, 0x8b7aff6d,
+ 0x80000000, 0xbfa10041,
+ 0x847b897b, 0xbef6007b,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbb,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
- 0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
- 0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
- 0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
- 0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xc4068070, 0x008ce802,
- 0x00000000, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8fb4306,
- 0x8b7bc17b, 0xbfa10044,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10041, 0x847b897b,
- 0xbef6007b, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0x8070ff70,
- 0x00000200, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xd71f0000,
- 0x000100c1, 0xd7200000,
- 0x000200c1, 0x16000084,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa20013, 0xbe8300ff,
- 0x00000080, 0xbf800000,
- 0xbf800000, 0xbf800000,
- 0xd8d80000, 0x01000000,
- 0xbf8a0000, 0xc4068070,
- 0x008ce801, 0x00000000,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff3, 0xbfa00012,
- 0xbe8300ff, 0x00000100,
+ 0xbef600ff, 0x01000000,
+ 0xd71f0000, 0x000100c1,
+ 0xd7200000, 0x000200c1,
+ 0x16000084, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa20013,
+ 0xbe8300ff, 0x00000080,
0xbf800000, 0xbf800000,
0xbf800000, 0xd8d80000,
0x01000000, 0xbf8a0000,
0xc4068070, 0x008ce801,
0x00000000, 0x807d037d,
0x80700370, 0xd5250000,
- 0x0001ff00, 0x00000100,
+ 0x0001ff00, 0x00000080,
0xbf0a7b7d, 0xbfa2fff3,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20004, 0xbef000ff,
- 0x00000200, 0xbeff0080,
- 0xbfa00003, 0xbef000ff,
- 0x00000400, 0xbeff00c1,
- 0xb8fb3b05, 0x807b817b,
- 0x847b827b, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa2001b, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10040,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
- 0xc4068070, 0x008ce800,
- 0x00000000, 0xc4068070,
- 0x008ce801, 0x00008000,
- 0xc4068070, 0x008ce802,
- 0x00010000, 0xc4068070,
- 0x008ce803, 0x00018000,
- 0x807d847d, 0x8070ff70,
- 0x00000200, 0xbf0a7b7d,
- 0xbfa2ffeb, 0xbfa0002a,
+ 0xbfa00012, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8a0000, 0xc4068070,
+ 0x008ce801, 0x00000000,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a7b7d,
+ 0xbfa2fff3, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20004,
+ 0xbef000ff, 0x00000200,
+ 0xbeff0080, 0xbfa00003,
+ 0xbef000ff, 0x00000400,
+ 0xbeff00c1, 0xb8fb3b05,
+ 0x807b817b, 0x847b827b,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa2001b,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10015, 0x7e008700,
+ 0xbfa10040, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xc4068070,
0x008ce800, 0x00000000,
0xc4068070, 0x008ce801,
- 0x00010000, 0xc4068070,
- 0x008ce802, 0x00020000,
+ 0x00008000, 0xc4068070,
+ 0x008ce802, 0x00010000,
0xc4068070, 0x008ce803,
- 0x00030000, 0x807d847d,
- 0x8070ff70, 0x00000400,
+ 0x00018000, 0x807d847d,
+ 0x8070ff70, 0x00000200,
0xbf0a7b7d, 0xbfa2ffeb,
- 0xb8fb1e06, 0x8b7bc17b,
- 0xbfa1000d, 0x847b837b,
- 0x807b7d7b, 0xbefe00c1,
- 0xbeff0080, 0x7e008700,
+ 0xbfa0002a, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10015,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
0xc4068070, 0x008ce800,
- 0x00000000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff7,
- 0xbfa0016e, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xbef1007f,
- 0xb8f20742, 0x84729972,
- 0x8b6eff7f, 0x04000000,
- 0xbfa1003b, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef4306,
- 0x8b6fc16f, 0xbfa10030,
- 0x846f896f, 0xbef6006f,
+ 0x00000000, 0xc4068070,
+ 0x008ce801, 0x00010000,
+ 0xc4068070, 0x008ce802,
+ 0x00020000, 0xc4068070,
+ 0x008ce803, 0x00030000,
+ 0x807d847d, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7d,
+ 0xbfa2ffeb, 0xb8fb1e06,
+ 0x8b7bc17b, 0xbfa1000d,
+ 0x847b837b, 0x807b7d7b,
+ 0xbefe00c1, 0xbeff0080,
+ 0x7e008700, 0xc4068070,
+ 0x008ce800, 0x00000000,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff7, 0xbfa0016e,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xbef1007f, 0xb8f20742,
+ 0x84729972, 0x8b6eff7f,
+ 0x04000000, 0xbfa1003b,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef4306, 0x8b6fc16f,
+ 0xbfa10030, 0x846f896f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000d,
+ 0xc4050078, 0x0080e800,
+ 0x00000000, 0xbf8a0000,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff4,
+ 0xbfa0000c, 0xc4050078,
+ 0x0080e800, 0x00000000,
+ 0xbf8a0000, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff4, 0xbef80080,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa2002c, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000200,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10061, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00008000, 0xc4050078,
+ 0x008ce802, 0x00010000,
+ 0xc4050078, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00008000, 0xc405006e,
+ 0x008ce802, 0x00010000,
+ 0xc405006e, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0xbfa0003d, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10016, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00010000, 0xc4050078,
+ 0x008ce802, 0x00020000,
+ 0xc4050078, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000f,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
+ 0xc4050078, 0x008ce800,
+ 0x00000000, 0xbf8a0000,
+ 0x7e008500, 0x807d817d,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff6,
+ 0xbeff00c1, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00010000, 0xc405006e,
+ 0x008ce802, 0x00020000,
+ 0xc405006e, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000d, 0xc4050078,
- 0x0080e800, 0x00000000,
- 0xbf8a0000, 0xdac00000,
- 0x00000000, 0x807dff7d,
- 0x00000080, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff4, 0xbfa0000c,
- 0xc4050078, 0x0080e800,
- 0x00000000, 0xbf8a0000,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff4,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa2002c,
+ 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10061,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00008000,
- 0xc4050078, 0x008ce802,
- 0x00010000, 0xc4050078,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
+ 0xbefd00ff, 0x0000006c,
+ 0x80f89078, 0xf462403a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd847d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0x80f8a078, 0xf462603a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd887d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0x80f8c078, 0xf462803a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd907d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0xbe884308, 0xbe8a430a,
+ 0xbe8c430c, 0xbe8e430e,
+ 0xbf06807d, 0xbfa1fff0,
+ 0xb980f801, 0x00000000,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00008000,
- 0xc405006e, 0x008ce802,
- 0x00010000, 0xc405006e,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0xbfa0003d,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10016,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00010000,
- 0xc4050078, 0x008ce802,
- 0x00020000, 0xc4050078,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000f, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xc4050078,
- 0x008ce800, 0x00000000,
- 0xbf8a0000, 0x7e008500,
- 0x807d817d, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff6, 0xbeff00c1,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00010000,
- 0xc405006e, 0x008ce802,
- 0x00020000, 0xc405006e,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef600ff,
- 0x01000000, 0xbefd00ff,
- 0x0000006c, 0x80f89078,
- 0xf462403a, 0xf0000000,
- 0xbf8a0000, 0x80fd847d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0x80f8a078,
- 0xf462603a, 0xf0000000,
- 0xbf8a0000, 0x80fd887d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0x80f8c078,
- 0xf462803a, 0xf0000000,
- 0xbf8a0000, 0x80fd907d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0xbe884308,
- 0xbe8a430a, 0xbe8c430c,
- 0xbe8e430e, 0xbf06807d,
- 0xbfa1fff0, 0xb980f801,
- 0x00000000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xbeff0071,
- 0xf4621bfa, 0xf0000000,
- 0x80788478, 0xf4621b3a,
+ 0xbeff0071, 0xf4621bfa,
0xf0000000, 0x80788478,
- 0xf4621b7a, 0xf0000000,
- 0x80788478, 0xf4621c3a,
+ 0xf4621b3a, 0xf0000000,
+ 0x80788478, 0xf4621b7a,
0xf0000000, 0x80788478,
- 0xf4621c7a, 0xf0000000,
- 0x80788478, 0xf4621eba,
+ 0xf4621c3a, 0xf0000000,
+ 0x80788478, 0xf4621c7a,
0xf0000000, 0x80788478,
- 0xf4621efa, 0xf0000000,
- 0x80788478, 0xf4621e7a,
+ 0xf4621eba, 0xf0000000,
+ 0x80788478, 0xf4621efa,
0xf0000000, 0x80788478,
- 0xf4621cfa, 0xf0000000,
- 0x80788478, 0xf4621bba,
+ 0xf4621e7a, 0xf0000000,
+ 0x80788478, 0xf4621cfa,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef814,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef815, 0xf4621bba,
+ 0xb96ef814, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef812,
+ 0xbf8a0000, 0xb96ef815,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef813, 0x8b6eff7f,
- 0x04000000, 0xbfa1000d,
- 0x80788478, 0xf4621bba,
+ 0xb96ef812, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xbf0d806e,
- 0xbfa10006, 0x856e906e,
- 0x8b6e6e6e, 0xbfa10003,
- 0xbe804ec1, 0x816ec16e,
- 0xbfa0fffb, 0xbefd006f,
- 0xbefe0070, 0xbeff0071,
- 0xb97b2011, 0x857b867b,
- 0xb97b0191, 0x857b827b,
- 0xb97bba11, 0xb973f801,
- 0xb8ee3b05, 0x806e816e,
- 0xbf0d9972, 0xbfa20002,
- 0x846e896e, 0xbfa00001,
- 0x846e8a6e, 0xb8ef1e06,
- 0x846f8a6f, 0x806e6f6e,
- 0x806eff6e, 0x00000200,
- 0x806e746e, 0x826f8075,
- 0x8b6fff6f, 0x0000ffff,
- 0xf4605c37, 0xf8000050,
- 0xf4605d37, 0xf8000060,
- 0xf4601e77, 0xf8000074,
- 0xbf8a0000, 0x8b6dff6d,
- 0x0000ffff, 0x8bfe7e7e,
- 0x8bea6a6a, 0xb97af804,
+ 0xbf8a0000, 0xb96ef813,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1000d, 0x80788478,
+ 0xf4621bba, 0xf0000000,
+ 0x80788478, 0xbf8a0000,
+ 0xbf0d806e, 0xbfa10006,
+ 0x856e906e, 0x8b6e6e6e,
+ 0xbfa10003, 0xbe804ec1,
+ 0x816ec16e, 0xbfa0fffb,
+ 0xbefd006f, 0xbefe0070,
+ 0xbeff0071, 0xb97b2011,
+ 0x857b867b, 0xb97b0191,
+ 0x857b827b, 0xb97bba11,
+ 0xb973f801, 0xb8ee3b05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbfa20002, 0x846e896e,
+ 0xbfa00001, 0x846e8a6e,
+ 0xb8ef1e06, 0x846f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x8b6fff6f,
+ 0x0000ffff, 0xf4605c37,
+ 0xf8000050, 0xf4605d37,
+ 0xf8000060, 0xf4601e77,
+ 0xf8000074, 0xbf8a0000,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb97af804, 0xbe804ec2,
+ 0xbf94fffe, 0xbe804a6c,
0xbe804ec2, 0xbf94fffe,
- 0xbe804a6c, 0xbe804ec2,
- 0xbf94fffe, 0xbfb10000,
+ 0xbfb10000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
- 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
index 7b9d36e5fa43..5a1a1b1f897f 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
@@ -30,6 +30,7 @@
#define CHIP_GFX12 37
#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
+#define HAVE_VALU_SGPR_HAZARD (ASIC_FAMILY == CHIP_GFX12)
var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
@@ -351,6 +352,7 @@ L_HAVE_VGPRS:
v_writelane_b32 v0, ttmp13, 0xD
v_writelane_b32 v0, exec_lo, 0xE
v_writelane_b32 v0, exec_hi, 0xF
+ valu_sgpr_hazard()
s_mov_b32 exec_lo, 0x3FFF
s_mov_b32 exec_hi, 0x0
@@ -417,7 +419,6 @@ L_SAVE_HWREG:
v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource
v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource
v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store
- s_mov_b32 m0, 0x0 //Next lane of v2 to write to
// Ensure no further changes to barrier or LDS state.
// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
@@ -430,40 +431,41 @@ L_SAVE_HWREG:
s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp
- write_hwreg_to_v2(s_save_m0)
- write_hwreg_to_v2(s_save_pc_lo)
s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
- write_hwreg_to_v2(s_save_tmp)
- write_hwreg_to_v2(s_save_exec_lo)
- write_hwreg_to_v2(s_save_exec_hi)
- write_hwreg_to_v2(s_save_state_priv)
+ v_writelane_b32 v2, s_save_m0, 0x0
+ v_writelane_b32 v2, s_save_pc_lo, 0x1
+ v_writelane_b32 v2, s_save_tmp, 0x2
+ v_writelane_b32 v2, s_save_exec_lo, 0x3
+ v_writelane_b32 v2, s_save_exec_hi, 0x4
+ v_writelane_b32 v2, s_save_state_priv, 0x5
+ v_writelane_b32 v2, s_save_xnack_mask, 0x7
+ valu_sgpr_hazard()
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0x6
- write_hwreg_to_v2(s_save_xnack_mask)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_MODE)
+ v_writelane_b32 v2, s_save_tmp, 0x8
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_MODE)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
+ v_writelane_b32 v2, s_save_tmp, 0x9
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
+ v_writelane_b32 v2, s_save_tmp, 0xA
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ v_writelane_b32 v2, s_save_tmp, 0xB
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- write_hwreg_to_v2(s_save_m0)
-
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ v_writelane_b32 v2, s_save_tmp, 0xC
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0xD
s_get_barrier_state s_save_tmp, -1
s_wait_kmcnt (0)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0xE
+ valu_sgpr_hazard()
// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
s_mov_b32 exec_lo, 0xFFFF
@@ -497,10 +499,12 @@ L_SAVE_SGPR_LOOP:
s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
- write_16sgpr_to_v2(s0)
-
- s_cmp_eq_u32 ttmp13, 0x20 //have 32 VGPR lanes filled?
- s_cbranch_scc0 L_SAVE_SGPR_SKIP_TCP_STORE
+ s_cmp_eq_u32 ttmp13, 0x0
+ s_cbranch_scc0 L_WRITE_V2_SECOND_HALF
+ write_16sgpr_to_v2(s0, 0x0)
+ s_branch L_SAVE_SGPR_SKIP_TCP_STORE
+L_WRITE_V2_SECOND_HALF:
+ write_16sgpr_to_v2(s0, 0x10)
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80
@@ -1056,27 +1060,21 @@ L_END_PGM:
s_endpgm_saved
end
-function write_hwreg_to_v2(s)
- // Copy into VGPR for later TCP store.
- v_writelane_b32 v2, s, m0
- s_add_u32 m0, m0, 0x1
-end
-
-
-function write_16sgpr_to_v2(s)
+function write_16sgpr_to_v2(s, lane_offset)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
- v_writelane_b32 v2, s[sgpr_idx], ttmp13
- s_add_u32 ttmp13, ttmp13, 0x1
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx + lane_offset
end
+ valu_sgpr_hazard()
+ s_add_u32 ttmp13, ttmp13, 0x10
end
function write_12sgpr_to_v2(s)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
- v_writelane_b32 v2, s[sgpr_idx], ttmp13
- s_add_u32 ttmp13, ttmp13, 0x1
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx
end
+ valu_sgpr_hazard()
end
function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
@@ -1128,3 +1126,11 @@ function get_wave_size2(s_reg)
s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
end
+
+function valu_sgpr_hazard
+#if HAVE_VALU_SGPR_HAZARD
+ for var rep = 0; rep < 8; rep ++
+ ds_nop
+ end
+#endif
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 24396a2c77bd..4afff7094caf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -233,6 +233,7 @@ void kfd_queue_buffer_put(struct amdgpu_bo **bo)
int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
{
struct kfd_topology_device *topo_dev;
+ u64 expected_queue_size;
struct amdgpu_vm *vm;
u32 total_cwsr_size;
int err;
@@ -241,6 +242,15 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
if (!topo_dev)
return -EINVAL;
+ /* AQL queues on GFX7 and GFX8 appear to be twice their actual size */
+ if (properties->type == KFD_QUEUE_TYPE_COMPUTE &&
+ properties->format == KFD_QUEUE_FORMAT_AQL &&
+ topo_dev->node_props.gfx_target_version >= 70000 &&
+ topo_dev->node_props.gfx_target_version < 90000)
+ expected_queue_size = properties->queue_size / 2;
+ else
+ expected_queue_size = properties->queue_size;
+
vm = drm_priv_to_vm(pdd->drm_priv);
err = amdgpu_bo_reserve(vm->root.bo, false);
if (err)
@@ -255,7 +265,7 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
- &properties->ring_bo, properties->queue_size);
+ &properties->ring_bo, expected_queue_size);
if (err)
goto out_err_unreserve;
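For context on the hunk above: on GFX7/GFX8 an AQL compute queue is reported by userspace at twice the size of the buffer that actually backs it, so the ring BO lookup has to validate against half the reported size. A freestanding sketch of the same rule (names here are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdint.h>

	/* Mirrors the GFX7/GFX8 AQL size adjustment in the hunk above. */
	static uint64_t expected_ring_bo_size(uint64_t reported_size,
					      bool is_compute_aql,
					      uint32_t gfx_target_version)
	{
		if (is_compute_aql &&
		    gfx_target_version >= 70000 &&
		    gfx_target_version < 90000)
			return reported_size / 2;	/* ring is half as big */
		return reported_size;
	}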
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index bd3e20d981e0..9477a4adcd36 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1286,13 +1286,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
- if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_node != node)
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- } else {
- mapping_flags |= coherent ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- }
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
break;
default:
mapping_flags |= coherent ?
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 74ad0d1240fe..39df45f652b3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1745,7 +1745,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_
}
if (quirk_entries.support_edp0_on_dp1) {
init_data->flags.support_edp0_on_dp1 = true;
- drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..6e2fce329d73 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -69,5 +69,16 @@ bool should_use_dmub_lock(struct dc_link *link)
if (link->replay_settings.replay_feature_enabled)
return true;
+ /* only use HW lock for PSR1 on single eDP */
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(link->dc, edp_links, &edp_num);
+
+ if (edp_num == 1)
+ return true;
+ }
+
return false;
}
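The added branch extends the DMUB (firmware-managed) HW lock to PSR1, but only when the system drives a single eDP panel; multi-eDP keeps the old behavior. The resulting policy, condensed into a sketch with simplified types:

	#include <stdbool.h>

	enum psr_ver { PSR_NONE, PSR_1, PSR_SU_1 };

	/* Illustrative decision function; the driver reads these fields
	 * from link->psr_settings / link->replay_settings instead. */
	static bool use_dmub_hw_lock(enum psr_ver psr, bool replay_enabled,
				     int edp_num)
	{
		if (psr == PSR_SU_1 || replay_enabled)
			return true;
		return psr == PSR_1 && edp_num == 1;	/* new PSR1 case */
	}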
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 15ea216e903d..6157886f4802 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -704,7 +704,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw6.region.base = DMUB_CW6_BASE;
cw6.region.top = cw6.region.base + fw_state_fb->size;
- dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->fw_state = (void *)((uintptr_t)(fw_state_fb->cpu_addr) + DMUB_DEBUG_FW_STATE_OFFSET);
region6.offset.quad_part = shared_state_fb->gpu_addr;
region6.region.base = DMUB_CW6_BASE;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e8ae7681bf0a..77b1f061bbf0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2421,6 +2421,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
*states = ATTR_STATE_SUPPORTED;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5cad09c5f2ff..3f1fcf8c4ee8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1193,16 +1193,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- PPTable_t *pptable = smu->smu_table.driver_pptable;
- const OverDriveLimits_t * const overdrive_upperlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMax;
- const OverDriveLimits_t * const overdrive_lowerlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMin;
-
size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
- size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
- overdrive_lowerlimits->GfxclkFoffset,
- overdrive_upperlimits->GfxclkFoffset);
+ size += sysfs_emit_at(buf, size, "%dMhz\n",
+ od_table->OverDriveTable.GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1337,12 +1330,8 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &min_value,
- NULL);
- smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
- NULL,
+ &min_value,
&max_value);
size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
@@ -1627,6 +1616,39 @@ out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}
+static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANPWM,
+ speed);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get fan speed (PWM)!\n");
+ return ret;
+ }
+
+ /* Convert the PMFW output, which is in percent, to a 0-255 PWM value */
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ if (!speed)
+ return -EINVAL;
+
+ return smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
+}
+
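The percent-to-PWM scaling in the new get_fan_speed_pwm() is easy to sanity-check in isolation: 47% maps to 47 * 255 / 100 = 119 with integer division, 100% maps to exactly 255, and the min() clamps anything the firmware might report above 100%. As a tiny standalone helper:

	#include <stdint.h>

	/* Same scaling as above: PMFW percent (0-100) -> 8-bit PWM (0-255). */
	static uint32_t percent_to_pwm255(uint32_t pct)
	{
		uint32_t pwm = pct * 255 / 100;	/* 47 -> 119, 100 -> 255 */
		return pwm > 255 ? 255 : pwm;	/* clamp bogus readings */
	}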
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -2417,36 +2439,24 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -ENOTSUPP;
}
- for (i = 0; i < size; i += 2) {
- if (i + 2 > size) {
- dev_info(adev->dev, "invalid number of input parameters %d\n", size);
- return -EINVAL;
- }
-
- switch (input[i]) {
- case 1:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMAX,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
+ if (size != 1) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
- default:
- dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
- dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
- return -EINVAL;
- }
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
+ minimum, maximum);
+ return -EINVAL;
}
+ od_table->OverDriveTable.GfxclkFoffset = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
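The rewrite above replaces the old index/value pair parsing for OD_SCLK_OFFSET with a single signed offset that is range-checked against the GFXCLK_FMAX limits. Reduced to its control flow (a sketch, with the SMU plumbing stubbed out):

	#include <errno.h>
	#include <stdint.h>

	/* min/max stand in for smu_v14_0_2_get_od_setting_limits() output. */
	static int set_gfxclk_offset(const long *input, int size,
				     int min, unsigned int max,
				     int32_t *offset_out)
	{
		if (size != 1)			/* one value, not pairs */
			return -EINVAL;
		if (input[0] < min || input[0] > max)
			return -EINVAL;
		*offset_out = input[0];
		return 0;
	}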
@@ -2804,6 +2814,8 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.set_performance_level = smu_v14_0_set_performance_level,
.gfx_off_control = smu_v14_0_gfx_off_control,
.get_unique_id = smu_v14_0_2_get_unique_id,
+ .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
+ .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
.get_power_limit = smu_v14_0_2_get_power_limit,
.set_power_limit = smu_v14_0_2_set_power_limit,
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index d1871af967d4..2355a78e1b69 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
int session_idx = -1;
bool destroyed = false, created = false, allocated = false;
- uint32_t tmp, handle = 0;
+ uint32_t tmp = 0, handle = 0;
uint32_t *size = &tmp;
int i, r = 0;
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 69bcf0e99d57..da00572d7d42 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -259,9 +259,16 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
dma_fence_get(&s_fence->finished);
- if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb))
+ if (!prev ||
+ dma_fence_add_callback(prev, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb)) {
+ /*
+ * Adding callback above failed.
+ * dma_fence_put() checks for NULL.
+ */
+ dma_fence_put(prev);
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ }
prev = &s_fence->finished;
}
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index da08ddb01d21..05608c894ed9 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -226,8 +226,12 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
unsigned long irqflags;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ spin_lock_irqsave(&v3d->job_lock, irqflags);
+ v3d->bin_job = NULL;
+ spin_unlock_irqrestore(&v3d->job_lock, irqflags);
return NULL;
+ }
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
@@ -281,8 +285,10 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->render_job = NULL;
return NULL;
+ }
v3d->render_job = job;
@@ -327,11 +333,17 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->tfu_job = NULL;
+ return NULL;
+ }
+
+ v3d->tfu_job = job;
+
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
return NULL;
- v3d->tfu_job = job;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
@@ -369,6 +381,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
int i, csd_cfg0_reg;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->csd_job = NULL;
+ return NULL;
+ }
+
v3d->csd_job = job;
v3d_invalidate_caches(v3d);
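All four v3d run() callbacks now share the same defensive shape when the scheduler fence already carries an error (e.g. a failed dependency): publish NULL instead of leaving a stale active-job pointer behind, then bail out before touching the hardware. Schematically, with hypothetical field names:

	/* Sketch of the early-exit shape added to the v3d run() hooks;
	 * 'active_job' and 'errored' are illustrative names. */
	static struct dma_fence *job_run(struct v3d_dev *v3d,
					 struct v3d_job *job)
	{
		if (job->errored) {
			v3d->active_job = NULL;	/* no dangling pointer */
			return NULL;		/* skip HW submission */
		}
		v3d->active_job = job;
		/* ... program the hardware ... */
		return NULL;			/* placeholder for HW fence */
	}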
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index d9386ab03140..43bf6f140d40 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -341,7 +341,6 @@ static inline unsigned int xe_sg_segment_size(struct device *dev)
return round_down(max / 2, PAGE_SIZE);
}
-#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/**
* xe_bo_is_mem_type - Whether the bo currently resides in the given
* TTM memory type
@@ -356,4 +355,3 @@ static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
return bo->ttm.resource->mem_type == mem_type;
}
#endif
-#endif
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index c5b95470fa32..f67803e15a0e 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -58,7 +58,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
* 1) Avoid pinning in a placement not accessible to some importers.
* 2) Pinning in VRAM requires PIN accounting which is a to-do.
*/
- if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
+ if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 46cae925b095..1f93e5e276c0 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -361,6 +361,10 @@ static bool host1x_wants_iommu(struct host1x *host1x)
return true;
}
+/*
+ * Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a
+ * valid paging domain.
+ */
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
@@ -385,6 +389,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
* Similarly, if host1x is already attached to an IOMMU (via the DMA
* API), don't try to attach again.
*/
+ if (domain && domain->type == IOMMU_DOMAIN_IDENTITY)
+ domain = NULL;
if (!host1x_wants_iommu(host) || domain)
return domain;
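The two added lines normalize an identity (direct-mapped) default domain to NULL, so the function's stated contract — ERR_PTR on failure, NULL when there is no translation, otherwise a paging domain — also holds on systems that boot with IOMMU passthrough. The normalization in isolation, assuming the standard IOMMU API:

	/* Treat an identity default domain exactly like "no IOMMU". */
	static struct iommu_domain *paging_domain_or_null(struct device *dev)
	{
		struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

		if (domain && domain->type == IOMMU_DOMAIN_IDENTITY)
			domain = NULL;
		return domain;
	}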
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index 143165300949..ef7370d3dbea 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -327,13 +327,11 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
amd_mp2_irq_isr, irq_flag, dev_name(&pci_dev->dev), privdata);
if (rc) {
pci_err(pci_dev, "Failure requesting irq %i: %d\n", privdata->dev_irq, rc);
- goto free_irq_vectors;
+ goto err_dma_mask;
}
return rc;
-free_irq_vectors:
- free_irq(privdata->dev_irq, privdata);
err_dma_mask:
pci_clear_master(pci_dev);
err_pci_enable:
@@ -376,7 +374,6 @@ static void amd_mp2_pci_remove(struct pci_dev *pci_dev)
pm_runtime_forbid(&pci_dev->dev);
pm_runtime_get_noresume(&pci_dev->dev);
- free_irq(privdata->dev_irq, privdata);
pci_clear_master(pci_dev);
amd_mp2_clear_reg(privdata);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7e79da9684ed..185c08eab4ca 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2834,10 +2834,10 @@ struct rep_manu_reply{
u8 sas_format:1;
u8 reserved1:7;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
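__nonstring wraps GCC's nonstring attribute (GCC 8+): it marks fixed-width byte arrays that are intentionally not NUL-terminated, such as these space-padded SAS identification fields, so the compiler's string-truncation warnings do not fire on them. A minimal standalone illustration ("ACME    " is a placeholder vendor string):

	#include <string.h>

	#define __nonstring __attribute__((__nonstring__))

	struct expander_id {
		char vendor_id[8] __nonstring;	/* space-padded, no NUL */
	};

	static void set_vendor(struct expander_id *e)
	{
		/* Copying exactly 8 bytes with no terminator is intended. */
		memcpy(e->vendor_id, "ACME    ", 8);
	}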
diff --git a/drivers/pinctrl/spacemit/Kconfig b/drivers/pinctrl/spacemit/Kconfig
index a2f98b3f8a75..d6f6017fd097 100644
--- a/drivers/pinctrl/spacemit/Kconfig
+++ b/drivers/pinctrl/spacemit/Kconfig
@@ -7,7 +7,7 @@ config PINCTRL_SPACEMIT_K1
bool "SpacemiT K1 SoC Pinctrl driver"
depends on ARCH_SPACEMIT || COMPILE_TEST
depends on OF
- default y
+ default ARCH_SPACEMIT
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 4ddf0efead68..00a7f3617cd8 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1830,12 +1830,49 @@ static const struct file_operations constraint_flags_fops = {
#define REG_STR_SIZE 64
+static void link_and_create_debugfs(struct regulator *regulator, struct regulator_dev *rdev,
+ struct device *dev)
+{
+ int err = 0;
+
+ if (dev) {
+ regulator->dev = dev;
+
+ /* Add a link to the device sysfs entry */
+ err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
+ regulator->supply_name);
+ if (err) {
+ rdev_dbg(rdev, "could not add device link %s: %pe\n",
+ dev->kobj.name, ERR_PTR(err));
+ /* non-fatal */
+ }
+ }
+
+ if (err != -EEXIST) {
+ regulator->debugfs = debugfs_create_dir(regulator->supply_name, rdev->debugfs);
+ if (IS_ERR(regulator->debugfs)) {
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ regulator->debugfs = NULL;
+ }
+ }
+
+ if (regulator->debugfs) {
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
+ debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
+ regulator, &constraint_flags_fops);
+ }
+}
+
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name)
{
struct regulator *regulator;
- int err = 0;
lockdep_assert_held_once(&rdev->mutex.base);
@@ -1868,38 +1905,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
list_add(&regulator->list, &rdev->consumer_list);
- if (dev) {
- regulator->dev = dev;
-
- /* Add a link to the device sysfs entry */
- err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
- supply_name);
- if (err) {
- rdev_dbg(rdev, "could not add device link %s: %pe\n",
- dev->kobj.name, ERR_PTR(err));
- /* non-fatal */
- }
- }
-
- if (err != -EEXIST) {
- regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
- if (IS_ERR(regulator->debugfs)) {
- rdev_dbg(rdev, "Failed to create debugfs directory\n");
- regulator->debugfs = NULL;
- }
- }
-
- if (regulator->debugfs) {
- debugfs_create_u32("uA_load", 0444, regulator->debugfs,
- &regulator->uA_load);
- debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].min_uV);
- debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].max_uV);
- debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
- regulator, &constraint_flags_fops);
- }
-
/*
* Check now if the regulator is an always on regulator - if
* it is then we don't need to do nearly so much work for
@@ -2069,6 +2074,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (have_full_constraints()) {
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
@@ -2086,6 +2095,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
goto out;
}
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
}
@@ -2133,6 +2146,9 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
regulator_unlock_two(rdev, r, &ww_ctx);
+ /* rdev->supply was created in set_supply() */
+ link_and_create_debugfs(rdev->supply, r, &rdev->dev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
@@ -2211,8 +2227,10 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
* enabled, even if it isn't hooked up, and just
* provide a dummy.
*/
- dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
rdev = dummy_regulator_rdev;
+ if (!rdev)
+ return ERR_PTR(-EPROBE_DEFER);
+ dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
get_device(&rdev->dev);
break;
@@ -2271,6 +2289,8 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
return regulator;
}
+ link_and_create_debugfs(regulator, rdev, dev);
+
rdev->open_count++;
if (get_type == EXCLUSIVE_GET) {
rdev->exclusive = 1;
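The NULL checks added around dummy_regulator_rdev exist because the dummy regulator is registered by a platform driver and can legitimately still be unset early in boot (which is also why reg-dummy switches to synchronous probing in the next file); returning -EPROBE_DEFER lets the consumer retry after the dummy has probed. The pattern, sketched with a hypothetical helper:

	#include <errno.h>

	/* A provider that has not registered yet is a deferral, not an
	 * error; 'resolve_dummy' is an illustrative name. */
	static struct regulator_dev *resolve_dummy(struct regulator_dev *dummy,
						   int *err)
	{
		if (!dummy) {
			*err = -EPROBE_DEFER;
			return NULL;
		}
		get_device(&dummy->dev);	/* pin while in use */
		return dummy;
	}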
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 5b9b9e4e762d..9f59889129ab 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -60,7 +60,7 @@ static struct platform_driver dummy_regulator_driver = {
.probe = dummy_regulator_probe,
.driver = {
.name = "reg-dummy",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
},
};
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
index 5925fa7a9a06..9cde7181b0f0 100644
--- a/drivers/regulator/rtq2208-regulator.c
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -27,6 +27,11 @@
#define RTQ2208_REG_LDO1_CFG 0xB1
#define RTQ2208_REG_LDO2_CFG 0xC1
#define RTQ2208_REG_LDO_DVS_CTRL 0xD0
+#define RTQ2208_REG_HIDDEN_BUCKPH 0x55
+#define RTQ2208_REG_HIDDEN_LDOCFG0 0x8F
+#define RTQ2208_REG_HIDDEN_LDOCFG1 0x96
+#define RTQ2208_REG_HIDDEN0 0xFE
+#define RTQ2208_REG_HIDDEN1 0xFF
/* Mask */
#define RTQ2208_BUCK_NR_MTP_SEL_MASK GENMASK(7, 0)
@@ -45,6 +50,11 @@
#define RTQ2208_LDO1_VOSEL_SD_MASK BIT(5)
#define RTQ2208_LDO2_DISCHG_EN_MASK BIT(6)
#define RTQ2208_LDO2_VOSEL_SD_MASK BIT(7)
+#define RTQ2208_MASK_BUCKPH_GROUP1 GENMASK(6, 4)
+#define RTQ2208_MASK_BUCKPH_GROUP2 GENMASK(2, 0)
+#define RTQ2208_MASK_LDO2_OPT0 BIT(7)
+#define RTQ2208_MASK_LDO2_OPT1 BIT(6)
+#define RTQ2208_MASK_LDO1_FIXED BIT(6)
/* Size */
#define RTQ2208_VOUT_MAXNUM 256
@@ -245,11 +255,6 @@ static const unsigned int rtq2208_ldo_volt_table[] = {
3300000,
};
-static struct of_regulator_match rtq2208_ldo_match[] = {
- {.name = "ldo2", },
- {.name = "ldo1", },
-};
-
static unsigned int rtq2208_of_map_mode(unsigned int mode)
{
switch (mode) {
@@ -344,59 +349,6 @@ static irqreturn_t rtq2208_irq_handler(int irqno, void *devid)
return IRQ_HANDLED;
}
-static int rtq2208_of_get_ldo_dvs_ability(struct device *dev)
-{
- struct device_node *np;
- struct of_regulator_match *match;
- struct regulator_desc *desc;
- struct regulator_init_data *init_data;
- u32 fixed_uV;
- int ret, i;
-
- if (!dev->of_node)
- return -ENODEV;
-
- np = of_get_child_by_name(dev->of_node, "regulators");
- if (!np)
- np = dev->of_node;
-
- ret = of_regulator_match(dev, np, rtq2208_ldo_match, ARRAY_SIZE(rtq2208_ldo_match));
-
- of_node_put(np);
-
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(rtq2208_ldo_match); i++) {
- match = rtq2208_ldo_match + i;
- init_data = match->init_data;
- desc = (struct regulator_desc *)match->desc;
-
- if (!init_data || !desc)
- continue;
-
- /* specify working fixed voltage if the propery exists */
- ret = of_property_read_u32(match->of_node, "richtek,fixed-microvolt", &fixed_uV);
-
- if (!ret) {
- if (fixed_uV != init_data->constraints.min_uV ||
- fixed_uV != init_data->constraints.max_uV)
- return -EINVAL;
- desc->n_voltages = 1;
- desc->fixed_uV = fixed_uV;
- desc->fixed_uV = init_data->constraints.min_uV;
- desc->ops = &rtq2208_regulator_ldo_fix_ops;
- } else {
- desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
- desc->volt_table = rtq2208_ldo_volt_table;
- desc->ops = &rtq2208_regulator_ldo_adj_ops;
- }
- }
-
- return 0;
-}
-
-
#define BUCK_INFO(_name, _id) \
{ \
.name = _name, \
@@ -424,9 +376,11 @@ static const struct linear_range rtq2208_vout_range[] = {
REGULATOR_LINEAR_RANGE(1310000, 181, 255, 10000),
};
-static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx)
+static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
struct regulator_desc *desc;
+ unsigned int fixed_uV;
static const struct {
char *name;
int base;
@@ -462,7 +416,8 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->mode_mask = RTQ2208_BUCK_NRMODE_MASK;
- if (idx >= RTQ2208_BUCK_B && idx <= RTQ2208_BUCK_E) {
+ switch (idx) {
+ case RTQ2208_BUCK_B ... RTQ2208_BUCK_E:
/* init buck desc */
desc->ops = &rtq2208_regulator_buck_ops;
desc->vsel_reg = curr_info->base + VSEL_SHIFT(mtp_sel);
@@ -480,7 +435,19 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = BUCK_RG_SHIFT(curr_info->base, 4);
rdesc->suspend_enable_mask = RTQ2208_BUCK_EN_STR_MASK;
rdesc->suspend_mode_mask = RTQ2208_BUCK_STRMODE_MASK;
- } else {
+ break;
+ default:
+ fixed_uV = idx == RTQ2208_LDO2 ? ldo2_fixed : ldo1_fixed;
+ if (fixed_uV) {
+ desc->n_voltages = 1;
+ desc->fixed_uV = fixed_uV;
+ desc->ops = &rtq2208_regulator_ldo_fix_ops;
+ } else {
+ desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
+ desc->volt_table = rtq2208_ldo_volt_table;
+ desc->ops = &rtq2208_regulator_ldo_adj_ops;
+ }
+
/* init ldo desc */
desc->active_discharge_reg = RTQ2208_REG_LDO_DVS_CTRL;
desc->active_discharge_on = curr_info->dis_on;
@@ -490,13 +457,15 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = curr_info->base;
rdesc->suspend_enable_mask = RTQ2208_LDO_EN_STR_MASK;
+ break;
}
}
static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *regulator_idx_table,
- struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev)
+ struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
- int mtp_sel, i, idx, ret;
+ int mtp_sel, i, idx;
/* get mtp_sel0 or mtp_sel1 */
mtp_sel = device_property_read_bool(dev, "richtek,mtp-sel-high");
@@ -508,43 +477,101 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
if (!rdesc[i])
return -ENOMEM;
- rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx);
-
- /* init ldo dvs ability */
- if (idx >= RTQ2208_LDO2)
- rtq2208_ldo_match[idx - RTQ2208_LDO2].desc = &rdesc[i]->desc;
+ rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx, ldo1_fixed, ldo2_fixed);
}
- /* init ldo fixed_uV */
- ret = rtq2208_of_get_ldo_dvs_ability(dev);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get ldo fixed_uV\n");
-
return 0;
}
-/** different slave address corresponds different used bucks
- * slave address 0x10: BUCK[BCA FGE]
- * slave address 0x20: BUCK[BC FGHE]
- * slave address 0x40: BUCK[C G]
- */
-static int rtq2208_regulator_check(int slave_addr, int *num,
- int *regulator_idx_table, unsigned int *buck_masks)
+static int rtq2208_regulator_check(struct device *dev, int *num, int *regulator_idx_table,
+ unsigned int *buck_masks, unsigned int *ldo1_fixed_uV,
+ unsigned int *ldo2_fixed_uV)
{
- static bool rtq2208_used_table[3][RTQ2208_LDO_MAX] = {
- /* BUCK[BCA FGE], LDO[12] */
- {1, 1, 0, 1, 1, 1, 0, 1, 1, 1},
- /* BUCK[BC FGHE], LDO[12]*/
- {1, 1, 0, 0, 1, 1, 1, 1, 1, 1},
- /* BUCK[C G], LDO[12] */
- {0, 1, 0, 0, 0, 1, 0, 0, 1, 1},
- };
- int i, idx = ffs(slave_addr >> 4) - 1;
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+ bool rtq2208_used_table[RTQ2208_LDO_MAX] = {0};
+ u8 entry_key[] = { 0x69, 0x01 };
+ unsigned int buck_phase, ldo_cfg0, ldo_cfg1;
+ int i, ret;
u8 mask;
+ ret = regmap_raw_write(regmap, RTQ2208_REG_HIDDEN0, entry_key, ARRAY_SIZE(entry_key));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enter hidden page\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_BUCKPH, &buck_phase);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read buck phase configuration\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG0, &ldo_cfg0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg0\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG1, &ldo_cfg1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg1\n");
+
+ ret = regmap_write(regmap, RTQ2208_REG_HIDDEN1, 0x00);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to exit hidden page\n");
+
+ dev_info(dev, "BUCK Phase 0x%x\n", buck_phase);
+ /*
+ * Use buck phase configuration to assign used table mask
+ * GROUP1 GROUP2
+ * 0 -> 2P + 2P BC FG
+ * 1 -> 2P + 1P + 1P BCA FGE
+ * 2 -> 1P + 1P + 1P + 1P BCDA FGHE
+ * 3 -> 3P + 1P BC FG
+ * others -> 4P C G
+ */
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP1, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_D] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_A] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_B] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_C] = true;
+ break;
+ }
+
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP2, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_F] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_E] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_H] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_G] = true;
+ break;
+ }
+
+ *ldo1_fixed_uV = FIELD_GET(RTQ2208_MASK_LDO1_FIXED, ldo_cfg1) ? 1200000 : 0;
+
+ if (!FIELD_GET(RTQ2208_MASK_LDO2_OPT0, ldo_cfg0) &&
+ !FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 0;
+ else if (FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 900000;
+ else
+ *ldo2_fixed_uV = 1200000;
+
+ /* By default, LDO1 & LDO2 are always used */
+ rtq2208_used_table[RTQ2208_LDO1] = rtq2208_used_table[RTQ2208_LDO2] = true;
+
for (i = 0; i < RTQ2208_LDO_MAX; i++) {
- if (!rtq2208_used_table[idx][i])
+ if (!rtq2208_used_table[i])
continue;
regulator_idx_table[(*num)++] = i;
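The fallthrough cascades above exploit the fact that each smaller phase-group setting enables a superset of the bucks enabled by the larger groupings, so every case accumulates into the used table before falling into the next. A minimal user-space sketch of the group-1 decode, with a hypothetical mask standing in for the driver's RTQ2208_MASK_BUCKPH_GROUP1 field:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field mask; the driver uses FIELD_GET() with the real
 * RTQ2208_MASK_BUCKPH_GROUP1 from the datasheet. */
#define GROUP1_MASK	0x07

enum { BUCK_A, BUCK_B, BUCK_C, BUCK_D, BUCK_MAX };

static void decode_group1(uint8_t buck_phase, bool used[BUCK_MAX])
{
	switch (buck_phase & GROUP1_MASK) {
	case 2:				/* 1P + 1P + 1P + 1P */
		used[BUCK_D] = true;
		/* fall through */
	case 1:				/* 2P + 1P + 1P */
		used[BUCK_A] = true;
		/* fall through */
	case 0:				/* 2P + 2P */
	case 3:				/* 3P + 1P */
		used[BUCK_B] = true;
		/* fall through */
	default:			/* 4P: only one buck in the group */
		used[BUCK_C] = true;
		break;
	}
}

int main(void)
{
	bool used[BUCK_MAX] = { false };
	int i;

	decode_group1(2, used);
	for (i = 0; i < BUCK_MAX; i++)
		printf("buck %c: %s\n", 'A' + i, used[i] ? "used" : "unused");
	return 0;
}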
@@ -559,7 +586,7 @@ static int rtq2208_regulator_check(int slave_addr, int *num,
static const struct regmap_config rtq2208_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0xEF,
+ .max_register = 0xFF,
};
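The max_register bump to 0xFF goes with the new hidden-page accesses, presumably because those registers sit above the old 0xEF ceiling and regmap would otherwise reject them. A hedged kernel-context sketch of the bracketed access pattern, reusing the unlock sequence from the hunk above (RTQ2208_REG_HIDDEN0/1 and the 0x69 0x01 entry key are the driver's own; error handling is abbreviated):

/* Read one register from a vendor "hidden" page, always restoring the
 * lock afterwards. */
static int rtq2208_read_hidden(struct regmap *regmap, unsigned int reg,
			       unsigned int *val)
{
	u8 entry_key[] = { 0x69, 0x01 };
	int ret, ret2;

	ret = regmap_raw_write(regmap, RTQ2208_REG_HIDDEN0, entry_key,
			       ARRAY_SIZE(entry_key));
	if (ret)
		return ret;

	ret = regmap_read(regmap, reg, val);

	/* Re-lock even if the read failed; report the first error. */
	ret2 = regmap_write(regmap, RTQ2208_REG_HIDDEN1, 0x00);
	return ret ?: ret2;
}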
static int rtq2208_probe(struct i2c_client *i2c)
@@ -573,6 +600,7 @@ static int rtq2208_probe(struct i2c_client *i2c)
int i, ret = 0, idx, n_regulator = 0;
unsigned int regulator_idx_table[RTQ2208_LDO_MAX],
buck_masks[RTQ2208_BUCK_NUM_IRQ_REGS] = {0x33, 0x33, 0x33, 0x33, 0x33};
+ unsigned int ldo1_fixed_uV, ldo2_fixed_uV;
rdev_map = devm_kzalloc(dev, sizeof(struct rtq2208_rdev_map), GFP_KERNEL);
if (!rdev_map)
@@ -583,7 +611,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
return dev_err_probe(dev, PTR_ERR(regmap), "Failed to allocate regmap\n");
/* get needed regulator */
- ret = rtq2208_regulator_check(i2c->addr, &n_regulator, regulator_idx_table, buck_masks);
+ ret = rtq2208_regulator_check(dev, &n_regulator, regulator_idx_table, buck_masks,
+ &ldo1_fixed_uV, &ldo2_fixed_uV);
if (ret)
return dev_err_probe(dev, ret, "Failed to check used regulators\n");
@@ -593,7 +622,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
cfg.dev = dev;
/* init regulator desc */
- ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev);
+ ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev,
+ ldo1_fixed_uV, ldo2_fixed_uV);
if (ret)
return ret;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 0ba9e6a6a13c..c8d6ced5640e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -105,10 +105,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 587f7d248219..d123d3b740e1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -606,7 +606,7 @@ typedef struct _MPI2_CONFIG_REPLY {
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
- U8 ChipName[16]; /*0x04 */
+ U8 ChipName[16] __nonstring; /*0x04 */
U8 ChipRevision[8]; /*0x14 */
U8 BoardName[16]; /*0x1C */
U8 BoardAssembly[16]; /*0x2C */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d84413b77d84..dc74ebc6405a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -328,10 +328,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4f63aff333db..3a2bd953a976 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -282,8 +282,8 @@ struct register_host_info {
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
struct config_info_data {
- uint8_t model_num[16];
- uint8_t model_description[80];
+ uint8_t model_num[16] __nonstring;
+ uint8_t model_description[80] __nonstring;
uint8_t reserved0[160];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
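All three drivers get the same treatment: these fixed-width identification fields carry space-padded bytes with no terminating NUL, and __nonstring (the kernel's wrapper around GCC's nonstring variable attribute) tells the compiler not to apply string-termination diagnostics to them. A minimal sketch of the idea:

#include <string.h>

/* Stand-in for the kernel's definition in
 * include/linux/compiler_attributes.h. */
#define __nonstring __attribute__((__nonstring__))

struct rep_manu_example {
	char vendor_id[8] __nonstring;	/* space-padded, never NUL-terminated */
};

static inline void fill_vendor(struct rep_manu_example *r)
{
	/* Raw byte copy; str*() calls on this field would be a bug, and
	 * the attribute lets the compiler flag them. */
	memcpy(r->vendor_id, "ACME    ", sizeof(r->vendor_id));
}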
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a7a4647717d4..ff07c87dbadc 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2954,9 +2954,13 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct device *child;
+ int ret;
child = device_find_any_child(&ctlr->dev);
- return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ put_device(child);
+
+ return ret;
}
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
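The spi fix exists because device_find_any_child() returns its result with a reference held; the old code leaked that reference on every read of the sysfs attribute. A small user-space analog of the capture-then-put shape:

#include <stdio.h>

struct obj {
	int refcnt;
};

/* Analog of device_find_any_child(): the returned pointer carries a
 * reference the caller must drop. */
static struct obj *find_any_child(struct obj *child)
{
	if (child)
		child->refcnt++;
	return child;
}

static void put_obj(struct obj *child)
{
	if (child)
		child->refcnt--;
}

static int show(struct obj *maybe_child, char *buf, size_t len)
{
	struct obj *child = find_any_child(maybe_child);
	int ret;

	/* Capture the result first so the reference is dropped on every
	 * return path, mirroring the slave_show() fix above. */
	ret = snprintf(buf, len, "%s\n", child ? "bound" : "(null)");
	put_obj(child);
	return ret;
}

int main(void)
{
	struct obj child = { .refcnt = 1 };
	char buf[16];

	show(&child, buf, sizeof(buf));
	printf("refcnt back to %d\n", child.refcnt);
	return 0;
}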
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index 4aa2797f5e3c..8b85524beb59 100644
--- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -322,7 +322,8 @@ struct ipu3_uapi_ae_config {
* 0: positive, 1: negative, default 0.
* @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be
* used for building histogram. Range [0, 32], default 8.
- * Rule:
+ *
+ * Rule:
* y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32
* A single Y is calculated based on sum of Gr/R/B/Gb based on
* their contribution ratio.
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 49559605177e..c321d442f0da 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -266,24 +266,12 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
if (ret)
goto err_free;
- /*
- * We can't use anon_inode_getfd() because we need to modify
- * the f_mode flags directly to allow more than just ioctls
- */
- filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
- df, O_RDWR);
+ filep = anon_inode_getfile_fmode("[vfio-device]", &vfio_device_fops,
+ df, O_RDWR, FMODE_PREAD | FMODE_PWRITE);
if (IS_ERR(filep)) {
ret = PTR_ERR(filep);
goto err_close_device;
}
-
- /*
- * TODO: add an anon_inode interface to do this.
- * Appears to be missing by lack of need rather than
- * explicitly prevented. Now there's need.
- */
- filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
-
/*
* Use the pseudo fs inode on the device to link all mmaps
* to the same address space, allowing us to unmap all vmas
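anon_inode_getfile_fmode() renders both deleted comments moot: it applies the extra f_mode bits while the file is being created instead of poking file->f_mode after the fact. A kernel-context sketch of the call shape:

static struct file *example_open_file(const struct file_operations *fops,
				      void *priv)
{
	/* Same arguments as anon_inode_getfile() plus the fmode_t bits
	 * to fold into file->f_mode at creation. */
	return anon_inode_getfile_fmode("[example]", fops, priv, O_RDWR,
					FMODE_PREAD | FMODE_PWRITE);
}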
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 3e68521f4e2f..399d455d50d6 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -669,8 +669,8 @@ v9fs_vfs_create(struct mnt_idmap *idmap, struct inode *dir,
*
*/
-static int v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int err;
u32 perm;
@@ -692,8 +692,7 @@ static int v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
if (fid)
p9_fid_put(fid);
-
- return err;
+ return ERR_PTR(err);
}
/**
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 143ac03b7425..cc2007be2173 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -350,9 +350,9 @@ out:
*
*/
-static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
- umode_t omode)
+static struct dentry *v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t omode)
{
int err;
struct v9fs_session_info *v9ses;
@@ -417,7 +417,7 @@ error:
p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
p9_fid_put(dfid);
- return err;
+ return ERR_PTR(err);
}
static int
diff --git a/fs/Kconfig b/fs/Kconfig
index 64d420e3c475..afe21866d6b4 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -336,7 +336,6 @@ source "fs/qnx4/Kconfig"
source "fs/qnx6/Kconfig"
source "fs/romfs/Kconfig"
source "fs/pstore/Kconfig"
-source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/erofs/Kconfig"
source "fs/vboxsf/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index 15df0a923d3a..77fd7f7b5d02 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -87,7 +87,6 @@ obj-$(CONFIG_NFSD) += nfsd/
obj-$(CONFIG_LOCKD) += lockd/
obj-$(CONFIG_NLS) += nls/
obj-y += unicode/
-obj-$(CONFIG_SYSV_FS) += sysv/
obj-$(CONFIG_SMBFS) += smb/
obj-$(CONFIG_HPFS_FS) += hpfs/
obj-$(CONFIG_NTFS3_FS) += ntfs3/
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e8c2c4535cb3..ac4e9a02910b 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -168,7 +168,7 @@ extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, unsi
extern int affs_unlink(struct inode *dir, struct dentry *dentry);
extern int affs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool);
-extern int affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+extern struct dentry *affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode);
extern int affs_rmdir(struct inode *dir, struct dentry *dentry);
extern int affs_link(struct dentry *olddentry, struct inode *dir,
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 8c154490a2d6..f883be50db12 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -273,7 +273,7 @@ affs_create(struct mnt_idmap *idmap, struct inode *dir,
return 0;
}
-int
+struct dentry *
affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
@@ -285,7 +285,7 @@ affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
inode = affs_new_inode(dir);
if (!inode)
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
inode->i_mode = S_IFDIR | mode;
affs_mode_to_prot(inode);
@@ -298,9 +298,9 @@ affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
clear_nlink(inode);
mark_inode_dirty(inode);
iput(inode);
- return error;
+ return ERR_PTR(error);
}
- return 0;
+ return NULL;
}
int
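The 9p and affs hunks above follow a tree-wide conversion of the ->mkdir() inode operation from returning int to returning struct dentry * (the afs conversion appears further down). Under the new contract, NULL means success with the passed-in dentry, ERR_PTR(err) reports failure, and returning a different dentry hands a spliced alias back to the VFS. A hedged sketch, with do_create_dir() as a hypothetical stand-in for the filesystem's real work:

static struct dentry *example_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				    struct dentry *dentry, umode_t mode)
{
	int err = do_create_dir(dir, dentry, mode);	/* hypothetical */

	/* NULL: success, keep @dentry; ERR_PTR: propagate the error. */
	return err ? ERR_PTR(err) : NULL;
}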
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index 6d42f85c6be5..e941da5b6dd9 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -362,3 +362,53 @@ int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *alist,
alist->nr_addrs++;
return 0;
}
+
+/*
+ * Set the app data on the rxrpc peers an address list points to
+ */
+void afs_set_peer_appdata(struct afs_server *server,
+ struct afs_addr_list *old_alist,
+ struct afs_addr_list *new_alist)
+{
+ unsigned long data = (unsigned long)server;
+ int n = 0, o = 0;
+
+ if (!old_alist) {
+ /* New server. Just set all. */
+ for (; n < new_alist->nr_addrs; n++)
+ rxrpc_kernel_set_peer_data(new_alist->addrs[n].peer, data);
+ return;
+ }
+ if (!new_alist) {
+ /* Dead server. Just remove all. */
+ for (; o < old_alist->nr_addrs; o++)
+ rxrpc_kernel_set_peer_data(old_alist->addrs[o].peer, 0);
+ return;
+ }
+
+ /* Walk through the two lists simultaneously, setting new peers and
+ * clearing old ones. The two lists are ordered by pointer to peer
+ * record.
+ */
+ while (n < new_alist->nr_addrs && o < old_alist->nr_addrs) {
+ struct rxrpc_peer *pn = new_alist->addrs[n].peer;
+ struct rxrpc_peer *po = old_alist->addrs[o].peer;
+
+ if (pn == po) {
+ n++;
+ o++;
+ continue;
+ }
+ if (pn < po) {
+ rxrpc_kernel_set_peer_data(pn, data);
+ n++;
+ } else {
+ rxrpc_kernel_set_peer_data(po, 0);
+ o++;
+ }
+ }
+
+ if (n < new_alist->nr_addrs)
+ for (; n < new_alist->nr_addrs; n++)
+ rxrpc_kernel_set_peer_data(new_alist->addrs[n].peer, data);
+ if (o < old_alist->nr_addrs)
+ for (; o < old_alist->nr_addrs; o++)
+ rxrpc_kernel_set_peer_data(old_alist->addrs[o].peer, 0);
+}
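afs_set_peer_appdata() is a classic two-pointer merge over lists sorted by peer pointer; note that the equal case must advance both indices (as fixed above) or the loop never terminates. The same walk in a standalone form:

#include <stdio.h>

/* Walk two ascending arrays, acting on entries present only in the new
 * list ("set") or only in the old list ("clear"). */
static void merge_walk(const int *new, int nn, const int *old, int no)
{
	int n = 0, o = 0;

	while (n < nn && o < no) {
		if (new[n] == old[o]) {
			n++;		/* advance both on a match... */
			o++;		/* ...or the loop never ends */
		} else if (new[n] < old[o]) {
			printf("set %d\n", new[n++]);
		} else {
			printf("clear %d\n", old[o++]);
		}
	}
	while (n < nn)
		printf("set %d\n", new[n++]);
	while (o < no)
		printf("clear %d\n", old[o++]);
}

int main(void)
{
	const int newl[] = { 1, 3, 4 }, oldl[] = { 2, 3 };

	merge_walk(newl, 3, oldl, 2);	/* set 1, clear 2, set 4 */
	return 0;
}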
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 96a6781f3653..0168bbf53fe0 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -20,8 +20,9 @@ static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static atomic_t cell_debug_id;
-static void afs_queue_cell_manager(struct afs_net *);
-static void afs_manage_cell_work(struct work_struct *);
+static void afs_cell_timer(struct timer_list *timer);
+static void afs_destroy_cell_work(struct work_struct *work);
+static void afs_manage_cell_work(struct work_struct *work);
static void afs_dec_cells_outstanding(struct afs_net *net)
{
@@ -29,19 +30,11 @@ static void afs_dec_cells_outstanding(struct afs_net *net)
wake_up_var(&net->cells_outstanding);
}
-/*
- * Set the cell timer to fire after a given delay, assuming it's not already
- * set for an earlier time.
- */
-static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
+static void afs_set_cell_state(struct afs_cell *cell, enum afs_cell_state state)
{
- if (net->live) {
- atomic_inc(&net->cells_outstanding);
- if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
- afs_dec_cells_outstanding(net);
- } else {
- afs_queue_cell_manager(net);
- }
+ smp_store_release(&cell->state, state); /* Commit cell changes before state */
+ smp_wmb(); /* Set cell state before task state */
+ wake_up_var(&cell->state);
}
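The release store in afs_set_cell_state() pairs with the smp_load_acquire() in afs_lookup_cell()'s wait loop, guaranteeing that cell->error and other fields written before the transition are visible to anyone who observes the new state. A user-space C11 analog of that pairing:

#include <stdatomic.h>
#include <stdbool.h>

static int cell_error;			/* plain data, published below */
static _Atomic int cell_state;		/* 0 = setting up, 1 = dead */

static void publisher(void)
{
	cell_error = -5;		/* commit cell changes... */
	atomic_store_explicit(&cell_state, 1,
			      memory_order_release);	/* ...before state */
}

static bool consumer(void)
{
	if (atomic_load_explicit(&cell_state, memory_order_acquire) != 1)
		return false;		/* not yet published */
	return cell_error != 0;		/* ordered after the state load */
}

int main(void)
{
	publisher();
	return consumer() ? 0 : 1;
}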
/*
@@ -116,7 +109,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
const char *name, unsigned int namelen,
const char *addresses)
{
- struct afs_vlserver_list *vllist;
+ struct afs_vlserver_list *vllist = NULL;
struct afs_cell *cell;
int i, ret;
@@ -163,13 +156,15 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
cell->net = net;
refcount_set(&cell->ref, 1);
atomic_set(&cell->active, 0);
+ INIT_WORK(&cell->destroyer, afs_destroy_cell_work);
INIT_WORK(&cell->manager, afs_manage_cell_work);
+ timer_setup(&cell->management_timer, afs_cell_timer, 0);
init_rwsem(&cell->vs_lock);
cell->volumes = RB_ROOT;
INIT_HLIST_HEAD(&cell->proc_volumes);
seqlock_init(&cell->volume_lock);
cell->fs_servers = RB_ROOT;
- seqlock_init(&cell->fs_lock);
+ init_rwsem(&cell->fs_lock);
rwlock_init(&cell->vl_servers_lock);
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
@@ -204,7 +199,13 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
cell->dns_status = vllist->status;
smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
atomic_inc(&net->cells_outstanding);
+ ret = idr_alloc_cyclic(&net->cells_dyn_ino, cell,
+ 2, INT_MAX / 2, GFP_KERNEL);
+ if (ret < 0)
+ goto error;
+ cell->dynroot_ino = ret;
cell->debug_id = atomic_inc_return(&cell_debug_id);
+
trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);
_leave(" = %p", cell);
@@ -214,6 +215,7 @@ parse_failed:
if (ret == -EINVAL)
printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
+ afs_put_vlserverlist(cell->net, vllist);
kfree(cell->name - 1);
kfree(cell);
_leave(" = %d", ret);
@@ -227,6 +229,7 @@ error:
* @namesz: The strlen of the cell name.
* @vllist: A colon/comma separated list of numeric IP addresses or NULL.
* @excl: T if an error should be given if the cell name already exists.
+ * @trace: The reason to be logged if the lookup is successful.
*
* Look up a cell record by name and query the DNS for VL server addresses if
* needed. Note that the actual DNS query is punted off to the manager thread
@@ -235,7 +238,8 @@ error:
*/
struct afs_cell *afs_lookup_cell(struct afs_net *net,
const char *name, unsigned int namesz,
- const char *vllist, bool excl)
+ const char *vllist, bool excl,
+ enum afs_cell_trace trace)
{
struct afs_cell *cell, *candidate, *cursor;
struct rb_node *parent, **pp;
@@ -245,7 +249,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
_enter("%s,%s", name, vllist);
if (!excl) {
- cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
+ cell = afs_find_cell(net, name, namesz, trace);
if (!IS_ERR(cell))
goto wait_for_cell;
}
@@ -288,26 +292,28 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
cell = candidate;
candidate = NULL;
- atomic_set(&cell->active, 2);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert);
+ afs_use_cell(cell, trace);
rb_link_node_rcu(&cell->net_node, parent, pp);
rb_insert_color(&cell->net_node, &net->cells);
up_write(&net->cells_lock);
- afs_queue_cell(cell, afs_cell_trace_get_queue_new);
+ afs_queue_cell(cell, afs_cell_trace_queue_new);
wait_for_cell:
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active),
- afs_cell_trace_wait);
_debug("wait_for_cell");
- wait_var_event(&cell->state,
- ({
- state = smp_load_acquire(&cell->state); /* vs error */
- state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
- }));
+ state = smp_load_acquire(&cell->state); /* vs error */
+ if (state != AFS_CELL_ACTIVE &&
+ state != AFS_CELL_DEAD) {
+ afs_see_cell(cell, afs_cell_trace_wait);
+ wait_var_event(&cell->state,
+ ({
+ state = smp_load_acquire(&cell->state); /* vs error */
+ state == AFS_CELL_ACTIVE || state == AFS_CELL_DEAD;
+ }));
+ }
/* Check the state obtained from the wait check. */
- if (state == AFS_CELL_REMOVED) {
+ if (state == AFS_CELL_DEAD) {
ret = cell->error;
goto error;
}
@@ -321,7 +327,7 @@ cell_already_exists:
if (excl) {
ret = -EEXIST;
} else {
- afs_use_cell(cursor, afs_cell_trace_use_lookup);
+ afs_use_cell(cursor, trace);
ret = 0;
}
up_write(&net->cells_lock);
@@ -331,7 +337,7 @@ cell_already_exists:
goto wait_for_cell;
goto error_noput;
error:
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_error);
error_noput:
_leave(" = %d [error]", ret);
return ERR_PTR(ret);
@@ -376,8 +382,9 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
if (cp && cp < rootcell + len)
return -EINVAL;
- /* allocate a cell record for the root cell */
- new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
+ /* allocate a cell record for the root/workstation cell */
+ new_root = afs_lookup_cell(net, rootcell, len, vllist, false,
+ afs_cell_trace_use_lookup_ws);
if (IS_ERR(new_root)) {
_leave(" = %ld", PTR_ERR(new_root));
return PTR_ERR(new_root);
@@ -388,12 +395,11 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
/* install the new cell */
down_write(&net->cells_lock);
- afs_see_cell(new_root, afs_cell_trace_see_ws);
old_root = rcu_replace_pointer(net->ws_cell, new_root,
lockdep_is_held(&net->cells_lock));
up_write(&net->cells_lock);
- afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
+ afs_unuse_cell(old_root, afs_cell_trace_unuse_ws);
_leave(" = 0");
return 0;
}
@@ -511,8 +517,9 @@ static void afs_cell_destroy(struct rcu_head *rcu)
trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);
afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
- afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
+ afs_unuse_cell(cell->alias_of, afs_cell_trace_unuse_alias);
key_put(cell->anonymous_key);
+ idr_remove(&net->cells_dyn_ino, cell->dynroot_ino);
kfree(cell->name - 1);
kfree(cell);
@@ -520,30 +527,14 @@ static void afs_cell_destroy(struct rcu_head *rcu)
_leave(" [destroyed]");
}
-/*
- * Queue the cell manager.
- */
-static void afs_queue_cell_manager(struct afs_net *net)
-{
- int outstanding = atomic_inc_return(&net->cells_outstanding);
-
- _enter("%d", outstanding);
-
- if (!queue_work(afs_wq, &net->cells_manager))
- afs_dec_cells_outstanding(net);
-}
-
-/*
- * Cell management timer. We have an increment on cells_outstanding that we
- * need to pass along to the work item.
- */
-void afs_cells_timer(struct timer_list *timer)
+static void afs_destroy_cell_work(struct work_struct *work)
{
- struct afs_net *net = container_of(timer, struct afs_net, cells_timer);
+ struct afs_cell *cell = container_of(work, struct afs_cell, destroyer);
- _enter("");
- if (!queue_work(afs_wq, &net->cells_manager))
- afs_dec_cells_outstanding(net);
+ afs_see_cell(cell, afs_cell_trace_destroy);
+ timer_delete_sync(&cell->management_timer);
+ cancel_work_sync(&cell->manager);
+ call_rcu(&cell->rcu, afs_cell_destroy);
}
/*
@@ -575,7 +566,7 @@ void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
if (zero) {
a = atomic_read(&cell->active);
WARN(a != 0, "Cell active count %u > 0\n", a);
- call_rcu(&cell->rcu, afs_cell_destroy);
+ WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
}
}
@@ -587,10 +578,9 @@ struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
int r, a;
- r = refcount_read(&cell->ref);
- WARN_ON(r == 0);
+ __refcount_inc(&cell->ref, &r);
a = atomic_inc_return(&cell->active);
- trace_afs_cell(cell->debug_id, r, a, reason);
+ trace_afs_cell(cell->debug_id, r + 1, a, reason);
return cell;
}
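The trace arithmetic here relies on __refcount_inc() and __refcount_dec_and_test() reporting the counter value from before the operation through their int pointer, hence logging r + 1 on get and r - 1 on put. A user-space analog of that convention:

#include <stdio.h>

/* Like __refcount_inc()/__refcount_dec_and_test(): *oldp receives the
 * value the counter held before the operation. */
static void ref_inc(int *ref, int *oldp)
{
	*oldp = (*ref)++;
}

static int ref_dec_and_test(int *ref, int *oldp)
{
	*oldp = (*ref)--;
	return *ref == 0;
}

int main(void)
{
	int ref = 1, r;

	ref_inc(&ref, &r);
	printf("after get: trace %d, actual %d\n", r + 1, ref);
	if (!ref_dec_and_test(&ref, &r))
		printf("after put: trace %d, actual %d\n", r - 1, ref);
	return 0;
}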
@@ -598,10 +588,11 @@ struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
* Record a cell becoming less active. When the active counter reaches 1, it
* is scheduled for destruction, but may get reactivated.
*/
-void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
+void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
unsigned int debug_id;
time64_t now, expire_delay;
+ bool zero;
int r, a;
if (!cell)
@@ -616,13 +607,15 @@ void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_tr
expire_delay = afs_cell_gc_delay;
debug_id = cell->debug_id;
- r = refcount_read(&cell->ref);
a = atomic_dec_return(&cell->active);
- trace_afs_cell(debug_id, r, a, reason);
- WARN_ON(a == 0);
- if (a == 1)
+ if (!a)
/* 'cell' may now be garbage collected. */
- afs_set_cell_timer(net, expire_delay);
+ afs_set_cell_timer(cell, expire_delay);
+
+ zero = __refcount_dec_and_test(&cell->ref, &r);
+ trace_afs_cell(debug_id, r - 1, a, reason);
+ if (zero)
+ WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
/*
@@ -642,9 +635,27 @@ void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
*/
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- afs_get_cell(cell, reason);
- if (!queue_work(afs_wq, &cell->manager))
- afs_put_cell(cell, afs_cell_trace_put_queue_fail);
+ queue_work(afs_wq, &cell->manager);
+}
+
+/*
+ * Cell-specific management timer.
+ */
+static void afs_cell_timer(struct timer_list *timer)
+{
+ struct afs_cell *cell = container_of(timer, struct afs_cell, management_timer);
+
+ afs_see_cell(cell, afs_cell_trace_see_mgmt_timer);
+ if (refcount_read(&cell->ref) > 0 && cell->net->live)
+ queue_work(afs_wq, &cell->manager);
+}
+
+/*
+ * Set/reduce the cell timer.
+ */
+void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs)
+{
+ timer_reduce(&cell->management_timer, jiffies + delay_secs * HZ);
}
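Moving from one per-namespace timer to a per-cell timer works because timer_reduce() only ever pulls the expiry earlier, so unsynchronised callers such as afs_unuse_cell() and afs_manage_cell() converge on the soonest deadline. A usage sketch:

/* Kernel-context sketch: competing delay requests on one cell timer. */
afs_set_cell_timer(cell, 60);	/* arm for ~60s */
afs_set_cell_timer(cell, 10);	/* expiry pulled in to ~10s */
afs_set_cell_timer(cell, 300);	/* no-op: 10s is already sooner */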
/*
@@ -706,7 +717,6 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
if (cell->proc_link.next)
cell->proc_link.next->pprev = &cell->proc_link.next;
- afs_dynroot_mkdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
return 0;
}
@@ -723,217 +733,130 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
mutex_lock(&net->proc_cells_lock);
if (!hlist_unhashed(&cell->proc_link))
hlist_del_rcu(&cell->proc_link);
- afs_dynroot_rmdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
_leave("");
}
+static bool afs_has_cell_expired(struct afs_cell *cell, time64_t *_next_manage)
+{
+ const struct afs_vlserver_list *vllist;
+ time64_t expire_at = cell->last_inactive;
+ time64_t now = ktime_get_real_seconds();
+
+ if (atomic_read(&cell->active))
+ return false;
+ if (!cell->net->live)
+ return true;
+
+ vllist = rcu_dereference_protected(cell->vl_servers, true);
+ if (vllist && vllist->nr_servers > 0)
+ expire_at += afs_cell_gc_delay;
+
+ if (expire_at <= now)
+ return true;
+ if (expire_at < *_next_manage)
+ *_next_manage = expire_at;
+ return false;
+}
+
/*
* Manage a cell record, initialising and destroying it, maintaining its DNS
* records.
*/
-static void afs_manage_cell(struct afs_cell *cell)
+static bool afs_manage_cell(struct afs_cell *cell)
{
struct afs_net *net = cell->net;
- int ret, active;
+ time64_t next_manage = TIME64_MAX;
+ int ret;
_enter("%s", cell->name);
-again:
_debug("state %u", cell->state);
switch (cell->state) {
- case AFS_CELL_INACTIVE:
- case AFS_CELL_FAILED:
- down_write(&net->cells_lock);
- active = 1;
- if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
- rb_erase(&cell->net_node, &net->cells);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0,
- afs_cell_trace_unuse_delete);
- smp_store_release(&cell->state, AFS_CELL_REMOVED);
- }
- up_write(&net->cells_lock);
- if (cell->state == AFS_CELL_REMOVED) {
- wake_up_var(&cell->state);
- goto final_destruction;
- }
- if (cell->state == AFS_CELL_FAILED)
- goto done;
- smp_store_release(&cell->state, AFS_CELL_UNSET);
- wake_up_var(&cell->state);
- goto again;
-
- case AFS_CELL_UNSET:
- smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
- wake_up_var(&cell->state);
- goto again;
-
- case AFS_CELL_ACTIVATING:
- ret = afs_activate_cell(net, cell);
- if (ret < 0)
- goto activation_failed;
+ case AFS_CELL_SETTING_UP:
+ goto set_up_cell;
+ case AFS_CELL_ACTIVE:
+ goto cell_is_active;
+ case AFS_CELL_REMOVING:
+ WARN_ON_ONCE(1);
+ return false;
+ case AFS_CELL_DEAD:
+ return false;
+ default:
+ _debug("bad state %u", cell->state);
+ WARN_ON_ONCE(1); /* Unhandled state */
+ return false;
+ }
- smp_store_release(&cell->state, AFS_CELL_ACTIVE);
- wake_up_var(&cell->state);
- goto again;
+set_up_cell:
+ ret = afs_activate_cell(net, cell);
+ if (ret < 0) {
+ cell->error = ret;
+ goto remove_cell;
+ }
- case AFS_CELL_ACTIVE:
- if (atomic_read(&cell->active) > 1) {
- if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
- ret = afs_update_cell(cell);
- if (ret < 0)
- cell->error = ret;
- }
- goto done;
- }
- smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
- wake_up_var(&cell->state);
- goto again;
+ afs_set_cell_state(cell, AFS_CELL_ACTIVE);
- case AFS_CELL_DEACTIVATING:
- if (atomic_read(&cell->active) > 1)
- goto reverse_deactivation;
- afs_deactivate_cell(net, cell);
- smp_store_release(&cell->state, AFS_CELL_INACTIVE);
- wake_up_var(&cell->state);
- goto again;
+cell_is_active:
+ if (afs_has_cell_expired(cell, &next_manage))
+ goto remove_cell;
- case AFS_CELL_REMOVED:
- goto done;
+ if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
+ ret = afs_update_cell(cell);
+ if (ret < 0)
+ cell->error = ret;
+ }
- default:
- break;
+ if (next_manage < TIME64_MAX && cell->net->live) {
+ time64_t now = ktime_get_real_seconds();
+
+ if (next_manage - now <= 0)
+ afs_queue_cell(cell, afs_cell_trace_queue_again);
+ else
+ afs_set_cell_timer(cell, next_manage - now);
}
- _debug("bad state %u", cell->state);
- BUG(); /* Unhandled state */
+ _leave(" [done %u]", cell->state);
+ return false;
-activation_failed:
- cell->error = ret;
- afs_deactivate_cell(net, cell);
+remove_cell:
+ down_write(&net->cells_lock);
- smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
- wake_up_var(&cell->state);
- goto again;
+ if (atomic_read(&cell->active)) {
+ up_write(&net->cells_lock);
+ goto cell_is_active;
+ }
-reverse_deactivation:
- smp_store_release(&cell->state, AFS_CELL_ACTIVE);
- wake_up_var(&cell->state);
- _leave(" [deact->act]");
- return;
+ /* Make sure that the expiring server records are going to see the fact
+ * that the cell is kaput.
+ */
+ afs_set_cell_state(cell, AFS_CELL_REMOVING);
-done:
- _leave(" [done %u]", cell->state);
- return;
+ afs_deactivate_cell(net, cell);
+ afs_purge_servers(cell);
+
+ rb_erase(&cell->net_node, &net->cells);
+ afs_see_cell(cell, afs_cell_trace_unuse_delete);
+ up_write(&net->cells_lock);
-final_destruction:
/* The root volume is pinning the cell */
afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
cell->root_volume = NULL;
- afs_put_cell(cell, afs_cell_trace_put_destroy);
+
+ afs_set_cell_state(cell, AFS_CELL_DEAD);
+ return true;
}
static void afs_manage_cell_work(struct work_struct *work)
{
struct afs_cell *cell = container_of(work, struct afs_cell, manager);
+ bool final_put;
- afs_manage_cell(cell);
- afs_put_cell(cell, afs_cell_trace_put_queue_work);
-}
-
-/*
- * Manage the records of cells known to a network namespace. This includes
- * updating the DNS records and garbage collecting unused cells that were
- * automatically added.
- *
- * Note that constructed cell records may only be removed from net->cells by
- * this work item, so it is safe for this work item to stash a cursor pointing
- * into the tree and then return to caller (provided it skips cells that are
- * still under construction).
- *
- * Note also that we were given an increment on net->cells_outstanding by
- * whoever queued us that we need to deal with before returning.
- */
-void afs_manage_cells(struct work_struct *work)
-{
- struct afs_net *net = container_of(work, struct afs_net, cells_manager);
- struct rb_node *cursor;
- time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
- bool purging = !net->live;
-
- _enter("");
-
- /* Trawl the cell database looking for cells that have expired from
- * lack of use and cells whose DNS results have expired and dispatch
- * their managers.
- */
- down_read(&net->cells_lock);
-
- for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
- struct afs_cell *cell =
- rb_entry(cursor, struct afs_cell, net_node);
- unsigned active;
- bool sched_cell = false;
-
- active = atomic_read(&cell->active);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
- active, afs_cell_trace_manage);
-
- ASSERTCMP(active, >=, 1);
-
- if (purging) {
- if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
- active = atomic_dec_return(&cell->active);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
- active, afs_cell_trace_unuse_pin);
- }
- }
-
- if (active == 1) {
- struct afs_vlserver_list *vllist;
- time64_t expire_at = cell->last_inactive;
-
- read_lock(&cell->vl_servers_lock);
- vllist = rcu_dereference_protected(
- cell->vl_servers,
- lockdep_is_held(&cell->vl_servers_lock));
- if (vllist->nr_servers > 0)
- expire_at += afs_cell_gc_delay;
- read_unlock(&cell->vl_servers_lock);
- if (purging || expire_at <= now)
- sched_cell = true;
- else if (expire_at < next_manage)
- next_manage = expire_at;
- }
-
- if (!purging) {
- if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
- sched_cell = true;
- }
-
- if (sched_cell)
- afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
- }
-
- up_read(&net->cells_lock);
-
- /* Update the timer on the way out. We have to pass an increment on
- * cells_outstanding in the namespace that we are in to the timer or
- * the work scheduler.
- */
- if (!purging && next_manage < TIME64_MAX) {
- now = ktime_get_real_seconds();
-
- if (next_manage - now <= 0) {
- if (queue_work(afs_wq, &net->cells_manager))
- atomic_inc(&net->cells_outstanding);
- } else {
- afs_set_cell_timer(net, next_manage - now);
- }
- }
-
- afs_dec_cells_outstanding(net);
- _leave(" [%d]", atomic_read(&net->cells_outstanding));
+ afs_see_cell(cell, afs_cell_trace_manage);
+ final_put = afs_manage_cell(cell);
+ afs_see_cell(cell, afs_cell_trace_managed);
+ if (final_put)
+ afs_put_cell(cell, afs_cell_trace_put_final);
}
/*
@@ -942,6 +865,7 @@ void afs_manage_cells(struct work_struct *work)
void afs_cell_purge(struct afs_net *net)
{
struct afs_cell *ws;
+ struct rb_node *cursor;
_enter("");
@@ -949,14 +873,21 @@ void afs_cell_purge(struct afs_net *net)
ws = rcu_replace_pointer(net->ws_cell, NULL,
lockdep_is_held(&net->cells_lock));
up_write(&net->cells_lock);
- afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);
+ afs_unuse_cell(ws, afs_cell_trace_unuse_ws);
- _debug("del timer");
- if (del_timer_sync(&net->cells_timer))
- atomic_dec(&net->cells_outstanding);
+ _debug("kick cells");
+ down_read(&net->cells_lock);
+ for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
+ struct afs_cell *cell = rb_entry(cursor, struct afs_cell, net_node);
+
+ afs_see_cell(cell, afs_cell_trace_purge);
- _debug("kick mgr");
- afs_queue_cell_manager(net);
+ if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
+ afs_unuse_cell(cell, afs_cell_trace_unuse_pin);
+
+ afs_queue_cell(cell, afs_cell_trace_queue_purge);
+ }
+ up_read(&net->cells_lock);
_debug("wait");
wait_var_event(&net->cells_outstanding,
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 99a3f20bc786..1a906805a9e3 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -139,49 +139,6 @@ bool afs_cm_incoming_call(struct afs_call *call)
}
/*
- * Find the server record by peer address and record a probe to the cache
- * manager from a server.
- */
-static int afs_find_cm_server_by_peer(struct afs_call *call)
-{
- struct sockaddr_rxrpc srx;
- struct afs_server *server;
- struct rxrpc_peer *peer;
-
- peer = rxrpc_kernel_get_call_peer(call->net->socket, call->rxcall);
-
- server = afs_find_server(call->net, peer);
- if (!server) {
- trace_afs_cm_no_server(call, &srx);
- return 0;
- }
-
- call->server = server;
- return 0;
-}
-
-/*
- * Find the server record by server UUID and record a probe to the cache
- * manager from a server.
- */
-static int afs_find_cm_server_by_uuid(struct afs_call *call,
- struct afs_uuid *uuid)
-{
- struct afs_server *server;
-
- rcu_read_lock();
- server = afs_find_server_by_uuid(call->net, call->request);
- rcu_read_unlock();
- if (!server) {
- trace_afs_cm_no_server_u(call, call->request);
- return 0;
- }
-
- call->server = server;
- return 0;
-}
-
-/*
* Clean up a cache manager call.
*/
static void afs_cm_destructor(struct afs_call *call)
@@ -322,10 +279,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
-
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -349,18 +303,10 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
*/
static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
{
- int ret;
-
_enter("");
afs_extract_discard(call, 0);
- ret = afs_extract_data(call, false);
- if (ret < 0)
- return ret;
-
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_peer(call);
+ return afs_extract_data(call, false);
}
/*
@@ -373,8 +319,6 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
__be32 *b;
int ret;
- _enter("");
-
_enter("{%u}", call->unmarshall);
switch (call->unmarshall) {
@@ -421,9 +365,13 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_uuid(call, call->request);
+ if (memcmp(call->request, &call->server->_uuid, sizeof(call->server->_uuid)) != 0) {
+ pr_notice("Callback UUID does not match fileserver UUID\n");
+ trace_afs_cm_no_server_u(call, call->request);
+ return 0;
+ }
+
+ return 0;
}
/*
@@ -455,7 +403,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -533,7 +481,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -593,7 +541,7 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -667,9 +615,5 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
-
- /* We'll need the file server record as that tells us which set of
- * vnodes to operate upon.
- */
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 02cbf38e1a77..9e7b1fe82c27 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -33,8 +33,8 @@ static bool afs_lookup_filldir(struct dir_context *ctx, const char *name, int nl
loff_t fpos, u64 ino, unsigned dtype);
static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl);
-static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode);
+static struct dentry *afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode);
static int afs_rmdir(struct inode *dir, struct dentry *dentry);
static int afs_unlink(struct inode *dir, struct dentry *dentry);
static int afs_link(struct dentry *from, struct inode *dir,
@@ -1004,9 +1004,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
afs_stat_v(dvnode, n_lookup);
inode = afs_do_lookup(dir, dentry);
if (inode == ERR_PTR(-ENOENT))
- inode = afs_try_auto_mntpt(dentry, dir);
-
- if (!IS_ERR_OR_NULL(inode))
+ inode = NULL;
+ else if (!IS_ERR_OR_NULL(inode))
fid = AFS_FS_I(inode)->fid;
_debug("splice %p", dentry->d_inode);
@@ -1315,8 +1314,8 @@ static const struct afs_operation_ops afs_mkdir_operation = {
/*
* create a directory on an AFS filesystem
*/
-static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
@@ -1328,7 +1327,7 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
op = afs_alloc_operation(NULL, dvnode->volume);
if (IS_ERR(op)) {
d_drop(dentry);
- return PTR_ERR(op);
+ return ERR_CAST(op);
}
fscache_use_cookie(afs_vnode_cache(dvnode), true);
@@ -1344,7 +1343,7 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
op->ops = &afs_mkdir_operation;
ret = afs_do_sync_operation(op);
afs_dir_unuse_cookie(dvnode, ret);
- return ret;
+ return ERR_PTR(ret);
}
/*
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 7d997f7a8028..691e0ae607a1 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -10,16 +10,19 @@
#include <linux/dns_resolver.h>
#include "internal.h"
-static atomic_t afs_autocell_ino;
+#define AFS_MIN_DYNROOT_CELL_INO 4 /* Allow for ., .., @cell, .@cell */
+#define AFS_MAX_DYNROOT_CELL_INO ((unsigned int)INT_MAX)
+
+static struct dentry *afs_lookup_atcell(struct inode *dir, struct dentry *dentry, ino_t ino);
/*
* iget5() comparator for inode created by autocell operations
- *
- * These pseudo inodes don't match anything.
*/
static int afs_iget5_pseudo_test(struct inode *inode, void *opaque)
{
- return 0;
+ struct afs_fid *fid = opaque;
+
+ return inode->i_ino == fid->vnode;
}
/*
@@ -39,28 +42,16 @@ static int afs_iget5_pseudo_set(struct inode *inode, void *opaque)
}
/*
- * Create an inode for a dynamic root directory or an autocell dynamic
- * automount dir.
+ * Create an inode for an autocell dynamic automount dir.
*/
-struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
+static struct inode *afs_iget_pseudo_dir(struct super_block *sb, ino_t ino)
{
- struct afs_super_info *as = AFS_FS_S(sb);
struct afs_vnode *vnode;
struct inode *inode;
- struct afs_fid fid = {};
+ struct afs_fid fid = { .vnode = ino, .unique = 1, };
_enter("");
- if (as->volume)
- fid.vid = as->volume->vid;
- if (root) {
- fid.vnode = 1;
- fid.unique = 1;
- } else {
- fid.vnode = atomic_inc_return(&afs_autocell_ino);
- fid.unique = 0;
- }
-
inode = iget5_locked(sb, fid.vnode,
afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
if (!inode) {
@@ -73,115 +64,71 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
vnode = AFS_FS_I(inode);
- /* there shouldn't be an existing inode */
- BUG_ON(!(inode->i_state & I_NEW));
-
- netfs_inode_init(&vnode->netfs, NULL, false);
- inode->i_size = 0;
- inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
- if (root) {
- inode->i_op = &afs_dynroot_inode_operations;
- inode->i_fop = &simple_dir_operations;
- } else {
- inode->i_op = &afs_autocell_inode_operations;
- }
- set_nlink(inode, 2);
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- simple_inode_init_ts(inode);
- inode->i_blocks = 0;
- inode->i_generation = 0;
-
- set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
- if (!root) {
+ if (inode->i_state & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 2);
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_autocell_inode_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_AUTOMOUNT | S_NOATIME;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
- inode->i_flags |= S_AUTOMOUNT;
- }
- inode->i_flags |= S_NOATIME;
- unlock_new_inode(inode);
+ unlock_new_inode(inode);
+ }
_leave(" = %p", inode);
return inode;
}
/*
- * Probe to see if a cell may exist. This prevents positive dentries from
- * being created unnecessarily.
+ * Try to automount the mountpoint with pseudo directory, if the autocell
+ * option is set.
*/
-static int afs_probe_cell_name(struct dentry *dentry)
+static struct dentry *afs_dynroot_lookup_cell(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
- struct afs_cell *cell;
+ struct afs_cell *cell = NULL;
struct afs_net *net = afs_d2net(dentry);
+ struct inode *inode = NULL;
const char *name = dentry->d_name.name;
size_t len = dentry->d_name.len;
- char *result = NULL;
- int ret;
+ bool dotted = false;
+ int ret = -ENOENT;
/* Names prefixed with a dot are R/W mounts. */
if (name[0] == '.') {
- if (len == 1)
- return -EINVAL;
name++;
len--;
+ dotted = true;
}
- cell = afs_find_cell(net, name, len, afs_cell_trace_use_probe);
- if (!IS_ERR(cell)) {
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_probe);
- return 0;
- }
-
- ret = dns_query(net->net, "afsdb", name, len, "srv=1",
- &result, NULL, false);
- if (ret == -ENODATA || ret == -ENOKEY || ret == 0)
- ret = -ENOENT;
- if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) {
- struct dns_server_list_v1_header *v1 = (void *)result;
-
- if (v1->hdr.zero == 0 &&
- v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
- v1->hdr.version == 1 &&
- (v1->status != DNS_LOOKUP_GOOD &&
- v1->status != DNS_LOOKUP_GOOD_WITH_BAD))
- return -ENOENT;
-
+ cell = afs_lookup_cell(net, name, len, NULL, false,
+ afs_cell_trace_use_lookup_dynroot);
+ if (IS_ERR(cell)) {
+ ret = PTR_ERR(cell);
+ goto out_no_cell;
}
- kfree(result);
- return ret;
-}
-
-/*
- * Try to auto mount the mountpoint with pseudo directory, if the autocell
- * operation is setted.
- */
-struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
-{
- struct afs_vnode *vnode = AFS_FS_I(dir);
- struct inode *inode;
- int ret = -ENOENT;
-
- _enter("%p{%pd}, {%llx:%llu}",
- dentry, dentry, vnode->fid.vid, vnode->fid.vnode);
-
- if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
- goto out;
-
- ret = afs_probe_cell_name(dentry);
- if (ret < 0)
- goto out;
-
- inode = afs_iget_pseudo_dir(dir->i_sb, false);
+ inode = afs_iget_pseudo_dir(dir->i_sb, cell->dynroot_ino * 2 + dotted);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto out;
}
- _leave("= %p", inode);
- return inode;
+ dentry->d_fsdata = cell;
+ return d_splice_alias(inode, dentry);
out:
- _leave("= %d", ret);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_dynroot);
+out_no_cell:
+ if (!inode)
+ return d_splice_alias(inode, dentry);
return ret == -ENOENT ? NULL : ERR_PTR(ret);
}
@@ -193,8 +140,6 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
{
_enter("%pd", dentry);
- ASSERTCMP(d_inode(dentry), ==, NULL);
-
if (flags & LOOKUP_CREATE)
return ERR_PTR(-EOPNOTSUPP);
@@ -203,98 +148,49 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
return ERR_PTR(-ENAMETOOLONG);
}
- return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry);
+ if (dentry->d_name.len == 5 &&
+ memcmp(dentry->d_name.name, "@cell", 5) == 0)
+ return afs_lookup_atcell(dir, dentry, 2);
+
+ if (dentry->d_name.len == 6 &&
+ memcmp(dentry->d_name.name, ".@cell", 6) == 0)
+ return afs_lookup_atcell(dir, dentry, 3);
+
+ return afs_dynroot_lookup_cell(dir, dentry, flags);
}
const struct inode_operations afs_dynroot_inode_operations = {
.lookup = afs_dynroot_lookup,
};
-const struct dentry_operations afs_dynroot_dentry_operations = {
- .d_delete = always_delete_dentry,
- .d_release = afs_d_release,
- .d_automount = afs_d_automount,
-};
-
-/*
- * Create a manually added cell mount directory.
- * - The caller must hold net->proc_cells_lock
- */
-int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell)
-{
- struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *subdir, *dsubdir;
- char *dotname = cell->name - 1;
- int ret;
-
- if (!sb || atomic_read(&sb->s_active) == 0)
- return 0;
-
- /* Let the ->lookup op do the creation */
- root = sb->s_root;
- inode_lock(root->d_inode);
- subdir = lookup_one_len(cell->name, root, cell->name_len);
- if (IS_ERR(subdir)) {
- ret = PTR_ERR(subdir);
- goto unlock;
- }
-
- dsubdir = lookup_one_len(dotname, root, cell->name_len + 1);
- if (IS_ERR(dsubdir)) {
- ret = PTR_ERR(dsubdir);
- dput(subdir);
- goto unlock;
- }
-
- /* Note that we're retaining extra refs on the dentries. */
- subdir->d_fsdata = (void *)1UL;
- dsubdir->d_fsdata = (void *)1UL;
- ret = 0;
-unlock:
- inode_unlock(root->d_inode);
- return ret;
-}
-
-static void afs_dynroot_rm_one_dir(struct dentry *root, const char *name, size_t name_len)
+static void afs_dynroot_d_release(struct dentry *dentry)
{
- struct dentry *subdir;
-
- /* Don't want to trigger a lookup call, which will re-add the cell */
- subdir = try_lookup_one_len(name, root, name_len);
- if (IS_ERR_OR_NULL(subdir)) {
- _debug("lookup %ld", PTR_ERR(subdir));
- return;
- }
+ struct afs_cell *cell = dentry->d_fsdata;
- _debug("rmdir %pd %u", subdir, d_count(subdir));
-
- if (subdir->d_fsdata) {
- _debug("unpin %u", d_count(subdir));
- subdir->d_fsdata = NULL;
- dput(subdir);
- }
- dput(subdir);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_dynroot_mntpt);
}
/*
- * Remove a manually added cell mount directory.
- * - The caller must hold net->proc_cells_lock
+ * Keep @cell symlink dentries around, but only keep cell autodirs when they're
+ * being used.
*/
-void afs_dynroot_rmdir(struct afs_net *net, struct afs_cell *cell)
+static int afs_dynroot_delete_dentry(const struct dentry *dentry)
{
- struct super_block *sb = net->dynroot_sb;
- char *dotname = cell->name - 1;
-
- if (!sb || atomic_read(&sb->s_active) == 0)
- return;
+ const struct qstr *name = &dentry->d_name;
- inode_lock(sb->s_root->d_inode);
- afs_dynroot_rm_one_dir(sb->s_root, cell->name, cell->name_len);
- afs_dynroot_rm_one_dir(sb->s_root, dotname, cell->name_len + 1);
- inode_unlock(sb->s_root->d_inode);
- _leave("");
+ if (name->len == 5 && memcmp(name->name, "@cell", 5) == 0)
+ return 0;
+ if (name->len == 6 && memcmp(name->name, ".@cell", 6) == 0)
+ return 0;
+ return 1;
}
+const struct dentry_operations afs_dynroot_dentry_operations = {
+ .d_delete = afs_dynroot_delete_dentry,
+ .d_release = afs_dynroot_d_release,
+ .d_automount = afs_d_automount,
+};
+
static void afs_atcell_delayed_put_cell(void *arg)
{
struct afs_cell *cell = arg;
@@ -347,149 +243,163 @@ static const struct inode_operations afs_atcell_inode_operations = {
};
/*
- * Look up @cell or .@cell in a dynroot directory. This is a substitution for
- * the local cell name for the net namespace.
+ * Create an inode for the @cell or .@cell symlinks.
*/
-static struct dentry *afs_dynroot_create_symlink(struct dentry *root, const char *name)
+static struct dentry *afs_lookup_atcell(struct inode *dir, struct dentry *dentry, ino_t ino)
{
struct afs_vnode *vnode;
- struct afs_fid fid = { .vnode = 2, .unique = 1, };
- struct dentry *dentry;
struct inode *inode;
+ struct afs_fid fid = { .vnode = ino, .unique = 1, };
- if (name[0] == '.')
- fid.vnode = 3;
-
- dentry = d_alloc_name(root, name);
- if (!dentry)
- return ERR_PTR(-ENOMEM);
-
- inode = iget5_locked(dentry->d_sb, fid.vnode,
+ inode = iget5_locked(dir->i_sb, fid.vnode,
afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
- if (!inode) {
- dput(dentry);
+ if (!inode)
return ERR_PTR(-ENOMEM);
- }
vnode = AFS_FS_I(inode);
- /* there shouldn't be an existing inode */
- if (WARN_ON_ONCE(!(inode->i_state & I_NEW))) {
- iput(inode);
- dput(dentry);
- return ERR_PTR(-EIO);
+ if (inode->i_state & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 1);
+ inode->i_size = 0;
+ inode->i_mode = S_IFLNK | 0555;
+ inode->i_op = &afs_atcell_inode_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_NOATIME;
+
+ unlock_new_inode(inode);
}
-
- netfs_inode_init(&vnode->netfs, NULL, false);
- simple_inode_init_ts(inode);
- set_nlink(inode, 1);
- inode->i_size = 0;
- inode->i_mode = S_IFLNK | 0555;
- inode->i_op = &afs_atcell_inode_operations;
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_blocks = 0;
- inode->i_generation = 0;
- inode->i_flags |= S_NOATIME;
-
- unlock_new_inode(inode);
- d_splice_alias(inode, dentry);
- return dentry;
+ return d_splice_alias(inode, dentry);
}
/*
- * Create @cell and .@cell symlinks.
+ * Transcribe the cell database into readdir content under the RCU read lock.
+ * Each cell produces two entries, one prefixed with a dot and one not.
*/
-static int afs_dynroot_symlink(struct afs_net *net)
+static int afs_dynroot_readdir_cells(struct afs_net *net, struct dir_context *ctx)
{
- struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *symlink, *dsymlink;
- int ret;
-
- /* Let the ->lookup op do the creation */
- root = sb->s_root;
- inode_lock(root->d_inode);
- symlink = afs_dynroot_create_symlink(root, "@cell");
- if (IS_ERR(symlink)) {
- ret = PTR_ERR(symlink);
- goto unlock;
- }
+ const struct afs_cell *cell;
+ loff_t newpos;
+
+ _enter("%llu", ctx->pos);
+
+ for (;;) {
+ unsigned int ix = ctx->pos >> 1;
+
+ cell = idr_get_next(&net->cells_dyn_ino, &ix);
+ if (!cell)
+ return 0;
+ if (READ_ONCE(cell->state) == AFS_CELL_REMOVING ||
+ READ_ONCE(cell->state) == AFS_CELL_DEAD) {
+ ctx->pos += 2;
+ ctx->pos &= ~1;
+ continue;
+ }
- dsymlink = afs_dynroot_create_symlink(root, ".@cell");
- if (IS_ERR(dsymlink)) {
- ret = PTR_ERR(dsymlink);
- dput(symlink);
- goto unlock;
- }
+ newpos = ix << 1;
+ if (newpos > ctx->pos)
+ ctx->pos = newpos;
- /* Note that we're retaining extra refs on the dentries. */
- symlink->d_fsdata = (void *)1UL;
- dsymlink->d_fsdata = (void *)1UL;
- ret = 0;
-unlock:
- inode_unlock(root->d_inode);
- return ret;
+ _debug("pos %llu -> cell %u", ctx->pos, cell->dynroot_ino);
+
+ if ((ctx->pos & 1) == 0) {
+ if (!dir_emit(ctx, cell->name, cell->name_len,
+ cell->dynroot_ino, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
+ if ((ctx->pos & 1) == 1) {
+ if (!dir_emit(ctx, cell->name - 1, cell->name_len + 1,
+ cell->dynroot_ino + 1, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
+ }
+ return 0;
}
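The readdir cursor packs two entries per cell: ctx->pos >> 1 is the cell's index in the cells_dyn_ino IDR (allocated from 2 upwards, positions 0-3 being reserved for the dot entries and the @cell links) and ctx->pos & 1 selects the undotted or dotted name, so a resumed read lands on the right entry. A standalone decode of that encoding:

#include <stdio.h>

static void decode_pos(unsigned long long pos)
{
	printf("pos %llu -> IDR index %llu, %s name\n",
	       pos, pos >> 1, (pos & 1) ? "dotted" : "undotted");
}

int main(void)
{
	unsigned long long pos;

	/* Cell entries start at pos 4, i.e. IDR index 2, undotted. */
	for (pos = 4; pos < 8; pos++)
		decode_pos(pos);
	return 0;
}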
/*
- * Populate a newly created dynamic root with cell names.
+ * Read the AFS dynamic root directory. This produces a list of cellnames,
+ * dotted and undotted, along with @cell and .@cell links if configured.
*/
-int afs_dynroot_populate(struct super_block *sb)
+static int afs_dynroot_readdir(struct file *file, struct dir_context *ctx)
{
- struct afs_cell *cell;
- struct afs_net *net = afs_sb2net(sb);
- int ret;
-
- mutex_lock(&net->proc_cells_lock);
+ struct afs_net *net = afs_d2net(file->f_path.dentry);
+ int ret = 0;
- net->dynroot_sb = sb;
- ret = afs_dynroot_symlink(net);
- if (ret < 0)
- goto error;
+ if (!dir_emit_dots(file, ctx))
+ return 0;
- hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
- ret = afs_dynroot_mkdir(net, cell);
- if (ret < 0)
- goto error;
+ if (ctx->pos == 2) {
+ if (rcu_access_pointer(net->ws_cell) &&
+ !dir_emit(ctx, "@cell", 5, 2, DT_LNK))
+ return 0;
+ ctx->pos = 3;
+ }
+ if (ctx->pos == 3) {
+ if (rcu_access_pointer(net->ws_cell) &&
+ !dir_emit(ctx, ".@cell", 6, 3, DT_LNK))
+ return 0;
+ ctx->pos = 4;
}
- ret = 0;
-out:
- mutex_unlock(&net->proc_cells_lock);
+ if ((unsigned long long)ctx->pos <= AFS_MAX_DYNROOT_CELL_INO) {
+ rcu_read_lock();
+ ret = afs_dynroot_readdir_cells(net, ctx);
+ rcu_read_unlock();
+ }
return ret;
-
-error:
- net->dynroot_sb = NULL;
- goto out;
}
+static const struct file_operations afs_dynroot_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = afs_dynroot_readdir,
+ .fsync = noop_fsync,
+};
+
/*
- * When a dynamic root that's in the process of being destroyed, depopulate it
- * of pinned directories.
+ * Create an inode for a dynamic root directory.
*/
-void afs_dynroot_depopulate(struct super_block *sb)
+struct inode *afs_dynroot_iget_root(struct super_block *sb)
{
- struct afs_net *net = afs_sb2net(sb);
- struct dentry *root = sb->s_root, *subdir;
-
- /* Prevent more subdirs from being created */
- mutex_lock(&net->proc_cells_lock);
- if (net->dynroot_sb == sb)
- net->dynroot_sb = NULL;
- mutex_unlock(&net->proc_cells_lock);
-
- if (root) {
- struct hlist_node *n;
- inode_lock(root->d_inode);
-
- /* Remove all the pins for dirs created for manually added cells */
- hlist_for_each_entry_safe(subdir, n, &root->d_children, d_sib) {
- if (subdir->d_fsdata) {
- subdir->d_fsdata = NULL;
- dput(subdir);
- }
- }
+ struct afs_super_info *as = AFS_FS_S(sb);
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ struct afs_fid fid = { .vid = 0, .vnode = 1, .unique = 1,};
+
+ if (as->volume)
+ fid.vid = as->volume->vid;
- inode_unlock(root->d_inode);
+ inode = iget5_locked(sb, fid.vnode,
+ afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ vnode = AFS_FS_I(inode);
+
+ /* Only initialise the inode if it was newly allocated */
+ if (inode->i_state & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 2);
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_dynroot_inode_operations;
+ inode->i_fop = &afs_dynroot_file_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_NOATIME;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
+ unlock_new_inode(inode);
}
+ _leave(" = %p", inode);
+ return inode;
}
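All three pseudo-inode constructors above now follow the canonical iget5_locked() pattern: the test callback matches by inode number, an existing inode is returned as-is, and only a freshly allocated one (I_NEW set) is initialised and published with unlock_new_inode(). A kernel-context sketch, with example_test/example_set as hypothetical callbacks:

static struct inode *example_iget(struct super_block *sb, void *key,
				  unsigned long ino)
{
	struct inode *inode;

	inode = iget5_locked(sb, ino, example_test, example_set, key);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		/* One-time initialisation, only for a new inode. */
		inode->i_mode = S_IFDIR | 0555;
		unlock_new_inode(inode);
	}
	return inode;
}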
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index b516d05b0fef..07a8bfbdd9b9 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -235,20 +235,20 @@ out:
* Probe all of a fileserver's addresses to find out the best route and to
* query its capabilities.
*/
-void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
- struct afs_addr_list *new_alist, struct key *key)
+int afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_list *new_alist, struct key *key)
{
struct afs_endpoint_state *estate, *old;
- struct afs_addr_list *alist;
+ struct afs_addr_list *old_alist = NULL, *alist;
unsigned long unprobed;
_enter("%pU", &server->uuid);
estate = kzalloc(sizeof(*estate), GFP_KERNEL);
if (!estate)
- return;
+ return -ENOMEM;
- refcount_set(&estate->ref, 1);
+ refcount_set(&estate->ref, 2);
estate->server_id = server->debug_id;
estate->rtt = UINT_MAX;
@@ -256,21 +256,31 @@ void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
old = rcu_dereference_protected(server->endpoint_state,
lockdep_is_held(&server->fs_lock));
- estate->responsive_set = old->responsive_set;
- estate->addresses = afs_get_addrlist(new_alist ?: old->addresses,
- afs_alist_trace_get_estate);
+ if (old) {
+ estate->responsive_set = old->responsive_set;
+ if (!new_alist)
+ new_alist = old->addresses;
+ }
+
+ if (old_alist != new_alist)
+ afs_set_peer_appdata(server, old_alist, new_alist);
+
+ estate->addresses = afs_get_addrlist(new_alist, afs_alist_trace_get_estate);
alist = estate->addresses;
estate->probe_seq = ++server->probe_counter;
atomic_set(&estate->nr_probing, alist->nr_addrs);
+ if (new_alist)
+ server->addr_version = new_alist->version;
rcu_assign_pointer(server->endpoint_state, estate);
- set_bit(AFS_ESTATE_SUPERSEDED, &old->flags);
write_unlock(&server->fs_lock);
+ if (old)
+ set_bit(AFS_ESTATE_SUPERSEDED, &old->flags);
trace_afs_estate(estate->server_id, estate->probe_seq, refcount_read(&estate->ref),
afs_estate_trace_alloc_probe);
- afs_get_address_preferences(net, alist);
+ afs_get_address_preferences(net, new_alist);
server->probed_at = jiffies;
unprobed = (1UL << alist->nr_addrs) - 1;
@@ -293,6 +303,8 @@ void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
}
afs_put_endpoint_state(old, afs_estate_trace_put_probe);
+ afs_put_endpoint_state(estate, afs_estate_trace_put_probe);
+ return 0;
}
/*
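Two details of the reworked probe function are worth noting. The initial reference count on the endpoint state is now 2: one reference is published in server->endpoint_state and one is held locally, and the local one is dropped by the new afs_put_endpoint_state() call at the end. In outline (a condensed sketch of the pattern above, not additional code):

    refcount_set(&estate->ref, 2);  /* one for server->endpoint_state, one for us */
    rcu_assign_pointer(server->endpoint_state, estate);
    ...
    afs_put_endpoint_state(estate, afs_estate_trace_put_probe);  /* local ref */
    return 0;

Returning int rather than void also lets the caller (afs_lookup_server(), below) propagate -ENOMEM instead of continuing without an endpoint state.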
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 1d9ecd5418d8..bc9556991d7c 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -1653,7 +1653,7 @@ int afs_fs_give_up_all_callbacks(struct afs_net *net, struct afs_server *server,
bp = call->request;
*bp++ = htonl(FSGIVEUPALLCALLBACKS);
- call->server = afs_use_server(server, afs_server_trace_give_up_cb);
+ call->server = afs_use_server(server, false, afs_server_trace_use_give_up_cb);
afs_make_call(call, GFP_NOFS);
afs_wait_for_call_to_complete(call);
ret = call->error;
@@ -1760,7 +1760,7 @@ bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server,
return false;
call->key = key;
- call->server = afs_use_server(server, afs_server_trace_get_caps);
+ call->server = afs_use_server(server, false, afs_server_trace_use_get_caps);
call->peer = rxrpc_kernel_get_peer(estate->addresses->addrs[addr_index].peer);
call->probe = afs_get_endpoint_state(estate, afs_estate_trace_get_getcaps);
call->probe_index = addr_index;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index df30bd62da79..440b0e731093 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -287,9 +287,8 @@ struct afs_net {
/* Cell database */
struct rb_root cells;
+ struct idr cells_dyn_ino; /* cell->dynroot_ino mapping */
struct afs_cell __rcu *ws_cell;
- struct work_struct cells_manager;
- struct timer_list cells_timer;
atomic_t cells_outstanding;
struct rw_semaphore cells_lock;
struct mutex cells_alias_lock;
@@ -301,18 +300,11 @@ struct afs_net {
* cell, but in practice, people create aliases and subsets and there's
* no easy way to distinguish them.
*/
- seqlock_t fs_lock; /* For fs_servers, fs_probe_*, fs_proc */
- struct rb_root fs_servers; /* afs_server (by server UUID or address) */
+ seqlock_t fs_lock; /* For fs_probe_*, fs_proc */
struct list_head fs_probe_fast; /* List of afs_server to probe at 30s intervals */
struct list_head fs_probe_slow; /* List of afs_server to probe at 5m intervals */
struct hlist_head fs_proc; /* procfs servers list */
- struct hlist_head fs_addresses; /* afs_server (by lowest IPv6 addr) */
- seqlock_t fs_addr_lock; /* For fs_addresses[46] */
-
- struct work_struct fs_manager;
- struct timer_list fs_timer;
-
struct work_struct fs_prober;
struct timer_list fs_probe_timer;
atomic_t servers_outstanding;
@@ -345,13 +337,10 @@ struct afs_net {
extern const char afs_init_sysname[];
enum afs_cell_state {
- AFS_CELL_UNSET,
- AFS_CELL_ACTIVATING,
+ AFS_CELL_SETTING_UP,
AFS_CELL_ACTIVE,
- AFS_CELL_DEACTIVATING,
- AFS_CELL_INACTIVE,
- AFS_CELL_FAILED,
- AFS_CELL_REMOVED,
+ AFS_CELL_REMOVING,
+ AFS_CELL_DEAD,
};
/*
@@ -382,7 +371,9 @@ struct afs_cell {
struct afs_cell *alias_of; /* The cell this is an alias of */
struct afs_volume *root_volume; /* The root.cell volume if there is one */
struct key *anonymous_key; /* anonymous user key for this cell */
+ struct work_struct destroyer; /* Destroyer for cell */
struct work_struct manager; /* Manager for init/deinit/dns */
+ struct timer_list management_timer; /* General management timer */
struct hlist_node proc_link; /* /proc cell list link */
time64_t dns_expiry; /* Time AFSDB/SRV record expires */
time64_t last_inactive; /* Time of last drop of usage count */
@@ -398,6 +389,7 @@ struct afs_cell {
enum dns_lookup_status dns_status:8; /* Latest status of data from lookup */
unsigned int dns_lookup_count; /* Counter of DNS lookups */
unsigned int debug_id;
+ unsigned int dynroot_ino; /* Inode numbers for dynroot (a pair) */
/* The volumes belonging to this cell */
struct rw_semaphore vs_lock; /* Lock for server->volumes */
@@ -407,7 +399,7 @@ struct afs_cell {
/* Active fileserver interaction state. */
struct rb_root fs_servers; /* afs_server (by server UUID) */
- seqlock_t fs_lock; /* For fs_servers */
+ struct rw_semaphore fs_lock; /* For fs_servers */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
@@ -542,22 +534,22 @@ struct afs_server {
};
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
- struct rb_node uuid_rb; /* Link in net->fs_servers */
- struct afs_server __rcu *uuid_next; /* Next server with same UUID */
- struct afs_server *uuid_prev; /* Previous server with same UUID */
- struct list_head probe_link; /* Link in net->fs_probe_list */
- struct hlist_node addr_link; /* Link in net->fs_addresses6 */
+ struct rb_node uuid_rb; /* Link in cell->fs_servers */
+ struct list_head probe_link; /* Link in net->fs_probe_* */
struct hlist_node proc_link; /* Link in net->fs_proc */
struct list_head volumes; /* RCU list of afs_server_entry objects */
- struct afs_server *gc_next; /* Next server in manager's list */
+ struct work_struct destroyer; /* Work item to try and destroy a server */
+ struct timer_list timer; /* Management timer */
time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
#define AFS_SERVER_FL_RESPONDING 0 /* The server is responding */
#define AFS_SERVER_FL_UPDATING 1
#define AFS_SERVER_FL_NEEDS_UPDATE 2 /* Fileserver address list is out of date */
-#define AFS_SERVER_FL_NOT_READY 4 /* The record is not ready for use */
-#define AFS_SERVER_FL_NOT_FOUND 5 /* VL server says no such server */
-#define AFS_SERVER_FL_VL_FAIL 6 /* Failed to access VL server */
+#define AFS_SERVER_FL_UNCREATED 3 /* The record needs creating */
+#define AFS_SERVER_FL_CREATING 4 /* The record is being created */
+#define AFS_SERVER_FL_EXPIRED 5 /* The record has expired */
+#define AFS_SERVER_FL_NOT_FOUND 6 /* VL server says no such server */
+#define AFS_SERVER_FL_VL_FAIL 7 /* Failed to access VL server */
#define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */
#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
@@ -567,6 +559,7 @@ struct afs_server {
atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
u16 service_id; /* Service ID we're using. */
+ short create_error; /* Creation error */
unsigned int rtt; /* Server's current RTT in uS */
unsigned int debug_id; /* Debugging ID for traces */
@@ -621,6 +614,7 @@ struct afs_volume {
afs_volid_t vid; /* The volume ID of this volume */
afs_volid_t vids[AFS_MAXTYPES]; /* All associated volume IDs */
refcount_t ref;
+ unsigned int debug_id; /* Debugging ID for traces */
time64_t update_at; /* Time at which to next update */
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
struct rb_node cell_node; /* Link in cell->volumes */
@@ -700,7 +694,6 @@ struct afs_vnode {
#define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
#define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */
#define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */
-#define AFS_VNODE_AUTOCELL 6 /* set if Vnode is an auto mount point */
#define AFS_VNODE_PSEUDODIR 7 /* set if Vnode is a pseudo directory */
#define AFS_VNODE_NEW_CONTENT 8 /* Set if file has new content (create/trunc-0) */
#define AFS_VNODE_SILLY_DELETED 9 /* Set if file has been silly-deleted */
@@ -1008,6 +1001,9 @@ extern int afs_merge_fs_addr4(struct afs_net *net, struct afs_addr_list *addr,
__be32 xdr, u16 port);
extern int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *addr,
__be32 *xdr, u16 port);
+void afs_set_peer_appdata(struct afs_server *server,
+ struct afs_addr_list *old_alist,
+ struct afs_addr_list *new_alist);
/*
* addr_prefs.c
@@ -1044,16 +1040,17 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
extern int afs_cell_init(struct afs_net *, const char *);
extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned,
enum afs_cell_trace);
-extern struct afs_cell *afs_lookup_cell(struct afs_net *, const char *, unsigned,
- const char *, bool);
+struct afs_cell *afs_lookup_cell(struct afs_net *net,
+ const char *name, unsigned int namesz,
+ const char *vllist, bool excl,
+ enum afs_cell_trace trace);
extern struct afs_cell *afs_use_cell(struct afs_cell *, enum afs_cell_trace);
-extern void afs_unuse_cell(struct afs_net *, struct afs_cell *, enum afs_cell_trace);
+void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason);
extern struct afs_cell *afs_get_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_see_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_put_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_queue_cell(struct afs_cell *, enum afs_cell_trace);
-extern void afs_manage_cells(struct work_struct *);
-extern void afs_cells_timer(struct timer_list *);
+void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs);
extern void __net_exit afs_cell_purge(struct afs_net *);
/*
@@ -1111,11 +1108,7 @@ extern int afs_silly_iput(struct dentry *, struct inode *);
extern const struct inode_operations afs_dynroot_inode_operations;
extern const struct dentry_operations afs_dynroot_dentry_operations;
-extern struct inode *afs_try_auto_mntpt(struct dentry *, struct inode *);
-extern int afs_dynroot_mkdir(struct afs_net *, struct afs_cell *);
-extern void afs_dynroot_rmdir(struct afs_net *, struct afs_cell *);
-extern int afs_dynroot_populate(struct super_block *);
-extern void afs_dynroot_depopulate(struct super_block *);
+struct inode *afs_dynroot_iget_root(struct super_block *sb);
/*
* file.c
@@ -1207,8 +1200,8 @@ struct afs_endpoint_state *afs_get_endpoint_state(struct afs_endpoint_state *est
enum afs_estate_trace where);
void afs_put_endpoint_state(struct afs_endpoint_state *estate, enum afs_estate_trace where);
extern void afs_fileserver_probe_result(struct afs_call *);
-void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
- struct afs_addr_list *new_addrs, struct key *key);
+int afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_list *new_alist, struct key *key);
int afs_wait_for_fs_probes(struct afs_operation *op, struct afs_server_state *states, bool intr);
extern void afs_probe_fileserver(struct afs_net *, struct afs_server *);
extern void afs_fs_probe_dispatcher(struct work_struct *);
@@ -1228,7 +1221,6 @@ int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen);
extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *);
extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
extern int afs_ilookup5_test_by_fid(struct inode *, void *);
-extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool);
extern struct inode *afs_iget(struct afs_operation *, struct afs_vnode_param *);
extern struct inode *afs_root_iget(struct super_block *, struct key *);
extern int afs_getattr(struct mnt_idmap *idmap, const struct path *,
@@ -1510,20 +1502,30 @@ extern void __exit afs_clean_up_permit_cache(void);
*/
extern spinlock_t afs_server_peer_lock;
-extern struct afs_server *afs_find_server(struct afs_net *, const struct rxrpc_peer *);
-extern struct afs_server *afs_find_server_by_uuid(struct afs_net *, const uuid_t *);
+struct afs_server *afs_find_server(const struct rxrpc_peer *peer);
extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *, u32);
extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace);
-extern struct afs_server *afs_use_server(struct afs_server *, enum afs_server_trace);
-extern void afs_unuse_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
-extern void afs_unuse_server_notime(struct afs_net *, struct afs_server *, enum afs_server_trace);
+struct afs_server *afs_use_server(struct afs_server *server, bool activate,
+ enum afs_server_trace reason);
+void afs_unuse_server(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason);
+void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason);
extern void afs_put_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
-extern void afs_manage_servers(struct work_struct *);
-extern void afs_servers_timer(struct timer_list *);
+void afs_purge_servers(struct afs_cell *cell);
extern void afs_fs_probe_timer(struct timer_list *);
-extern void __net_exit afs_purge_servers(struct afs_net *);
+void __net_exit afs_wait_for_servers(struct afs_net *net);
bool afs_check_server_record(struct afs_operation *op, struct afs_server *server, struct key *key);
+static inline void afs_see_server(struct afs_server *server, enum afs_server_trace trace)
+{
+ int r = refcount_read(&server->ref);
+ int a = atomic_read(&server->active);
+
+ trace_afs_server(server->debug_id, r, a, trace);
+}
+
static inline void afs_inc_servers_outstanding(struct afs_net *net)
{
atomic_inc(&net->servers_outstanding);
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 1ae0067f772d..c845c5daaeba 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -76,25 +76,17 @@ static int __net_init afs_net_init(struct net *net_ns)
mutex_init(&net->socket_mutex);
net->cells = RB_ROOT;
+ idr_init(&net->cells_dyn_ino);
init_rwsem(&net->cells_lock);
- INIT_WORK(&net->cells_manager, afs_manage_cells);
- timer_setup(&net->cells_timer, afs_cells_timer, 0);
-
mutex_init(&net->cells_alias_lock);
mutex_init(&net->proc_cells_lock);
INIT_HLIST_HEAD(&net->proc_cells);
seqlock_init(&net->fs_lock);
- net->fs_servers = RB_ROOT;
INIT_LIST_HEAD(&net->fs_probe_fast);
INIT_LIST_HEAD(&net->fs_probe_slow);
INIT_HLIST_HEAD(&net->fs_proc);
- INIT_HLIST_HEAD(&net->fs_addresses);
- seqlock_init(&net->fs_addr_lock);
-
- INIT_WORK(&net->fs_manager, afs_manage_servers);
- timer_setup(&net->fs_timer, afs_servers_timer, 0);
INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher);
timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0);
atomic_set(&net->servers_outstanding, 1);
@@ -130,13 +122,14 @@ error_open_socket:
net->live = false;
afs_fs_probe_cleanup(net);
afs_cell_purge(net);
- afs_purge_servers(net);
+ afs_wait_for_servers(net);
error_cell_init:
net->live = false;
afs_proc_cleanup(net);
error_proc:
afs_put_sysnames(net->sysnames);
error_sysnames:
+ idr_destroy(&net->cells_dyn_ino);
net->live = false;
return ret;
}
@@ -151,10 +144,11 @@ static void __net_exit afs_net_exit(struct net *net_ns)
net->live = false;
afs_fs_probe_cleanup(net);
afs_cell_purge(net);
- afs_purge_servers(net);
+ afs_wait_for_servers(net);
afs_close_socket(net);
afs_proc_cleanup(net);
afs_put_sysnames(net->sysnames);
+ idr_destroy(&net->cells_dyn_ino);
kfree_rcu(rcu_access_pointer(net->address_prefs), rcu);
}
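The new cells_dyn_ino IDR maps a small per-cell slot to the cell so that each cell can be given stable inode numbers in the dynamic root. Only idr_init() and idr_destroy() appear in this file; allocation and release track the cell's lifetime, along these lines (a hypothetical sketch — the actual allocation site is in the cell code, which is not part of this excerpt):

    /* At cell creation: reserve a slot, from which dynroot_ino is derived. */
    int slot = idr_alloc_cyclic(&net->cells_dyn_ino, cell, 2, INT_MAX / 2,
                                GFP_KERNEL);
    if (slot < 0)
            return slot;
    cell->dynroot_ino = slot * 2;   /* assumed: an even/odd inode-number pair */

    /* At cell destruction: */
    idr_remove(&net->cells_dyn_ino, cell->dynroot_ino / 2);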
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 507c25a5b2cb..45cee6534122 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -87,7 +87,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
ctx->force = true;
}
if (ctx->cell) {
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_mntpt);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_mntpt);
ctx->cell = NULL;
}
if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
@@ -107,7 +107,8 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
if (size > AFS_MAXCELLNAME)
return -ENAMETOOLONG;
- cell = afs_lookup_cell(ctx->net, p, size, NULL, false);
+ cell = afs_lookup_cell(ctx->net, p, size, NULL, false,
+ afs_cell_trace_use_lookup_mntpt);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%pd'\n", mntpt);
return PTR_ERR(cell);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 12c88d8be3fe..40e879c8ca77 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -122,14 +122,15 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
if (strcmp(buf, "add") == 0) {
struct afs_cell *cell;
- cell = afs_lookup_cell(net, name, strlen(name), args, true);
+ cell = afs_lookup_cell(net, name, strlen(name), args, true,
+ afs_cell_trace_use_lookup_add);
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
goto done;
}
if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_no_pin);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_no_pin);
} else {
goto inval;
}
@@ -443,8 +444,6 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
}
server = list_entry(v, struct afs_server, proc_link);
- estate = rcu_dereference(server->endpoint_state);
- alist = estate->addresses;
seq_printf(m, "%pU %3d %3d %s\n",
&server->uuid,
refcount_read(&server->ref),
@@ -454,10 +453,16 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
server->flags, server->rtt);
seq_printf(m, " - probe: last=%d\n",
(int)(jiffies - server->probed_at) / HZ);
+
+ estate = rcu_dereference(server->endpoint_state);
+ if (!estate)
+ goto out;
failed = estate->failed_set;
seq_printf(m, " - ESTATE pq=%x np=%u rsp=%lx f=%lx\n",
estate->probe_seq, atomic_read(&estate->nr_probing),
estate->responsive_set, estate->failed_set);
+
+ alist = estate->addresses;
seq_printf(m, " - ALIST v=%u ap=%u\n",
alist->version, alist->addr_pref_version);
for (i = 0; i < alist->nr_addrs; i++) {
@@ -470,6 +475,8 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
rxrpc_kernel_get_srtt(addr->peer),
addr->last_error, addr->prio);
}
+
+out:
return 0;
}
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 886416ea1d96..d5e480a33859 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -179,7 +179,7 @@ static void afs_free_call(struct afs_call *call)
if (call->type->destructor)
call->type->destructor(call);
- afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call);
+ afs_unuse_server_notime(call->net, call->server, afs_server_trace_unuse_call);
kfree(call->request);
o = atomic_read(&net->nr_outstanding_calls);
@@ -766,8 +766,14 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long user_call_ID)
{
+ struct afs_call *call = (struct afs_call *)user_call_ID;
struct afs_net *net = afs_sock2net(sk);
+ call->peer = rxrpc_kernel_get_call_peer(sk->sk_socket, call->rxcall);
+ call->server = afs_find_server(call->peer);
+ if (!call->server)
+ trace_afs_cm_no_server(call, rxrpc_kernel_remote_srx(call->peer));
+
queue_work(afs_wq, &net->charge_preallocation_work);
}
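The interesting change here is that an incoming cell-manager call is now bound to its server record through the rxrpc peer itself. afs_set_peer_appdata() (declared in internal.h above) stamps the owning afs_server into each address's peer, and afs_find_server() (in server.c, below) reads it straight back out, replacing the old scan of a net-wide address list:

    /* publish: point each rxrpc peer at the owning afs_server */
    afs_set_peer_appdata(server, old_alist, new_alist);

    /* lookup: O(1) peer-to-server translation on an incoming call */
    server = (struct afs_server *)rxrpc_kernel_get_peer_data(peer);

This is also why afs_find_server() loses its afs_net argument and why net->fs_addresses and fs_addr_lock could be deleted from struct afs_net.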
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 4504e16b458c..c530d1ca15df 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -14,190 +14,104 @@
static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */
static atomic_t afs_server_debug_id;
-static struct afs_server *afs_maybe_use_server(struct afs_server *,
- enum afs_server_trace);
static void __afs_put_server(struct afs_net *, struct afs_server *);
+static void afs_server_timer(struct timer_list *timer);
+static void afs_server_destroyer(struct work_struct *work);
/*
* Find a server by one of its addresses.
*/
-struct afs_server *afs_find_server(struct afs_net *net, const struct rxrpc_peer *peer)
+struct afs_server *afs_find_server(const struct rxrpc_peer *peer)
{
- const struct afs_endpoint_state *estate;
- const struct afs_addr_list *alist;
- struct afs_server *server = NULL;
- unsigned int i;
- int seq = 1;
+ struct afs_server *server = (struct afs_server *)rxrpc_kernel_get_peer_data(peer);
- rcu_read_lock();
-
- do {
- if (server)
- afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
- server = NULL;
- seq++; /* 2 on the 1st/lockless path, otherwise odd */
- read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
-
- hlist_for_each_entry_rcu(server, &net->fs_addresses, addr_link) {
- estate = rcu_dereference(server->endpoint_state);
- alist = estate->addresses;
- for (i = 0; i < alist->nr_addrs; i++)
- if (alist->addrs[i].peer == peer)
- goto found;
- }
-
- server = NULL;
- continue;
- found:
- server = afs_maybe_use_server(server, afs_server_trace_get_by_addr);
-
- } while (need_seqretry(&net->fs_addr_lock, seq));
-
- done_seqretry(&net->fs_addr_lock, seq);
-
- rcu_read_unlock();
- return server;
+ if (!server)
+ return NULL;
+ return afs_use_server(server, false, afs_server_trace_use_cm_call);
}
/*
- * Look up a server by its UUID and mark it active.
+ * Look up a server by its UUID and mark it active. The caller must hold
+ * cell->fs_lock.
*/
-struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
+static struct afs_server *afs_find_server_by_uuid(struct afs_cell *cell, const uuid_t *uuid)
{
- struct afs_server *server = NULL;
+ struct afs_server *server;
struct rb_node *p;
- int diff, seq = 1;
+ int diff;
_enter("%pU", uuid);
- do {
- /* Unfortunately, rbtree walking doesn't give reliable results
- * under just the RCU read lock, so we have to check for
- * changes.
- */
- if (server)
- afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
- server = NULL;
- seq++; /* 2 on the 1st/lockless path, otherwise odd */
- read_seqbegin_or_lock(&net->fs_lock, &seq);
-
- p = net->fs_servers.rb_node;
- while (p) {
- server = rb_entry(p, struct afs_server, uuid_rb);
-
- diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
- if (diff < 0) {
- p = p->rb_left;
- } else if (diff > 0) {
- p = p->rb_right;
- } else {
- afs_use_server(server, afs_server_trace_get_by_uuid);
- break;
- }
-
- server = NULL;
- }
- } while (need_seqretry(&net->fs_lock, seq));
+ p = cell->fs_servers.rb_node;
+ while (p) {
+ server = rb_entry(p, struct afs_server, uuid_rb);
- done_seqretry(&net->fs_lock, seq);
+ diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
+ if (diff < 0) {
+ p = p->rb_left;
+ } else if (diff > 0) {
+ p = p->rb_right;
+ } else {
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags))
+ return NULL; /* Need a write lock */
+ afs_use_server(server, true, afs_server_trace_use_by_uuid);
+ return server;
+ }
+ }
- _leave(" = %p", server);
- return server;
+ return NULL;
}
/*
- * Install a server record in the namespace tree. If there's a clash, we stick
- * it into a list anchored on whichever afs_server struct is actually in the
- * tree.
+ * Install a server record in the cell tree. The caller must hold an exclusive
+ * lock on cell->fs_lock.
*/
static struct afs_server *afs_install_server(struct afs_cell *cell,
- struct afs_server *candidate)
+ struct afs_server **candidate)
{
- const struct afs_endpoint_state *estate;
- const struct afs_addr_list *alist;
- struct afs_server *server, *next;
+ struct afs_server *server;
struct afs_net *net = cell->net;
struct rb_node **pp, *p;
int diff;
_enter("%p", candidate);
- write_seqlock(&net->fs_lock);
-
/* Firstly install the server in the UUID lookup tree */
- pp = &net->fs_servers.rb_node;
+ pp = &cell->fs_servers.rb_node;
p = NULL;
while (*pp) {
p = *pp;
_debug("- consider %p", p);
server = rb_entry(p, struct afs_server, uuid_rb);
- diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
- if (diff < 0) {
+ diff = memcmp(&(*candidate)->uuid, &server->uuid, sizeof(uuid_t));
+ if (diff < 0)
pp = &(*pp)->rb_left;
- } else if (diff > 0) {
+ else if (diff > 0)
pp = &(*pp)->rb_right;
- } else {
- if (server->cell == cell)
- goto exists;
-
- /* We have the same UUID representing servers in
- * different cells. Append the new server to the list.
- */
- for (;;) {
- next = rcu_dereference_protected(
- server->uuid_next,
- lockdep_is_held(&net->fs_lock.lock));
- if (!next)
- break;
- server = next;
- }
- rcu_assign_pointer(server->uuid_next, candidate);
- candidate->uuid_prev = server;
- server = candidate;
- goto added_dup;
- }
+ else
+ goto exists;
}
- server = candidate;
+ server = *candidate;
+ *candidate = NULL;
rb_link_node(&server->uuid_rb, p, pp);
- rb_insert_color(&server->uuid_rb, &net->fs_servers);
+ rb_insert_color(&server->uuid_rb, &cell->fs_servers);
+ write_seqlock(&net->fs_lock);
hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+ write_sequnlock(&net->fs_lock);
afs_get_cell(cell, afs_cell_trace_get_server);
-added_dup:
- write_seqlock(&net->fs_addr_lock);
- estate = rcu_dereference_protected(server->endpoint_state,
- lockdep_is_held(&net->fs_addr_lock.lock));
- alist = estate->addresses;
-
- /* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
- * it in the IPv4 and/or IPv6 reverse-map lists.
- *
- * TODO: For speed we want to use something other than a flat list
- * here; even sorting the list in terms of lowest address would help a
- * bit, but anything we might want to do gets messy and memory
- * intensive.
- */
- if (alist->nr_addrs > 0)
- hlist_add_head_rcu(&server->addr_link, &net->fs_addresses);
-
- write_sequnlock(&net->fs_addr_lock);
-
exists:
- afs_get_server(server, afs_server_trace_get_install);
- write_sequnlock(&net->fs_lock);
+ afs_use_server(server, true, afs_server_trace_use_install);
return server;
}
/*
- * Allocate a new server record and mark it active.
+ * Allocate a new server record and mark it as active but uncreated.
*/
-static struct afs_server *afs_alloc_server(struct afs_cell *cell,
- const uuid_t *uuid,
- struct afs_addr_list *alist)
+static struct afs_server *afs_alloc_server(struct afs_cell *cell, const uuid_t *uuid)
{
- struct afs_endpoint_state *estate;
struct afs_server *server;
struct afs_net *net = cell->net;
@@ -205,65 +119,49 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
if (!server)
- goto enomem;
-
- estate = kzalloc(sizeof(struct afs_endpoint_state), GFP_KERNEL);
- if (!estate)
- goto enomem_server;
+ return NULL;
refcount_set(&server->ref, 1);
- atomic_set(&server->active, 1);
+ atomic_set(&server->active, 0);
+ __set_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
server->debug_id = atomic_inc_return(&afs_server_debug_id);
- server->addr_version = alist->version;
server->uuid = *uuid;
rwlock_init(&server->fs_lock);
+ INIT_WORK(&server->destroyer, &afs_server_destroyer);
+ timer_setup(&server->timer, afs_server_timer, 0);
INIT_LIST_HEAD(&server->volumes);
init_waitqueue_head(&server->probe_wq);
INIT_LIST_HEAD(&server->probe_link);
+ INIT_HLIST_NODE(&server->proc_link);
spin_lock_init(&server->probe_lock);
server->cell = cell;
server->rtt = UINT_MAX;
server->service_id = FS_SERVICE;
-
server->probe_counter = 1;
server->probed_at = jiffies - LONG_MAX / 2;
- refcount_set(&estate->ref, 1);
- estate->addresses = alist;
- estate->server_id = server->debug_id;
- estate->probe_seq = 1;
- rcu_assign_pointer(server->endpoint_state, estate);
afs_inc_servers_outstanding(net);
- trace_afs_server(server->debug_id, 1, 1, afs_server_trace_alloc);
- trace_afs_estate(estate->server_id, estate->probe_seq, refcount_read(&estate->ref),
- afs_estate_trace_alloc_server);
_leave(" = %p", server);
return server;
-
-enomem_server:
- kfree(server);
-enomem:
- _leave(" = NULL [nomem]");
- return NULL;
}
/*
* Look up an address record for a server
*/
-static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
- struct key *key, const uuid_t *uuid)
+static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_server *server,
+ struct key *key)
{
struct afs_vl_cursor vc;
struct afs_addr_list *alist = NULL;
int ret;
ret = -ERESTARTSYS;
- if (afs_begin_vlserver_operation(&vc, cell, key)) {
+ if (afs_begin_vlserver_operation(&vc, server->cell, key)) {
while (afs_select_vlserver(&vc)) {
if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags))
- alist = afs_yfsvl_get_endpoints(&vc, uuid);
+ alist = afs_yfsvl_get_endpoints(&vc, &server->uuid);
else
- alist = afs_vl_get_addrs_u(&vc, uuid);
+ alist = afs_vl_get_addrs_u(&vc, &server->uuid);
}
ret = afs_end_vlserver_operation(&vc);
@@ -273,72 +171,122 @@ static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
}
/*
- * Get or create a fileserver record.
+ * Get or create a fileserver record and return it with an active-use count on
+ * it.
*/
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
const uuid_t *uuid, u32 addr_version)
{
- struct afs_addr_list *alist;
- struct afs_server *server, *candidate;
+ struct afs_addr_list *alist = NULL;
+ struct afs_server *server, *candidate = NULL;
+ bool creating = false;
+ int ret;
_enter("%p,%pU", cell->net, uuid);
- server = afs_find_server_by_uuid(cell->net, uuid);
+ down_read(&cell->fs_lock);
+ server = afs_find_server_by_uuid(cell, uuid);
+ /* Won't see servers marked uncreated. */
+ up_read(&cell->fs_lock);
+
if (server) {
+ timer_delete_sync(&server->timer);
+ if (test_bit(AFS_SERVER_FL_CREATING, &server->flags))
+ goto wait_for_creation;
if (server->addr_version != addr_version)
set_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
return server;
}
- alist = afs_vl_lookup_addrs(cell, key, uuid);
- if (IS_ERR(alist))
- return ERR_CAST(alist);
-
- candidate = afs_alloc_server(cell, uuid, alist);
+ candidate = afs_alloc_server(cell, uuid);
if (!candidate) {
afs_put_addrlist(alist, afs_alist_trace_put_server_oom);
return ERR_PTR(-ENOMEM);
}
- server = afs_install_server(cell, candidate);
- if (server != candidate) {
- afs_put_addrlist(alist, afs_alist_trace_put_server_dup);
+ down_write(&cell->fs_lock);
+ server = afs_install_server(cell, &candidate);
+ if (test_bit(AFS_SERVER_FL_CREATING, &server->flags)) {
+ /* We need to wait for creation to complete. */
+ up_write(&cell->fs_lock);
+ goto wait_for_creation;
+ }
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ set_bit(AFS_SERVER_FL_CREATING, &server->flags);
+ clear_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+ creating = true;
+ }
+ up_write(&cell->fs_lock);
+ timer_delete_sync(&server->timer);
+
+ /* If we get to create the server, we look up the addresses and then
+ * immediately dispatch an asynchronous probe to each interface on the
+ * fileserver. This will make sure the repeat-probing service is
+ * started.
+ */
+ if (creating) {
+ alist = afs_vl_lookup_addrs(server, key);
+ if (IS_ERR(alist)) {
+ ret = PTR_ERR(alist);
+ goto create_failed;
+ }
+
+ ret = afs_fs_probe_fileserver(cell->net, server, alist, key);
+ if (ret)
+ goto create_failed;
+
+ clear_and_wake_up_bit(AFS_SERVER_FL_CREATING, &server->flags);
+ }
+
+out:
+ afs_put_addrlist(alist, afs_alist_trace_put_server_create);
+ if (candidate) {
+ kfree(rcu_access_pointer(server->endpoint_state));
kfree(candidate);
- } else {
- /* Immediately dispatch an asynchronous probe to each interface
- * on the fileserver. This will make sure the repeat-probing
- * service is started.
- */
- afs_fs_probe_fileserver(cell->net, server, alist, key);
+ afs_dec_servers_outstanding(cell->net);
+ }
+ return server ?: ERR_PTR(ret);
+
+wait_for_creation:
+ afs_see_server(server, afs_server_trace_wait_create);
+ wait_on_bit(&server->flags, AFS_SERVER_FL_CREATING, TASK_UNINTERRUPTIBLE);
+ if (test_bit_acquire(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ /* Barrier: read flag before error */
+ ret = READ_ONCE(server->create_error);
+ afs_put_server(cell->net, server, afs_server_trace_unuse_create_fail);
+ server = NULL;
+ goto out;
}
- return server;
-}
+ ret = 0;
+ goto out;
-/*
- * Set the server timer to fire after a given delay, assuming it's not already
- * set for an earlier time.
- */
-static void afs_set_server_timer(struct afs_net *net, time64_t delay)
-{
- if (net->live) {
- afs_inc_servers_outstanding(net);
- if (timer_reduce(&net->fs_timer, jiffies + delay * HZ))
- afs_dec_servers_outstanding(net);
+create_failed:
+ down_write(&cell->fs_lock);
+
+ WRITE_ONCE(server->create_error, ret);
+ smp_wmb(); /* Barrier: set error before flag. */
+ set_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+
+ clear_and_wake_up_bit(AFS_SERVER_FL_CREATING, &server->flags);
+
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ clear_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+ creating = true;
}
+ afs_unuse_server(cell->net, server, afs_server_trace_unuse_create_fail);
+ server = NULL;
+
+ up_write(&cell->fs_lock);
+ goto out;
}
/*
- * Server management timer. We have an increment on fs_outstanding that we
- * need to pass along to the work item.
+ * Set/reduce a server's timer.
*/
-void afs_servers_timer(struct timer_list *timer)
+static void afs_set_server_timer(struct afs_server *server, unsigned int delay_secs)
{
- struct afs_net *net = container_of(timer, struct afs_net, fs_timer);
-
- _enter("");
- if (!queue_work(afs_wq, &net->fs_manager))
- afs_dec_servers_outstanding(net);
+ mod_timer(&server->timer, jiffies + delay_secs * HZ);
}
/*
@@ -357,32 +305,20 @@ struct afs_server *afs_get_server(struct afs_server *server,
}
/*
- * Try to get a reference on a server object.
+ * Get an active count on a server object and maybe remove from the inactive
+ * list.
*/
-static struct afs_server *afs_maybe_use_server(struct afs_server *server,
- enum afs_server_trace reason)
-{
- unsigned int a;
- int r;
-
- if (!__refcount_inc_not_zero(&server->ref, &r))
- return NULL;
-
- a = atomic_inc_return(&server->active);
- trace_afs_server(server->debug_id, r + 1, a, reason);
- return server;
-}
-
-/*
- * Get an active count on a server object.
- */
-struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason)
+struct afs_server *afs_use_server(struct afs_server *server, bool activate,
+ enum afs_server_trace reason)
{
unsigned int a;
int r;
__refcount_inc(&server->ref, &r);
a = atomic_inc_return(&server->active);
+ if (a == 1 && activate &&
+ !test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ del_timer(&server->timer);
trace_afs_server(server->debug_id, r + 1, a, reason);
return server;
@@ -415,13 +351,16 @@ void afs_put_server(struct afs_net *net, struct afs_server *server,
void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- if (server) {
- unsigned int active = atomic_dec_return(&server->active);
+ if (!server)
+ return;
- if (active == 0)
- afs_set_server_timer(net, afs_server_gc_delay);
- afs_put_server(net, server, reason);
+ if (atomic_dec_and_test(&server->active)) {
+ if (test_bit(AFS_SERVER_FL_EXPIRED, &server->flags) ||
+ READ_ONCE(server->cell->state) >= AFS_CELL_REMOVING)
+ schedule_work(&server->destroyer);
}
+
+ afs_put_server(net, server, reason);
}
/*
@@ -430,10 +369,22 @@ void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
void afs_unuse_server(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- if (server) {
- server->unuse_time = ktime_get_real_seconds();
- afs_unuse_server_notime(net, server, reason);
+ if (!server)
+ return;
+
+ if (atomic_dec_and_test(&server->active)) {
+ if (!test_bit(AFS_SERVER_FL_EXPIRED, &server->flags) &&
+ READ_ONCE(server->cell->state) < AFS_CELL_REMOVING) {
+ time64_t unuse_time = ktime_get_real_seconds();
+
+ server->unuse_time = unuse_time;
+ afs_set_server_timer(server, afs_server_gc_delay);
+ } else {
+ schedule_work(&server->destroyer);
+ }
}
+
+ afs_put_server(net, server, reason);
}
static void afs_server_rcu(struct rcu_head *rcu)
@@ -463,159 +414,119 @@ static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server
}
/*
- * destroy a dead server
+ * Check to see if the server record has expired.
*/
-static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
+static bool afs_has_server_expired(const struct afs_server *server)
{
- if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
- afs_give_up_callbacks(net, server);
+ time64_t expires_at;
- afs_put_server(net, server, afs_server_trace_destroy);
+ if (atomic_read(&server->active))
+ return false;
+
+ if (!server->cell->net->live ||
+ server->cell->state >= AFS_CELL_REMOVING) {
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
+ 0, afs_server_trace_purging);
+ return true;
+ }
+
+ expires_at = server->unuse_time;
+ if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
+ !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
+ expires_at += afs_server_gc_delay;
+
+ return ktime_get_real_seconds() > expires_at;
}
/*
- * Garbage collect any expired servers.
+ * Remove a server record from its parent cell's database.
*/
-static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
+static bool afs_remove_server_from_cell(struct afs_server *server)
{
- struct afs_server *server, *next, *prev;
- int active;
-
- while ((server = gc_list)) {
- gc_list = server->gc_next;
-
- write_seqlock(&net->fs_lock);
-
- active = atomic_read(&server->active);
- if (active == 0) {
- trace_afs_server(server->debug_id, refcount_read(&server->ref),
- active, afs_server_trace_gc);
- next = rcu_dereference_protected(
- server->uuid_next, lockdep_is_held(&net->fs_lock.lock));
- prev = server->uuid_prev;
- if (!prev) {
- /* The one at the front is in the tree */
- if (!next) {
- rb_erase(&server->uuid_rb, &net->fs_servers);
- } else {
- rb_replace_node_rcu(&server->uuid_rb,
- &next->uuid_rb,
- &net->fs_servers);
- next->uuid_prev = NULL;
- }
- } else {
- /* This server is not at the front */
- rcu_assign_pointer(prev->uuid_next, next);
- if (next)
- next->uuid_prev = prev;
- }
-
- list_del(&server->probe_link);
- hlist_del_rcu(&server->proc_link);
- if (!hlist_unhashed(&server->addr_link))
- hlist_del_rcu(&server->addr_link);
- }
- write_sequnlock(&net->fs_lock);
+ struct afs_cell *cell = server->cell;
+
+ down_write(&cell->fs_lock);
- if (active == 0)
- afs_destroy_server(net, server);
+ if (!afs_has_server_expired(server)) {
+ up_write(&cell->fs_lock);
+ return false;
}
+
+ set_bit(AFS_SERVER_FL_EXPIRED, &server->flags);
+ _debug("expire %pU %u", &server->uuid, atomic_read(&server->active));
+ afs_see_server(server, afs_server_trace_see_expired);
+ rb_erase(&server->uuid_rb, &cell->fs_servers);
+ up_write(&cell->fs_lock);
+ return true;
}
-/*
- * Manage the records of servers known to be within a network namespace. This
- * includes garbage collecting unused servers.
- *
- * Note also that we were given an increment on net->servers_outstanding by
- * whoever queued us that we need to deal with before returning.
- */
-void afs_manage_servers(struct work_struct *work)
+static void afs_server_destroyer(struct work_struct *work)
{
- struct afs_net *net = container_of(work, struct afs_net, fs_manager);
- struct afs_server *gc_list = NULL;
- struct rb_node *cursor;
- time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
- bool purging = !net->live;
-
- _enter("");
+ struct afs_endpoint_state *estate;
+ struct afs_server *server = container_of(work, struct afs_server, destroyer);
+ struct afs_net *net = server->cell->net;
- /* Trawl the server list looking for servers that have expired from
- * lack of use.
- */
- read_seqlock_excl(&net->fs_lock);
+ afs_see_server(server, afs_server_trace_see_destroyer);
- for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
- struct afs_server *server =
- rb_entry(cursor, struct afs_server, uuid_rb);
- int active = atomic_read(&server->active);
+ if (test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ return;
- _debug("manage %pU %u", &server->uuid, active);
+ if (!afs_remove_server_from_cell(server))
+ return;
- if (purging) {
- trace_afs_server(server->debug_id, refcount_read(&server->ref),
- active, afs_server_trace_purging);
- if (active != 0)
- pr_notice("Can't purge s=%08x\n", server->debug_id);
- }
+ timer_shutdown_sync(&server->timer);
+ cancel_work(&server->destroyer);
- if (active == 0) {
- time64_t expire_at = server->unuse_time;
-
- if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
- !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
- expire_at += afs_server_gc_delay;
- if (purging || expire_at <= now) {
- server->gc_next = gc_list;
- gc_list = server;
- } else if (expire_at < next_manage) {
- next_manage = expire_at;
- }
- }
- }
+ if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
+ afs_give_up_callbacks(net, server);
- read_sequnlock_excl(&net->fs_lock);
+ /* Unbind the rxrpc_peer records from the server. */
+ estate = rcu_access_pointer(server->endpoint_state);
+ if (estate)
+ afs_set_peer_appdata(server, estate->addresses, NULL);
- /* Update the timer on the way out. We have to pass an increment on
- * servers_outstanding in the namespace that we are in to the timer or
- * the work scheduler.
- */
- if (!purging && next_manage < TIME64_MAX) {
- now = ktime_get_real_seconds();
+ write_seqlock(&net->fs_lock);
+ list_del_init(&server->probe_link);
+ if (!hlist_unhashed(&server->proc_link))
+ hlist_del_rcu(&server->proc_link);
+ write_sequnlock(&net->fs_lock);
- if (next_manage - now <= 0) {
- if (queue_work(afs_wq, &net->fs_manager))
- afs_inc_servers_outstanding(net);
- } else {
- afs_set_server_timer(net, next_manage - now);
- }
- }
+ afs_put_server(net, server, afs_server_trace_destroy);
+}
- afs_gc_servers(net, gc_list);
+static void afs_server_timer(struct timer_list *timer)
+{
+ struct afs_server *server = container_of(timer, struct afs_server, timer);
- afs_dec_servers_outstanding(net);
- _leave(" [%d]", atomic_read(&net->servers_outstanding));
+ afs_see_server(server, afs_server_trace_see_timer);
+ if (!test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ schedule_work(&server->destroyer);
}
-static void afs_queue_server_manager(struct afs_net *net)
+/*
+ * Kick the destroyer for all the servers in a cell so that the records
+ * can purge themselves.
+ */
+void afs_purge_servers(struct afs_cell *cell)
{
- afs_inc_servers_outstanding(net);
- if (!queue_work(afs_wq, &net->fs_manager))
- afs_dec_servers_outstanding(net);
+ struct afs_server *server;
+ struct rb_node *rb;
+
+ down_read(&cell->fs_lock);
+ for (rb = rb_first(&cell->fs_servers); rb; rb = rb_next(rb)) {
+ server = rb_entry(rb, struct afs_server, uuid_rb);
+ afs_see_server(server, afs_server_trace_see_purge);
+ schedule_work(&server->destroyer);
+ }
+ up_read(&cell->fs_lock);
}
/*
- * Purge list of servers.
+ * Wait for outstanding servers.
*/
-void afs_purge_servers(struct afs_net *net)
+void afs_wait_for_servers(struct afs_net *net)
{
_enter("");
- if (del_timer_sync(&net->fs_timer))
- afs_dec_servers_outstanding(net);
-
- afs_queue_server_manager(net);
-
- _debug("wait");
atomic_dec(&net->servers_outstanding);
wait_var_event(&net->servers_outstanding,
!atomic_read(&net->servers_outstanding));
@@ -639,7 +550,7 @@ static noinline bool afs_update_server_record(struct afs_operation *op,
atomic_read(&server->active),
afs_server_trace_update);
- alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid);
+ alist = afs_vl_lookup_addrs(server, op->key);
if (IS_ERR(alist)) {
rcu_read_lock();
estate = rcu_dereference(server->endpoint_state);
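The AFS_SERVER_FL_UNCREATED/AFS_SERVER_FL_CREATING pair above implements a create-once handshake on the server record: whoever clears UNCREATED under the write lock becomes the creator, everyone else sleeps on the CREATING bit, and clear_and_wake_up_bit() releases them once the address lookup and first probe have completed or been rolled back. Reduced to its essentials (a sketch of the pattern, not the exact code above):

    down_write(&cell->fs_lock);
    if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags)) {
            set_bit(AFS_SERVER_FL_CREATING, &server->flags);
            clear_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
            creating = true;                /* we do the creation */
    }
    up_write(&cell->fs_lock);

    if (!creating)                          /* somebody else is creating */
            wait_on_bit(&server->flags, AFS_SERVER_FL_CREATING,
                        TASK_UNINTERRUPTIBLE);

    /* creator, on success or failure (after recording create_error): */
    clear_and_wake_up_bit(AFS_SERVER_FL_CREATING, &server->flags);

On failure, UNCREATED is set again before the wakeup, so a woken waiter can tell the record never became usable and can pick up create_error; the smp_wmb()/test_bit_acquire() pair orders the error write against the flag read.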
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index d20cd902ef94..20d5474837df 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -16,7 +16,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
if (slist && refcount_dec_and_test(&slist->usage)) {
for (i = 0; i < slist->nr_servers; i++)
afs_unuse_server(net, slist->servers[i].server,
- afs_server_trace_put_slist);
+ afs_server_trace_unuse_slist);
kfree_rcu(slist, rcu);
}
}
@@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
break;
if (j < slist->nr_servers) {
if (slist->servers[j].server == server) {
- afs_unuse_server(volume->cell->net, server,
- afs_server_trace_put_slist_isort);
+ afs_unuse_server_notime(volume->cell->net, server,
+ afs_server_trace_unuse_slist_isort);
continue;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index a9bee610674e..25b306db6992 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -194,8 +194,6 @@ static int afs_show_options(struct seq_file *m, struct dentry *root)
if (as->dyn_root)
seq_puts(m, ",dyn");
- if (test_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(d_inode(root))->flags))
- seq_puts(m, ",autocell");
switch (as->flock_mode) {
case afs_flock_mode_unset: break;
case afs_flock_mode_local: p = "local"; break;
@@ -292,13 +290,14 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
/* lookup the cell record */
if (cellname) {
cell = afs_lookup_cell(ctx->net, cellname, cellnamesz,
- NULL, false);
+ NULL, false,
+ afs_cell_trace_use_lookup_mount);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%*.*s'\n",
cellnamesz, cellnamesz, cellname ?: "");
return PTR_ERR(cell);
}
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_parse);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_parse);
afs_see_cell(cell, afs_cell_trace_see_source);
ctx->cell = cell;
}
@@ -395,7 +394,7 @@ static int afs_validate_fc(struct fs_context *fc)
ctx->key = NULL;
cell = afs_use_cell(ctx->cell->alias_of,
afs_cell_trace_use_fc_alias);
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_fc);
ctx->cell = cell;
goto reget_key;
}
@@ -468,7 +467,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
/* allocate the root inode and dentry */
if (as->dyn_root) {
- inode = afs_iget_pseudo_dir(sb, true);
+ inode = afs_dynroot_iget_root(sb);
} else {
sprintf(sb->s_id, "%llu", as->volume->vid);
afs_activate_volume(as->volume);
@@ -478,9 +477,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (ctx->autocell || as->dyn_root)
- set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
-
ret = -ENOMEM;
sb->s_root = d_make_root(inode);
if (!sb->s_root)
@@ -488,9 +484,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
if (as->dyn_root) {
sb->s_d_op = &afs_dynroot_dentry_operations;
- ret = afs_dynroot_populate(sb);
- if (ret < 0)
- goto error;
} else {
sb->s_d_op = &afs_fs_dentry_operations;
rcu_assign_pointer(as->volume->sb, sb);
@@ -527,9 +520,8 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
static void afs_destroy_sbi(struct afs_super_info *as)
{
if (as) {
- struct afs_net *net = afs_net(as->net_ns);
afs_put_volume(as->volume, afs_volume_trace_put_destroy_sbi);
- afs_unuse_cell(net, as->cell, afs_cell_trace_unuse_sbi);
+ afs_unuse_cell(as->cell, afs_cell_trace_unuse_sbi);
put_net(as->net_ns);
kfree(as);
}
@@ -539,9 +531,6 @@ static void afs_kill_super(struct super_block *sb)
{
struct afs_super_info *as = AFS_FS_S(sb);
- if (as->dyn_root)
- afs_dynroot_depopulate(sb);
-
/* Clear the callback interests (which will do ilookup5) before
* deactivating the superblock.
*/
@@ -615,7 +604,7 @@ static void afs_free_fc(struct fs_context *fc)
afs_destroy_sbi(fc->s_fs_info);
afs_put_volume(ctx->volume, afs_volume_trace_put_free_fc);
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_fc);
key_put(ctx->key);
kfree(ctx);
}
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
index f9e76b604f31..709b4cdb723e 100644
--- a/fs/afs/vl_alias.c
+++ b/fs/afs/vl_alias.c
@@ -205,11 +205,11 @@ static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
goto is_alias;
if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
- afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias);
+ afs_unuse_cell(p, afs_cell_trace_unuse_check_alias);
return -ERESTARTSYS;
}
- afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias);
+ afs_unuse_cell(p, afs_cell_trace_unuse_check_alias);
}
mutex_unlock(&cell->net->proc_cells_lock);
@@ -269,7 +269,8 @@ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
if (!name_len || name_len > AFS_MAXCELLNAME)
master = ERR_PTR(-EOPNOTSUPP);
else
- master = afs_lookup_cell(cell->net, cell_name, name_len, NULL, false);
+ master = afs_lookup_cell(cell->net, cell_name, name_len, NULL, false,
+ afs_cell_trace_use_lookup_canonical);
kfree(cell_name);
if (IS_ERR(master))
return PTR_ERR(master);
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index d8f79f6ada3d..6ad9688d8f4b 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -48,7 +48,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
cell->dns_expiry <= ktime_get_real_seconds()) {
dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
- afs_queue_cell(cell, afs_cell_trace_get_queue_dns);
+ afs_queue_cell(cell, afs_cell_trace_queue_dns);
if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
if (wait_var_event_interruptible(
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index af3a3f57c1b3..0efff3d25133 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -10,6 +10,7 @@
#include "internal.h"
static unsigned __read_mostly afs_volume_record_life = 60 * 60;
+static atomic_t afs_volume_debug_id;
static void afs_destroy_volume(struct work_struct *work);
@@ -59,7 +60,7 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
struct afs_cell *cell = volume->cell;
if (!hlist_unhashed(&volume->proc_link)) {
- trace_afs_volume(volume->vid, refcount_read(&cell->ref),
+ trace_afs_volume(volume->debug_id, volume->vid, refcount_read(&volume->ref),
afs_volume_trace_remove);
write_seqlock(&cell->volume_lock);
hlist_del_rcu(&volume->proc_link);
@@ -84,6 +85,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
if (!volume)
goto error_0;
+ volume->debug_id = atomic_inc_return(&afs_volume_debug_id);
volume->vid = vldb->vid[params->type];
volume->update_at = ktime_get_real_seconds() + afs_volume_record_life;
volume->cell = afs_get_cell(params->cell, afs_cell_trace_get_vol);
@@ -115,7 +117,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
*_slist = slist;
rcu_assign_pointer(volume->servers, slist);
- trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc);
+ trace_afs_volume(volume->debug_id, volume->vid, 1, afs_volume_trace_alloc);
return volume;
error_1:
@@ -247,7 +249,7 @@ static void afs_destroy_volume(struct work_struct *work)
afs_remove_volume_from_cell(volume);
afs_put_serverlist(volume->cell->net, slist);
afs_put_cell(volume->cell, afs_cell_trace_put_vol);
- trace_afs_volume(volume->vid, refcount_read(&volume->ref),
+ trace_afs_volume(volume->debug_id, volume->vid, refcount_read(&volume->ref),
afs_volume_trace_free);
kfree_rcu(volume, rcu);
@@ -262,7 +264,7 @@ bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason)
int r;
if (__refcount_inc_not_zero(&volume->ref, &r)) {
- trace_afs_volume(volume->vid, r + 1, reason);
+ trace_afs_volume(volume->debug_id, volume->vid, r + 1, reason);
return true;
}
return false;
@@ -278,7 +280,7 @@ struct afs_volume *afs_get_volume(struct afs_volume *volume,
int r;
__refcount_inc(&volume->ref, &r);
- trace_afs_volume(volume->vid, r + 1, reason);
+ trace_afs_volume(volume->debug_id, volume->vid, r + 1, reason);
}
return volume;
}
@@ -290,12 +292,13 @@ struct afs_volume *afs_get_volume(struct afs_volume *volume,
void afs_put_volume(struct afs_volume *volume, enum afs_volume_trace reason)
{
if (volume) {
+ unsigned int debug_id = volume->debug_id;
afs_volid_t vid = volume->vid;
bool zero;
int r;
zero = __refcount_dec_and_test(&volume->ref, &r);
- trace_afs_volume(vid, r - 1, reason);
+ trace_afs_volume(debug_id, vid, r - 1, reason);
if (zero)
schedule_work(&volume->destructor);
}
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 77c7991d89aa..23cea74f9933 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -218,6 +218,8 @@ void autofs_clean_ino(struct autofs_info *);
static inline int autofs_check_pipe(struct file *pipe)
{
+ if (pipe->f_mode & FMODE_PATH)
+ return -EINVAL;
if (!(pipe->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
if (!S_ISFIFO(file_inode(pipe)->i_mode))
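A descriptor opened with O_PATH has FMODE_PATH set and permits no I/O at all, so it can never serve as the daemon's notification pipe; the new first check rejects such descriptors explicitly rather than leaving it to the later mode tests. From userspace the effect is simply (illustrative):

    int fd = open("/run/autofs.fifo", O_PATH);  /* location-only fd, no I/O */
    /* handing this fd to autofs as its pipe now fails with -EINVAL */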
diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c
index 6d57efbb8110..c5a6aae12d2c 100644
--- a/fs/autofs/dev-ioctl.c
+++ b/fs/autofs/dev-ioctl.c
@@ -442,7 +442,6 @@ static int autofs_dev_ioctl_timeout(struct file *fp,
sbi->exp_timeout = timeout * HZ;
} else {
struct dentry *base = fp->f_path.dentry;
- struct inode *inode = base->d_inode;
int path_len = param->size - AUTOFS_DEV_IOCTL_SIZE - 1;
struct dentry *dentry;
struct autofs_info *ino;
@@ -460,9 +459,7 @@ static int autofs_dev_ioctl_timeout(struct file *fp,
"the parent autofs mount timeout which could "
"prevent shutdown\n");
- inode_lock_shared(inode);
dentry = try_lookup_one_len(param->path, base, path_len);
- inode_unlock_shared(inode);
if (IS_ERR_OR_NULL(dentry))
return dentry ? PTR_ERR(dentry) : -ENOENT;
ino = autofs_dentry_ino(dentry);
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 530d18827e35..174c7205fee4 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -15,8 +15,8 @@ static int autofs_dir_symlink(struct mnt_idmap *, struct inode *,
struct dentry *, const char *);
static int autofs_dir_unlink(struct inode *, struct dentry *);
static int autofs_dir_rmdir(struct inode *, struct dentry *);
-static int autofs_dir_mkdir(struct mnt_idmap *, struct inode *,
- struct dentry *, umode_t);
+static struct dentry *autofs_dir_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
static long autofs_root_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long autofs_root_compat_ioctl(struct file *,
@@ -720,9 +720,9 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
return 0;
}
-static int autofs_dir_mkdir(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static struct dentry *autofs_dir_mkdir(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
{
struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
struct autofs_info *ino = autofs_dentry_ino(dentry);
@@ -739,7 +739,7 @@ static int autofs_dir_mkdir(struct mnt_idmap *idmap,
inode = autofs_get_inode(dir->i_sb, S_IFDIR | mode);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
d_add(dentry, inode);
if (sbi->version < 5)
@@ -751,7 +751,7 @@ static int autofs_dir_mkdir(struct mnt_idmap *idmap,
inc_nlink(dir);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
- return 0;
+ return NULL;
}
/* Get/set timeout ioctl() operation */
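The autofs conversion above, and the bad_inode and bcachefs ones below, track a VFS interface change: inode_operations->mkdir now returns struct dentry * rather than int. The convention is NULL for success with the passed-in dentry instantiated, ERR_PTR(err) on failure, and a different dentry pointer if the filesystem had to splice in its own. For a filesystem with an existing int-returning body the conversion is mechanical, roughly (a sketch with hypothetical foo_* names):

    static struct dentry *foo_mkdir(struct mnt_idmap *idmap, struct inode *dir,
                                    struct dentry *dentry, umode_t mode)
    {
            int err = foo_do_mkdir(dir, dentry, mode);  /* old int body */

            return err ? ERR_PTR(err) : NULL;
    }

which is exactly the ERR_PTR() wrapping visible in bad_inode_mkdir() and bch2_mkdir().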
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 316d88da2ce1..0ef9bcb744dd 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -58,10 +58,10 @@ static int bad_inode_symlink(struct mnt_idmap *idmap,
return -EIO;
}
-static int bad_inode_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *bad_inode_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return -EIO;
+ return ERR_PTR(-EIO);
}
static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry)
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index 15725b4ce393..595b57fabc9a 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -511,10 +511,6 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
ret = -EXDEV;
goto err;
}
- if (!d_is_positive(victim)) {
- ret = -ENOENT;
- goto err;
- }
ret = __bch2_unlink(dir, victim, true);
if (!ret) {
fsnotify_rmdir(dir, victim);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 90ade8f648d9..b2669d7ffec5 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -858,10 +858,10 @@ err:
return bch2_err_class(ret);
}
-static int bch2_mkdir(struct mnt_idmap *idmap,
- struct inode *vdir, struct dentry *dentry, umode_t mode)
+static struct dentry *bch2_mkdir(struct mnt_idmap *idmap,
+ struct inode *vdir, struct dentry *dentry, umode_t mode)
{
- return bch2_mknod(idmap, vdir, dentry, mode|S_IFDIR, 0);
+ return ERR_PTR(bch2_mknod(idmap, vdir, dentry, mode|S_IFDIR, 0));
}
static int bch2_rename2(struct mnt_idmap *idmap,
@@ -2396,7 +2396,7 @@ static struct file_system_type bcache_fs_type = {
.name = "bcachefs",
.init_fs_context = bch2_init_fs_context,
.kill_sb = bch2_kill_sb,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_LBS,
};
MODULE_ALIAS_FS("bcachefs");
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8054f44d39cf..584fa89bc877 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -762,8 +762,7 @@ static int parse_elf_property(const char *data, size_t *off, size_t datasz,
}
#define NOTE_DATA_SZ SZ_1K
-#define GNU_PROPERTY_TYPE_0_NAME "GNU"
-#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
+#define NOTE_NAME_SZ (sizeof(NN_GNU_PROPERTY_TYPE_0))
static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
struct arch_elf_state *arch)
@@ -800,7 +799,7 @@ static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
note.nhdr.n_namesz != NOTE_NAME_SZ ||
strncmp(note.data + sizeof(note.nhdr),
- GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
+ NN_GNU_PROPERTY_TYPE_0, n - sizeof(note.nhdr)))
return -ENOEXEC;
off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
@@ -1603,14 +1602,14 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
do
i += 2;
while (auxv[i - 2] != AT_NULL);
- fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ fill_note(note, NN_AUXV, NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
const kernel_siginfo_t *siginfo)
{
copy_siginfo_to_external(csigdata, siginfo);
- fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
+ fill_note(note, NN_SIGINFO, NT_SIGINFO, sizeof(*csigdata), csigdata);
}
/*
@@ -1706,7 +1705,7 @@ static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm
}
size = name_curpos - (char *)data;
- fill_note(note, "CORE", NT_FILE, size, data);
+ fill_note(note, NN_FILE, NT_FILE, size, data);
return 0;
}
@@ -1767,7 +1766,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
regset_get(t->task, &view->regsets[0],
sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
+ fill_note(&t->notes[0], NN_PRSTATUS, NT_PRSTATUS,
PRSTATUS_SIZE, &t->prstatus);
info->size += notesize(&t->notes[0]);
@@ -1801,7 +1800,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
if (is_fpreg)
SET_PR_FPVALID(&t->prstatus);
- fill_note(&t->notes[note_iter], is_fpreg ? "CORE" : "LINUX",
+ fill_note(&t->notes[note_iter], is_fpreg ? NN_PRFPREG : "LINUX",
note_type, ret, data);
info->size += notesize(&t->notes[note_iter]);
@@ -1821,7 +1820,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
fill_prstatus(&t->prstatus.common, p, signr);
elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
+ fill_note(&t->notes[0], NN_PRSTATUS, NT_PRSTATUS, sizeof(t->prstatus),
&(t->prstatus));
info->size += notesize(&t->notes[0]);
@@ -1832,7 +1831,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
}
t->prstatus.pr_fpvalid = 1;
- fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
+ fill_note(&t->notes[1], NN_PRFPREG, NT_PRFPREG, sizeof(*fpu), fpu);
info->size += notesize(&t->notes[1]);
return 1;
@@ -1852,7 +1851,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
if (!psinfo)
return 0;
- fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+ fill_note(&info->psinfo, NN_PRPSINFO, NT_PRPSINFO, sizeof(*psinfo), psinfo);
#ifdef CORE_DUMP_USE_REGSET
view = task_user_regset_view(dump_task);
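The NN_* identifiers replace open-coded note-name strings, pairing each NT_* note type with its canonical name. Their definitions are not part of this diff; presumably they look roughly like:

#define NN_PRSTATUS		"CORE"
#define NN_PRFPREG		"CORE"
#define NN_PRPSINFO		"CORE"
#define NN_SIGINFO		"CORE"
#define NN_AUXV			"CORE"
#define NN_FILE			"CORE"
#define NN_GNU_PROPERTY_TYPE_0	"GNU"

Since these are string literals, sizeof() still yields the name length including the trailing NUL, which is why NOTE_NAME_SZ above can be defined directly from NN_GNU_PROPERTY_TYPE_0.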
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index c13ee8180b17..9133f3827f90 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1024,7 +1024,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
/* deal with each load segment separately */
phdr = params->phdrs;
for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
- unsigned long maddr, disp, excess, excess1;
+ unsigned long maddr, disp, excess;
int prot = 0, flags;
if (phdr->p_type != PT_LOAD)
@@ -1120,9 +1120,10 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
* extant in the file
*/
excess = phdr->p_memsz - phdr->p_filesz;
- excess1 = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);
#ifdef CONFIG_MMU
+ unsigned long excess1
+ = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);
if (excess > excess1) {
unsigned long xaddr = maddr + phdr->p_filesz + excess1;
unsigned long xmaddr;
@@ -1397,7 +1398,7 @@ static struct elf_thread_status *elf_dump_thread_status(long signr, struct task_
regset_get(p, &view->regsets[0],
sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
+ fill_note(&t->notes[0], NN_PRSTATUS, NT_PRSTATUS, sizeof(t->prstatus),
&t->prstatus);
t->num_notes++;
*sz += notesize(&t->notes[0]);
@@ -1415,7 +1416,7 @@ static struct elf_thread_status *elf_dump_thread_status(long signr, struct task_
}
if (t->prstatus.pr_fpvalid) {
- fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
+ fill_note(&t->notes[1], NN_PRFPREG, NT_PRFPREG, sizeof(t->fpu),
&t->fpu);
t->num_notes++;
*sz += notesize(&t->notes[1]);
@@ -1530,7 +1531,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
*/
fill_psinfo(psinfo, current->group_leader, current->mm);
- fill_note(&psinfo_note, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+ fill_note(&psinfo_note, NN_PRPSINFO, NT_PRPSINFO, sizeof(*psinfo), psinfo);
thread_status_size += notesize(&psinfo_note);
auxv = (elf_addr_t *) current->mm->saved_auxv;
@@ -1538,7 +1539,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
do
i += 2;
while (auxv[i - 2] != AT_NULL);
- fill_note(&auxv_note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
+ fill_note(&auxv_note, NN_AUXV, NT_AUXV, i * sizeof(elf_addr_t), auxv);
thread_status_size += notesize(&auxv_note);
offset = sizeof(*elf); /* ELF header */
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 38756f8cef46..a9e56c994e9e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6744,18 +6744,18 @@ fail:
return err;
}
-static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
inode = new_inode(dir->i_sb);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
- return btrfs_create_common(dir, dentry, inode);
+ return ERR_PTR(btrfs_create_common(dir, dentry, inode));
}
static noinline int uncompress_inline(struct btrfs_path *path,
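btrfs_mkdir(), like bch2_mkdir() earlier, adapts an int-returning helper by wrapping it in ERR_PTR(). That is sound because ERR_PTR(0) is NULL, which in the new convention means success with the passed-in dentry. A caller-side sketch of the decode:

	struct dentry *de = btrfs_mkdir(idmap, dir, dentry, mode);

	if (IS_ERR(de))		/* btrfs_create_common() failed */
		return PTR_ERR(de);
	/* de == NULL here: a 0 return became "use the passed-in dentry" */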
diff --git a/fs/buffer.c b/fs/buffer.c
index cc8452f60251..194eacbefc95 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2361,9 +2361,8 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
{
struct inode *inode = folio->mapping->host;
sector_t iblock, lblock;
- struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
+ struct buffer_head *bh, *head, *prev = NULL;
size_t blocksize;
- int nr, i;
int fully_mapped = 1;
bool page_error = false;
loff_t limit = i_size_read(inode);
@@ -2372,16 +2371,12 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
limit = inode->i_sb->s_maxbytes;
- VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-
head = folio_create_buffers(folio, inode, 0);
blocksize = head->b_size;
iblock = div_u64(folio_pos(folio), blocksize);
lblock = div_u64(limit + blocksize - 1, blocksize);
bh = head;
- nr = 0;
- i = 0;
do {
if (buffer_uptodate(bh))
@@ -2398,7 +2393,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
page_error = true;
}
if (!buffer_mapped(bh)) {
- folio_zero_range(folio, i * blocksize,
+ folio_zero_range(folio, bh_offset(bh),
blocksize);
if (!err)
set_buffer_uptodate(bh);
@@ -2411,40 +2406,33 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
if (buffer_uptodate(bh))
continue;
}
- arr[nr++] = bh;
- } while (i++, iblock++, (bh = bh->b_this_page) != head);
-
- if (fully_mapped)
- folio_set_mappedtodisk(folio);
-
- if (!nr) {
- /*
- * All buffers are uptodate or get_block() returned an
- * error when trying to map them - we can finish the read.
- */
- folio_end_read(folio, !page_error);
- return 0;
- }
- /* Stage two: lock the buffers */
- for (i = 0; i < nr; i++) {
- bh = arr[i];
lock_buffer(bh);
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ continue;
+ }
+
mark_buffer_async_read(bh);
- }
+ if (prev)
+ submit_bh(REQ_OP_READ, prev);
+ prev = bh;
+ } while (iblock++, (bh = bh->b_this_page) != head);
+
+ if (fully_mapped)
+ folio_set_mappedtodisk(folio);
/*
- * Stage 3: start the IO. Check for uptodateness
- * inside the buffer lock in case another process reading
- * the underlying blockdev brought it uptodate (the sct fix).
+ * All buffers are uptodate or get_block() returned an error
+ * when trying to map them - we must finish the read because
+ * end_buffer_async_read() will never be called on any buffer
+ * in this folio.
*/
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- if (buffer_uptodate(bh))
- end_buffer_async_read(bh, 1);
- else
- submit_bh(REQ_OP_READ, bh);
- }
+ if (prev)
+ submit_bh(REQ_OP_READ, prev);
+ else
+ folio_end_read(folio, !page_error);
+
return 0;
}
EXPORT_SYMBOL(block_read_full_folio);
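The rewrite above drops the arr[MAX_BUF_PER_PAGE] staging array (and with it the single-page assumption that the deleted VM_BUG_ON_FOLIO enforced) by submitting each buffer one step behind the walk: a buffer is issued only after its successor has been locked and marked for async read, so the final completion cannot finish the folio while the loop is still running. A minimal model of the pattern, with stand-in types rather than buffer heads:

struct unit { struct unit *next; int needs_io; };

static void submit(struct unit *u)	{ /* start async I/O (stub) */ }
static void finish_all(void)		{ /* complete the folio (stub) */ }

static void read_ring(struct unit *head)
{
	struct unit *prev = NULL, *u = head;

	do {
		if (!u->needs_io)
			continue;	/* uptodate or unmapped: skip */
		if (prev)
			submit(prev);	/* issue the *previous* unit */
		prev = u;
	} while ((u = u->next) != head);

	if (prev)
		submit(prev);		/* last unit: traversal is done */
	else
		finish_all();		/* nothing needed I/O at all */
}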
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 7cf59713f0f7..83a60126de0f 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -128,18 +128,19 @@ retry:
ret = security_path_mkdir(&path, subdir, 0700);
if (ret < 0)
goto mkdir_error;
- ret = cachefiles_inject_write_error();
- if (ret == 0)
- ret = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
- if (ret < 0) {
+	ret = cachefiles_inject_write_error();
+	if (ret == 0)
+		subdir = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
+	else
+		subdir = ERR_PTR(ret);
+	if (IS_ERR(subdir)) {
+		ret = PTR_ERR(subdir);
trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
cachefiles_trace_mkdir_error);
goto mkdir_error;
}
trace_cachefiles_mkdir(dir, subdir);
- if (unlikely(d_unhashed(subdir))) {
- cachefiles_put_directory(subdir);
+ if (unlikely(d_unhashed(subdir) || d_is_negative(subdir))) {
+ dput(subdir);
goto retry;
}
ASSERT(d_backing_inode(subdir));
@@ -195,7 +196,8 @@ mark_error:
mkdir_error:
inode_unlock(d_inode(dir));
- dput(subdir);
+ if (!IS_ERR(subdir))
+ dput(subdir);
pr_err("mkdir %s failed with error %d\n", dirname, ret);
return ERR_PTR(ret);
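These hunks adapt cachefiles to the dentry-returning vfs_mkdir(). Under the new contract the passed-in dentry is consumed on failure (hence the IS_ERR() guard before dput() in the mkdir_error path above), and the returned dentry may be negative if another task raced with us. A hedged caller-side sketch, assuming `child` holds a reference from a prior lookup:

static struct dentry *make_subdir(struct dentry *dir, struct dentry *child)
{
	child = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), child, 0700);
	if (IS_ERR(child))
		return child;	/* our reference was already dropped */
	if (d_is_negative(child)) {
		dput(child);	/* raced: caller should retry the lookup */
		return ERR_PTR(-EAGAIN);
	}
	return child;		/* caller dputs this when finished */
}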
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index fe3de9ad57bf..d9bc67176128 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -317,8 +317,9 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
goto err_free_id;
}
- anon_file->file = anon_inode_getfile("[cachefiles]",
- &cachefiles_ondemand_fd_fops, object, O_WRONLY);
+ anon_file->file = anon_inode_getfile_fmode("[cachefiles]",
+ &cachefiles_ondemand_fd_fops, object,
+ O_WRONLY, FMODE_PWRITE | FMODE_LSEEK);
if (IS_ERR(anon_file->file)) {
ret = PTR_ERR(anon_file->file);
goto err_put_fd;
@@ -333,8 +334,6 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
goto err_put_file;
}
- anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
-
load = (void *)req->msg.data;
load->fd = anon_file->fd;
object->ondemand->ondemand_id = object_id;
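anon_inode_getfile_fmode() lets the caller specify extra f_mode bits at creation time, closing the window in which the file existed without FMODE_PWRITE | FMODE_LSEEK. Its prototype is not shown in this diff; inferred from the call site it is presumably:

struct file *anon_inode_getfile_fmode(const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags,
				      fmode_t f_mode);

That is, the old create-then-OR two-step collapses into a single call.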
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index f5224a566b69..29be367905a1 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -82,6 +82,7 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
struct inode *inode = mapping->host;
struct ceph_client *cl = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_inode_info *ci;
struct ceph_snap_context *snapc;
@@ -92,6 +93,8 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
return false;
}
+ atomic64_inc(&mdsc->dirty_folios);
+
ci = ceph_inode(inode);
/* dirty the head */
@@ -568,7 +571,36 @@ struct ceph_writeback_ctl
u64 truncate_size;
u32 truncate_seq;
bool size_stable;
+
bool head_snapc;
+ struct ceph_snap_context *snapc;
+ struct ceph_snap_context *last_snapc;
+
+ bool done;
+ bool should_loop;
+ bool range_whole;
+ pgoff_t start_index;
+ pgoff_t index;
+ pgoff_t end;
+ xa_mark_t tag;
+
+ pgoff_t strip_unit_end;
+ unsigned int wsize;
+ unsigned int nr_folios;
+ unsigned int max_pages;
+ unsigned int locked_pages;
+
+ int op_idx;
+ int num_ops;
+ u64 offset;
+ u64 len;
+
+ struct folio_batch fbatch;
+ unsigned int processed_in_fbatch;
+
+ bool from_pool;
+ struct page **pages;
+ struct page **data_pages;
};
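Everything that used to live as roughly twenty locals of ceph_writepages_start() moves into this context structure, which is what lets the monolithic function be carved into the helpers that follow. Condensed (and not verbatim), the resulting control flow is:

	ceph_init_writeback_ctl(mapping, wbc, &ceph_wbc);
retry:
	if (ceph_define_writeback_range(mapping, wbc, &ceph_wbc))
		goto out;			/* no dirty snap context */
	while (!has_writeback_done(&ceph_wbc)) {
		/* grab a batch of dirty folios, lock and vet them */
		ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
		if (ceph_wbc.locked_pages)
			ceph_submit_write(mapping, wbc, &ceph_wbc);
	}
	if (ceph_wbc.should_loop && !ceph_wbc.done)
		goto retry;			/* next snap context */
out:
	ceph_put_snap_context(ceph_wbc.last_snapc);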
/*
@@ -666,22 +698,23 @@ static u64 get_writepages_data_length(struct inode *inode,
}
/*
- * Write a single page, but leave the page locked.
+ * Write a folio, but leave it locked.
*
* If we get a write error, mark the mapping for error, but still adjust the
- * dirty page accounting (i.e., page is no longer dirty).
+ * dirty page accounting (i.e., folio is no longer dirty).
*/
-static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+static int write_folio_nounlock(struct folio *folio,
+ struct writeback_control *wbc)
{
- struct folio *folio = page_folio(page);
- struct inode *inode = page->mapping->host;
+ struct page *page = &folio->page;
+ struct inode *inode = folio->mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_snap_context *snapc, *oldest;
- loff_t page_off = page_offset(page);
+ loff_t page_off = folio_pos(folio);
int err;
- loff_t len = thp_size(page);
+ loff_t len = folio_size(folio);
loff_t wlen;
struct ceph_writeback_ctl ceph_wbc;
struct ceph_osd_client *osdc = &fsc->client->osdc;
@@ -689,27 +722,27 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
bool caching = ceph_is_cache_enabled(inode);
struct page *bounce_page = NULL;
- doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page,
- page->index);
+ doutc(cl, "%llx.%llx folio %p idx %lu\n", ceph_vinop(inode), folio,
+ folio->index);
if (ceph_inode_is_shutdown(inode))
return -EIO;
/* verify this is a writeable snap context */
- snapc = page_snap_context(page);
+ snapc = page_snap_context(&folio->page);
if (!snapc) {
- doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode),
- page);
+ doutc(cl, "%llx.%llx folio %p not dirty?\n", ceph_vinop(inode),
+ folio);
return 0;
}
oldest = get_oldest_context(inode, &ceph_wbc, snapc);
if (snapc->seq > oldest->seq) {
- doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n",
- ceph_vinop(inode), page, snapc);
+ doutc(cl, "%llx.%llx folio %p snapc %p not writeable - noop\n",
+ ceph_vinop(inode), folio, snapc);
/* we should only noop if called by kswapd */
WARN_ON(!(current->flags & PF_MEMALLOC));
ceph_put_snap_context(oldest);
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
return 0;
}
ceph_put_snap_context(oldest);
@@ -726,8 +759,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
len = ceph_wbc.i_size - page_off;
wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
- doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n",
- ceph_vinop(inode), page, page->index, page_off, wlen, snapc,
+ doutc(cl, "%llx.%llx folio %p index %lu on %llu~%llu snapc %p seq %lld\n",
+ ceph_vinop(inode), folio, folio->index, page_off, wlen, snapc,
snapc->seq);
if (atomic_long_inc_return(&fsc->writeback_count) >
@@ -740,32 +773,32 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
ceph_wbc.truncate_seq,
ceph_wbc.truncate_size, true);
if (IS_ERR(req)) {
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
return PTR_ERR(req);
}
if (wlen < len)
len = wlen;
- set_page_writeback(page);
+ folio_start_writeback(folio);
if (caching)
- ceph_set_page_fscache(page);
+ ceph_set_page_fscache(&folio->page);
ceph_fscache_write_to_cache(inode, page_off, len, caching);
if (IS_ENCRYPTED(inode)) {
- bounce_page = fscrypt_encrypt_pagecache_blocks(page,
+ bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
CEPH_FSCRYPT_BLOCK_SIZE, 0,
GFP_NOFS);
if (IS_ERR(bounce_page)) {
- redirty_page_for_writepage(wbc, page);
- end_page_writeback(page);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_end_writeback(folio);
ceph_osdc_put_request(req);
return PTR_ERR(bounce_page);
}
}
/* it may be a short write due to an object boundary */
- WARN_ON_ONCE(len > thp_size(page));
+ WARN_ON_ONCE(len > folio_size(folio));
osd_req_op_extent_osd_data_pages(req, 0,
bounce_page ? &bounce_page : &page, wlen, 0,
false, false);
@@ -791,25 +824,25 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
if (err == -ERESTARTSYS) {
/* killed by SIGKILL */
doutc(cl, "%llx.%llx interrupted page %p\n",
- ceph_vinop(inode), page);
- redirty_page_for_writepage(wbc, page);
- end_page_writeback(page);
+ ceph_vinop(inode), folio);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_end_writeback(folio);
return err;
}
if (err == -EBLOCKLISTED)
fsc->blocklisted = true;
- doutc(cl, "%llx.%llx setting page/mapping error %d %p\n",
- ceph_vinop(inode), err, page);
+ doutc(cl, "%llx.%llx setting mapping error %d %p\n",
+ ceph_vinop(inode), err, folio);
mapping_set_error(&inode->i_data, err);
wbc->pages_skipped++;
} else {
doutc(cl, "%llx.%llx cleaned page %p\n",
- ceph_vinop(inode), page);
+ ceph_vinop(inode), folio);
err = 0; /* vfs expects us to return 0 */
}
- oldest = detach_page_private(page);
+ oldest = folio_detach_private(folio);
WARN_ON_ONCE(oldest != snapc);
- end_page_writeback(page);
+ folio_end_writeback(folio);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
ceph_put_snap_context(snapc); /* page's reference */
@@ -820,32 +853,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
return err;
}
-static int ceph_writepage(struct page *page, struct writeback_control *wbc)
-{
- int err;
- struct inode *inode = page->mapping->host;
- BUG_ON(!inode);
- ihold(inode);
-
- if (wbc->sync_mode == WB_SYNC_NONE &&
- ceph_inode_to_fs_client(inode)->write_congested) {
- redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
- }
-
- folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
-
- err = writepage_nounlock(page, wbc);
- if (err == -ERESTARTSYS) {
- /* direct memory reclaimer was killed by SIGKILL. return 0
- * to prevent caller from setting mapping/page error */
- err = 0;
- }
- unlock_page(page);
- iput(inode);
- return err;
-}
-
/*
* async writeback completion handler.
*
@@ -865,6 +872,7 @@ static void writepages_finish(struct ceph_osd_request *req)
struct ceph_snap_context *snapc = req->r_snapc;
struct address_space *mapping = inode->i_mapping;
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
unsigned int len = 0;
bool remove_page;
@@ -920,6 +928,12 @@ static void writepages_finish(struct ceph_osd_request *req)
ceph_put_snap_context(detach_page_private(page));
end_page_writeback(page);
+
+ if (atomic64_dec_return(&mdsc->dirty_folios) <= 0) {
+ wake_up_all(&mdsc->flush_end_wq);
+ WARN_ON(atomic64_read(&mdsc->dirty_folios) < 0);
+ }
+
doutc(cl, "unlocking %p\n", page);
if (remove_page)
@@ -949,36 +963,13 @@ static void writepages_finish(struct ceph_osd_request *req)
ceph_dec_osd_stopping_blocker(fsc->mdsc);
}
-/*
- * initiate async writeback
- */
-static int ceph_writepages_start(struct address_space *mapping,
- struct writeback_control *wbc)
+static inline
+bool is_forced_umount(struct address_space *mapping)
{
struct inode *inode = mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
- struct ceph_vino vino = ceph_vino(inode);
- pgoff_t index, start_index, end = -1;
- struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
- struct folio_batch fbatch;
- int rc = 0;
- unsigned int wsize = i_blocksize(inode);
- struct ceph_osd_request *req = NULL;
- struct ceph_writeback_ctl ceph_wbc;
- bool should_loop, range_whole = false;
- bool done = false;
- bool caching = ceph_is_cache_enabled(inode);
- xa_mark_t tag;
-
- if (wbc->sync_mode == WB_SYNC_NONE &&
- fsc->write_congested)
- return 0;
-
- doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
- wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
- (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
if (ceph_inode_is_shutdown(inode)) {
if (ci->i_wrbuffer_ref > 0) {
@@ -987,388 +978,733 @@ static int ceph_writepages_start(struct address_space *mapping,
ceph_vinop(inode), ceph_ino(inode));
}
mapping_set_error(mapping, -EIO);
- return -EIO; /* we're in a forced umount, don't write! */
+ return true;
}
+
+ return false;
+}
+
+static inline
+unsigned int ceph_define_write_size(struct address_space *mapping)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ unsigned int wsize = i_blocksize(inode);
+
if (fsc->mount_options->wsize < wsize)
wsize = fsc->mount_options->wsize;
- folio_batch_init(&fbatch);
+ return wsize;
+}
+
+static inline
+void ceph_folio_batch_init(struct ceph_writeback_ctl *ceph_wbc)
+{
+ folio_batch_init(&ceph_wbc->fbatch);
+ ceph_wbc->processed_in_fbatch = 0;
+}
+
+static inline
+void ceph_folio_batch_reinit(struct ceph_writeback_ctl *ceph_wbc)
+{
+ folio_batch_release(&ceph_wbc->fbatch);
+ ceph_folio_batch_init(ceph_wbc);
+}
+
+static inline
+void ceph_init_writeback_ctl(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ ceph_wbc->snapc = NULL;
+ ceph_wbc->last_snapc = NULL;
+
+ ceph_wbc->strip_unit_end = 0;
+ ceph_wbc->wsize = ceph_define_write_size(mapping);
- start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
- index = start_index;
+ ceph_wbc->nr_folios = 0;
+ ceph_wbc->max_pages = 0;
+ ceph_wbc->locked_pages = 0;
+
+ ceph_wbc->done = false;
+ ceph_wbc->should_loop = false;
+ ceph_wbc->range_whole = false;
+
+ ceph_wbc->start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
+ ceph_wbc->index = ceph_wbc->start_index;
+ ceph_wbc->end = -1;
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
- tag = PAGECACHE_TAG_TOWRITE;
+ ceph_wbc->tag = PAGECACHE_TAG_TOWRITE;
} else {
- tag = PAGECACHE_TAG_DIRTY;
+ ceph_wbc->tag = PAGECACHE_TAG_DIRTY;
}
-retry:
+
+ ceph_wbc->op_idx = -1;
+ ceph_wbc->num_ops = 0;
+ ceph_wbc->offset = 0;
+ ceph_wbc->len = 0;
+ ceph_wbc->from_pool = false;
+
+ ceph_folio_batch_init(ceph_wbc);
+
+ ceph_wbc->pages = NULL;
+ ceph_wbc->data_pages = NULL;
+}
+
+static inline
+int ceph_define_writeback_range(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+
/* find oldest snap context with dirty data */
- snapc = get_oldest_context(inode, &ceph_wbc, NULL);
- if (!snapc) {
+ ceph_wbc->snapc = get_oldest_context(inode, ceph_wbc, NULL);
+ if (!ceph_wbc->snapc) {
/* hmm, why does writepages get called when there
is no dirty data? */
doutc(cl, " no snap context with dirty data?\n");
- goto out;
+ return -ENODATA;
}
- doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n", snapc,
- snapc->seq, snapc->num_snaps);
- should_loop = false;
- if (ceph_wbc.head_snapc && snapc != last_snapc) {
+ doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n",
+ ceph_wbc->snapc, ceph_wbc->snapc->seq,
+ ceph_wbc->snapc->num_snaps);
+
+ ceph_wbc->should_loop = false;
+
+ if (ceph_wbc->head_snapc && ceph_wbc->snapc != ceph_wbc->last_snapc) {
/* where to start/end? */
if (wbc->range_cyclic) {
- index = start_index;
- end = -1;
- if (index > 0)
- should_loop = true;
- doutc(cl, " cyclic, start at %lu\n", index);
+ ceph_wbc->index = ceph_wbc->start_index;
+ ceph_wbc->end = -1;
+ if (ceph_wbc->index > 0)
+ ceph_wbc->should_loop = true;
+ doutc(cl, " cyclic, start at %lu\n", ceph_wbc->index);
} else {
- index = wbc->range_start >> PAGE_SHIFT;
- end = wbc->range_end >> PAGE_SHIFT;
+ ceph_wbc->index = wbc->range_start >> PAGE_SHIFT;
+ ceph_wbc->end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = true;
- doutc(cl, " not cyclic, %lu to %lu\n", index, end);
+ ceph_wbc->range_whole = true;
+ doutc(cl, " not cyclic, %lu to %lu\n",
+ ceph_wbc->index, ceph_wbc->end);
}
- } else if (!ceph_wbc.head_snapc) {
+ } else if (!ceph_wbc->head_snapc) {
/* Do not respect wbc->range_{start,end}. Dirty pages
* in that range can be associated with newer snapc.
* They are not writeable until we write all dirty pages
* associated with 'snapc' get written */
- if (index > 0)
- should_loop = true;
+ if (ceph_wbc->index > 0)
+ ceph_wbc->should_loop = true;
doutc(cl, " non-head snapc, range whole\n");
}
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
- tag_pages_for_writeback(mapping, index, end);
+ ceph_put_snap_context(ceph_wbc->last_snapc);
+ ceph_wbc->last_snapc = ceph_wbc->snapc;
- ceph_put_snap_context(last_snapc);
- last_snapc = snapc;
+ return 0;
+}
- while (!done && index <= end) {
- int num_ops = 0, op_idx;
- unsigned i, nr_folios, max_pages, locked_pages = 0;
- struct page **pages = NULL, **data_pages;
- struct page *page;
- pgoff_t strip_unit_end = 0;
- u64 offset = 0, len = 0;
- bool from_pool = false;
+static inline
+bool has_writeback_done(struct ceph_writeback_ctl *ceph_wbc)
+{
+ return ceph_wbc->done && ceph_wbc->index > ceph_wbc->end;
+}
- max_pages = wsize >> PAGE_SHIFT;
+static inline
+bool can_next_page_be_processed(struct ceph_writeback_ctl *ceph_wbc,
+ unsigned index)
+{
+ return index < ceph_wbc->nr_folios &&
+ ceph_wbc->locked_pages < ceph_wbc->max_pages;
+}
-get_more_pages:
- nr_folios = filemap_get_folios_tag(mapping, &index,
- end, tag, &fbatch);
- doutc(cl, "pagevec_lookup_range_tag got %d\n", nr_folios);
- if (!nr_folios && !locked_pages)
- break;
- for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
- struct folio *folio = fbatch.folios[i];
+static
+int ceph_check_page_before_write(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc,
+ struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_snap_context *pgsnapc;
- page = &folio->page;
- doutc(cl, "? %p idx %lu\n", page, page->index);
- if (locked_pages == 0)
- lock_page(page); /* first page */
- else if (!trylock_page(page))
- break;
+ /* only dirty folios, or our accounting breaks */
+ if (unlikely(!folio_test_dirty(folio) || folio->mapping != mapping)) {
+ doutc(cl, "!dirty or !mapping %p\n", folio);
+ return -ENODATA;
+ }
- /* only dirty pages, or our accounting breaks */
- if (unlikely(!PageDirty(page)) ||
- unlikely(page->mapping != mapping)) {
- doutc(cl, "!dirty or !mapping %p\n", page);
- unlock_page(page);
- continue;
- }
- /* only if matching snap context */
- pgsnapc = page_snap_context(page);
- if (pgsnapc != snapc) {
- doutc(cl, "page snapc %p %lld != oldest %p %lld\n",
- pgsnapc, pgsnapc->seq, snapc, snapc->seq);
- if (!should_loop &&
- !ceph_wbc.head_snapc &&
- wbc->sync_mode != WB_SYNC_NONE)
- should_loop = true;
- unlock_page(page);
- continue;
+ /* only if matching snap context */
+ pgsnapc = page_snap_context(&folio->page);
+ if (pgsnapc != ceph_wbc->snapc) {
+ doutc(cl, "folio snapc %p %lld != oldest %p %lld\n",
+ pgsnapc, pgsnapc->seq,
+ ceph_wbc->snapc, ceph_wbc->snapc->seq);
+
+ if (!ceph_wbc->should_loop && !ceph_wbc->head_snapc &&
+ wbc->sync_mode != WB_SYNC_NONE)
+ ceph_wbc->should_loop = true;
+
+ return -ENODATA;
+ }
+
+ if (folio_pos(folio) >= ceph_wbc->i_size) {
+ doutc(cl, "folio at %lu beyond eof %llu\n",
+ folio->index, ceph_wbc->i_size);
+
+ if ((ceph_wbc->size_stable ||
+ folio_pos(folio) >= i_size_read(inode)) &&
+ folio_clear_dirty_for_io(folio))
+ folio_invalidate(folio, 0, folio_size(folio));
+
+ return -ENODATA;
+ }
+
+ if (ceph_wbc->strip_unit_end &&
+ (folio->index > ceph_wbc->strip_unit_end)) {
+ doutc(cl, "end of strip unit %p\n", folio);
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+static inline
+void __ceph_allocate_page_array(struct ceph_writeback_ctl *ceph_wbc,
+ unsigned int max_pages)
+{
+ ceph_wbc->pages = kmalloc_array(max_pages,
+ sizeof(*ceph_wbc->pages),
+ GFP_NOFS);
+ if (!ceph_wbc->pages) {
+ ceph_wbc->from_pool = true;
+ ceph_wbc->pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
+ BUG_ON(!ceph_wbc->pages);
+ }
+}
+
+static inline
+void ceph_allocate_page_array(struct address_space *mapping,
+ struct ceph_writeback_ctl *ceph_wbc,
+ struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 objnum;
+ u64 objoff;
+ u32 xlen;
+
+ /* prepare async write request */
+ ceph_wbc->offset = (u64)folio_pos(folio);
+ ceph_calc_file_object_mapping(&ci->i_layout,
+ ceph_wbc->offset, ceph_wbc->wsize,
+ &objnum, &objoff, &xlen);
+
+ ceph_wbc->num_ops = 1;
+ ceph_wbc->strip_unit_end = folio->index + ((xlen - 1) >> PAGE_SHIFT);
+
+ BUG_ON(ceph_wbc->pages);
+ ceph_wbc->max_pages = calc_pages_for(0, (u64)xlen);
+ __ceph_allocate_page_array(ceph_wbc, ceph_wbc->max_pages);
+
+ ceph_wbc->len = 0;
+}
+
+static inline
+bool is_folio_index_contiguous(const struct ceph_writeback_ctl *ceph_wbc,
+ const struct folio *folio)
+{
+ return folio->index == (ceph_wbc->offset + ceph_wbc->len) >> PAGE_SHIFT;
+}
+
+static inline
+bool is_num_ops_too_big(struct ceph_writeback_ctl *ceph_wbc)
+{
+ return ceph_wbc->num_ops >=
+ (ceph_wbc->from_pool ? CEPH_OSD_SLAB_OPS : CEPH_OSD_MAX_OPS);
+}
+
+static inline
+bool is_write_congestion_happened(struct ceph_fs_client *fsc)
+{
+ return atomic_long_inc_return(&fsc->writeback_count) >
+ CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb);
+}
+
+static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc, struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct page **pages = ceph_wbc->pages;
+ unsigned int index = ceph_wbc->locked_pages;
+ gfp_t gfp_flags = ceph_wbc->locked_pages ? GFP_NOWAIT : GFP_NOFS;
+
+ if (IS_ENCRYPTED(inode)) {
+ pages[index] = fscrypt_encrypt_pagecache_blocks(folio,
+ PAGE_SIZE,
+ 0,
+ gfp_flags);
+ if (IS_ERR(pages[index])) {
+ if (PTR_ERR(pages[index]) == -EINVAL) {
+ pr_err_client(cl, "inode->i_blkbits=%hhu\n",
+ inode->i_blkbits);
}
- if (page_offset(page) >= ceph_wbc.i_size) {
- doutc(cl, "folio at %lu beyond eof %llu\n",
- folio->index, ceph_wbc.i_size);
- if ((ceph_wbc.size_stable ||
- folio_pos(folio) >= i_size_read(inode)) &&
- folio_clear_dirty_for_io(folio))
- folio_invalidate(folio, 0,
- folio_size(folio));
+
+ /* better not fail on first page! */
+ BUG_ON(ceph_wbc->locked_pages == 0);
+
+			int err = PTR_ERR(pages[index]);
+
+			pages[index] = NULL;
+			return err;
+ }
+ } else {
+ pages[index] = &folio->page;
+ }
+
+ ceph_wbc->locked_pages++;
+
+ return 0;
+}
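One subtlety in move_dirty_folio_in_page_array() is worth spelling out: the errno has to be captured before the pages[] slot is cleared, because PTR_ERR(NULL) is 0 and the failure would otherwise be reported as success. In miniature:

	if (IS_ERR(pages[index])) {
		int err = PTR_ERR(pages[index]);	/* capture first... */

		pages[index] = NULL;			/* ...then clear the slot */
		return err;	/* PTR_ERR(pages[index]) here would yield 0 */
	}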
+
+static
+int ceph_process_folio_batch(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct folio *folio = NULL;
+ unsigned i;
+ int rc = 0;
+
+ for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
+ folio = ceph_wbc->fbatch.folios[i];
+
+ if (!folio)
+ continue;
+
+ doutc(cl, "? %p idx %lu, folio_test_writeback %#x, "
+ "folio_test_dirty %#x, folio_test_locked %#x\n",
+ folio, folio->index, folio_test_writeback(folio),
+ folio_test_dirty(folio),
+ folio_test_locked(folio));
+
+ if (folio_test_writeback(folio) ||
+ folio_test_private_2(folio) /* [DEPRECATED] */) {
+ doutc(cl, "waiting on writeback %p\n", folio);
+ folio_wait_writeback(folio);
+ folio_wait_private_2(folio); /* [DEPRECATED] */
+ continue;
+ }
+
+ if (ceph_wbc->locked_pages == 0)
+ folio_lock(folio);
+ else if (!folio_trylock(folio))
+ break;
+
+ rc = ceph_check_page_before_write(mapping, wbc,
+ ceph_wbc, folio);
+ if (rc == -ENODATA) {
+ rc = 0;
+ folio_unlock(folio);
+ ceph_wbc->fbatch.folios[i] = NULL;
+ continue;
+ } else if (rc == -E2BIG) {
+ rc = 0;
+ folio_unlock(folio);
+ ceph_wbc->fbatch.folios[i] = NULL;
+ break;
+ }
+
+ if (!folio_clear_dirty_for_io(folio)) {
+ doutc(cl, "%p !folio_clear_dirty_for_io\n", folio);
+ folio_unlock(folio);
+ ceph_wbc->fbatch.folios[i] = NULL;
+ continue;
+ }
+
+ /*
+ * We have something to write. If this is
+ * the first locked page this time through,
+ * calculate max possible write size and
+ * allocate a page array
+ */
+ if (ceph_wbc->locked_pages == 0) {
+ ceph_allocate_page_array(mapping, ceph_wbc, folio);
+ } else if (!is_folio_index_contiguous(ceph_wbc, folio)) {
+ if (is_num_ops_too_big(ceph_wbc)) {
+ folio_redirty_for_writepage(wbc, folio);
folio_unlock(folio);
- continue;
- }
- if (strip_unit_end && (page->index > strip_unit_end)) {
- doutc(cl, "end of strip unit %p\n", page);
- unlock_page(page);
break;
}
- if (folio_test_writeback(folio) ||
- folio_test_private_2(folio) /* [DEPRECATED] */) {
- if (wbc->sync_mode == WB_SYNC_NONE) {
- doutc(cl, "%p under writeback\n", folio);
- folio_unlock(folio);
- continue;
- }
- doutc(cl, "waiting on writeback %p\n", folio);
- folio_wait_writeback(folio);
- folio_wait_private_2(folio); /* [DEPRECATED] */
- }
- if (!clear_page_dirty_for_io(page)) {
- doutc(cl, "%p !clear_page_dirty_for_io\n", page);
- unlock_page(page);
- continue;
- }
+ ceph_wbc->num_ops++;
+ ceph_wbc->offset = (u64)folio_pos(folio);
+ ceph_wbc->len = 0;
+ }
- /*
- * We have something to write. If this is
- * the first locked page this time through,
- * calculate max possinle write size and
- * allocate a page array
- */
- if (locked_pages == 0) {
- u64 objnum;
- u64 objoff;
- u32 xlen;
-
- /* prepare async write request */
- offset = (u64)page_offset(page);
- ceph_calc_file_object_mapping(&ci->i_layout,
- offset, wsize,
- &objnum, &objoff,
- &xlen);
- len = xlen;
-
- num_ops = 1;
- strip_unit_end = page->index +
- ((len - 1) >> PAGE_SHIFT);
-
- BUG_ON(pages);
- max_pages = calc_pages_for(0, (u64)len);
- pages = kmalloc_array(max_pages,
- sizeof(*pages),
- GFP_NOFS);
- if (!pages) {
- from_pool = true;
- pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
- BUG_ON(!pages);
- }
-
- len = 0;
- } else if (page->index !=
- (offset + len) >> PAGE_SHIFT) {
- if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
- CEPH_OSD_MAX_OPS)) {
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- break;
- }
-
- num_ops++;
- offset = (u64)page_offset(page);
- len = 0;
- }
+ /* note position of first page in fbatch */
+ doutc(cl, "%llx.%llx will write folio %p idx %lu\n",
+ ceph_vinop(inode), folio, folio->index);
- /* note position of first page in fbatch */
- doutc(cl, "%llx.%llx will write page %p idx %lu\n",
- ceph_vinop(inode), page, page->index);
-
- if (atomic_long_inc_return(&fsc->writeback_count) >
- CONGESTION_ON_THRESH(
- fsc->mount_options->congestion_kb))
- fsc->write_congested = true;
-
- if (IS_ENCRYPTED(inode)) {
- pages[locked_pages] =
- fscrypt_encrypt_pagecache_blocks(page,
- PAGE_SIZE, 0,
- locked_pages ? GFP_NOWAIT : GFP_NOFS);
- if (IS_ERR(pages[locked_pages])) {
- if (PTR_ERR(pages[locked_pages]) == -EINVAL)
- pr_err_client(cl,
- "inode->i_blkbits=%hhu\n",
- inode->i_blkbits);
- /* better not fail on first page! */
- BUG_ON(locked_pages == 0);
- pages[locked_pages] = NULL;
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- break;
- }
- ++locked_pages;
- } else {
- pages[locked_pages++] = page;
- }
+ fsc->write_congested = is_write_congestion_happened(fsc);
- fbatch.folios[i] = NULL;
- len += thp_size(page);
+ rc = move_dirty_folio_in_page_array(mapping, wbc, ceph_wbc,
+ folio);
+ if (rc) {
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ break;
}
- /* did we get anything? */
- if (!locked_pages)
- goto release_folios;
- if (i) {
- unsigned j, n = 0;
- /* shift unused page to beginning of fbatch */
- for (j = 0; j < nr_folios; j++) {
- if (!fbatch.folios[j])
- continue;
- if (n < j)
- fbatch.folios[n] = fbatch.folios[j];
- n++;
- }
- fbatch.nr = n;
+ ceph_wbc->fbatch.folios[i] = NULL;
+ ceph_wbc->len += folio_size(folio);
+ }
- if (nr_folios && i == nr_folios &&
- locked_pages < max_pages) {
- doutc(cl, "reached end fbatch, trying for more\n");
- folio_batch_release(&fbatch);
- goto get_more_pages;
- }
+ ceph_wbc->processed_in_fbatch = i;
+
+ return rc;
+}
+
+static inline
+void ceph_shift_unused_folios_left(struct folio_batch *fbatch)
+{
+ unsigned j, n = 0;
+
+ /* shift unused page to beginning of fbatch */
+ for (j = 0; j < folio_batch_count(fbatch); j++) {
+ if (!fbatch->folios[j])
+ continue;
+
+ if (n < j) {
+ fbatch->folios[n] = fbatch->folios[j];
}
+ n++;
+ }
+
+ fbatch->nr = n;
+}
+
+static
+int ceph_submit_write(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_vino vino = ceph_vino(inode);
+ struct ceph_osd_request *req = NULL;
+ struct page *page = NULL;
+ bool caching = ceph_is_cache_enabled(inode);
+ u64 offset;
+ u64 len;
+ unsigned i;
+
new_request:
- offset = ceph_fscrypt_page_offset(pages[0]);
- len = wsize;
+ offset = ceph_fscrypt_page_offset(ceph_wbc->pages[0]);
+ len = ceph_wbc->wsize;
+ req = ceph_osdc_new_request(&fsc->client->osdc,
+ &ci->i_layout, vino,
+ offset, &len, 0, ceph_wbc->num_ops,
+ CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
+ ceph_wbc->snapc, ceph_wbc->truncate_seq,
+ ceph_wbc->truncate_size, false);
+ if (IS_ERR(req)) {
req = ceph_osdc_new_request(&fsc->client->osdc,
- &ci->i_layout, vino,
- offset, &len, 0, num_ops,
- CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
- snapc, ceph_wbc.truncate_seq,
- ceph_wbc.truncate_size, false);
- if (IS_ERR(req)) {
- req = ceph_osdc_new_request(&fsc->client->osdc,
- &ci->i_layout, vino,
- offset, &len, 0,
- min(num_ops,
- CEPH_OSD_SLAB_OPS),
- CEPH_OSD_OP_WRITE,
- CEPH_OSD_FLAG_WRITE,
- snapc, ceph_wbc.truncate_seq,
- ceph_wbc.truncate_size, true);
- BUG_ON(IS_ERR(req));
+ &ci->i_layout, vino,
+ offset, &len, 0,
+ min(ceph_wbc->num_ops,
+ CEPH_OSD_SLAB_OPS),
+ CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_WRITE,
+ ceph_wbc->snapc,
+ ceph_wbc->truncate_seq,
+ ceph_wbc->truncate_size,
+ true);
+ BUG_ON(IS_ERR(req));
+ }
+
+ page = ceph_wbc->pages[ceph_wbc->locked_pages - 1];
+ BUG_ON(len < ceph_fscrypt_page_offset(page) + thp_size(page) - offset);
+
+ if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
+ for (i = 0; i < folio_batch_count(&ceph_wbc->fbatch); i++) {
+ struct folio *folio = ceph_wbc->fbatch.folios[i];
+
+ if (!folio)
+ continue;
+
+ page = &folio->page;
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
}
- BUG_ON(len < ceph_fscrypt_page_offset(pages[locked_pages - 1]) +
- thp_size(pages[locked_pages - 1]) - offset);
- if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
- rc = -EIO;
- goto release_folios;
+ for (i = 0; i < ceph_wbc->locked_pages; i++) {
+ page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
+
+ if (!page)
+ continue;
+
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
}
- req->r_callback = writepages_finish;
- req->r_inode = inode;
-
- /* Format the osd request message and submit the write */
- len = 0;
- data_pages = pages;
- op_idx = 0;
- for (i = 0; i < locked_pages; i++) {
- struct page *page = ceph_fscrypt_pagecache_page(pages[i]);
-
- u64 cur_offset = page_offset(page);
- /*
- * Discontinuity in page range? Ceph can handle that by just passing
- * multiple extents in the write op.
- */
- if (offset + len != cur_offset) {
- /* If it's full, stop here */
- if (op_idx + 1 == req->r_num_ops)
- break;
-
- /* Kick off an fscache write with what we have so far. */
- ceph_fscache_write_to_cache(inode, offset, len, caching);
-
- /* Start a new extent */
- osd_req_op_extent_dup_last(req, op_idx,
- cur_offset - offset);
- doutc(cl, "got pages at %llu~%llu\n", offset,
- len);
- osd_req_op_extent_osd_data_pages(req, op_idx,
- data_pages, len, 0,
- from_pool, false);
- osd_req_op_extent_update(req, op_idx, len);
-
- len = 0;
- offset = cur_offset;
- data_pages = pages + i;
- op_idx++;
- }
- set_page_writeback(page);
- if (caching)
- ceph_set_page_fscache(page);
- len += thp_size(page);
+ ceph_osdc_put_request(req);
+ return -EIO;
+ }
+
+ req->r_callback = writepages_finish;
+ req->r_inode = inode;
+
+ /* Format the osd request message and submit the write */
+ len = 0;
+ ceph_wbc->data_pages = ceph_wbc->pages;
+ ceph_wbc->op_idx = 0;
+ for (i = 0; i < ceph_wbc->locked_pages; i++) {
+ u64 cur_offset;
+
+ page = ceph_fscrypt_pagecache_page(ceph_wbc->pages[i]);
+ cur_offset = page_offset(page);
+
+ /*
+ * Discontinuity in page range? Ceph can handle that by just passing
+ * multiple extents in the write op.
+ */
+ if (offset + len != cur_offset) {
+ /* If it's full, stop here */
+ if (ceph_wbc->op_idx + 1 == req->r_num_ops)
+ break;
+
+ /* Kick off an fscache write with what we have so far. */
+ ceph_fscache_write_to_cache(inode, offset, len, caching);
+
+ /* Start a new extent */
+ osd_req_op_extent_dup_last(req, ceph_wbc->op_idx,
+ cur_offset - offset);
+
+ doutc(cl, "got pages at %llu~%llu\n", offset, len);
+
+ osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
+ ceph_wbc->data_pages,
+ len, 0,
+ ceph_wbc->from_pool,
+ false);
+ osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
+
+ len = 0;
+ offset = cur_offset;
+ ceph_wbc->data_pages = ceph_wbc->pages + i;
+ ceph_wbc->op_idx++;
}
- ceph_fscache_write_to_cache(inode, offset, len, caching);
-
- if (ceph_wbc.size_stable) {
- len = min(len, ceph_wbc.i_size - offset);
- } else if (i == locked_pages) {
- /* writepages_finish() clears writeback pages
- * according to the data length, so make sure
- * data length covers all locked pages */
- u64 min_len = len + 1 - thp_size(page);
- len = get_writepages_data_length(inode, pages[i - 1],
- offset);
- len = max(len, min_len);
+
+ set_page_writeback(page);
+
+ if (caching)
+ ceph_set_page_fscache(page);
+
+ len += thp_size(page);
+ }
+
+ ceph_fscache_write_to_cache(inode, offset, len, caching);
+
+ if (ceph_wbc->size_stable) {
+ len = min(len, ceph_wbc->i_size - offset);
+ } else if (i == ceph_wbc->locked_pages) {
+ /* writepages_finish() clears writeback pages
+ * according to the data length, so make sure
+ * data length covers all locked pages */
+ u64 min_len = len + 1 - thp_size(page);
+ len = get_writepages_data_length(inode,
+ ceph_wbc->pages[i - 1],
+ offset);
+ len = max(len, min_len);
+ }
+
+ if (IS_ENCRYPTED(inode))
+ len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
+
+ doutc(cl, "got pages at %llu~%llu\n", offset, len);
+
+ if (IS_ENCRYPTED(inode) &&
+ ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK)) {
+ pr_warn_client(cl,
+ "bad encrypted write offset=%lld len=%llu\n",
+ offset, len);
+ }
+
+ osd_req_op_extent_osd_data_pages(req, ceph_wbc->op_idx,
+ ceph_wbc->data_pages, len,
+ 0, ceph_wbc->from_pool, false);
+ osd_req_op_extent_update(req, ceph_wbc->op_idx, len);
+
+ BUG_ON(ceph_wbc->op_idx + 1 != req->r_num_ops);
+
+ ceph_wbc->from_pool = false;
+ if (i < ceph_wbc->locked_pages) {
+ BUG_ON(ceph_wbc->num_ops <= req->r_num_ops);
+ ceph_wbc->num_ops -= req->r_num_ops;
+ ceph_wbc->locked_pages -= i;
+
+ /* allocate new pages array for next request */
+ ceph_wbc->data_pages = ceph_wbc->pages;
+ __ceph_allocate_page_array(ceph_wbc, ceph_wbc->locked_pages);
+ memcpy(ceph_wbc->pages, ceph_wbc->data_pages + i,
+ ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
+ memset(ceph_wbc->data_pages + i, 0,
+ ceph_wbc->locked_pages * sizeof(*ceph_wbc->pages));
+ } else {
+ BUG_ON(ceph_wbc->num_ops != req->r_num_ops);
+ /* request message now owns the pages array */
+ ceph_wbc->pages = NULL;
+ }
+
+ req->r_mtime = inode_get_mtime(inode);
+ ceph_osdc_start_request(&fsc->client->osdc, req);
+ req = NULL;
+
+ wbc->nr_to_write -= i;
+ if (ceph_wbc->pages)
+ goto new_request;
+
+ return 0;
+}
+
+static
+void ceph_wait_until_current_writes_complete(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct ceph_writeback_ctl *ceph_wbc)
+{
+ struct page *page;
+ unsigned i, nr;
+
+ if (wbc->sync_mode != WB_SYNC_NONE &&
+ ceph_wbc->start_index == 0 && /* all dirty pages were checked */
+ !ceph_wbc->head_snapc) {
+ ceph_wbc->index = 0;
+
+ while ((ceph_wbc->index <= ceph_wbc->end) &&
+ (nr = filemap_get_folios_tag(mapping,
+ &ceph_wbc->index,
+ (pgoff_t)-1,
+ PAGECACHE_TAG_WRITEBACK,
+ &ceph_wbc->fbatch))) {
+ for (i = 0; i < nr; i++) {
+ page = &ceph_wbc->fbatch.folios[i]->page;
+ if (page_snap_context(page) != ceph_wbc->snapc)
+ continue;
+ wait_on_page_writeback(page);
+ }
+
+ folio_batch_release(&ceph_wbc->fbatch);
+ cond_resched();
}
- if (IS_ENCRYPTED(inode))
- len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
+ }
+}
+
+/*
+ * initiate async writeback
+ */
+static int ceph_writepages_start(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+ struct ceph_client *cl = fsc->client;
+ struct ceph_writeback_ctl ceph_wbc;
+ int rc = 0;
- doutc(cl, "got pages at %llu~%llu\n", offset, len);
+ if (wbc->sync_mode == WB_SYNC_NONE && fsc->write_congested)
+ return 0;
- if (IS_ENCRYPTED(inode) &&
- ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
- pr_warn_client(cl,
- "bad encrypted write offset=%lld len=%llu\n",
- offset, len);
-
- osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
- 0, from_pool, false);
- osd_req_op_extent_update(req, op_idx, len);
-
- BUG_ON(op_idx + 1 != req->r_num_ops);
-
- from_pool = false;
- if (i < locked_pages) {
- BUG_ON(num_ops <= req->r_num_ops);
- num_ops -= req->r_num_ops;
- locked_pages -= i;
-
- /* allocate new pages array for next request */
- data_pages = pages;
- pages = kmalloc_array(locked_pages, sizeof(*pages),
- GFP_NOFS);
- if (!pages) {
- from_pool = true;
- pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
- BUG_ON(!pages);
+ doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
+ wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
+ (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
+
+ if (is_forced_umount(mapping)) {
+ /* we're in a forced umount, don't write! */
+ return -EIO;
+ }
+
+ ceph_init_writeback_ctl(mapping, wbc, &ceph_wbc);
+
+ if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
+ rc = -EIO;
+ goto out;
+ }
+
+retry:
+ rc = ceph_define_writeback_range(mapping, wbc, &ceph_wbc);
+ if (rc == -ENODATA) {
+ /* hmm, why does writepages get called when there
+ is no dirty data? */
+ rc = 0;
+ goto dec_osd_stopping_blocker;
+ }
+
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ tag_pages_for_writeback(mapping, ceph_wbc.index, ceph_wbc.end);
+
+ while (!has_writeback_done(&ceph_wbc)) {
+ ceph_wbc.locked_pages = 0;
+ ceph_wbc.max_pages = ceph_wbc.wsize >> PAGE_SHIFT;
+
+get_more_pages:
+ ceph_folio_batch_reinit(&ceph_wbc);
+
+ ceph_wbc.nr_folios = filemap_get_folios_tag(mapping,
+ &ceph_wbc.index,
+ ceph_wbc.end,
+ ceph_wbc.tag,
+ &ceph_wbc.fbatch);
+ doutc(cl, "pagevec_lookup_range_tag for tag %#x got %d\n",
+ ceph_wbc.tag, ceph_wbc.nr_folios);
+
+ if (!ceph_wbc.nr_folios && !ceph_wbc.locked_pages)
+ break;
+
+process_folio_batch:
+ rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
+ if (rc)
+ goto release_folios;
+
+ /* did we get anything? */
+ if (!ceph_wbc.locked_pages)
+ goto release_folios;
+
+ if (ceph_wbc.processed_in_fbatch) {
+ ceph_shift_unused_folios_left(&ceph_wbc.fbatch);
+
+ if (folio_batch_count(&ceph_wbc.fbatch) == 0 &&
+ ceph_wbc.locked_pages < ceph_wbc.max_pages) {
+ doutc(cl, "reached end fbatch, trying for more\n");
+ goto get_more_pages;
}
- memcpy(pages, data_pages + i,
- locked_pages * sizeof(*pages));
- memset(data_pages + i, 0,
- locked_pages * sizeof(*pages));
- } else {
- BUG_ON(num_ops != req->r_num_ops);
- index = pages[i - 1]->index + 1;
- /* request message now owns the pages array */
- pages = NULL;
}
- req->r_mtime = inode_get_mtime(inode);
- ceph_osdc_start_request(&fsc->client->osdc, req);
- req = NULL;
+ rc = ceph_submit_write(mapping, wbc, &ceph_wbc);
+ if (rc)
+ goto release_folios;
+
+ ceph_wbc.locked_pages = 0;
+ ceph_wbc.strip_unit_end = 0;
- wbc->nr_to_write -= i;
- if (pages)
- goto new_request;
+ if (folio_batch_count(&ceph_wbc.fbatch) > 0) {
+ ceph_wbc.nr_folios =
+ folio_batch_count(&ceph_wbc.fbatch);
+ goto process_folio_batch;
+ }
/*
* We stop writing back only if we are not doing
@@ -1377,61 +1713,44 @@ new_request:
* we tagged for writeback prior to entering this loop.
*/
if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
- done = true;
+ ceph_wbc.done = true;
release_folios:
doutc(cl, "folio_batch release on %d folios (%p)\n",
- (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL);
- folio_batch_release(&fbatch);
+ (int)ceph_wbc.fbatch.nr,
+ ceph_wbc.fbatch.nr ? ceph_wbc.fbatch.folios[0] : NULL);
+ folio_batch_release(&ceph_wbc.fbatch);
}
- if (should_loop && !done) {
+ if (ceph_wbc.should_loop && !ceph_wbc.done) {
/* more to do; loop back to beginning of file */
doutc(cl, "looping back to beginning of file\n");
- end = start_index - 1; /* OK even when start_index == 0 */
+ /* OK even when start_index == 0 */
+ ceph_wbc.end = ceph_wbc.start_index - 1;
/* to write dirty pages associated with next snapc,
* we need to wait until current writes complete */
- if (wbc->sync_mode != WB_SYNC_NONE &&
- start_index == 0 && /* all dirty pages were checked */
- !ceph_wbc.head_snapc) {
- struct page *page;
- unsigned i, nr;
- index = 0;
- while ((index <= end) &&
- (nr = filemap_get_folios_tag(mapping, &index,
- (pgoff_t)-1,
- PAGECACHE_TAG_WRITEBACK,
- &fbatch))) {
- for (i = 0; i < nr; i++) {
- page = &fbatch.folios[i]->page;
- if (page_snap_context(page) != snapc)
- continue;
- wait_on_page_writeback(page);
- }
- folio_batch_release(&fbatch);
- cond_resched();
- }
- }
+ ceph_wait_until_current_writes_complete(mapping, wbc, &ceph_wbc);
- start_index = 0;
- index = 0;
+ ceph_wbc.start_index = 0;
+ ceph_wbc.index = 0;
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
+ if (wbc->range_cyclic || (ceph_wbc.range_whole && wbc->nr_to_write > 0))
+ mapping->writeback_index = ceph_wbc.index;
+
+dec_osd_stopping_blocker:
+ ceph_dec_osd_stopping_blocker(fsc->mdsc);
out:
- ceph_osdc_put_request(req);
- ceph_put_snap_context(last_snapc);
+ ceph_put_snap_context(ceph_wbc.last_snapc);
doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
rc);
+
return rc;
}
-
-
/*
* See if a given @snapc is either writeable, or already written.
*/
@@ -1447,56 +1766,56 @@ static int context_is_writeable_or_written(struct inode *inode,
/**
* ceph_find_incompatible - find an incompatible context and return it
- * @page: page being dirtied
+ * @folio: folio being dirtied
*
- * We are only allowed to write into/dirty a page if the page is
+ * We are only allowed to write into/dirty a folio if the folio is
* clean, or already dirty within the same snap context. Returns a
* conflicting context if there is one, NULL if there isn't, or a
* negative error code on other errors.
*
- * Must be called with page lock held.
+ * Must be called with folio lock held.
*/
static struct ceph_snap_context *
-ceph_find_incompatible(struct page *page)
+ceph_find_incompatible(struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
if (ceph_inode_is_shutdown(inode)) {
- doutc(cl, " %llx.%llx page %p is shutdown\n",
- ceph_vinop(inode), page);
+ doutc(cl, " %llx.%llx folio %p is shutdown\n",
+ ceph_vinop(inode), folio);
return ERR_PTR(-ESTALE);
}
for (;;) {
struct ceph_snap_context *snapc, *oldest;
- wait_on_page_writeback(page);
+ folio_wait_writeback(folio);
- snapc = page_snap_context(page);
+ snapc = page_snap_context(&folio->page);
if (!snapc || snapc == ci->i_head_snapc)
break;
/*
- * this page is already dirty in another (older) snap
+ * this folio is already dirty in another (older) snap
* context! is it writeable now?
*/
oldest = get_oldest_context(inode, NULL, NULL);
if (snapc->seq > oldest->seq) {
/* not writeable -- return it for the caller to deal with */
ceph_put_snap_context(oldest);
- doutc(cl, " %llx.%llx page %p snapc %p not current or oldest\n",
- ceph_vinop(inode), page, snapc);
+ doutc(cl, " %llx.%llx folio %p snapc %p not current or oldest\n",
+ ceph_vinop(inode), folio, snapc);
return ceph_get_snap_context(snapc);
}
ceph_put_snap_context(oldest);
- /* yay, writeable, do it now (without dropping page lock) */
- doutc(cl, " %llx.%llx page %p snapc %p not current, but oldest\n",
- ceph_vinop(inode), page, snapc);
- if (clear_page_dirty_for_io(page)) {
- int r = writepage_nounlock(page, NULL);
+ /* yay, writeable, do it now (without dropping folio lock) */
+ doutc(cl, " %llx.%llx folio %p snapc %p not current, but oldest\n",
+ ceph_vinop(inode), folio, snapc);
+ if (folio_clear_dirty_for_io(folio)) {
+ int r = write_folio_nounlock(folio, NULL);
if (r < 0)
return ERR_PTR(r);
}
@@ -1511,7 +1830,7 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc;
- snapc = ceph_find_incompatible(folio_page(*foliop, 0));
+ snapc = ceph_find_incompatible(*foliop);
if (snapc) {
int r;
@@ -1594,7 +1913,6 @@ out:
const struct address_space_operations ceph_aops = {
.read_folio = netfs_read_folio,
.readahead = netfs_readahead,
- .writepage = ceph_writepage,
.writepages = ceph_writepages_start,
.write_begin = ceph_write_begin,
.write_end = ceph_write_end,
@@ -1602,6 +1920,7 @@ const struct address_space_operations ceph_aops = {
.invalidate_folio = ceph_invalidate_folio,
.release_folio = netfs_release_folio,
.direct_IO = noop_direct_IO,
+ .migrate_folio = filemap_migrate_folio,
};
static void ceph_block_sigs(sigset_t *oldset)
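With ->writepage gone, memory migration can no longer fall back on writing the folio out, so the aops gains .migrate_folio. filemap_migrate_folio() also carries folio->private across, which for ceph is the snap-context pointer attached when the folio was dirtied. A simplified model of what that helper does (see mm/migrate.c for the authoritative version):

static int migrate_model(struct address_space *mapping, struct folio *dst,
			 struct folio *src, enum migrate_mode mode)
{
	int ret = folio_migrate_mapping(mapping, dst, src, 0);

	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;
	if (folio_get_private(src))	/* move the snapc pointer over */
		folio_attach_private(dst, folio_detach_private(src));
	folio_migrate_copy(dst, src);
	return MIGRATEPAGE_SUCCESS;
}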
@@ -1718,8 +2037,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
struct ceph_cap_flush *prealloc_cf;
- struct page *page = vmf->page;
- loff_t off = page_offset(page);
+ struct folio *folio = page_folio(vmf->page);
+ loff_t off = folio_pos(folio);
loff_t size = i_size_read(inode);
size_t len;
int want, got, err;
@@ -1736,10 +2055,10 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
ceph_block_sigs(&oldset);
- if (off + thp_size(page) <= size)
- len = thp_size(page);
+ if (off + folio_size(folio) <= size)
+ len = folio_size(folio);
else
- len = offset_in_thp(page, size);
+ len = offset_in_folio(folio, size);
doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
ceph_vinop(inode), off, len, size);
@@ -1756,30 +2075,30 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
off, len, ceph_cap_string(got));
- /* Update time before taking page lock */
+ /* Update time before taking folio lock */
file_update_time(vma->vm_file);
inode_inc_iversion_raw(inode);
do {
struct ceph_snap_context *snapc;
- lock_page(page);
+ folio_lock(folio);
- if (page_mkwrite_check_truncate(page, inode) < 0) {
- unlock_page(page);
+ if (folio_mkwrite_check_truncate(folio, inode) < 0) {
+ folio_unlock(folio);
ret = VM_FAULT_NOPAGE;
break;
}
- snapc = ceph_find_incompatible(page);
+ snapc = ceph_find_incompatible(folio);
if (!snapc) {
- /* success. we'll keep the page locked. */
- set_page_dirty(page);
+ /* success. we'll keep the folio locked. */
+ folio_mark_dirty(folio);
ret = VM_FAULT_LOCKED;
break;
}
- unlock_page(page);
+ folio_unlock(folio);
if (IS_ERR(snapc)) {
ret = VM_FAULT_SIGBUS;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 62e99e65250d..a321aa6d0ed2 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -141,17 +141,18 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
if (ptr_pos >= i_size_read(dir))
return NULL;
- if (!cache_ctl->page || ptr_pgoff != cache_ctl->page->index) {
+ if (!cache_ctl->folio || ptr_pgoff != cache_ctl->folio->index) {
ceph_readdir_cache_release(cache_ctl);
- cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
- if (!cache_ctl->page) {
- doutc(cl, " page %lu not found\n", ptr_pgoff);
+ cache_ctl->folio = filemap_lock_folio(&dir->i_data, ptr_pgoff);
+ if (IS_ERR(cache_ctl->folio)) {
+ cache_ctl->folio = NULL;
+ doutc(cl, " folio %lu not found\n", ptr_pgoff);
return ERR_PTR(-EAGAIN);
}
/* reading/filling the cache are serialized by
- i_rwsem, no need to use page lock */
- unlock_page(cache_ctl->page);
- cache_ctl->dentries = kmap(cache_ctl->page);
+ i_rwsem, no need to use folio lock */
+ folio_unlock(cache_ctl->folio);
+ cache_ctl->dentries = kmap_local_folio(cache_ctl->folio, 0);
}
cache_ctl->index = idx & idx_mask;
@@ -1092,19 +1093,20 @@ out:
return err;
}
-static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
+ struct dentry *ret;
int err;
int op;
err = ceph_wait_on_conflict_unlink(dentry);
if (err)
- return err;
+ return ERR_PTR(err);
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* mkdir .snap/foo is a MKSNAP */
@@ -1116,32 +1118,32 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
ceph_vinop(dir), dentry, dentry, mode);
op = CEPH_MDS_OP_MKDIR;
} else {
- err = -EROFS;
+ ret = ERR_PTR(-EROFS);
goto out;
}
if (op == CEPH_MDS_OP_MKDIR &&
ceph_quota_is_max_files_exceeded(dir)) {
- err = -EDQUOT;
+ ret = ERR_PTR(-EDQUOT);
goto out;
}
if ((op == CEPH_MDS_OP_MKSNAP) && IS_ENCRYPTED(dir) &&
!fscrypt_has_encryption_key(dir)) {
- err = -ENOKEY;
+ ret = ERR_PTR(-ENOKEY);
goto out;
}
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req)) {
- err = PTR_ERR(req);
+ ret = ERR_CAST(req);
goto out;
}
mode |= S_IFDIR;
req->r_new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
if (IS_ERR(req->r_new_inode)) {
- err = PTR_ERR(req->r_new_inode);
+ ret = ERR_CAST(req->r_new_inode);
req->r_new_inode = NULL;
goto out_req;
}
@@ -1165,15 +1167,22 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
!req->r_reply_info.head->is_target &&
!req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
+ ret = ERR_PTR(err);
out_req:
+ if (!IS_ERR(ret) && req->r_dentry != dentry)
+ /* Some other dentry was spliced in */
+ ret = dget(req->r_dentry);
ceph_mdsc_put_request(req);
out:
- if (!err)
+ if (!IS_ERR(ret)) {
+ if (ret)
+ dentry = ret;
ceph_init_inode_acls(d_inode(dentry), &as_ctx);
- else
+ } else {
d_drop(dentry);
+ }
ceph_release_acl_sec_ctx(&as_ctx);
- return err;
+ return ret;
}
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
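
ceph_mkdir above is one of many ->mkdir conversions in this series from returning an int to returning a struct dentry * that is NULL on success, a spliced-in dentry, or an encoded errno. A userspace re-implementation of that encoding, assuming the usual kernel convention that small negative errnos live in the otherwise-unusable top page of the pointer space:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
/* ERR_CAST(): re-type an error pointer without decoding it. */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct dentry { const char *name; };

/* Toy mkdir in the new calling convention: NULL on success with the
 * caller's dentry used as-is, a valid pointer if another dentry was
 * spliced in, or an encoded errno. */
static struct dentry *toy_mkdir(const char *name)
{
	if (!name[0])
		return ERR_PTR(-EINVAL);
	return NULL;	/* success, caller's dentry instantiated */
}

int main(void)
{
	struct dentry *d = toy_mkdir("");

	if (IS_ERR(d))
		printf("mkdir failed: %ld\n", PTR_ERR(d));
	return 0;
}
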
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 7dd6c2275085..6ac2bd555e86 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1845,10 +1845,9 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
- if (ctl->page) {
- kunmap(ctl->page);
- put_page(ctl->page);
- ctl->page = NULL;
+ if (ctl->folio) {
+ folio_release_kmap(ctl->folio, ctl->dentries);
+ ctl->folio = NULL;
}
}
@@ -1862,20 +1861,26 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
unsigned idx = ctl->index % nsize;
pgoff_t pgoff = ctl->index / nsize;
- if (!ctl->page || pgoff != ctl->page->index) {
+ if (!ctl->folio || pgoff != ctl->folio->index) {
ceph_readdir_cache_release(ctl);
+ fgf_t fgf = FGP_LOCK;
+
if (idx == 0)
- ctl->page = grab_cache_page(&dir->i_data, pgoff);
- else
- ctl->page = find_lock_page(&dir->i_data, pgoff);
- if (!ctl->page) {
+ fgf |= FGP_ACCESSED | FGP_CREAT;
+
+ ctl->folio = __filemap_get_folio(&dir->i_data, pgoff,
+ fgf, mapping_gfp_mask(&dir->i_data));
+ if (IS_ERR(ctl->folio)) {
+ int err = PTR_ERR(ctl->folio);
+
+ ctl->folio = NULL;
ctl->index = -1;
- return idx == 0 ? -ENOMEM : 0;
+ return idx == 0 ? err : 0;
}
/* reading/filling the cache are serialized by
- * i_rwsem, no need to use page lock */
- unlock_page(ctl->page);
- ctl->dentries = kmap(ctl->page);
+ * i_rwsem, no need to use folio lock */
+ folio_unlock(ctl->folio);
+ ctl->dentries = kmap_local_folio(ctl->folio, 0);
if (idx == 0)
memset(ctl->dentries, 0, PAGE_SIZE);
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 54b3421501e9..230e0c3f341f 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -5489,6 +5489,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
spin_lock_init(&mdsc->stopping_lock);
atomic_set(&mdsc->stopping_blockers, 0);
init_completion(&mdsc->stopping_waiter);
+ atomic64_set(&mdsc->dirty_folios, 0);
+ init_waitqueue_head(&mdsc->flush_end_wq);
init_waitqueue_head(&mdsc->session_close_wq);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
mdsc->quotarealms_inodes = RB_ROOT;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 7c9fee9e80d4..3e2a6fa7c19a 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -458,6 +458,9 @@ struct ceph_mds_client {
atomic_t stopping_blockers;
struct completion stopping_waiter;
+ atomic64_t dirty_folios;
+ wait_queue_head_t flush_end_wq;
+
atomic64_t quotarealms_count; /* # realms with quota */
/*
* We keep a list of inodes we don't see in the mountpoint but that we
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 4344e1f11806..f3951253e393 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1563,6 +1563,17 @@ static void ceph_kill_sb(struct super_block *s)
*/
sync_filesystem(s);
+ if (atomic64_read(&mdsc->dirty_folios) > 0) {
+ wait_queue_head_t *wq = &mdsc->flush_end_wq;
+ long timeleft = wait_event_killable_timeout(*wq,
+ atomic64_read(&mdsc->dirty_folios) <= 0,
+ fsc->client->options->mount_timeout);
+ if (!timeleft) /* timed out */
+ pr_warn_client(cl, "umount timed out, %ld\n", timeleft);
+ else if (timeleft < 0) /* killed */
+ pr_warn_client(cl, "umount was killed, %ld\n", timeleft);
+ }
+
spin_lock(&mdsc->stopping_lock);
mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHING;
wait = !!atomic_read(&mdsc->stopping_blockers);
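
The kill_sb hunk above waits for dirty_folios to drain, distinguishing timeout (0), success (>0) and a fatal signal (<0) in the wait_event_killable_timeout() return value. A rough pthread analogue of the same shape, purely illustrative: killability has no direct pthread equivalent, so only the timeout/success split is modelled, and the names here are not kernel API.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static long dirty_folios = 1;

static void *flusher(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	dirty_folios = 0;		/* writeback finished */
	pthread_cond_broadcast(&cond);	/* flush_end_wq wakeup */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct timespec deadline;
	pthread_t t;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;		/* stands in for mount_timeout */

	pthread_create(&t, NULL, flusher, NULL);
	pthread_mutex_lock(&lock);
	while (dirty_folios > 0 && rc == 0)
		rc = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	if (rc)		/* ETIMEDOUT: the pr_warn "umount timed out" case */
		fprintf(stderr, "flush timed out\n");
	pthread_join(t, NULL);
	return 0;
}
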
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 7fa1e7be50e4..bb0db0cc8003 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -903,7 +903,7 @@ ceph_find_rw_context(struct ceph_file_info *cf)
}
struct ceph_readdir_cache_control {
- struct page *page;
+ struct folio *folio;
struct dentry **dentries;
int index;
};
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index a3e2dfeedfbf..ab69d8f0cec2 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -166,8 +166,8 @@ err_out:
return error;
}
-static int coda_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *de, umode_t mode)
+static struct dentry *coda_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *de, umode_t mode)
{
struct inode *inode;
struct coda_vattr attrs;
@@ -177,14 +177,14 @@ static int coda_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct CodaFid newfid;
if (is_root_inode(dir) && coda_iscontrol(name, len))
- return -EPERM;
+ return ERR_PTR(-EPERM);
attrs.va_mode = mode;
- error = venus_mkdir(dir->i_sb, coda_i2f(dir),
+ error = venus_mkdir(dir->i_sb, coda_i2f(dir),
name, len, &newfid, &attrs);
if (error)
goto err_out;
-
+
inode = coda_iget(dir->i_sb, &newfid, &attrs);
if (IS_ERR(inode)) {
error = PTR_ERR(inode);
@@ -195,10 +195,10 @@ static int coda_mkdir(struct mnt_idmap *idmap, struct inode *dir,
coda_dir_inc_nlink(dir);
coda_dir_update_mtime(dir);
d_instantiate(de, inode);
- return 0;
+ return NULL;
err_out:
d_drop(de);
- return error;
+ return ERR_PTR(error);
}
/* try to make de an entry in dir_inode linked to source_de */
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 7d10278db30d..5568cb74b322 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1280,8 +1280,8 @@ out_root_unlock:
}
EXPORT_SYMBOL(configfs_depend_item_unlocked);
-static int configfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *configfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int ret = 0;
int module_got = 0;
@@ -1461,7 +1461,7 @@ out_put:
put_fragment(frag);
out:
- return ret;
+ return ERR_PTR(ret);
}
static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
diff --git a/fs/coredump.c b/fs/coredump.c
index 4375c70144d0..d6a92cd6018e 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -926,14 +926,23 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
{
unsigned long addr;
struct page *dump_page;
+ int locked, ret;
dump_page = dump_page_alloc();
if (!dump_page)
return 0;
+ ret = 0;
+ locked = 0;
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
struct page *page;
+ if (!locked) {
+ if (mmap_read_lock_killable(current->mm))
+ goto out;
+ locked = 1;
+ }
+
/*
* To avoid having to allocate page tables for virtual address
* ranges that have never been used yet, and also to make it
@@ -941,21 +950,38 @@ int dump_user_range(struct coredump_params *cprm, unsigned long start,
* NULL when encountering an empty page table entry that would
* otherwise have been filled with the zero page.
*/
- page = get_dump_page(addr);
+ page = get_dump_page(addr, &locked);
if (page) {
+ if (locked) {
+ mmap_read_unlock(current->mm);
+ locked = 0;
+ }
int stop = !dump_emit_page(cprm, dump_page_copy(page, dump_page));
put_page(page);
- if (stop) {
- dump_page_free(dump_page);
- return 0;
- }
+ if (stop)
+ goto out;
} else {
dump_skip(cprm, PAGE_SIZE);
}
+
+ if (dump_interrupted())
+ goto out;
+
+ if (!need_resched())
+ continue;
+ if (locked) {
+ mmap_read_unlock(current->mm);
+ locked = 0;
+ }
cond_resched();
}
+ ret = 1;
+out:
+ if (locked)
+ mmap_read_unlock(current->mm);
+
dump_page_free(dump_page);
- return 1;
+ return ret;
}
#endif
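
The dump_user_range() rework above takes mmap_read_lock in chunks and drops it before cond_resched(), so the coredumping task never yields with the lock held. A sketch of that lock-chunking pattern with pthreads, illustrative only:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t mm_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_range(int npages)
{
	int locked = 0;

	for (int i = 0; i < npages; i++) {
		if (!locked) {
			pthread_mutex_lock(&mm_lock);
			locked = 1;
		}
		/* ... look up and copy one page under the lock ... */

		if (i % 64 != 63)
			continue;	/* no resched point yet */
		/* Never yield with the lock held. */
		pthread_mutex_unlock(&mm_lock);
		locked = 0;
		sched_yield();		/* cond_resched() stand-in */
	}
	if (locked)
		pthread_mutex_unlock(&mm_lock);
}

int main(void)
{
	process_range(256);
	printf("done\n");
	return 0;
}
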
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 328470d40dec..b74b5937e695 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -153,8 +153,8 @@ int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
}
/**
- * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache page
- * @page: the locked pagecache page containing the data to encrypt
+ * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache folio
+ * @folio: the locked pagecache folio containing the data to encrypt
* @len: size of the data to encrypt, in bytes
* @offs: offset within @page of the data to encrypt, in bytes
* @gfp_flags: memory allocation flags; see details below
@@ -177,23 +177,21 @@ int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
*
* Return: the new encrypted bounce page on success; an ERR_PTR() on failure
*/
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags)
-
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags)
{
- const struct inode *inode = page->mapping->host;
+ const struct inode *inode = folio->mapping->host;
const struct fscrypt_inode_info *ci = inode->i_crypt_info;
const unsigned int du_bits = ci->ci_data_unit_bits;
const unsigned int du_size = 1U << du_bits;
struct page *ciphertext_page;
- u64 index = ((u64)page->index << (PAGE_SHIFT - du_bits)) +
+ u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
(offs >> du_bits);
unsigned int i;
int err;
- if (WARN_ON_ONCE(!PageLocked(page)))
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+ if (WARN_ON_ONCE(!folio_test_locked(folio)))
return ERR_PTR(-EINVAL);
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
@@ -205,7 +203,7 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
for (i = offs; i < offs + len; i += du_size, index++) {
err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
- page, ciphertext_page,
+ &folio->page, ciphertext_page,
du_size, i, gfp_flags);
if (err) {
fscrypt_free_bounce_page(ciphertext_page);
@@ -213,7 +211,7 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
}
}
SetPagePrivate(ciphertext_page);
- set_page_private(ciphertext_page, (unsigned long)page);
+ set_page_private(ciphertext_page, (unsigned long)folio);
return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
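
The fscrypt folio conversion above preserves the per-data-unit index derivation used to seed the IV: a page-cache index plus a byte offset map to a linear encryption-unit index. A standalone check of that formula, with constants chosen for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages */

static uint64_t du_index(uint64_t folio_index, unsigned int offs,
			 unsigned int du_bits)
{
	return (folio_index << (PAGE_SHIFT - du_bits)) + (offs >> du_bits);
}

int main(void)
{
	/* 1 KiB data units (du_bits = 10): four per 4 KiB page.
	 * Folio 3 holds units 12..15; offset 2048 within it is unit 14. */
	printf("%llu\n", (unsigned long long)du_index(3, 2048, 10));
	return 0;
}
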
diff --git a/fs/dax.c b/fs/dax.c
index 21b47402b3dc..7fd4cd9a51f2 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1258,7 +1258,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
}
#endif /* CONFIG_FS_DAX_PMD */
-static s64 dax_unshare_iter(struct iomap_iter *iter)
+static int dax_unshare_iter(struct iomap_iter *iter)
{
struct iomap *iomap = &iter->iomap;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
@@ -1266,11 +1266,11 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
u64 copy_len = iomap_length(iter);
u32 mod;
int id = 0;
- s64 ret = 0;
+ s64 ret;
void *daddr = NULL, *saddr = NULL;
if (!iomap_want_unshare_iter(iter))
- return iomap_length(iter);
+ return iomap_iter_advance_full(iter);
/*
* Extend the file range to be aligned to fsblock/pagesize, because
@@ -1300,14 +1300,14 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
if (ret < 0)
goto out_unlock;
- if (copy_mc_to_kernel(daddr, saddr, copy_len) == 0)
- ret = iomap_length(iter);
- else
+ if (copy_mc_to_kernel(daddr, saddr, copy_len) != 0)
ret = -EIO;
out_unlock:
dax_read_unlock(id);
- return dax_mem2blk_err(ret);
+ if (ret < 0)
+ return dax_mem2blk_err(ret);
+ return iomap_iter_advance_full(iter);
}
int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
@@ -1326,7 +1326,7 @@ int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
iter.len = min(len, size - pos);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = dax_unshare_iter(&iter);
+ iter.status = dax_unshare_iter(&iter);
return ret;
}
EXPORT_SYMBOL_GPL(dax_file_unshare);
@@ -1354,17 +1354,16 @@ static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
return ret;
}
-static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
+static int dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
const struct iomap *iomap = &iter->iomap;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- loff_t pos = iter->pos;
u64 length = iomap_length(iter);
- s64 written = 0;
+ int ret;
/* already zeroed? we're done. */
if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
- return length;
+ return iomap_iter_advance(iter, &length);
/*
* invalidate the pages whose sharing state is to be changed
@@ -1372,33 +1371,35 @@ static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
*/
if (iomap->flags & IOMAP_F_SHARED)
invalidate_inode_pages2_range(iter->inode->i_mapping,
- pos >> PAGE_SHIFT,
- (pos + length - 1) >> PAGE_SHIFT);
+ iter->pos >> PAGE_SHIFT,
+ (iter->pos + length - 1) >> PAGE_SHIFT);
do {
+ loff_t pos = iter->pos;
unsigned offset = offset_in_page(pos);
- unsigned size = min_t(u64, PAGE_SIZE - offset, length);
pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
- long rc;
int id;
+ length = min_t(u64, PAGE_SIZE - offset, length);
+
id = dax_read_lock();
- if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
- rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
+ if (IS_ALIGNED(pos, PAGE_SIZE) && length == PAGE_SIZE)
+ ret = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
else
- rc = dax_memzero(iter, pos, size);
+ ret = dax_memzero(iter, pos, length);
dax_read_unlock(id);
- if (rc < 0)
- return rc;
- pos += size;
- length -= size;
- written += size;
+ if (ret < 0)
+ return ret;
+
+ ret = iomap_iter_advance(iter, &length);
+ if (ret)
+ return ret;
} while (length > 0);
if (did_zero)
*did_zero = true;
- return written;
+ return ret;
}
int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
@@ -1413,7 +1414,7 @@ int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
int ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = dax_zero_iter(&iter, did_zero);
+ iter.status = dax_zero_iter(&iter, did_zero);
return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);
@@ -1431,8 +1432,7 @@ int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
-static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
- struct iov_iter *iter)
+static int dax_iomap_iter(struct iomap_iter *iomi, struct iov_iter *iter)
{
const struct iomap *iomap = &iomi->iomap;
const struct iomap *srcmap = iomap_iter_srcmap(iomi);
@@ -1451,8 +1451,10 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
if (pos >= end)
return 0;
- if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
- return iov_iter_zero(min(length, end - pos), iter);
+ if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) {
+ done = iov_iter_zero(min(length, end - pos), iter);
+ return iomap_iter_advance(iomi, &done);
+ }
}
/*
@@ -1485,7 +1487,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
}
id = dax_read_lock();
- while (pos < end) {
+ while ((pos = iomi->pos) < end) {
unsigned offset = pos & (PAGE_SIZE - 1);
const size_t size = ALIGN(length + offset, PAGE_SIZE);
pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
@@ -1535,18 +1537,16 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
map_len, iter);
- pos += xfer;
- length -= xfer;
- done += xfer;
-
- if (xfer == 0)
+ length = xfer;
+ ret = iomap_iter_advance(iomi, &length);
+ if (!ret && xfer == 0)
ret = -EFAULT;
if (xfer < map_len)
break;
}
dax_read_unlock(id);
- return done ? done : ret;
+ return ret;
}
/**
@@ -1586,7 +1586,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
iomi.flags |= IOMAP_NOWAIT;
while ((ret = iomap_iter(&iomi, ops)) > 0)
- iomi.processed = dax_iomap_iter(&iomi, iter);
+ iomi.status = dax_iomap_iter(&iomi, iter);
done = iomi.pos - iocb->ki_pos;
iocb->ki_pos = iomi.pos;
@@ -1757,7 +1757,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
while ((error = iomap_iter(&iter, ops)) > 0) {
if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
- iter.processed = -EIO; /* fs corruption? */
+ iter.status = -EIO; /* fs corruption? */
continue;
}
@@ -1769,8 +1769,10 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
ret |= VM_FAULT_MAJOR;
}
- if (!(ret & VM_FAULT_ERROR))
- iter.processed = PAGE_SIZE;
+ if (!(ret & VM_FAULT_ERROR)) {
+ u64 length = PAGE_SIZE;
+ iter.status = iomap_iter_advance(&iter, &length);
+ }
}
if (iomap_errp)
@@ -1883,8 +1885,10 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
continue; /* actually breaks out of the loop */
ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
- if (ret != VM_FAULT_FALLBACK)
- iter.processed = PMD_SIZE;
+ if (ret != VM_FAULT_FALLBACK) {
+ u64 length = PMD_SIZE;
+ iter.status = iomap_iter_advance(&iter, &length);
+ }
}
unlock_entry:
@@ -1999,12 +2003,13 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
-static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
+static int dax_range_compare_iter(struct iomap_iter *it_src,
struct iomap_iter *it_dest, u64 len, bool *same)
{
const struct iomap *smap = &it_src->iomap;
const struct iomap *dmap = &it_dest->iomap;
loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
+ u64 dest_len;
void *saddr, *daddr;
int id, ret;
@@ -2012,7 +2017,7 @@ static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
*same = true;
- return len;
+ goto advance;
}
if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
@@ -2035,7 +2040,13 @@ static loff_t dax_range_compare_iter(struct iomap_iter *it_src,
if (!*same)
len = 0;
dax_read_unlock(id);
- return len;
+
+advance:
+ dest_len = len;
+ ret = iomap_iter_advance(it_src, &len);
+ if (!ret)
+ ret = iomap_iter_advance(it_dest, &dest_len);
+ return ret;
out_unlock:
dax_read_unlock(id);
@@ -2058,15 +2069,15 @@ int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
.len = len,
.flags = IOMAP_DAX,
};
- int ret, compared = 0;
+ int ret, status;
while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
(ret = iomap_iter(&dst_iter, ops)) > 0) {
- compared = dax_range_compare_iter(&src_iter, &dst_iter,
+ status = dax_range_compare_iter(&src_iter, &dst_iter,
min(src_iter.len, dst_iter.len), same);
- if (compared < 0)
+ if (status < 0)
return ret;
- src_iter.processed = dst_iter.processed = compared;
+ src_iter.status = dst_iter.status = status;
}
return ret;
}
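
The dax hunks above all follow one conversion: instead of returning a byte count through iter.processed for the core loop to apply, each iteration step now advances the iterator in place via iomap_iter_advance() and returns a status through iter.status. A toy model of the new convention; the names mimic the kernel's but this is not kernel code:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct toy_iter {
	uint64_t pos;
	uint64_t len;	/* bytes remaining */
	int status;
};

static int toy_iter_advance(struct toy_iter *it, uint64_t *count)
{
	if (*count > it->len)
		return -EIO;	/* advanced past the mapping */
	it->pos += *count;
	it->len -= *count;
	return 0;
}

/* One iteration step in the new style: zero up to 4096 bytes, then
 * advance the shared iterator rather than returning a byte count. */
static int toy_zero_iter(struct toy_iter *it)
{
	uint64_t chunk = it->len < 4096 ? it->len : 4096;

	/* ... zero [pos, pos + chunk) ... */
	return toy_iter_advance(it, &chunk);
}

int main(void)
{
	struct toy_iter it = { .pos = 0, .len = 10000, .status = 0 };

	while (it.len > 0 && (it.status = toy_zero_iter(&it)) == 0)
		;
	printf("pos=%llu status=%d\n", (unsigned long long)it.pos, it.status);
	return 0;
}
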
diff --git a/fs/dcache.c b/fs/dcache.c
index e3634916ffb9..623947d6a676 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2480,7 +2480,8 @@ static inline void end_dir_add(struct inode *dir, unsigned int n,
{
smp_store_release(&dir->i_dir_seq, n + 2);
preempt_enable_nested();
- wake_up_all(d_wait);
+ if (wq_has_sleeper(d_wait))
+ wake_up_all(d_wait);
}
static void d_wait_lookup(struct dentry *dentry)
@@ -2687,52 +2688,6 @@ void d_add(struct dentry *entry, struct inode *inode)
}
EXPORT_SYMBOL(d_add);
-/**
- * d_exact_alias - find and hash an exact unhashed alias
- * @entry: dentry to add
- * @inode: The inode to go with this dentry
- *
- * If an unhashed dentry with the same name/parent and desired
- * inode already exists, hash and return it. Otherwise, return
- * NULL.
- *
- * Parent directory should be locked.
- */
-struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
-{
- struct dentry *alias;
- unsigned int hash = entry->d_name.hash;
-
- spin_lock(&inode->i_lock);
- hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
- /*
- * Don't need alias->d_lock here, because aliases with
- * d_parent == entry->d_parent are not subject to name or
- * parent changes, because the parent inode i_mutex is held.
- */
- if (alias->d_name.hash != hash)
- continue;
- if (alias->d_parent != entry->d_parent)
- continue;
- if (!d_same_name(alias, entry->d_parent, &entry->d_name))
- continue;
- spin_lock(&alias->d_lock);
- if (!d_unhashed(alias)) {
- spin_unlock(&alias->d_lock);
- alias = NULL;
- } else {
- dget_dlock(alias);
- __d_rehash(alias);
- spin_unlock(&alias->d_lock);
- }
- spin_unlock(&inode->i_lock);
- return alias;
- }
- spin_unlock(&inode->i_lock);
- return NULL;
-}
-EXPORT_SYMBOL(d_exact_alias);
-
static void swap_names(struct dentry *dentry, struct dentry *target)
{
if (unlikely(dname_external(target))) {
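
The end_dir_add() change above elides wake_up_all() when the queue has no sleepers. A pthread rendition of the same micro-optimization: track waiters and skip the broadcast (and its cacheline traffic) when the count is zero. Illustrative only; the kernel's wq_has_sleeper() additionally implies a memory barrier.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static atomic_int nr_waiters;
static int done;

static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	atomic_fetch_add(&nr_waiters, 1);
	while (!done)
		pthread_cond_wait(&cond, &lock);
	atomic_fetch_sub(&nr_waiters, 1);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	pthread_mutex_lock(&lock);
	done = 1;
	if (atomic_load(&nr_waiters) > 0)	/* wq_has_sleeper() analogue */
		pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("ok");
	return 0;
}
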
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 1096ff8562fa..42e4d6eeb29f 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -12,6 +12,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include <linux/slab.h>
@@ -21,7 +23,6 @@
#include <linux/magic.h>
#include <linux/idr.h>
#include <linux/devpts_fs.h>
-#include <linux/parser.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
@@ -87,14 +88,14 @@ enum {
Opt_err
};
-static const match_table_t tokens = {
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_mode, "mode=%o"},
- {Opt_ptmxmode, "ptmxmode=%o"},
- {Opt_newinstance, "newinstance"},
- {Opt_max, "max=%d"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec devpts_param_specs[] = {
+ fsparam_u32 ("gid", Opt_gid),
+ fsparam_s32 ("max", Opt_max),
+ fsparam_u32oct ("mode", Opt_mode),
+ fsparam_flag ("newinstance", Opt_newinstance),
+ fsparam_u32oct ("ptmxmode", Opt_ptmxmode),
+ fsparam_u32 ("uid", Opt_uid),
+ {}
};
struct pts_fs_info {
@@ -214,93 +215,48 @@ void devpts_release(struct pts_fs_info *fsi)
deactivate_super(fsi->sb);
}
-#define PARSE_MOUNT 0
-#define PARSE_REMOUNT 1
-
/*
- * parse_mount_options():
- * Set @opts to mount options specified in @data. If an option is not
- * specified in @data, set it to its default value.
- *
- * Note: @data may be NULL (in which case all options are set to default).
+ * devpts_parse_param - Parse mount parameters
*/
-static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
+static int devpts_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p;
- kuid_t uid;
- kgid_t gid;
-
- opts->setuid = 0;
- opts->setgid = 0;
- opts->uid = GLOBAL_ROOT_UID;
- opts->gid = GLOBAL_ROOT_GID;
- opts->mode = DEVPTS_DEFAULT_MODE;
- opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
- opts->max = NR_UNIX98_PTY_MAX;
-
- /* Only allow instances mounted from the initial mount
- * namespace to tap the reserve pool of ptys.
- */
- if (op == PARSE_MOUNT)
- opts->reserve =
- (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns);
-
- while ((p = strsep(&data, ",")) != NULL) {
- substring_t args[MAX_OPT_ARGS];
- int token;
- int option;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return -EINVAL;
- opts->uid = uid;
- opts->setuid = 1;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return -EINVAL;
- opts->gid = gid;
- opts->setgid = 1;
- break;
- case Opt_mode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->mode = option & S_IALLUGO;
- break;
- case Opt_ptmxmode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->ptmxmode = option & S_IALLUGO;
- break;
- case Opt_newinstance:
- break;
- case Opt_max:
- if (match_int(&args[0], &option) ||
- option < 0 || option > NR_UNIX98_PTY_MAX)
- return -EINVAL;
- opts->max = option;
- break;
- default:
- pr_err("called with bogus options\n");
- return -EINVAL;
- }
+ struct pts_fs_info *fsi = fc->s_fs_info;
+ struct pts_mount_opts *opts = &fsi->mount_opts;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, devpts_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ opts->uid = result.uid;
+ opts->setuid = 1;
+ break;
+ case Opt_gid:
+ opts->gid = result.gid;
+ opts->setgid = 1;
+ break;
+ case Opt_mode:
+ opts->mode = result.uint_32 & S_IALLUGO;
+ break;
+ case Opt_ptmxmode:
+ opts->ptmxmode = result.uint_32 & S_IALLUGO;
+ break;
+ case Opt_newinstance:
+ break;
+ case Opt_max:
+ if (result.uint_32 > NR_UNIX98_PTY_MAX)
+ return invalf(fc, "max out of range");
+ opts->max = result.uint_32;
+ break;
}
return 0;
}
-static int mknod_ptmx(struct super_block *sb)
+static int mknod_ptmx(struct super_block *sb, struct fs_context *fc)
{
int mode;
int rc = -ENOMEM;
@@ -362,13 +318,23 @@ static void update_ptmx_mode(struct pts_fs_info *fsi)
}
}
-static int devpts_remount(struct super_block *sb, int *flags, char *data)
+static int devpts_reconfigure(struct fs_context *fc)
{
- int err;
- struct pts_fs_info *fsi = DEVPTS_SB(sb);
- struct pts_mount_opts *opts = &fsi->mount_opts;
+ struct pts_fs_info *fsi = DEVPTS_SB(fc->root->d_sb);
+ struct pts_fs_info *new = fc->s_fs_info;
- err = parse_mount_options(data, PARSE_REMOUNT, opts);
+ /* Apply the revised options. We don't want to change ->reserve.
+ * Ideally, we'd update each option conditionally on it having been
+ * explicitly changed, but the default is to reset everything so that
+ * would break UAPI...
+ */
+ fsi->mount_opts.setuid = new->mount_opts.setuid;
+ fsi->mount_opts.setgid = new->mount_opts.setgid;
+ fsi->mount_opts.uid = new->mount_opts.uid;
+ fsi->mount_opts.gid = new->mount_opts.gid;
+ fsi->mount_opts.mode = new->mount_opts.mode;
+ fsi->mount_opts.ptmxmode = new->mount_opts.ptmxmode;
+ fsi->mount_opts.max = new->mount_opts.max;
/*
* parse_mount_options() restores options to default values
@@ -378,7 +344,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data)
*/
update_ptmx_mode(fsi);
- return err;
+ return 0;
}
static int devpts_show_options(struct seq_file *seq, struct dentry *root)
@@ -402,31 +368,13 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root)
static const struct super_operations devpts_sops = {
.statfs = simple_statfs,
- .remount_fs = devpts_remount,
.show_options = devpts_show_options,
};
-static void *new_pts_fs_info(struct super_block *sb)
-{
- struct pts_fs_info *fsi;
-
- fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL);
- if (!fsi)
- return NULL;
-
- ida_init(&fsi->allocated_ptys);
- fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
- fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
- fsi->sb = sb;
-
- return fsi;
-}
-
-static int
-devpts_fill_super(struct super_block *s, void *data, int silent)
+static int devpts_fill_super(struct super_block *s, struct fs_context *fc)
{
+ struct pts_fs_info *fsi = DEVPTS_SB(s);
struct inode *inode;
- int error;
s->s_iflags &= ~SB_I_NODEV;
s->s_blocksize = 1024;
@@ -435,20 +383,11 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
s->s_op = &devpts_sops;
s->s_d_op = &simple_dentry_operations;
s->s_time_gran = 1;
+ fsi->sb = s;
- error = -ENOMEM;
- s->s_fs_info = new_pts_fs_info(s);
- if (!s->s_fs_info)
- goto fail;
-
- error = parse_mount_options(data, PARSE_MOUNT, &DEVPTS_SB(s)->mount_opts);
- if (error)
- goto fail;
-
- error = -ENOMEM;
inode = new_inode(s);
if (!inode)
- goto fail;
+ return -ENOMEM;
inode->i_ino = 1;
simple_inode_init_ts(inode);
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
@@ -459,31 +398,60 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
s->s_root = d_make_root(inode);
if (!s->s_root) {
pr_err("get root dentry failed\n");
- goto fail;
+ return -ENOMEM;
}
- error = mknod_ptmx(s);
- if (error)
- goto fail_dput;
-
- return 0;
-fail_dput:
- dput(s->s_root);
- s->s_root = NULL;
-fail:
- return error;
+ return mknod_ptmx(s, fc);
}
/*
- * devpts_mount()
+ * devpts_get_tree()
*
* Mount a new (private) instance of devpts. PTYs created in this
* instance are independent of the PTYs in other devpts instances.
*/
-static struct dentry *devpts_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int devpts_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, devpts_fill_super);
+}
+
+static void devpts_free_fc(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
+static const struct fs_context_operations devpts_context_ops = {
+ .free = devpts_free_fc,
+ .parse_param = devpts_parse_param,
+ .get_tree = devpts_get_tree,
+ .reconfigure = devpts_reconfigure,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+static int devpts_init_fs_context(struct fs_context *fc)
{
- return mount_nodev(fs_type, flags, data, devpts_fill_super);
+ struct pts_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ ida_init(&fsi->allocated_ptys);
+ fsi->mount_opts.uid = GLOBAL_ROOT_UID;
+ fsi->mount_opts.gid = GLOBAL_ROOT_GID;
+ fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
+ fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+ fsi->mount_opts.max = NR_UNIX98_PTY_MAX;
+
+ if (fc->purpose == FS_CONTEXT_FOR_MOUNT &&
+ current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns)
+ fsi->mount_opts.reserve = true;
+
+ fc->s_fs_info = fsi;
+ fc->ops = &devpts_context_ops;
+ return 0;
}
static void devpts_kill_sb(struct super_block *sb)
@@ -498,7 +466,8 @@ static void devpts_kill_sb(struct super_block *sb)
static struct file_system_type devpts_fs_type = {
.name = "devpts",
- .mount = devpts_mount,
+ .init_fs_context = devpts_init_fs_context,
+ .parameters = devpts_param_specs,
.kill_sb = devpts_kill_sb,
.fs_flags = FS_USERNS_MOUNT,
};
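
The devpts conversion to the new mount API keeps the option set intact, and the names in the fs_parameter_spec table above are the same ones mount(2) callers already pass. A hedged demo mounting a private instance; it needs root, and the mountpoint path is an assumption for illustration:

#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>

int main(void)
{
	mkdir("/tmp/pts", 0755);	/* scratch mountpoint; EEXIST is fine */
	if (mount("devpts", "/tmp/pts", "devpts", 0,
		  "newinstance,ptmxmode=0666,mode=0620,max=32") < 0) {
		perror("mount");
		return 1;
	}
	puts("private devpts instance mounted on /tmp/pts");
	return 0;
}
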
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index a9819ddb1ab8..51a5c54eb740 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -503,18 +503,24 @@ out_lock:
return rc;
}
-static int ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
struct inode *lower_dir;
rc = lock_parent(dentry, &lower_dentry, &lower_dir);
- if (!rc)
- rc = vfs_mkdir(&nop_mnt_idmap, lower_dir,
- lower_dentry, mode);
- if (rc || d_really_is_negative(lower_dentry))
+ if (rc)
+ goto out;
+
+ lower_dentry = vfs_mkdir(&nop_mnt_idmap, lower_dir,
+ lower_dentry, mode);
+ rc = PTR_ERR(lower_dentry);
+ if (IS_ERR(lower_dentry))
+ goto out;
+ rc = 0;
+ if (d_unhashed(lower_dentry))
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
@@ -526,7 +532,7 @@ out:
inode_unlock(lower_dir);
if (d_really_is_negative(dentry))
d_drop(dentry);
- return rc;
+ return ERR_PTR(rc);
}
static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 0b1c878317ab..e7b7f426fecf 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -172,7 +172,6 @@ const struct super_operations ecryptfs_sops = {
.destroy_inode = ecryptfs_destroy_inode,
.free_inode = ecryptfs_free_inode,
.statfs = ecryptfs_statfs,
- .remount_fs = NULL,
.evict_inode = ecryptfs_evict_inode,
.show_options = ecryptfs_show_options
};
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 76129bfcd663..af42b2c7d235 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -406,14 +406,13 @@ static int do_eventfd(unsigned int count, int flags)
if (fd < 0)
goto err;
- file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
+ file = anon_inode_getfile_fmode("[eventfd]", &eventfd_fops,
+ ctx, flags, FMODE_NOWAIT);
if (IS_ERR(file)) {
put_unused_fd(fd);
fd = PTR_ERR(file);
goto err;
}
-
- file->f_mode |= FMODE_NOWAIT;
fd_install(fd, file);
return fd;
err:
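
The eventfd hunk only changes how FMODE_NOWAIT is attached at file creation (now folded into anon_inode_getfile_fmode() instead of set afterwards); userspace-visible semantics are unchanged and stay as below: writes add to the 64-bit counter, reads fetch-and-reset it.

#include <inttypes.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int fd = eventfd(0, EFD_NONBLOCK);
	uint64_t v = 3, out;

	if (fd < 0) {
		perror("eventfd");
		return 1;
	}
	write(fd, &v, sizeof(v));
	write(fd, &v, sizeof(v));
	read(fd, &out, sizeof(out));	/* 6: a read drains the counter */
	printf("%" PRIu64 "\n", out);
	close(fd);
	return 0;
}
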
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 7c0980db77b3..9b06a0ab9c32 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -438,7 +438,7 @@ static bool ep_busy_loop_end(void *p, unsigned long start_time)
*
* we must do our busy polling with irqs enabled
*/
-static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
+static bool ep_busy_loop(struct eventpoll *ep)
{
unsigned int napi_id = READ_ONCE(ep->napi_id);
u16 budget = READ_ONCE(ep->busy_poll_budget);
@@ -448,7 +448,7 @@ static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
budget = BUSY_POLL_BUDGET;
if (napi_id >= MIN_NAPI_ID && ep_busy_loop_on(ep)) {
- napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end,
+ napi_busy_loop(napi_id, ep_busy_loop_end,
ep, prefer_busy_poll, budget);
if (ep_events_available(ep))
return true;
@@ -560,7 +560,7 @@ static void ep_resume_napi_irqs(struct eventpoll *ep)
#else
-static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
+static inline bool ep_busy_loop(struct eventpoll *ep)
{
return false;
}
@@ -1980,6 +1980,22 @@ static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
return ret;
}
+static int ep_try_send_events(struct eventpoll *ep,
+ struct epoll_event __user *events, int maxevents)
+{
+ int res;
+
+ /*
+ * Try to transfer events to user space. In case we get 0 events and
+ * there's still timeout left over, we go trying again in search of
+ * more luck.
+ */
+ res = ep_send_events(ep, events, maxevents);
+ if (res > 0)
+ ep_suspend_napi_irqs(ep);
+ return res;
+}
+
/**
* ep_poll - Retrieves ready events, and delivers them to the caller-supplied
* event buffer.
@@ -2031,23 +2047,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
while (1) {
if (eavail) {
- /*
- * Try to transfer events to user space. In case we get
- * 0 events and there's still timeout left over, we go
- * trying again in search of more luck.
- */
- res = ep_send_events(ep, events, maxevents);
- if (res) {
- if (res > 0)
- ep_suspend_napi_irqs(ep);
+ res = ep_try_send_events(ep, events, maxevents);
+ if (res)
return res;
- }
}
if (timed_out)
return 0;
- eavail = ep_busy_loop(ep, timed_out);
+ eavail = ep_busy_loop(ep);
if (eavail)
continue;
@@ -2445,6 +2453,47 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
return do_epoll_ctl(epfd, op, fd, &epds, false);
}
+static int ep_check_params(struct file *file, struct epoll_event __user *evs,
+ int maxevents)
+{
+ /* The maximum number of events must be greater than zero */
+ if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
+ return -EINVAL;
+
+ /* Verify that the area passed by the user is writeable */
+ if (!access_ok(evs, maxevents * sizeof(struct epoll_event)))
+ return -EFAULT;
+
+ /*
+ * We have to check that the file structure underneath the fd
+ * the user passed to us _is_ an eventpoll file.
+ */
+ if (!is_file_epoll(file))
+ return -EINVAL;
+
+ return 0;
+}
+
+int epoll_sendevents(struct file *file, struct epoll_event __user *events,
+ int maxevents)
+{
+ struct eventpoll *ep;
+ int ret;
+
+ ret = ep_check_params(file, events, maxevents);
+ if (unlikely(ret))
+ return ret;
+
+ ep = file->private_data;
+ /*
+ * Racy call, but that's ok - it should get retried based on
+ * poll readiness anyway.
+ */
+ if (ep_events_available(ep))
+ return ep_try_send_events(ep, events, maxevents);
+ return 0;
+}
+
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_wait(2).
@@ -2453,26 +2502,16 @@ static int do_epoll_wait(int epfd, struct epoll_event __user *events,
int maxevents, struct timespec64 *to)
{
struct eventpoll *ep;
-
- /* The maximum number of event must be greater than zero */
- if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
- return -EINVAL;
-
- /* Verify that the area passed by the user is writeable */
- if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
- return -EFAULT;
+ int ret;
/* Get the "struct file *" for the eventpoll file */
CLASS(fd, f)(epfd);
if (fd_empty(f))
return -EBADF;
- /*
- * We have to check that the file structure underneath the fd
- * the user passed to us _is_ an eventpoll file.
- */
- if (!is_file_epoll(fd_file(f)))
- return -EINVAL;
+ ret = ep_check_params(fd_file(f), events, maxevents);
+ if (unlikely(ret))
+ return ret;
/*
* At this point it is safe to assume that the "private_data" contains
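
The validations factored into ep_check_params() above are shared by epoll_wait(2) and the new epoll_sendevents() path, and they surface to userspace as EINVAL/EFAULT/EBADF. A minimal pipe-based demo; maxevents <= 0 is the user-visible EINVAL case:

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	int ep = epoll_create1(0), pfd[2];
	struct epoll_event ev = { .events = EPOLLIN }, out;

	pipe(pfd);
	ev.data.fd = pfd[0];
	epoll_ctl(ep, EPOLL_CTL_ADD, pfd[0], &ev);

	if (epoll_wait(ep, &out, 0, 0) < 0)
		perror("maxevents == 0");	/* EINVAL from the check above */

	write(pfd[1], "x", 1);
	int n = epoll_wait(ep, &out, 1, 1000);
	printf("ready: %d\n", n);
	return 0;
}
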
diff --git a/fs/exec.c b/fs/exec.c
index 506cd411f4ac..f45859ad13ac 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -755,8 +755,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
mm->arg_start = bprm->p;
#endif
- if (bprm->loader)
- bprm->loader -= stack_shift;
bprm->exec -= stack_shift;
if (mmap_write_lock_killable(mm))
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 8b30027d8251..fede0283d6e2 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -840,8 +840,8 @@ unlock:
return err;
}
-static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -851,7 +851,7 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
loff_t size = i_size_read(dir);
if (unlikely(exfat_forced_shutdown(sb)))
- return -EIO;
+ return ERR_PTR(-EIO);
mutex_lock(&EXFAT_SB(sb)->s_lock);
exfat_set_volume_dirty(sb);
@@ -882,7 +882,7 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
unlock:
mutex_unlock(&EXFAT_SB(sb)->s_lock);
- return err;
+ return ERR_PTR(err);
}
static int exfat_check_dir_empty(struct super_block *sb,
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 0c899cfba578..b5845c4846b8 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -126,10 +126,8 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
int err;
parent = ERR_PTR(-EACCES);
- inode_lock(dentry->d_inode);
if (mnt->mnt_sb->s_export_op->get_parent)
parent = mnt->mnt_sb->s_export_op->get_parent(dentry);
- inode_unlock(dentry->d_inode);
if (IS_ERR(parent)) {
dprintk("get_parent of %lu failed, err %ld\n",
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 8346ab9534c1..bde617a66cec 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -225,15 +225,16 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
return err;
}
-static int ext2_mkdir(struct mnt_idmap * idmap,
- struct inode * dir, struct dentry * dentry, umode_t mode)
+static struct dentry *ext2_mkdir(struct mnt_idmap * idmap,
+ struct inode * dir, struct dentry * dentry,
+ umode_t mode)
{
struct inode * inode;
int err;
err = dquot_initialize(dir);
if (err)
- return err;
+ return ERR_PTR(err);
inode_inc_link_count(dir);
@@ -258,7 +259,7 @@ static int ext2_mkdir(struct mnt_idmap * idmap,
d_instantiate_new(dentry, inode);
out:
- return err;
+ return ERR_PTR(err);
out_fail:
inode_dec_link_count(inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7c54ae5fcbd4..d04d8a7f12e7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3290,6 +3290,10 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
if (map->m_flags & EXT4_MAP_NEW)
iomap->flags |= IOMAP_F_NEW;
+ /* HW-offload atomics are always used */
+ if (flags & IOMAP_ATOMIC)
+ iomap->flags |= IOMAP_F_ATOMIC_BIO;
+
if (flags & IOMAP_DAX)
iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
else
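
The ext4_set_iomap() hunk tags atomic-write mappings with IOMAP_F_ATOMIC_BIO so the bio is built for an untorn write. The userspace counterpart is pwritev2(2) with RWF_ATOMIC, available on recent kernels when the device advertises untorn-write support; the sketch below simply reports an error where that support is missing.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_ATOMIC
#define RWF_ATOMIC	0x00000040
#endif

int main(int argc, char **argv)
{
	char buf[4096] __attribute__((aligned(4096)));
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_WRONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 'a', sizeof(buf));
	if (pwritev2(fd, &iov, 1, 0, RWF_ATOMIC) < 0)
		perror("pwritev2(RWF_ATOMIC)");	/* EINVAL/EOPNOTSUPP if unsupported */
	close(fd);
	return 0;
}
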
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 536d56d15072..716cc6096870 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3004,19 +3004,19 @@ out:
return err;
}
-static int ext4_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ext4_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
int err, err2 = 0, credits, retries = 0;
if (EXT4_DIR_LINK_MAX(dir))
- return -EMLINK;
+ return ERR_PTR(-EMLINK);
err = dquot_initialize(dir);
if (err)
- return err;
+ return ERR_PTR(err);
credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -3066,7 +3066,7 @@ out_stop:
out_retry:
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
- return err;
+ return ERR_PTR(err);
}
/*
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 69b8a7221a2b..37abee5016c3 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -522,7 +522,7 @@ int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
if (io->io_bio)
gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
retry_encrypt:
- bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
+ bounce_page = fscrypt_encrypt_pagecache_blocks(folio,
enc_bytes, 0, gfp_flags);
if (IS_ERR(bounce_page)) {
ret = PTR_ERR(bounce_page);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index de4da6d9cd93..ece5208223c1 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2500,7 +2500,7 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
return 0;
retry_encrypt:
- fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
+ fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page),
PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
/* flush pending IOs and wait for a while in the ENOMEM case */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index a278c7da8177..24dca4dc85a9 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -684,23 +684,23 @@ out_free_encrypted_link:
return err;
}
-static int f2fs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *f2fs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
int err;
if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ return ERR_PTR(-EIO);
err = f2fs_dquot_initialize(dir);
if (err)
- return err;
+ return ERR_PTR(err);
inode = f2fs_new_inode(idmap, dir, S_IFDIR | mode, NULL);
if (IS_ERR(inode))
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
@@ -722,12 +722,12 @@ static int f2fs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
f2fs_sync_fs(sbi->sb, 1);
f2fs_balance_fs(sbi, true);
- return 0;
+ return NULL;
out_fail:
clear_inode_flag(inode, FI_INC_LINK);
f2fs_handle_failed_inode(inode);
- return err;
+ return ERR_PTR(err);
}
static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index f06f6ba643cc..23e9b9371ec3 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -339,8 +339,8 @@ out:
}
/***** Make a directory */
-static int msdos_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *msdos_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
@@ -389,13 +389,13 @@ static int msdos_mkdir(struct mnt_idmap *idmap, struct inode *dir,
mutex_unlock(&MSDOS_SB(sb)->s_lock);
fat_flush_inodes(sb, dir, inode);
- return 0;
+ return NULL;
out_free:
fat_free_clusters(dir, cluster);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return err;
+ return ERR_PTR(err);
}
/***** Unlink a file */
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 926c26e90ef8..dd910edd2404 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -841,8 +841,8 @@ out:
return err;
}
-static int vfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *vfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
@@ -877,13 +877,13 @@ static int vfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return 0;
+ return NULL;
out_free:
fat_free_clusters(dir, cluster);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- return err;
+ return ERR_PTR(err);
}
static int vfat_get_dotdot_de(struct inode *inode, struct buffer_head **bh,
diff --git a/fs/file.c b/fs/file.c
index d868cdb95d1e..dc3f7e120e3e 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -26,6 +26,28 @@
#include "internal.h"
+bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
+{
+ /*
+ * If the reference count was already in the dead zone, then this
+ * put() operation is imbalanced. Warn, put the reference count back to
+ * DEAD and tell the caller to not deconstruct the object.
+ */
+ if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
+ atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
+ return false;
+ }
+
+ /*
+ * This is a put() operation on a saturated refcount. Restore the
+ * mean saturation value and tell the caller to not deconstruct the
+ * object.
+ */
+ if (cnt > FILE_REF_MAXREF)
+ atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
+ return false;
+}
+
/**
* __file_ref_put - Slowpath of file_ref_put()
* @ref: Pointer to the reference count
@@ -67,24 +89,7 @@ bool __file_ref_put(file_ref_t *ref, unsigned long cnt)
return true;
}
- /*
- * If the reference count was already in the dead zone, then this
- * put() operation is imbalanced. Warn, put the reference count back to
- * DEAD and tell the caller to not deconstruct the object.
- */
- if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
- atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
- return false;
- }
-
- /*
- * This is a put() operation on a saturated refcount. Restore the
- * mean saturation value and tell the caller to not deconstruct the
- * object.
- */
- if (cnt > FILE_REF_MAXREF)
- atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
- return false;
+ return __file_ref_put_badval(ref, cnt);
}
EXPORT_SYMBOL_GPL(__file_ref_put);
@@ -418,17 +423,25 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho
old_fds = old_fdt->fd;
new_fds = new_fdt->fd;
+ /*
+ * We may be racing against fd allocation from other threads using this
+ * files_struct, despite holding ->file_lock.
+ *
+ * alloc_fd() might have already claimed a slot, while fd_install()
+ * did not populate it yet. Note the latter operates locklessly, so
+ * the file can show up as we are walking the array below.
+ *
+ * At the same time we know no files will disappear as all other
+ * operations take the lock.
+ *
+ * Instead of trying to placate userspace racing with itself, we
+ * ref the file if we see it and mark the fd slot as unused otherwise.
+ */
for (i = open_files; i != 0; i--) {
- struct file *f = *old_fds++;
+ struct file *f = rcu_dereference_raw(*old_fds++);
if (f) {
get_file(f);
} else {
- /*
- * The fd may be claimed in the fd bitmap but not yet
- * instantiated in the files array if a sibling thread
- * is partway through open(). So make sure that this
- * fd is available to the new process.
- */
__clear_open_fd(open_files - i, new_fdt);
}
rcu_assign_pointer(*new_fds++, f);
@@ -577,6 +590,7 @@ repeat:
__set_open_fd(fd, fdt, flags & O_CLOEXEC);
error = fd;
+ VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
out:
spin_unlock(&files->file_lock);
@@ -612,22 +626,14 @@ void put_unused_fd(unsigned int fd)
EXPORT_SYMBOL(put_unused_fd);
-/*
- * Install a file pointer in the fd array.
- *
- * The VFS is full of places where we drop the files lock between
- * setting the open_fds bitmap and installing the file in the file
- * array. At any such point, we are vulnerable to a dup2() race
- * installing a file in the array before us. We need to detect this and
- * fput() the struct file we are about to overwrite in this case.
- *
- * It should never happen - if we allow dup2() do it, _really_ bad things
- * will follow.
+/**
+ * fd_install - install a file pointer in the fd array
+ * @fd: file descriptor to install the file in
+ * @file: the file to install
*
* This consumes the "file" refcount, so callers should treat it
* as if they had called fput(file).
*/
-
void fd_install(unsigned int fd, struct file *file)
{
struct files_struct *files = current->files;
@@ -642,7 +648,7 @@ void fd_install(unsigned int fd, struct file *file)
rcu_read_unlock_sched();
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
- WARN_ON(fdt->fd[fd] != NULL);
+ VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
spin_unlock(&files->file_lock);
return;
@@ -650,7 +656,7 @@ void fd_install(unsigned int fd, struct file *file)
/* coupled with smp_wmb() in expand_fdtable() */
smp_rmb();
fdt = rcu_dereference_sched(files->fdt);
- BUG_ON(fdt->fd[fd] != NULL);
+ VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
rcu_read_unlock_sched();
}
@@ -679,7 +685,7 @@ struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
return NULL;
fd = array_index_nospec(fd, fdt->max_fds);
- file = fdt->fd[fd];
+ file = rcu_dereference_raw(fdt->fd[fd]);
if (file) {
rcu_assign_pointer(fdt->fd[fd], NULL);
__put_unused_fd(files, fd);
@@ -1178,8 +1184,23 @@ struct fd fdget_raw(unsigned int fd)
*/
static inline bool file_needs_f_pos_lock(struct file *file)
{
- return (file->f_mode & FMODE_ATOMIC_POS) &&
- (file_count(file) > 1 || file->f_op->iterate_shared);
+ if (!(file->f_mode & FMODE_ATOMIC_POS))
+ return false;
+ if (__file_ref_read_raw(&file->f_ref) != FILE_REF_ONEREF)
+ return true;
+ if (file->f_op->iterate_shared)
+ return true;
+ return false;
+}
+
+bool file_seek_cur_needs_f_lock(struct file *file)
+{
+ if (!(file->f_mode & FMODE_ATOMIC_POS) && !file->f_op->iterate_shared)
+ return false;
+
+ VFS_WARN_ON_ONCE((file_count(file) > 1) &&
+ !mutex_is_locked(&file->f_pos_lock));
+ return true;
}
struct fd fdget_pos(unsigned int fd)
@@ -1187,7 +1208,7 @@ struct fd fdget_pos(unsigned int fd)
struct fd f = fdget(fd);
struct file *file = fd_file(f);
- if (file && file_needs_f_pos_lock(file)) {
+ if (likely(file) && file_needs_f_pos_lock(file)) {
f.word |= FDPUT_POS_UNLOCK;
mutex_lock(&file->f_pos_lock);
}
@@ -1230,14 +1251,34 @@ __releases(&files->file_lock)
struct fdtable *fdt;
/*
- * We need to detect attempts to do dup2() over allocated but still
- * not finished descriptor.
+ * dup2() is expected to close the file installed in the target fd slot
+ * (if any). However, userspace hand-picking a fd may be racing against
+ * its own threads which happened to allocate it in open() et al but did
+ * not populate it yet.
+ *
+ * Broadly speaking we may be racing against the following:
+ * fd = get_unused_fd_flags(); // fd slot reserved, ->fd[fd] == NULL
+ * file = hard_work_goes_here();
+ * fd_install(fd, file); // only now ->fd[fd] == file
+ *
+ * It is an invariant that a successfully allocated fd has a NULL entry
+ * in the array until the matching fd_install().
+ *
+ * If we fit the window, we have the fd to populate, yet no target file
+ * to close. Trying to ignore it and install our new file would violate
+ * the invariant and make fd_install() overwrite our file.
+ *
+ * Things can be done(tm) to handle this. However, the issue does not
+ * concern legitimate programs and we only need to make sure the kernel
+ * does not trip over it.
+ *
+ * The simplest way out is to return an error if we find ourselves here.
*
* POSIX is silent on the issue, we return -EBUSY.
*/
fdt = files_fdtable(files);
fd = array_index_nospec(fd, fdt->max_fds);
- tofree = fdt->fd[fd];
+ tofree = rcu_dereference_raw(fdt->fd[fd]);
if (!tofree && fd_is_open(fd, fdt))
goto Ebusy;
get_file(file);
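
The dup2 hunk above documents the EBUSY window: the target fd is reserved in the bitmap but fd_install() has not populated the slot yet, which only a program racing its own threads can hit. Outside that window, dup2(2) behaves as below, atomically closing whatever lives in the target slot and installing the new file:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int a = open("/dev/null", O_WRONLY);
	int b = open("/dev/zero", O_RDONLY);

	if (dup2(a, b) < 0) {	/* closes the /dev/zero file, reuses fd b */
		perror("dup2");
		return 1;
	}
	printf("fd %d now refers to /dev/null\n", b);
	return 0;
}
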
diff --git a/fs/file_table.c b/fs/file_table.c
index 5c00dc38558d..c04ed94cdc4b 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -221,7 +221,8 @@ struct file *alloc_empty_file(int flags, const struct cred *cred)
/*
* Privileged users can go above max_files
*/
- if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
+ if (unlikely(get_nr_files() >= files_stat.max_files) &&
+ !capable(CAP_SYS_ADMIN)) {
/*
* percpu_counters are inaccurate. Do an expensive check before
* we go and fail.
@@ -511,31 +512,37 @@ void flush_delayed_fput(void)
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);
-void fput(struct file *file)
+static void __fput_deferred(struct file *file)
{
- if (file_ref_put(&file->f_ref)) {
- struct task_struct *task = current;
+ struct task_struct *task = current;
+
+ if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
+ file_free(file);
+ return;
+ }
- if (unlikely(!(file->f_mode & (FMODE_BACKING | FMODE_OPENED)))) {
- file_free(file);
+ if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+ init_task_work(&file->f_task_work, ____fput);
+ if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
return;
- }
- if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
- init_task_work(&file->f_task_work, ____fput);
- if (!task_work_add(task, &file->f_task_work, TWA_RESUME))
- return;
- /*
- * After this task has run exit_task_work(),
- * task_work_add() will fail. Fall through to delayed
- * fput to avoid leaking *file.
- */
- }
-
- if (llist_add(&file->f_llist, &delayed_fput_list))
- schedule_delayed_work(&delayed_fput_work, 1);
+ /*
+ * After this task has run exit_task_work(),
+ * task_work_add() will fail. Fall through to delayed
+ * fput to avoid leaking *file.
+ */
}
+
+ if (llist_add(&file->f_llist, &delayed_fput_list))
+ schedule_delayed_work(&delayed_fput_work, 1);
}
+void fput(struct file *file)
+{
+ if (unlikely(file_ref_put(&file->f_ref)))
+ __fput_deferred(file);
+}
+EXPORT_SYMBOL(fput);
+
/*
* synchronous analog of fput(); for kernel threads that might be needed
* in some umount() (and thus can't use flush_delayed_fput() without
@@ -549,10 +556,32 @@ void __fput_sync(struct file *file)
if (file_ref_put(&file->f_ref))
__fput(file);
}
-
-EXPORT_SYMBOL(fput);
EXPORT_SYMBOL(__fput_sync);
+/*
+ * Equivalent to __fput_sync(), but optimized for being called with the last
+ * reference.
+ *
+ * See file_ref_put_close() for details.
+ */
+void fput_close_sync(struct file *file)
+{
+ if (likely(file_ref_put_close(&file->f_ref)))
+ __fput(file);
+}
+
+/*
+ * Equivalent to fput(), but optimized for being called with the last
+ * reference.
+ *
+ * See file_ref_put_close() for details.
+ */
+void fput_close(struct file *file)
+{
+ if (file_ref_put_close(&file->f_ref))
+ __fput_deferred(file);
+}
+
void __init files_init(void)
{
struct kmem_cache_args args = {
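
fput_close() and fput_close_sync() above lean on file_ref_put_close(), which speculates the caller holds the last reference and tries to drop straight to the dead state in a single compare-exchange. A stdatomic sketch of that fast path, assuming (as the kernel's file_ref_t does) a zero-based count where 0 means one holder; saturation and dead-zone handling are omitted here:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REF_DEAD	(-1L)

typedef struct { atomic_long refcnt; } ref_t;	/* 0 means one holder */

static bool ref_put_close(ref_t *r)
{
	long expected = 0;

	/* Fast path: sole holder, kill the count in one atomic op. */
	if (atomic_compare_exchange_strong(&r->refcnt, &expected, REF_DEAD))
		return true;	/* caller destroys the object */
	/* Slow path: other holders remain, just drop our reference. */
	return atomic_fetch_sub(&r->refcnt, 1) == 0;
}

int main(void)
{
	ref_t r = { .refcnt = 0 };

	printf("last put: %d\n", ref_put_close(&r));	/* 1 */
	return 0;
}
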
diff --git a/fs/fsopen.c b/fs/fsopen.c
index 094a7f510edf..1aaf4cb2afb2 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -453,7 +453,7 @@ SYSCALL_DEFINE5(fsconfig,
case FSCONFIG_SET_FD:
param.type = fs_value_is_file;
ret = -EBADF;
- param.file = fget(aux);
+ param.file = fget_raw(aux);
if (!param.file)
goto out_key;
param.dirfd = aux;
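
The fsopen.c change swaps fget() for fget_raw(), which notably lets FSCONFIG_SET_FD accept O_PATH descriptors. A hedged sketch of the syscall flow, assuming recent kernel headers that define SYS_fsopen/SYS_fsconfig (there are no older libc wrappers); the "source" key is hypothetical, since which keys accept an fd is filesystem-specific:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define FSCONFIG_SET_FD		5
#define FSCONFIG_CMD_CREATE	6

int main(void)
{
	int fsfd = syscall(SYS_fsopen, "tmpfs", 0);
	int dirfd = open("/tmp", O_PATH);	/* O_PATH fd, now accepted */

	if (fsfd < 0 || dirfd < 0) {
		perror("fsopen/open");
		return 1;
	}
	/* Hypothetical key name purely to show the call shape. */
	if (syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FD, "source", NULL,
		    dirfd) < 0)
		perror("fsconfig");
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	return 0;
}
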
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 3805f9b06c9d..fa8f1141ea74 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -781,9 +781,9 @@ no_open:
/*
* Code shared between mknod, mkdir, symlink and link
*/
-static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
- struct fuse_args *args, struct inode *dir,
- struct dentry *entry, umode_t mode)
+static struct dentry *create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
+ struct fuse_args *args, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_entry_out outarg;
struct inode *inode;
@@ -792,11 +792,11 @@ static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
struct fuse_forget_link *forget;
if (fuse_is_bad(dir))
- return -EIO;
+ return ERR_PTR(-EIO);
forget = fuse_alloc_forget();
if (!forget)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
memset(&outarg, 0, sizeof(outarg));
args->nodeid = get_node_id(dir);
@@ -826,29 +826,43 @@ static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm,
&outarg.attr, ATTR_TIMEOUT(&outarg), 0, 0);
if (!inode) {
fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
kfree(forget);
d_drop(entry);
d = d_splice_alias(inode, entry);
if (IS_ERR(d))
- return PTR_ERR(d);
+ return d;
- if (d) {
+ if (d)
fuse_change_entry_timeout(d, &outarg);
- dput(d);
- } else {
+ else
fuse_change_entry_timeout(entry, &outarg);
- }
fuse_dir_changed(dir);
- return 0;
+ return d;
out_put_forget_req:
if (err == -EEXIST)
fuse_invalidate_entry(entry);
kfree(forget);
- return err;
+ return ERR_PTR(err);
+}
+
+static int create_new_nondir(struct mnt_idmap *idmap, struct fuse_mount *fm,
+ struct fuse_args *args, struct inode *dir,
+ struct dentry *entry, umode_t mode)
+{
+ /*
+ * Note that when creating anything other than a directory we
+ * can be sure create_new_entry() will NOT return an alternate
+ * dentry as d_splice_alias() only returns an alternate dentry
+ * for directories. So we don't need to check for that case
+ * when passing back the result.
+ */
+ WARN_ON_ONCE(S_ISDIR(mode));
+
+ return PTR_ERR(create_new_entry(idmap, fm, args, dir, entry, mode));
}
static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
@@ -871,7 +885,7 @@ static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[0].value = &inarg;
args.in_args[1].size = entry->d_name.len + 1;
args.in_args[1].value = entry->d_name.name;
- return create_new_entry(idmap, fm, &args, dir, entry, mode);
+ return create_new_nondir(idmap, fm, &args, dir, entry, mode);
}
static int fuse_create(struct mnt_idmap *idmap, struct inode *dir,
@@ -898,8 +912,8 @@ static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
return err;
}
-static int fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *entry, umode_t mode)
+static struct dentry *fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *entry, umode_t mode)
{
struct fuse_mkdir_in inarg;
struct fuse_mount *fm = get_fuse_mount(dir);
@@ -934,7 +948,7 @@ static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir,
args.in_args[1].value = entry->d_name.name;
args.in_args[2].size = len;
args.in_args[2].value = link;
- return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK);
+ return create_new_nondir(idmap, fm, &args, dir, entry, S_IFLNK);
}
void fuse_flush_time_update(struct inode *inode)
@@ -1131,7 +1145,7 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
args.in_args[0].value = &inarg;
args.in_args[1].size = newent->d_name.len + 1;
args.in_args[1].value = newent->d_name.name;
- err = create_new_entry(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
+ err = create_new_nondir(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode);
if (!err)
fuse_update_ctime_in_cache(inode);
else if (err == -EINTR)
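A subtlety worth spelling out for create_new_nondir() above: for anything that is not a directory, d_splice_alias() never returns an alternate dentry, so create_new_entry() returns either NULL or an ERR_PTR(), and PTR_ERR(NULL) evaluates to 0. The single return statement therefore covers both outcomes (restating include/linux/err.h semantics, nothing new):

struct dentry *d = create_new_entry(idmap, fm, args, dir, entry, mode);

/* d == NULL          -> PTR_ERR(d) == 0    -> success */
/* d == ERR_PTR(-err) -> PTR_ERR(d) == -err -> failure */
return PTR_ERR(d);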
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 1795c4e8dbf6..366516b98b3f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1300,7 +1300,8 @@ static int gfs2_block_zero_range(struct inode *inode, loff_t from,
unsigned int length)
{
BUG_ON(current->journal_info);
- return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
+ return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops,
+ NULL);
}
#define GFS2_JTRUNC_REVOKES 8192
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6fbbaaad1cd0..198a8cbaf5e5 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1248,14 +1248,15 @@ static int gfs2_symlink(struct mnt_idmap *idmap, struct inode *dir,
* @dentry: The dentry of the new directory
* @mode: The mode of the new directory
*
- * Returns: errno
+ * Returns: the dentry, or ERR_PTR(errno)
*/
-static int gfs2_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *gfs2_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
- return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0);
+
+ return ERR_PTR(gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0));
}
/**
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index b75c26045df4..86a6b317b474 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -219,26 +219,26 @@ static int hfs_create(struct mnt_idmap *idmap, struct inode *dir,
* in a directory, given the inode for the parent directory and the
* name (and its length) of the new directory.
*/
-static int hfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
int res;
inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode);
if (res) {
clear_nlink(inode);
hfs_delete_inode(inode);
iput(inode);
- return res;
+ return ERR_PTR(res);
}
d_instantiate(dentry, inode);
mark_inode_dirty(inode);
- return 0;
+ return NULL;
}
/*
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index f5c4b3e31a1c..876bbb80fb4d 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -523,10 +523,10 @@ static int hfsplus_create(struct mnt_idmap *idmap, struct inode *dir,
return hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode, 0);
}
-static int hfsplus_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hfsplus_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFDIR, 0);
+ return ERR_PTR(hfsplus_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFDIR, 0));
}
static int hfsplus_rename(struct mnt_idmap *idmap,
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index e0741e468956..a2c6b9051c5b 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -679,17 +679,25 @@ static int hostfs_symlink(struct mnt_idmap *idmap, struct inode *ino,
return err;
}
-static int hostfs_mkdir(struct mnt_idmap *idmap, struct inode *ino,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hostfs_mkdir(struct mnt_idmap *idmap, struct inode *ino,
+ struct dentry *dentry, umode_t mode)
{
+ struct inode *inode;
char *file;
int err;
if ((file = dentry_name(dentry)) == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
err = do_mkdir(file, mode);
+ if (err) {
+ dentry = ERR_PTR(err);
+ } else {
+ inode = hostfs_iget(dentry->d_sb, file);
+ d_drop(dentry);
+ dentry = d_splice_alias(inode, dentry);
+ }
__putname(file);
- return err;
+ return dentry;
}
static int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index d0edf9ed33b6..e3cdc421dfba 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -19,8 +19,8 @@ static void hpfs_update_directory_times(struct inode *dir)
hpfs_write_inode_nolock(dir);
}
-static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
@@ -35,7 +35,7 @@ static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
int r;
struct hpfs_dirent dee;
int err;
- if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
+ if ((err = hpfs_chk_name(name, &len))) return ERR_PTR(err==-ENOENT ? -EINVAL : err);
hpfs_lock(dir->i_sb);
err = -ENOSPC;
fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
@@ -112,7 +112,7 @@ static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
hpfs_update_directory_times(dir);
d_instantiate(dentry, result);
hpfs_unlock(dir->i_sb);
- return 0;
+ return NULL;
bail3:
iput(result);
bail2:
@@ -123,7 +123,7 @@ bail1:
hpfs_free_sectors(dir->i_sb, fno, 1);
bail:
hpfs_unlock(dir->i_sb);
- return err;
+ return ERR_PTR(err);
}
static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0fc179a59830..d98caedbb723 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -991,14 +991,14 @@ static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
return 0;
}
-static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int retval = hugetlbfs_mknod(idmap, dir, dentry,
mode | S_IFDIR, 0);
if (!retval)
inc_nlink(dir);
- return retval;
+ return ERR_PTR(retval);
}
static int hugetlbfs_create(struct mnt_idmap *idmap,
diff --git a/fs/init.c b/fs/init.c
index e9387b6c4f30..eef5124885e3 100644
--- a/fs/init.c
+++ b/fs/init.c
@@ -230,9 +230,12 @@ int __init init_mkdir(const char *pathname, umode_t mode)
return PTR_ERR(dentry);
mode = mode_strip_umask(d_inode(path.dentry), mode);
error = security_path_mkdir(&path, dentry, mode);
- if (!error)
- error = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
+ if (!error) {
+ dentry = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
dentry, mode);
+ if (IS_ERR(dentry))
+ error = PTR_ERR(dentry);
+ }
done_path_create(&path, dentry);
return error;
}
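The conversions above all follow the same contract now that ->mkdir (and vfs_mkdir()) return a struct dentry * instead of an int. A sketch of what an implementation is expected to hand back, inferred from the hunks above rather than from a formal spec (example_new_inode() is a hypothetical helper):

static struct dentry *example_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				    struct dentry *dentry, umode_t mode)
{
	struct inode *inode = example_new_inode(dir, S_IFDIR | mode);

	if (IS_ERR(inode))
		return ERR_CAST(inode);		/* failure: ERR_PTR(errno) */

	/*
	 * Either instantiate the dentry we were given and return NULL, or
	 * return the alternate dentry found by d_splice_alias(), as the
	 * hostfs conversion above does.
	 */
	d_instantiate(dentry, inode);
	return NULL;
}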
diff --git a/fs/inode.c b/fs/inode.c
index 5587aabdaa5e..99318b157a9a 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -327,7 +327,17 @@ static void i_callback(struct rcu_head *head)
free_inode_nonrcu(inode);
}
-static struct inode *alloc_inode(struct super_block *sb)
+/**
+ * alloc_inode - obtain an inode
+ * @sb: superblock
+ *
+ * Allocates a new inode for the given superblock.
+ * The inode won't be chained into the superblock's s_inodes list.
+ * This means:
+ * - the fs can't be unmounted
+ * - quotas, fsnotify and writeback can't work on it
+ */
+struct inode *alloc_inode(struct super_block *sb)
{
const struct super_operations *ops = sb->s_op;
struct inode *inode;
@@ -613,18 +623,22 @@ static void inode_wait_for_lru_isolating(struct inode *inode)
*/
void inode_sb_list_add(struct inode *inode)
{
- spin_lock(&inode->i_sb->s_inode_list_lock);
- list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
- spin_unlock(&inode->i_sb->s_inode_list_lock);
+ struct super_block *sb = inode->i_sb;
+
+ spin_lock(&sb->s_inode_list_lock);
+ list_add(&inode->i_sb_list, &sb->s_inodes);
+ spin_unlock(&sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);
static inline void inode_sb_list_del(struct inode *inode)
{
+ struct super_block *sb = inode->i_sb;
+
if (!list_empty(&inode->i_sb_list)) {
- spin_lock(&inode->i_sb->s_inode_list_lock);
+ spin_lock(&sb->s_inode_list_lock);
list_del_init(&inode->i_sb_list);
- spin_unlock(&inode->i_sb->s_inode_list_lock);
+ spin_unlock(&sb->s_inode_list_lock);
}
}
@@ -806,23 +820,16 @@ static void evict(struct inode *inode)
/*
* Wake up waiters in __wait_on_freeing_inode().
*
- * Lockless hash lookup may end up finding the inode before we removed
- * it above, but only lock it *after* we are done with the wakeup below.
- * In this case the potential waiter cannot safely block.
+ * It is an invariant that any thread we need to wake up is already
+ * accounted for before remove_inode_hash() acquires ->i_lock -- both
+ * sides take the lock and sleep is aborted if the inode is found
+ * unhashed. Thus either the sleeper wins and goes off CPU, or removal
+ * wins and the sleeper aborts after testing with the lock.
*
- * The inode being unhashed after the call to remove_inode_hash() is
- * used as an indicator whether blocking on it is safe.
+ * This also means we don't need any fences for the call below.
*/
- spin_lock(&inode->i_lock);
- /*
- * Pairs with the barrier in prepare_to_wait_event() to make sure
- * ___wait_var_event() either sees the bit cleared or
- * waitqueue_active() check in wake_up_var() sees the waiter.
- */
- smp_mb__after_spinlock();
inode_wake_up_bit(inode, __I_NEW);
BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
- spin_unlock(&inode->i_lock);
destroy_inode(inode);
}
@@ -900,46 +907,6 @@ again:
}
EXPORT_SYMBOL_GPL(evict_inodes);
-/**
- * invalidate_inodes - attempt to free all inodes on a superblock
- * @sb: superblock to operate on
- *
- * Attempts to free all inodes (including dirty inodes) for a given superblock.
- */
-void invalidate_inodes(struct super_block *sb)
-{
- struct inode *inode, *next;
- LIST_HEAD(dispose);
-
-again:
- spin_lock(&sb->s_inode_list_lock);
- list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
- spin_lock(&inode->i_lock);
- if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
- spin_unlock(&inode->i_lock);
- continue;
- }
- if (atomic_read(&inode->i_count)) {
- spin_unlock(&inode->i_lock);
- continue;
- }
-
- inode->i_state |= I_FREEING;
- inode_lru_list_del(inode);
- spin_unlock(&inode->i_lock);
- list_add(&inode->i_lru, &dispose);
- if (need_resched()) {
- spin_unlock(&sb->s_inode_list_lock);
- cond_resched();
- dispose_list(&dispose);
- goto again;
- }
- }
- spin_unlock(&sb->s_inode_list_lock);
-
- dispose_list(&dispose);
-}
-
/*
* Isolate the inode from the LRU in preparation for freeing it.
*
@@ -1160,21 +1127,6 @@ unsigned int get_next_ino(void)
EXPORT_SYMBOL(get_next_ino);
/**
- * new_inode_pseudo - obtain an inode
- * @sb: superblock
- *
- * Allocates a new inode for given superblock.
- * Inode wont be chained in superblock s_inodes list
- * This means :
- * - fs can't be unmount
- * - quotas, fsnotify, writeback can't work
- */
-struct inode *new_inode_pseudo(struct super_block *sb)
-{
- return alloc_inode(sb);
-}
-
-/**
* new_inode - obtain an inode
* @sb: superblock
*
@@ -1190,7 +1142,7 @@ struct inode *new_inode(struct super_block *sb)
{
struct inode *inode;
- inode = new_inode_pseudo(sb);
+ inode = alloc_inode(sb);
if (inode)
inode_sb_list_add(inode);
return inode;
@@ -1348,8 +1300,8 @@ again:
}
if (set && unlikely(set(inode, data))) {
- inode = NULL;
- goto unlock;
+ spin_unlock(&inode_hash_lock);
+ return NULL;
}
/*
@@ -1361,14 +1313,14 @@ again:
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_hash_lock);
+
/*
* Add inode to the sb list if it's not already. It has I_NEW at this
* point, so it should be safe to test i_sb_list locklessly.
*/
if (list_empty(&inode->i_sb_list))
inode_sb_list_add(inode);
-unlock:
- spin_unlock(&inode_hash_lock);
return inode;
}
@@ -1497,8 +1449,8 @@ again:
inode->i_state = I_NEW;
hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
- inode_sb_list_add(inode);
spin_unlock(&inode_hash_lock);
+ inode_sb_list_add(inode);
/* Return the locked inode with I_NEW set, the
* caller is responsible for filling in the contents
@@ -2953,3 +2905,18 @@ umode_t mode_strip_sgid(struct mnt_idmap *idmap,
return mode & ~S_ISGID;
}
EXPORT_SYMBOL(mode_strip_sgid);
+
+#ifdef CONFIG_DEBUG_VFS
+/*
+ * Dump an inode.
+ *
+ * TODO: add a proper inode dumping routine; this is a stub to get
+ * debugging off the ground.
+ */
+void dump_inode(struct inode *inode, const char *reason)
+{
+ pr_warn("%s encountered for inode %px\n", reason, inode);
+}
+EXPORT_SYMBOL(dump_inode);
+#endif
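With new_inode_pseudo() folded into its only remaining use, the two allocation entry points left in this file are easy to restate (a summary of the hunks above, no new behavior):

/* pseudo inode: stays off sb->s_inodes, so umount, quotas, fsnotify
 * and writeback never see it */
struct inode *inode = alloc_inode(sb);

/* regular inode: alloc_inode() plus inode_sb_list_add() */
struct inode *inode2 = new_inode(sb);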
diff --git a/fs/internal.h b/fs/internal.h
index e7f02ae1e098..b9b3e29a73fd 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -118,6 +118,9 @@ static inline void put_file_access(struct file *file)
}
}
+void fput_close_sync(struct file *);
+void fput_close(struct file *);
+
/*
* super.c
*/
@@ -187,8 +190,8 @@ extern struct open_how build_open_how(int flags, umode_t mode);
extern int build_open_flags(const struct open_how *how, struct open_flags *op);
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd);
-long do_ftruncate(struct file *file, loff_t length, int small);
-long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
+int do_ftruncate(struct file *file, loff_t length, int small);
+int do_sys_ftruncate(unsigned int fd, loff_t length, int small);
int chmod_common(const struct path *path, umode_t mode);
int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
int flag);
@@ -207,7 +210,6 @@ bool in_group_or_capable(struct mnt_idmap *idmap,
* fs-writeback.c
*/
extern long get_nr_dirty_inodes(void);
-void invalidate_inodes(struct super_block *sb);
/*
* dcache.c
@@ -325,6 +327,7 @@ struct stashed_operations {
int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
struct path *path);
void stashed_dentry_prune(struct dentry *dentry);
+struct dentry *stashed_dentry_get(struct dentry **stashed);
/**
* path_mounted - check whether path is mounted
* @path: path to check
@@ -338,3 +341,5 @@ static inline bool path_mounted(const struct path *path)
return path->mnt->mnt_root == path->dentry;
}
void file_f_owner_release(struct file *file);
+bool file_seek_cur_needs_f_lock(struct file *file);
+int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 638a36be31c1..c91fd2b46a77 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -41,7 +41,7 @@
*
* Returns 0 on success, -errno on error.
*/
-long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+int vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int error = -ENOTTY;
@@ -228,8 +228,8 @@ static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
return error;
}
-static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
- u64 off, u64 olen, u64 destoff)
+static int ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
+ u64 off, u64 olen, u64 destoff)
{
CLASS(fd, src_file)(srcfd);
loff_t cloned;
@@ -248,8 +248,8 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
return ret;
}
-static long ioctl_file_clone_range(struct file *file,
- struct file_clone_range __user *argp)
+static int ioctl_file_clone_range(struct file *file,
+ struct file_clone_range __user *argp)
{
struct file_clone_range args;
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index 381d76c5c232..69e8ebb41302 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -12,6 +12,7 @@ iomap-y += trace.o \
iter.o
iomap-$(CONFIG_BLOCK) += buffered-io.o \
direct-io.o \
+ ioend.o \
fiemap.o \
seek.o
iomap-$(CONFIG_SWAP) += swapfile.o
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index d303e6c8900c..814b7f679486 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -12,17 +12,15 @@
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
-#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
+#include "internal.h"
#include "trace.h"
#include "../internal.h"
-#define IOEND_BATCH_SIZE 4096
-
/*
* Structure allocated for each folio to track per-block uptodate, dirty state
* and I/O completions.
@@ -40,8 +38,6 @@ struct iomap_folio_state {
unsigned long state[];
};
-static struct bio_set iomap_ioend_bioset;
-
static inline bool ifs_is_fully_uptodate(struct folio *folio,
struct iomap_folio_state *ifs)
{
@@ -366,20 +362,24 @@ static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
pos >= i_size_read(iter->inode);
}
-static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
- struct iomap_readpage_ctx *ctx, loff_t offset)
+static int iomap_readpage_iter(struct iomap_iter *iter,
+ struct iomap_readpage_ctx *ctx)
{
const struct iomap *iomap = &iter->iomap;
- loff_t pos = iter->pos + offset;
- loff_t length = iomap_length(iter) - offset;
+ loff_t pos = iter->pos;
+ loff_t length = iomap_length(iter);
struct folio *folio = ctx->cur_folio;
struct iomap_folio_state *ifs;
- loff_t orig_pos = pos;
size_t poff, plen;
sector_t sector;
+ int ret;
- if (iomap->type == IOMAP_INLINE)
- return iomap_read_inline_data(iter, folio);
+ if (iomap->type == IOMAP_INLINE) {
+ ret = iomap_read_inline_data(iter, folio);
+ if (ret)
+ return ret;
+ return iomap_iter_advance(iter, &length);
+ }
/* zero post-eof blocks as the page may be mapped */
ifs = ifs_alloc(iter->inode, folio, iter->flags);
@@ -438,25 +438,22 @@ done:
* we can skip trailing ones as they will be handled in the next
* iteration.
*/
- return pos - orig_pos + plen;
+ length = pos - iter->pos + plen;
+ return iomap_iter_advance(iter, &length);
}
-static loff_t iomap_read_folio_iter(const struct iomap_iter *iter,
+static int iomap_read_folio_iter(struct iomap_iter *iter,
struct iomap_readpage_ctx *ctx)
{
- struct folio *folio = ctx->cur_folio;
- size_t offset = offset_in_folio(folio, iter->pos);
- loff_t length = min_t(loff_t, folio_size(folio) - offset,
- iomap_length(iter));
- loff_t done, ret;
-
- for (done = 0; done < length; done += ret) {
- ret = iomap_readpage_iter(iter, ctx, done);
- if (ret <= 0)
+ int ret;
+
+ while (iomap_length(iter)) {
+ ret = iomap_readpage_iter(iter, ctx);
+ if (ret)
return ret;
}
- return done;
+ return 0;
}
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
@@ -474,7 +471,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
trace_iomap_readpage(iter.inode, 1);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_read_folio_iter(&iter, &ctx);
+ iter.status = iomap_read_folio_iter(&iter, &ctx);
if (ctx.bio) {
submit_bio(ctx.bio);
@@ -493,15 +490,14 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
-static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
+static int iomap_readahead_iter(struct iomap_iter *iter,
struct iomap_readpage_ctx *ctx)
{
- loff_t length = iomap_length(iter);
- loff_t done, ret;
+ int ret;
- for (done = 0; done < length; done += ret) {
+ while (iomap_length(iter)) {
if (ctx->cur_folio &&
- offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
+ offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
if (!ctx->cur_folio_in_bio)
folio_unlock(ctx->cur_folio);
ctx->cur_folio = NULL;
@@ -510,12 +506,12 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
ctx->cur_folio = readahead_folio(ctx->rac);
ctx->cur_folio_in_bio = false;
}
- ret = iomap_readpage_iter(iter, ctx, done);
- if (ret <= 0)
+ ret = iomap_readpage_iter(iter, ctx);
+ if (ret)
return ret;
}
- return done;
+ return 0;
}
/**
@@ -547,7 +543,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
while (iomap_iter(&iter, ops) > 0)
- iter.processed = iomap_readahead_iter(&iter, &ctx);
+ iter.status = iomap_readahead_iter(&iter, &ctx);
if (ctx.bio)
submit_bio(ctx.bio);
@@ -603,6 +599,8 @@ struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
if (iter->flags & IOMAP_NOWAIT)
fgp |= FGP_NOWAIT;
+ if (iter->flags & IOMAP_DONTCACHE)
+ fgp |= FGP_DONTCACHE;
fgp |= fgf_set_order(len);
return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
@@ -907,12 +905,10 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
return __iomap_write_end(iter->inode, pos, len, copied, folio);
}
-static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
+static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
- loff_t length = iomap_length(iter);
- loff_t pos = iter->pos;
ssize_t total_written = 0;
- long status = 0;
+ int status = 0;
struct address_space *mapping = iter->inode->i_mapping;
size_t chunk = mapping_max_folio_size(mapping);
unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
@@ -923,7 +919,8 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
size_t offset; /* Offset into folio */
size_t bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
- size_t written; /* Bytes have been written */
+ u64 written; /* Bytes have been written */
+ loff_t pos = iter->pos;
bytes = iov_iter_count(i);
retry:
@@ -934,8 +931,8 @@ retry:
if (unlikely(status))
break;
- if (bytes > length)
- bytes = length;
+ if (bytes > iomap_length(iter))
+ bytes = iomap_length(iter);
/*
* Bring in the user page that we'll copy from _first_.
@@ -1006,17 +1003,12 @@ retry:
goto retry;
}
} else {
- pos += written;
total_written += written;
- length -= written;
+ iomap_iter_advance(iter, &written);
}
- } while (iov_iter_count(i) && length);
+ } while (iov_iter_count(i) && iomap_length(iter));
- if (status == -EAGAIN) {
- iov_iter_revert(i, total_written);
- return -EAGAIN;
- }
- return total_written ? total_written : status;
+ return total_written ? 0 : status;
}
ssize_t
@@ -1034,9 +1026,11 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
if (iocb->ki_flags & IOCB_NOWAIT)
iter.flags |= IOMAP_NOWAIT;
+ if (iocb->ki_flags & IOCB_DONTCACHE)
+ iter.flags |= IOMAP_DONTCACHE;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_write_iter(&iter, i);
+ iter.status = iomap_write_iter(&iter, i);
if (unlikely(iter.pos == iocb->ki_pos))
return ret;
@@ -1270,23 +1264,22 @@ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
}
EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
-static loff_t iomap_unshare_iter(struct iomap_iter *iter)
+static int iomap_unshare_iter(struct iomap_iter *iter)
{
struct iomap *iomap = &iter->iomap;
- loff_t pos = iter->pos;
- loff_t length = iomap_length(iter);
- loff_t written = 0;
+ u64 bytes = iomap_length(iter);
+ int status;
if (!iomap_want_unshare_iter(iter))
- return length;
+ return iomap_iter_advance(iter, &bytes);
do {
struct folio *folio;
- int status;
size_t offset;
- size_t bytes = min_t(u64, SIZE_MAX, length);
+ loff_t pos = iter->pos;
bool ret;
+ bytes = min_t(u64, SIZE_MAX, bytes);
status = iomap_write_begin(iter, pos, bytes, &folio);
if (unlikely(status))
return status;
@@ -1304,14 +1297,14 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
cond_resched();
- pos += bytes;
- written += bytes;
- length -= bytes;
-
balance_dirty_pages_ratelimited(iter->inode->i_mapping);
- } while (length > 0);
- return written;
+ status = iomap_iter_advance(iter, &bytes);
+ if (status)
+ break;
+ } while (bytes > 0);
+
+ return status;
}
int
@@ -1331,7 +1324,7 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
iter.len = min(len, size - pos);
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_unshare_iter(&iter);
+ iter.status = iomap_unshare_iter(&iter);
return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
@@ -1350,19 +1343,18 @@ static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
return filemap_write_and_wait_range(mapping, i->pos, end);
}
-static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
+static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
- loff_t pos = iter->pos;
- loff_t length = iomap_length(iter);
- loff_t written = 0;
+ u64 bytes = iomap_length(iter);
+ int status;
do {
struct folio *folio;
- int status;
size_t offset;
- size_t bytes = min_t(u64, SIZE_MAX, length);
+ loff_t pos = iter->pos;
bool ret;
+ bytes = min_t(u64, SIZE_MAX, bytes);
status = iomap_write_begin(iter, pos, bytes, &folio);
if (status)
return status;
@@ -1383,25 +1375,26 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
if (WARN_ON_ONCE(!ret))
return -EIO;
- pos += bytes;
- length -= bytes;
- written += bytes;
- } while (length > 0);
+ status = iomap_iter_advance(iter, &bytes);
+ if (status)
+ break;
+ } while (bytes > 0);
if (did_zero)
*did_zero = true;
- return written;
+ return status;
}
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops, void *private)
{
struct iomap_iter iter = {
.inode = inode,
.pos = pos,
.len = len,
.flags = IOMAP_ZERO,
+ .private = private,
};
struct address_space *mapping = inode->i_mapping;
unsigned int blocksize = i_blocksize(inode);
@@ -1424,7 +1417,7 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
iter.len = plen;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_zero_iter(&iter, did_zero);
+ iter.status = iomap_zero_iter(&iter, did_zero);
iter.len = len - (iter.pos - pos);
if (ret || !iter.len)
@@ -1443,17 +1436,19 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
if (srcmap->type == IOMAP_HOLE ||
srcmap->type == IOMAP_UNWRITTEN) {
- loff_t proc = iomap_length(&iter);
+ s64 status;
if (range_dirty) {
range_dirty = false;
- proc = iomap_zero_iter_flush_and_stale(&iter);
+ status = iomap_zero_iter_flush_and_stale(&iter);
+ } else {
+ status = iomap_iter_advance_full(&iter);
}
- iter.processed = proc;
+ iter.status = status;
continue;
}
- iter.processed = iomap_zero_iter(&iter, did_zero);
+ iter.status = iomap_zero_iter(&iter, did_zero);
}
return ret;
}
@@ -1461,7 +1456,7 @@ EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops)
+ const struct iomap_ops *ops, void *private)
{
unsigned int blocksize = i_blocksize(inode);
unsigned int off = pos & (blocksize - 1);
@@ -1469,11 +1464,12 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
/* Block boundary? Nothing to do */
if (!off)
return 0;
- return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
+ return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
+ private);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
-static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
+static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
struct folio *folio)
{
loff_t length = iomap_length(iter);
@@ -1490,14 +1486,16 @@ static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
folio_mark_dirty(folio);
}
- return length;
+ return iomap_iter_advance(iter, &length);
}
-vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
+ void *private)
{
struct iomap_iter iter = {
.inode = file_inode(vmf->vma->vm_file),
.flags = IOMAP_WRITE | IOMAP_FAULT,
+ .private = private,
};
struct folio *folio = page_folio(vmf->page);
ssize_t ret;
@@ -1509,7 +1507,7 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
iter.pos = folio_pos(folio);
iter.len = ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
+ iter.status = iomap_folio_mkwrite_iter(&iter, folio);
if (ret < 0)
goto out_unlock;
@@ -1538,16 +1536,15 @@ static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
* state, release holds on bios, and finally free up memory. Do not use the
* ioend after this.
*/
-static u32
-iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
{
struct inode *inode = ioend->io_inode;
struct bio *bio = &ioend->io_bio;
struct folio_iter fi;
u32 folio_count = 0;
- if (error) {
- mapping_set_error(inode->i_mapping, error);
+ if (ioend->io_error) {
+ mapping_set_error(inode->i_mapping, ioend->io_error);
if (!bio_flagged(bio, BIO_QUIET)) {
pr_err_ratelimited(
"%s: writeback error on inode %lu, offset %lld, sector %llu",
@@ -1566,116 +1563,16 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
return folio_count;
}
-/*
- * Ioend completion routine for merged bios. This can only be called from task
- * contexts as merged ioends can be of unbound length. Hence we have to break up
- * the writeback completions into manageable chunks to avoid long scheduler
- * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
- * good batch processing throughput without creating adverse scheduler latency
- * conditions.
- */
-void
-iomap_finish_ioends(struct iomap_ioend *ioend, int error)
-{
- struct list_head tmp;
- u32 completions;
-
- might_sleep();
-
- list_replace_init(&ioend->io_list, &tmp);
- completions = iomap_finish_ioend(ioend, error);
-
- while (!list_empty(&tmp)) {
- if (completions > IOEND_BATCH_SIZE * 8) {
- cond_resched();
- completions = 0;
- }
- ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
- list_del_init(&ioend->io_list);
- completions += iomap_finish_ioend(ioend, error);
- }
-}
-EXPORT_SYMBOL_GPL(iomap_finish_ioends);
-
-/*
- * We can merge two adjacent ioends if they have the same set of work to do.
- */
-static bool
-iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
-{
- if (ioend->io_bio.bi_status != next->io_bio.bi_status)
- return false;
- if (next->io_flags & IOMAP_F_BOUNDARY)
- return false;
- if ((ioend->io_flags & IOMAP_F_SHARED) ^
- (next->io_flags & IOMAP_F_SHARED))
- return false;
- if ((ioend->io_type == IOMAP_UNWRITTEN) ^
- (next->io_type == IOMAP_UNWRITTEN))
- return false;
- if (ioend->io_offset + ioend->io_size != next->io_offset)
- return false;
- /*
- * Do not merge physically discontiguous ioends. The filesystem
- * completion functions will have to iterate the physical
- * discontiguities even if we merge the ioends at a logical level, so
- * we don't gain anything by merging physical discontiguities here.
- *
- * We cannot use bio->bi_iter.bi_sector here as it is modified during
- * submission so does not point to the start sector of the bio at
- * completion.
- */
- if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
- return false;
- return true;
-}
-
-void
-iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
-{
- struct iomap_ioend *next;
-
- INIT_LIST_HEAD(&ioend->io_list);
-
- while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
- io_list))) {
- if (!iomap_ioend_can_merge(ioend, next))
- break;
- list_move_tail(&next->io_list, &ioend->io_list);
- ioend->io_size += next->io_size;
- }
-}
-EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
-
-static int
-iomap_ioend_compare(void *priv, const struct list_head *a,
- const struct list_head *b)
-{
- struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
- struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
-
- if (ia->io_offset < ib->io_offset)
- return -1;
- if (ia->io_offset > ib->io_offset)
- return 1;
- return 0;
-}
-
-void
-iomap_sort_ioends(struct list_head *ioend_list)
-{
- list_sort(NULL, ioend_list, iomap_ioend_compare);
-}
-EXPORT_SYMBOL_GPL(iomap_sort_ioends);
-
static void iomap_writepage_end_bio(struct bio *bio)
{
- iomap_finish_ioend(iomap_ioend_from_bio(bio),
- blk_status_to_errno(bio->bi_status));
+ struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
+
+ ioend->io_error = blk_status_to_errno(bio->bi_status);
+ iomap_finish_ioend_buffered(ioend);
}
/*
- * Submit the final bio for an ioend.
+ * Submit an ioend.
*
* If @error is non-zero, it means that we have a situation where some part of
* the submission process has failed after we've marked pages for writeback.
@@ -1694,14 +1591,18 @@ static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
* failure happened so that the file system end I/O handler gets called
* to clean up.
*/
- if (wpc->ops->prepare_ioend)
- error = wpc->ops->prepare_ioend(wpc->ioend, error);
+ if (wpc->ops->submit_ioend) {
+ error = wpc->ops->submit_ioend(wpc, error);
+ } else {
+ if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
+ error = -EIO;
+ if (!error)
+ submit_bio(&wpc->ioend->io_bio);
+ }
if (error) {
wpc->ioend->io_bio.bi_status = errno_to_blk_status(error);
bio_endio(&wpc->ioend->io_bio);
- } else {
- submit_bio(&wpc->ioend->io_bio);
}
wpc->ioend = NULL;
@@ -1709,9 +1610,9 @@ static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
}
static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
- struct writeback_control *wbc, struct inode *inode, loff_t pos)
+ struct writeback_control *wbc, struct inode *inode, loff_t pos,
+ u16 ioend_flags)
{
- struct iomap_ioend *ioend;
struct bio *bio;
bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
@@ -1719,36 +1620,24 @@ static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
GFP_NOFS, &iomap_ioend_bioset);
bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
bio->bi_end_io = iomap_writepage_end_bio;
- wbc_init_bio(wbc, bio);
bio->bi_write_hint = inode->i_write_hint;
-
- ioend = iomap_ioend_from_bio(bio);
- INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_type = wpc->iomap.type;
- ioend->io_flags = wpc->iomap.flags;
- if (pos > wpc->iomap.offset)
- wpc->iomap.flags &= ~IOMAP_F_BOUNDARY;
- ioend->io_inode = inode;
- ioend->io_size = 0;
- ioend->io_offset = pos;
- ioend->io_sector = bio->bi_iter.bi_sector;
-
+ wbc_init_bio(wbc, bio);
wpc->nr_folios = 0;
- return ioend;
+ return iomap_init_ioend(inode, bio, pos, ioend_flags);
}
-static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
+static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
+ u16 ioend_flags)
{
- if (wpc->iomap.offset == pos && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
- return false;
- if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
- (wpc->ioend->io_flags & IOMAP_F_SHARED))
+ if (ioend_flags & IOMAP_IOEND_BOUNDARY)
return false;
- if (wpc->iomap.type != wpc->ioend->io_type)
+ if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
+ (wpc->ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
return false;
if (pos != wpc->ioend->io_offset + wpc->ioend->io_size)
return false;
- if (iomap_sector(&wpc->iomap, pos) !=
+ if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
+ iomap_sector(&wpc->iomap, pos) !=
bio_end_sector(&wpc->ioend->io_bio))
return false;
/*
@@ -1779,14 +1668,23 @@ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
{
struct iomap_folio_state *ifs = folio->private;
size_t poff = offset_in_folio(folio, pos);
+ unsigned int ioend_flags = 0;
int error;
- if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos)) {
+ if (wpc->iomap.type == IOMAP_UNWRITTEN)
+ ioend_flags |= IOMAP_IOEND_UNWRITTEN;
+ if (wpc->iomap.flags & IOMAP_F_SHARED)
+ ioend_flags |= IOMAP_IOEND_SHARED;
+ if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
+ ioend_flags |= IOMAP_IOEND_BOUNDARY;
+
+ if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
new_ioend:
error = iomap_submit_ioend(wpc, 0);
if (error)
return error;
- wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos);
+ wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos,
+ ioend_flags);
}
if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff))
@@ -2062,11 +1960,3 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
return iomap_submit_ioend(wpc, error);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
-
-static int __init iomap_buffered_init(void)
-{
- return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
- offsetof(struct iomap_ioend, io_bio),
- BIOSET_NEED_BVECS);
-}
-fs_initcall(iomap_buffered_init);
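The recurring change through this file is the iterator calling convention: the per-extent helpers no longer return a byte count through iter.processed but an int status through iter.status, and they advance the iterator themselves. A minimal sketch of the new shape, with my_fs_iter() standing in for any of the converted helpers (hypothetical name, illustrative only):

static int my_fs_iter(struct iomap_iter *iter)
{
	u64 bytes = iomap_length(iter);

	/* ... operate on [iter->pos, iter->pos + bytes) ... */

	/* advancing replaces the old "return bytes processed" idiom */
	return iomap_iter_advance(iter, &bytes);
}

while ((ret = iomap_iter(&iter, ops)) > 0)
	iter.status = my_fs_iter(&iter);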
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 0e47da82b0c2..844261a31156 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2021 Christoph Hellwig.
+ * Copyright (c) 2016-2025 Christoph Hellwig.
*/
#include <linux/module.h>
#include <linux/compiler.h>
@@ -12,6 +12,7 @@
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
#include "trace.h"
#include "../internal.h"
@@ -20,6 +21,7 @@
* Private flags for iomap_dio, must not overlap with the public ones in
* iomap.h:
*/
+#define IOMAP_DIO_NO_INVALIDATE (1U << 25)
#define IOMAP_DIO_CALLER_COMP (1U << 26)
#define IOMAP_DIO_INLINE_COMP (1U << 27)
#define IOMAP_DIO_WRITE_THROUGH (1U << 28)
@@ -81,10 +83,12 @@ static void iomap_dio_submit_bio(const struct iomap_iter *iter,
WRITE_ONCE(iocb->private, bio);
}
- if (dio->dops && dio->dops->submit_io)
+ if (dio->dops && dio->dops->submit_io) {
dio->dops->submit_io(iter, bio, pos);
- else
+ } else {
+ WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_ANON_WRITE);
submit_bio(bio);
+ }
}
ssize_t iomap_dio_complete(struct iomap_dio *dio)
@@ -117,7 +121,8 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
* ->end_io() when necessary, otherwise a racing buffer read would cache
* zeros from unwritten extents.
*/
- if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE))
+ if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE) &&
+ !(dio->flags & IOMAP_DIO_NO_INVALIDATE))
kiocb_invalidate_post_direct_write(iocb, dio->size);
inode_dio_end(file_inode(iocb->ki_filp));
@@ -163,43 +168,31 @@ static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
cmpxchg(&dio->error, 0, ret);
}
-void iomap_dio_bio_end_io(struct bio *bio)
+/*
+ * Called when dio->ref reaches zero from an I/O completion.
+ */
+static void iomap_dio_done(struct iomap_dio *dio)
{
- struct iomap_dio *dio = bio->bi_private;
- bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
struct kiocb *iocb = dio->iocb;
- if (bio->bi_status)
- iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
- if (!atomic_dec_and_test(&dio->ref))
- goto release_bio;
-
- /*
- * Synchronous dio, task itself will handle any completion work
- * that needs after IO. All we need to do is wake the task.
- */
if (dio->wait_for_completion) {
+ /*
+ * Synchronous I/O: the task itself will handle any completion work
+ * that is needed after I/O. All we need to do is wake the task.
+ */
struct task_struct *waiter = dio->submit.waiter;
WRITE_ONCE(dio->submit.waiter, NULL);
blk_wake_io_task(waiter);
- goto release_bio;
- }
-
- /*
- * Flagged with IOMAP_DIO_INLINE_COMP, we can complete it inline
- */
- if (dio->flags & IOMAP_DIO_INLINE_COMP) {
+ } else if (dio->flags & IOMAP_DIO_INLINE_COMP) {
WRITE_ONCE(iocb->private, NULL);
iomap_dio_complete_work(&dio->aio.work);
- goto release_bio;
- }
-
- /*
- * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then schedule
- * our completion that way to avoid an async punt to a workqueue.
- */
- if (dio->flags & IOMAP_DIO_CALLER_COMP) {
+ } else if (dio->flags & IOMAP_DIO_CALLER_COMP) {
+ /*
+ * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then
+ * schedule our completion that way to avoid an async punt to a
+ * workqueue.
+ */
/* only polled IO cares about private cleared */
iocb->private = dio;
iocb->dio_complete = iomap_dio_deferred_complete;
@@ -217,19 +210,31 @@ void iomap_dio_bio_end_io(struct bio *bio)
* issuer.
*/
iocb->ki_complete(iocb, 0);
- goto release_bio;
+ } else {
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ /*
+ * Async DIO completion that requires filesystem level
+ * completion work gets punted to a work queue to complete as
+ * the operation may require more IO to be issued to finalise
+ * filesystem metadata changes or guarantee data integrity.
+ */
+ INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
+ queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
}
+}
+
+void iomap_dio_bio_end_io(struct bio *bio)
+{
+ struct iomap_dio *dio = bio->bi_private;
+ bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
+
+ if (bio->bi_status)
+ iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
+
+ if (atomic_dec_and_test(&dio->ref))
+ iomap_dio_done(dio);
- /*
- * Async DIO completion that requires filesystem level completion work
- * gets punted to a work queue to complete as the operation may require
- * more IO to be issued to finalise filesystem metadata changes or
- * guarantee data integrity.
- */
- INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
- queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq,
- &dio->aio.work);
-release_bio:
if (should_dirty) {
bio_check_pages_dirty(bio);
} else {
@@ -239,6 +244,47 @@ release_bio:
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);
+u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend)
+{
+ struct iomap_dio *dio = ioend->io_bio.bi_private;
+ bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
+ u32 vec_count = ioend->io_bio.bi_vcnt;
+
+ if (ioend->io_error)
+ iomap_dio_set_error(dio, ioend->io_error);
+
+ if (atomic_dec_and_test(&dio->ref)) {
+ /*
+ * Try to avoid another context switch for the completion given
+ * that we are already called from the ioend completion
+ * workqueue, but never invalidate pages from this thread to
+ * avoid deadlocks with buffered I/O completions. Tough luck if
+ * you hit the tiny race with someone dirtying the range now
+ * between this check and the actual completion.
+ */
+ if (!dio->iocb->ki_filp->f_mapping->nrpages) {
+ dio->flags |= IOMAP_DIO_INLINE_COMP;
+ dio->flags |= IOMAP_DIO_NO_INVALIDATE;
+ }
+ dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+ iomap_dio_done(dio);
+ }
+
+ if (should_dirty) {
+ bio_check_pages_dirty(&ioend->io_bio);
+ } else {
+ bio_release_pages(&ioend->io_bio, false);
+ bio_put(&ioend->io_bio);
+ }
+
+ /*
+ * Return the number of bvecs completed as even direct I/O completions
+ * do significant per-folio work and we'll still want to give up the
+ * CPU after a lot of completions.
+ */
+ return vec_count;
+}
+
static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
loff_t pos, unsigned len)
{
@@ -266,81 +312,85 @@ static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
}
/*
- * Figure out the bio's operation flags from the dio request, the
- * mapping, and whether or not we want FUA. Note that we can end up
- * clearing the WRITE_THROUGH flag in the dio request.
+ * Use a FUA write if we need datasync semantics and this is a pure data I/O
+ * that doesn't require any metadata updates (including after I/O completion
+ * such as unwritten extent conversion) and the underlying device either
+ * doesn't have a volatile write cache or supports FUA.
+ * This allows us to avoid cache flushes on I/O completion.
*/
-static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
- const struct iomap *iomap, bool use_fua, bool atomic)
+static inline bool iomap_dio_can_use_fua(const struct iomap *iomap,
+ struct iomap_dio *dio)
{
- blk_opf_t opflags = REQ_SYNC | REQ_IDLE;
-
- if (!(dio->flags & IOMAP_DIO_WRITE))
- return REQ_OP_READ;
-
- opflags |= REQ_OP_WRITE;
- if (use_fua)
- opflags |= REQ_FUA;
- else
- dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
- if (atomic)
- opflags |= REQ_ATOMIC;
-
- return opflags;
+ if (iomap->flags & (IOMAP_F_SHARED | IOMAP_F_DIRTY))
+ return false;
+ if (!(dio->flags & IOMAP_DIO_WRITE_THROUGH))
+ return false;
+ return !bdev_write_cache(iomap->bdev) || bdev_fua(iomap->bdev);
}
-static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
- struct iomap_dio *dio)
+static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
const struct iomap *iomap = &iter->iomap;
struct inode *inode = iter->inode;
unsigned int fs_block_size = i_blocksize(inode), pad;
const loff_t length = iomap_length(iter);
- bool atomic = iter->flags & IOMAP_ATOMIC;
loff_t pos = iter->pos;
- blk_opf_t bio_opf;
+ blk_opf_t bio_opf = REQ_SYNC | REQ_IDLE;
struct bio *bio;
bool need_zeroout = false;
- bool use_fua = false;
int nr_pages, ret = 0;
- size_t copied = 0;
+ u64 copied = 0;
size_t orig_count;
- if (atomic && length != fs_block_size)
- return -EINVAL;
-
if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
!bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
return -EINVAL;
- if (iomap->type == IOMAP_UNWRITTEN) {
- dio->flags |= IOMAP_DIO_UNWRITTEN;
- need_zeroout = true;
- }
+ if (dio->flags & IOMAP_DIO_WRITE) {
+ bio_opf |= REQ_OP_WRITE;
+
+ if (iomap->flags & IOMAP_F_ATOMIC_BIO) {
+ /*
+ * Ensure that the mapping covers the full write
+ * length, otherwise it won't be submitted as a single
+ * bio, which is required to use hardware atomics.
+ */
+ if (length != iter->len)
+ return -EINVAL;
+ bio_opf |= REQ_ATOMIC;
+ }
- if (iomap->flags & IOMAP_F_SHARED)
- dio->flags |= IOMAP_DIO_COW;
+ if (iomap->type == IOMAP_UNWRITTEN) {
+ dio->flags |= IOMAP_DIO_UNWRITTEN;
+ need_zeroout = true;
+ }
+
+ if (iomap->flags & IOMAP_F_SHARED)
+ dio->flags |= IOMAP_DIO_COW;
+
+ if (iomap->flags & IOMAP_F_NEW) {
+ need_zeroout = true;
+ } else if (iomap->type == IOMAP_MAPPED) {
+ if (iomap_dio_can_use_fua(iomap, dio))
+ bio_opf |= REQ_FUA;
+ else
+ dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
+ }
- if (iomap->flags & IOMAP_F_NEW) {
- need_zeroout = true;
- } else if (iomap->type == IOMAP_MAPPED) {
/*
- * Use a FUA write if we need datasync semantics, this is a pure
- * data IO that doesn't require any metadata updates (including
- * after IO completion such as unwritten extent conversion) and
- * the underlying device either supports FUA or doesn't have
- * a volatile write cache. This allows us to avoid cache flushes
- * on IO completion. If we can't use writethrough and need to
- * sync, disable in-task completions as dio completion will
- * need to call generic_write_sync() which will do a blocking
- * fsync / cache flush call.
+ * We can only do deferred completion for pure overwrites that
+ * don't require additional I/O at completion time.
+ *
+ * This rules out writes that need zeroing or extent conversion, that
+ * extend the file size, or that issue metadata I/O or cache flushes
+ * during completion processing.
*/
- if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
- (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
- (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
- use_fua = true;
- else if (dio->flags & IOMAP_DIO_NEED_SYNC)
+ if (need_zeroout || (pos >= i_size_read(inode)) ||
+ ((dio->flags & IOMAP_DIO_NEED_SYNC) &&
+ !(bio_opf & REQ_FUA)))
dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+ } else {
+ bio_opf |= REQ_OP_READ;
}
/*
@@ -355,18 +405,6 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
goto out;
/*
- * We can only do deferred completion for pure overwrites that
- * don't require additional IO at completion. This rules out
- * writes that need zeroing or extent conversion, extend
- * the file size, or issue journal IO or cache flushes
- * during completion processing.
- */
- if (need_zeroout ||
- ((dio->flags & IOMAP_DIO_NEED_SYNC) && !use_fua) ||
- ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
- dio->flags &= ~IOMAP_DIO_CALLER_COMP;
-
- /*
* The rules for polled IO completions follow the guidelines as the
* ones we set for inline and deferred completions. If none of those
* are available for this IO, clear the polled flag.
@@ -383,8 +421,6 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
goto out;
}
- bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua, atomic);
-
nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
do {
size_t n;
@@ -416,9 +452,9 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
}
n = bio->bi_iter.bi_size;
- if (WARN_ON_ONCE(atomic && n != length)) {
+ if (WARN_ON_ONCE((bio_opf & REQ_ATOMIC) && n != length)) {
/*
- * This bio should have covered the complete length,
+ * An atomic write bio must cover the complete length,
* which it doesn't, so error. We may need to zero out
* the tail (complete FS block), similar to when
* bio_iov_iter_get_pages() returns an error, above.
@@ -465,30 +501,28 @@ out:
/* Undo iter limitation to current extent */
iov_iter_reexpand(dio->submit.iter, orig_count - copied);
if (copied)
- return copied;
+ return iomap_iter_advance(iter, &copied);
return ret;
}
-static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
- struct iomap_dio *dio)
+static int iomap_dio_hole_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);
dio->size += length;
if (!length)
return -EFAULT;
- return length;
+ return iomap_iter_advance(iter, &length);
}
-static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
- struct iomap_dio *dio)
+static int iomap_dio_inline_iter(struct iomap_iter *iomi, struct iomap_dio *dio)
{
const struct iomap *iomap = &iomi->iomap;
struct iov_iter *iter = dio->submit.iter;
void *inline_data = iomap_inline_data(iomap, iomi->pos);
loff_t length = iomap_length(iomi);
loff_t pos = iomi->pos;
- size_t copied;
+ u64 copied;
if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
return -EIO;
@@ -510,11 +544,10 @@ static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
dio->size += copied;
if (!copied)
return -EFAULT;
- return copied;
+ return iomap_iter_advance(iomi, &copied);
}
-static loff_t iomap_dio_iter(const struct iomap_iter *iter,
- struct iomap_dio *dio)
+static int iomap_dio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
{
switch (iter->iomap.type) {
case IOMAP_HOLE:
@@ -608,9 +641,6 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (iocb->ki_flags & IOCB_NOWAIT)
iomi.flags |= IOMAP_NOWAIT;
- if (iocb->ki_flags & IOCB_ATOMIC)
- iomi.flags |= IOMAP_ATOMIC;
-
if (iov_iter_rw(iter) == READ) {
/* reads can always complete inline */
dio->flags |= IOMAP_DIO_INLINE_COMP;
@@ -645,6 +675,9 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomi.flags |= IOMAP_OVERWRITE_ONLY;
}
+ if (iocb->ki_flags & IOCB_ATOMIC)
+ iomi.flags |= IOMAP_ATOMIC;
+
/* for data sync or sync, we need sync completion processing */
if (iocb_is_dsync(iocb)) {
dio->flags |= IOMAP_DIO_NEED_SYNC;
@@ -698,7 +731,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
blk_start_plug(&plug);
while ((ret = iomap_iter(&iomi, ops)) > 0) {
- iomi.processed = iomap_dio_iter(&iomi, dio);
+ iomi.status = iomap_dio_iter(&iomi, dio);
/*
* We can only poll for single bio I/Os.
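For reference while reviewing the write path above, the whole FUA decision now lives in iomap_dio_can_use_fua() and boils down to one expression (a restatement of the helper, nothing new):

bool can_fua =
	!(iomap->flags & (IOMAP_F_SHARED | IOMAP_F_DIRTY)) &&	/* pure data overwrite */
	(dio->flags & IOMAP_DIO_WRITE_THROUGH) &&		/* datasync semantics requested */
	(!bdev_write_cache(iomap->bdev) || bdev_fua(iomap->bdev));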
diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c
index 610ca6f1ec9b..80675c42e94e 100644
--- a/fs/iomap/fiemap.c
+++ b/fs/iomap/fiemap.c
@@ -39,24 +39,23 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
iomap->length, flags);
}
-static loff_t iomap_fiemap_iter(const struct iomap_iter *iter,
+static int iomap_fiemap_iter(struct iomap_iter *iter,
struct fiemap_extent_info *fi, struct iomap *prev)
{
int ret;
if (iter->iomap.type == IOMAP_HOLE)
- return iomap_length(iter);
+ goto advance;
ret = iomap_to_fiemap(fi, prev, 0);
*prev = iter->iomap;
- switch (ret) {
- case 0: /* success */
- return iomap_length(iter);
- case 1: /* extent array full */
- return 0;
- default: /* error */
+ if (ret < 0)
return ret;
- }
+ if (ret == 1) /* extent array full */
+ return 0;
+
+advance:
+ return iomap_iter_advance_full(iter);
}
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
@@ -78,7 +77,7 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
return ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_fiemap_iter(&iter, fi, &prev);
+ iter.status = iomap_fiemap_iter(&iter, fi, &prev);
if (prev.type != IOMAP_HOLE) {
ret = iomap_to_fiemap(fi, &prev, FIEMAP_EXTENT_LAST);
@@ -114,7 +113,7 @@ iomap_bmap(struct address_space *mapping, sector_t bno,
while ((ret = iomap_iter(&iter, ops)) > 0) {
if (iter.iomap.type == IOMAP_MAPPED)
bno = iomap_sector(&iter.iomap, iter.pos) >> blkshift;
- /* leave iter.processed unset to abort loop */
+ /* leave iter.status unset to abort loop */
}
if (ret)
return 0;
diff --git a/fs/iomap/internal.h b/fs/iomap/internal.h
new file mode 100644
index 000000000000..f6992a3bf66a
--- /dev/null
+++ b/fs/iomap/internal.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IOMAP_INTERNAL_H
+#define _IOMAP_INTERNAL_H 1
+
+#define IOEND_BATCH_SIZE 4096
+
+u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend);
+u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
+
+#endif /* _IOMAP_INTERNAL_H */
diff --git a/fs/iomap/ioend.c b/fs/iomap/ioend.c
new file mode 100644
index 000000000000..18894ebba6db
--- /dev/null
+++ b/fs/iomap/ioend.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024-2025 Christoph Hellwig.
+ */
+#include <linux/iomap.h>
+#include <linux/list_sort.h>
+#include "internal.h"
+
+struct bio_set iomap_ioend_bioset;
+EXPORT_SYMBOL_GPL(iomap_ioend_bioset);
+
+struct iomap_ioend *iomap_init_ioend(struct inode *inode,
+ struct bio *bio, loff_t file_offset, u16 ioend_flags)
+{
+ struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
+
+ atomic_set(&ioend->io_remaining, 1);
+ ioend->io_error = 0;
+ ioend->io_parent = NULL;
+ INIT_LIST_HEAD(&ioend->io_list);
+ ioend->io_flags = ioend_flags;
+ ioend->io_inode = inode;
+ ioend->io_offset = file_offset;
+ ioend->io_size = bio->bi_iter.bi_size;
+ ioend->io_sector = bio->bi_iter.bi_sector;
+ ioend->io_private = NULL;
+ return ioend;
+}
+EXPORT_SYMBOL_GPL(iomap_init_ioend);
+
+static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+{
+ if (ioend->io_parent) {
+ struct bio *bio = &ioend->io_bio;
+
+ ioend = ioend->io_parent;
+ bio_put(bio);
+ }
+
+ if (error)
+ cmpxchg(&ioend->io_error, 0, error);
+
+ if (!atomic_dec_and_test(&ioend->io_remaining))
+ return 0;
+ if (ioend->io_flags & IOMAP_IOEND_DIRECT)
+ return iomap_finish_ioend_direct(ioend);
+ return iomap_finish_ioend_buffered(ioend);
+}
+
+/*
+ * Ioend completion routine for merged bios. This can only be called from task
+ * contexts as merged ioends can be of unbounded length. Hence we have to break up
+ * the writeback completions into manageable chunks to avoid long scheduler
+ * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
+ * good batch processing throughput without creating adverse scheduler latency
+ * conditions.
+ */
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+{
+ struct list_head tmp;
+ u32 completions;
+
+ might_sleep();
+
+ list_replace_init(&ioend->io_list, &tmp);
+ completions = iomap_finish_ioend(ioend, error);
+
+ while (!list_empty(&tmp)) {
+ if (completions > IOEND_BATCH_SIZE * 8) {
+ cond_resched();
+ completions = 0;
+ }
+ ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
+ list_del_init(&ioend->io_list);
+ completions += iomap_finish_ioend(ioend, error);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_finish_ioends);
+
+/*
+ * We can merge two adjacent ioends if they have the same set of work to do.
+ */
+static bool iomap_ioend_can_merge(struct iomap_ioend *ioend,
+ struct iomap_ioend *next)
+{
+ if (ioend->io_bio.bi_status != next->io_bio.bi_status)
+ return false;
+ if (next->io_flags & IOMAP_IOEND_BOUNDARY)
+ return false;
+ if ((ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
+ (next->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
+ return false;
+ if (ioend->io_offset + ioend->io_size != next->io_offset)
+ return false;
+ /*
+ * Do not merge physically discontiguous ioends. The filesystem
+ * completion functions will have to iterate the physical
+ * discontiguities even if we merge the ioends at a logical level, so
+ * we don't gain anything by merging physical discontiguities here.
+ *
+ * We cannot use bio->bi_iter.bi_sector here as it is modified during
+ * submission so does not point to the start sector of the bio at
+ * completion.
+ */
+ if (ioend->io_sector + (ioend->io_size >> SECTOR_SHIFT) !=
+ next->io_sector)
+ return false;
+ return true;
+}
+
+void iomap_ioend_try_merge(struct iomap_ioend *ioend,
+ struct list_head *more_ioends)
+{
+ struct iomap_ioend *next;
+
+ INIT_LIST_HEAD(&ioend->io_list);
+
+ while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
+ io_list))) {
+ if (!iomap_ioend_can_merge(ioend, next))
+ break;
+ list_move_tail(&next->io_list, &ioend->io_list);
+ ioend->io_size += next->io_size;
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
+
+static int iomap_ioend_compare(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
+ struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
+
+ if (ia->io_offset < ib->io_offset)
+ return -1;
+ if (ia->io_offset > ib->io_offset)
+ return 1;
+ return 0;
+}
+
+void iomap_sort_ioends(struct list_head *ioend_list)
+{
+ list_sort(NULL, ioend_list, iomap_ioend_compare);
+}
+EXPORT_SYMBOL_GPL(iomap_sort_ioends);
+
+/*
+ * Split up to the first @max_len bytes from @ioend if the ioend covers more
+ * than @max_len bytes.
+ *
+ * If @is_append is set, the split will be based on the hardware limits for
+ * REQ_OP_ZONE_APPEND commands and can be less than @max_len if the hardware
+ * limits don't allow the entire @max_len length.
+ *
+ * The bio embedded into @ioend must be a REQ_OP_WRITE because the block layer
+ * does not allow splitting REQ_OP_ZONE_APPEND bios. The file system has to
+ * switch the operation after this call, but before submitting the bio.
+ */
+struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
+ unsigned int max_len, bool is_append)
+{
+ struct bio *bio = &ioend->io_bio;
+ struct iomap_ioend *split_ioend;
+ unsigned int nr_segs;
+ int sector_offset;
+ struct bio *split;
+
+ if (is_append) {
+ struct queue_limits *lim = bdev_limits(bio->bi_bdev);
+
+ max_len = min(max_len,
+ lim->max_zone_append_sectors << SECTOR_SHIFT);
+
+ sector_offset = bio_split_rw_at(bio, lim, &nr_segs, max_len);
+ if (unlikely(sector_offset < 0))
+ return ERR_PTR(sector_offset);
+ if (!sector_offset)
+ return NULL;
+ } else {
+ if (bio->bi_iter.bi_size <= max_len)
+ return NULL;
+ sector_offset = max_len >> SECTOR_SHIFT;
+ }
+
+ /* ensure the split ioend is still block size aligned */
+ sector_offset = ALIGN_DOWN(sector_offset << SECTOR_SHIFT,
+ i_blocksize(ioend->io_inode)) >> SECTOR_SHIFT;
+
+ split = bio_split(bio, sector_offset, GFP_NOFS, &iomap_ioend_bioset);
+ if (IS_ERR(split))
+ return ERR_CAST(split);
+ split->bi_private = bio->bi_private;
+ split->bi_end_io = bio->bi_end_io;
+
+ split_ioend = iomap_init_ioend(ioend->io_inode, split, ioend->io_offset,
+ ioend->io_flags);
+ split_ioend->io_parent = ioend;
+
+ atomic_inc(&ioend->io_remaining);
+ ioend->io_offset += split_ioend->io_size;
+ ioend->io_size -= split_ioend->io_size;
+
+ split_ioend->io_sector = ioend->io_sector;
+ if (!is_append)
+ ioend->io_sector += (split_ioend->io_size >> SECTOR_SHIFT);
+ return split_ioend;
+}
+EXPORT_SYMBOL_GPL(iomap_split_ioend);
+
+static int __init iomap_ioend_init(void)
+{
+ return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
+ offsetof(struct iomap_ioend, io_bio),
+ BIOSET_NEED_BVECS);
+}
+fs_initcall(iomap_ioend_init);
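
One plausible way a zoned filesystem consumes iomap_split_ioend() at submission time, per the comment above; the surrounding function, the single-split flow, and the flag handling are illustrative only:

	static int my_submit_ioend_zone_append(struct iomap_ioend *ioend,
			unsigned int max_len)
	{
		struct iomap_ioend *split;

		/* the embedded bio must still be REQ_OP_WRITE at this point */
		split = iomap_split_ioend(ioend, max_len, true);
		if (IS_ERR(split))
			return PTR_ERR(split);
		if (split) {
			/* switch the op after splitting, before submission */
			split->io_bio.bi_opf &= ~REQ_OP_MASK;
			split->io_bio.bi_opf |= REQ_OP_ZONE_APPEND;
			submit_bio(&split->io_bio);
		}
		return 0;
	}
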
diff --git a/fs/iomap/iter.c b/fs/iomap/iter.c
index 3790918646af..6ffc6a7b9ba5 100644
--- a/fs/iomap/iter.c
+++ b/fs/iomap/iter.c
@@ -7,40 +7,25 @@
#include <linux/iomap.h>
#include "trace.h"
-/*
- * Advance to the next range we need to map.
- *
- * If the iomap is marked IOMAP_F_STALE, it means the existing map was not fully
- * processed - it was aborted because the extent the iomap spanned may have been
- * changed during the operation. In this case, the iteration behaviour is to
- * remap the unprocessed range of the iter, and that means we may need to remap
- * even when we've made no progress (i.e. iter->processed = 0). Hence the
- * "finished iterating" case needs to distinguish between
- * (processed = 0) meaning we are done and (processed = 0 && stale) meaning we
- * need to remap the entire remaining range.
- */
-static inline int iomap_iter_advance(struct iomap_iter *iter)
+static inline void iomap_iter_reset_iomap(struct iomap_iter *iter)
{
- bool stale = iter->iomap.flags & IOMAP_F_STALE;
- int ret = 1;
-
- /* handle the previous iteration (if any) */
- if (iter->iomap.length) {
- if (iter->processed < 0)
- return iter->processed;
- if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
- return -EIO;
- iter->pos += iter->processed;
- iter->len -= iter->processed;
- if (!iter->len || (!iter->processed && !stale))
- ret = 0;
- }
-
- /* clear the per iteration state */
- iter->processed = 0;
+ iter->status = 0;
memset(&iter->iomap, 0, sizeof(iter->iomap));
memset(&iter->srcmap, 0, sizeof(iter->srcmap));
- return ret;
+}
+
+/*
+ * Advance the current iterator position and output the length remaining for the
+ * current mapping.
+ */
+int iomap_iter_advance(struct iomap_iter *iter, u64 *count)
+{
+ if (WARN_ON_ONCE(*count > iomap_length(iter)))
+ return -EIO;
+ iter->pos += *count;
+ iter->len -= *count;
+ *count = iomap_length(iter);
+ return 0;
}
static inline void iomap_iter_done(struct iomap_iter *iter)
@@ -50,6 +35,8 @@ static inline void iomap_iter_done(struct iomap_iter *iter)
WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_STALE);
+ iter->iter_start_pos = iter->pos;
+
trace_iomap_iter_dstmap(iter->inode, &iter->iomap);
if (iter->srcmap.type != IOMAP_HOLE)
trace_iomap_iter_srcmap(iter->inode, &iter->srcmap);
@@ -67,26 +54,58 @@ static inline void iomap_iter_done(struct iomap_iter *iter)
* function must be called in a loop that continues as long as it returns a
* positive value. If 0 or a negative value is returned, the caller must not
* return to the loop body. Within a loop body, there are two ways to break out
- * of the loop body: leave @iter.processed unchanged, or set it to a negative
+ * of the loop body: leave @iter.status unchanged, or set it to a negative
* errno.
*/
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
{
+ bool stale = iter->iomap.flags & IOMAP_F_STALE;
+ ssize_t advanced;
+ u64 olen;
int ret;
- if (iter->iomap.length && ops->iomap_end) {
- ret = ops->iomap_end(iter->inode, iter->pos, iomap_length(iter),
- iter->processed > 0 ? iter->processed : 0,
- iter->flags, &iter->iomap);
- if (ret < 0 && !iter->processed)
+ trace_iomap_iter(iter, ops, _RET_IP_);
+
+ if (!iter->iomap.length)
+ goto begin;
+
+ /*
+ * Calculate how far the iter was advanced and the original length bytes
+ * for ->iomap_end().
+ */
+ advanced = iter->pos - iter->iter_start_pos;
+ olen = iter->len + advanced;
+
+ if (ops->iomap_end) {
+ ret = ops->iomap_end(iter->inode, iter->iter_start_pos,
+ iomap_length_trim(iter, iter->iter_start_pos,
+ olen),
+ advanced, iter->flags, &iter->iomap);
+ if (ret < 0 && !advanced)
return ret;
}
- trace_iomap_iter(iter, ops, _RET_IP_);
- ret = iomap_iter_advance(iter);
+ /* detect old return semantics where this would advance */
+ if (WARN_ON_ONCE(iter->status > 0))
+ iter->status = -EIO;
+
+ /*
+ * Use iter->len to determine whether to continue onto the next mapping.
+ * Explicitly terminate on error status or if the current iter has not
+ * advanced at all (i.e. no work was done for some reason) unless the
+ * mapping has been marked stale and needs to be reprocessed.
+ */
+ if (iter->status < 0)
+ ret = iter->status;
+ else if (iter->len == 0 || (!advanced && !stale))
+ ret = 0;
+ else
+ ret = 1;
+ iomap_iter_reset_iomap(iter);
if (ret <= 0)
return ret;
+begin:
ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
&iter->iomap, &iter->srcmap);
if (ret < 0)
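
A worked example of the bookkeeping above: suppose a mapping is entered with iter->pos == iter->iter_start_pos == 0x10000 and iter->len == 0x100000, and the handler advances by 0x1000 before returning. On the next iomap_iter() call, advanced = 0x11000 - 0x10000 = 0x1000 and olen = (0x100000 - 0x1000) + 0x1000 = 0x100000, so ->iomap_end() is still handed the original start position and length plus the number of bytes actually processed, matching what it saw under the old iter->processed scheme.
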
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index a845c012b50c..04d7919636c1 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -10,7 +10,7 @@
#include <linux/pagemap.h>
#include <linux/pagevec.h>
-static loff_t iomap_seek_hole_iter(const struct iomap_iter *iter,
+static int iomap_seek_hole_iter(struct iomap_iter *iter,
loff_t *hole_pos)
{
loff_t length = iomap_length(iter);
@@ -20,13 +20,13 @@ static loff_t iomap_seek_hole_iter(const struct iomap_iter *iter,
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
iter->pos, iter->pos + length, SEEK_HOLE);
if (*hole_pos == iter->pos + length)
- return length;
+ return iomap_iter_advance(iter, &length);
return 0;
case IOMAP_HOLE:
*hole_pos = iter->pos;
return 0;
default:
- return length;
+ return iomap_iter_advance(iter, &length);
}
}
@@ -47,7 +47,7 @@ iomap_seek_hole(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
iter.len = size - pos;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_seek_hole_iter(&iter, &pos);
+ iter.status = iomap_seek_hole_iter(&iter, &pos);
if (ret < 0)
return ret;
if (iter.len) /* found hole before EOF */
@@ -56,19 +56,19 @@ iomap_seek_hole(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);
-static loff_t iomap_seek_data_iter(const struct iomap_iter *iter,
+static int iomap_seek_data_iter(struct iomap_iter *iter,
loff_t *hole_pos)
{
loff_t length = iomap_length(iter);
switch (iter->iomap.type) {
case IOMAP_HOLE:
- return length;
+ return iomap_iter_advance(iter, &length);
case IOMAP_UNWRITTEN:
*hole_pos = mapping_seek_hole_data(iter->inode->i_mapping,
iter->pos, iter->pos + length, SEEK_DATA);
if (*hole_pos < 0)
- return length;
+ return iomap_iter_advance(iter, &length);
return 0;
default:
*hole_pos = iter->pos;
@@ -93,7 +93,7 @@ iomap_seek_data(struct inode *inode, loff_t pos, const struct iomap_ops *ops)
iter.len = size - pos;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_seek_data_iter(&iter, &pos);
+ iter.status = iomap_seek_data_iter(&iter, &pos);
if (ret < 0)
return ret;
if (iter.len) /* found data before EOF */
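
For reference, the usual way a filesystem wires these helpers into its ->llseek method (a sketch; my_iomap_ops stands in for the filesystem's struct iomap_ops):

	static loff_t my_llseek(struct file *file, loff_t offset, int whence)
	{
		struct inode *inode = file_inode(file);

		switch (whence) {
		case SEEK_HOLE:
			offset = iomap_seek_hole(inode, offset, &my_iomap_ops);
			break;
		case SEEK_DATA:
			offset = iomap_seek_data(inode, offset, &my_iomap_ops);
			break;
		default:
			return generic_file_llseek(file, offset, whence);
		}
		if (offset < 0)
			return offset;
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	}
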
diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
index b90d0eda9e51..c1a762c10ce4 100644
--- a/fs/iomap/swapfile.c
+++ b/fs/iomap/swapfile.c
@@ -94,7 +94,7 @@ static int iomap_swapfile_fail(struct iomap_swapfile_info *isi, const char *str)
* swap only cares about contiguous page-aligned physical extents and makes no
* distinction between written and unwritten extents.
*/
-static loff_t iomap_swapfile_iter(const struct iomap_iter *iter,
+static int iomap_swapfile_iter(struct iomap_iter *iter,
struct iomap *iomap, struct iomap_swapfile_info *isi)
{
switch (iomap->type) {
@@ -132,7 +132,8 @@ static loff_t iomap_swapfile_iter(const struct iomap_iter *iter,
return error;
memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
}
- return iomap_length(iter);
+
+ return iomap_iter_advance_full(iter);
}
/*
@@ -166,7 +167,7 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
return ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_swapfile_iter(&iter, &iter.iomap, &isi);
+ iter.status = iomap_swapfile_iter(&iter, &iter.iomap, &isi);
if (ret < 0)
return ret;
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
index 4118a42cdab0..9eab2c8ac3c5 100644
--- a/fs/iomap/trace.h
+++ b/fs/iomap/trace.h
@@ -207,7 +207,7 @@ TRACE_EVENT(iomap_iter,
__field(u64, ino)
__field(loff_t, pos)
__field(u64, length)
- __field(s64, processed)
+ __field(int, status)
__field(unsigned int, flags)
__field(const void *, ops)
__field(unsigned long, caller)
@@ -217,17 +217,17 @@ TRACE_EVENT(iomap_iter,
__entry->ino = iter->inode->i_ino;
__entry->pos = iter->pos;
__entry->length = iomap_length(iter);
- __entry->processed = iter->processed;
+ __entry->status = iter->status;
__entry->flags = iter->flags;
__entry->ops = ops;
__entry->caller = caller;
),
- TP_printk("dev %d:%d ino 0x%llx pos 0x%llx length 0x%llx processed %lld flags %s (0x%x) ops %ps caller %pS",
+ TP_printk("dev %d:%d ino 0x%llx pos 0x%llx length 0x%llx status %d flags %s (0x%x) ops %ps caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->pos,
__entry->length,
- __entry->processed,
+ __entry->status,
__print_flags(__entry->flags, "|", IOMAP_FLAGS_STRINGS),
__entry->flags,
__entry->ops,
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 2b2938970da3..dd91f725ded6 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -32,8 +32,8 @@ static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
static int jffs2_unlink (struct inode *,struct dentry *);
static int jffs2_symlink (struct mnt_idmap *, struct inode *,
struct dentry *, const char *);
-static int jffs2_mkdir (struct mnt_idmap *, struct inode *,struct dentry *,
- umode_t);
+static struct dentry *jffs2_mkdir (struct mnt_idmap *, struct inode *,struct dentry *,
+ umode_t);
static int jffs2_rmdir (struct inode *,struct dentry *);
static int jffs2_mknod (struct mnt_idmap *, struct inode *,struct dentry *,
umode_t,dev_t);
@@ -446,8 +446,8 @@ static int jffs2_symlink (struct mnt_idmap *idmap, struct inode *dir_i,
}
-static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
- struct dentry *dentry, umode_t mode)
+static struct dentry *jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
+ struct dentry *dentry, umode_t mode)
{
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
@@ -464,7 +464,7 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
ri = jffs2_alloc_raw_inode();
if (!ri)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
c = JFFS2_SB_INFO(dir_i->i_sb);
@@ -477,7 +477,7 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
if (ret) {
jffs2_free_raw_inode(ri);
- return ret;
+ return ERR_PTR(ret);
}
inode = jffs2_new_inode(dir_i, mode, ri);
@@ -485,7 +485,7 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
if (IS_ERR(inode)) {
jffs2_free_raw_inode(ri);
jffs2_complete_reservation(c);
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
}
inode->i_op = &jffs2_dir_inode_operations;
@@ -584,11 +584,11 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
jffs2_complete_reservation(c);
d_instantiate_new(dentry, inode);
- return 0;
+ return NULL;
fail:
iget_failed(inode);
- return ret;
+ return ERR_PTR(ret);
}
static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
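
The jffs2 conversion follows the new ->mkdir contract applied across this series: return NULL when the passed-in dentry was instantiated and used as-is, return a different dentry if one was spliced in instead, and return an ERR_PTR() on failure. Reduced to a skeleton (my_new_inode stands in for the filesystem's inode allocation):

	static struct dentry *my_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				       struct dentry *dentry, umode_t mode)
	{
		struct inode *inode = my_new_inode(dir, S_IFDIR | mode);

		if (IS_ERR(inode))
			return ERR_CAST(inode);
		d_instantiate(dentry, inode);
		return NULL;	/* caller's dentry was used as-is */
	}
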
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index fc8ede43afde..65a218eba8fa 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -187,13 +187,13 @@ static int jfs_create(struct mnt_idmap *idmap, struct inode *dip,
* dentry - dentry of child directory
* mode - create mode (rwxrwxrwx).
*
- * RETURN: Errors from subroutines
+ * RETURN: ERR_PTR() of errors from subroutines.
*
* note:
* EACCES: user needs search+write permission on the parent directory
*/
-static int jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
- struct dentry *dentry, umode_t mode)
+static struct dentry *jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
+ struct dentry *dentry, umode_t mode)
{
int rc = 0;
tid_t tid; /* transaction id */
@@ -308,7 +308,7 @@ static int jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
out1:
jfs_info("jfs_mkdir: rc:%d", rc);
- return rc;
+ return ERR_PTR(rc);
}
/*
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 5f0f8b95f44c..d296aad70800 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -1230,24 +1230,24 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
return d_splice_alias(inode, dentry);
}
-static int kernfs_iop_mkdir(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static struct dentry *kernfs_iop_mkdir(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
{
struct kernfs_node *parent = dir->i_private;
struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
int ret;
if (!scops || !scops->mkdir)
- return -EPERM;
+ return ERR_PTR(-EPERM);
if (!kernfs_get_active(parent))
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
ret = scops->mkdir(parent, dentry->d_name.name, mode);
kernfs_put_active(parent);
- return ret;
+ return ERR_PTR(ret);
}
static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
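
Note the unconditional ERR_PTR(ret) at the end of kernfs_iop_mkdir(): because ERR_PTR(0) is NULL, a successful scops->mkdir() (ret == 0) maps straight onto the new "caller's dentry was used" success convention, while any negative errno becomes an error pointer, so a single expression covers both outcomes:

	return ERR_PTR(ret);	/* ret == 0 -> NULL (success), ret < 0 -> error */
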
diff --git a/fs/libfs.c b/fs/libfs.c
index dc042a975a56..6393d7c49ee6 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -2113,7 +2113,7 @@ struct timespec64 simple_inode_init_ts(struct inode *inode)
}
EXPORT_SYMBOL(simple_inode_init_ts);
-static inline struct dentry *get_stashed_dentry(struct dentry **stashed)
+struct dentry *stashed_dentry_get(struct dentry **stashed)
{
struct dentry *dentry;
@@ -2215,7 +2215,7 @@ int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
const struct stashed_operations *sops = mnt->mnt_sb->s_fs_info;
/* See if dentry can be reused. */
- path->dentry = get_stashed_dentry(stashed);
+ path->dentry = stashed_dentry_get(stashed);
if (path->dentry) {
sops->put_data(data);
goto out_path;
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 5d9c1406fe27..8938536d8d3c 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -104,15 +104,15 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
return add_nondir(dentry, inode);
}
-static int minix_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *minix_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode * inode;
int err;
inode = minix_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
inode_inc_link_count(dir);
minix_set_inode(inode, 0);
@@ -128,7 +128,7 @@ static int minix_mkdir(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
out:
- return err;
+ return ERR_PTR(err);
out_fail:
inode_dec_link_count(inode);
diff --git a/fs/mnt_idmapping.c b/fs/mnt_idmapping.c
index 7b1df8cc2821..a37991fdb194 100644
--- a/fs/mnt_idmapping.c
+++ b/fs/mnt_idmapping.c
@@ -6,6 +6,7 @@
#include <linux/mnt_idmapping.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
+#include <linux/seq_file.h>
#include "internal.h"
@@ -334,3 +335,53 @@ void mnt_idmap_put(struct mnt_idmap *idmap)
free_mnt_idmap(idmap);
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);
+
+int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map)
+{
+ struct uid_gid_map *map, *map_up;
+ u32 idx, nr_mappings;
+
+ if (!is_valid_mnt_idmap(idmap))
+ return 0;
+
+ /*
+ * Idmappings are shown relative to the caller's idmapping.
+ * This is both the most intuitive and most useful solution.
+ */
+ if (uid_map) {
+ map = &idmap->uid_map;
+ map_up = &current_user_ns()->uid_map;
+ } else {
+ map = &idmap->gid_map;
+ map_up = &current_user_ns()->gid_map;
+ }
+
+ for (idx = 0, nr_mappings = 0; idx < map->nr_extents; idx++) {
+ uid_t lower;
+ struct uid_gid_extent *extent;
+
+ if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
+ extent = &map->extent[idx];
+ else
+ extent = &map->forward[idx];
+
+ /*
+ * Verify that the whole range of the mapping can be
+ * resolved in the caller's idmapping. If it cannot be
+ * resolved, skip the mapping.
+ */
+ lower = map_id_range_up(map_up, extent->lower_first, extent->count);
+ if (lower == (uid_t) -1)
+ continue;
+
+ seq_printf(seq, "%u %u %u", extent->first, lower, extent->count);
+
+ seq->count++; /* mappings are separated by \0 */
+ if (seq_has_overflowed(seq))
+ return -EAGAIN;
+
+ nr_mappings++;
+ }
+
+ return nr_mappings;
+}
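
statmount_mnt_idmap() emits each mapping as a "first lower count" triple and bumps seq->count past the terminating NUL, so a consumer sees consecutive NUL-separated records. A userspace-side sketch of walking such a buffer; the buf/len/nr plumbing through statmount() is added elsewhere in this series and the names here are illustrative:

	#include <stdio.h>
	#include <string.h>

	static void walk_idmap(const char *buf, size_t len, unsigned int nr)
	{
		const char *p = buf, *end = buf + len;
		unsigned int first, lower, count, i;

		for (i = 0; i < nr && p < end; i++) {
			if (sscanf(p, "%u %u %u", &first, &lower, &count) == 3)
				printf("map %u: %u -> %u (count %u)\n",
				       i, first, lower, count);
			p += strlen(p) + 1;	/* step over the '\0' separator */
		}
	}
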
diff --git a/fs/mount.h b/fs/mount.h
index ffb613cdfeee..7aecf2a60472 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -5,6 +5,12 @@
#include <linux/ns_common.h>
#include <linux/fs_pin.h>
+extern struct list_head notify_list;
+
+typedef __u32 __bitwise mntns_flags_t;
+
+#define MNTNS_PROPAGATING ((__force mntns_flags_t)(1 << 0))
+
struct mnt_namespace {
struct ns_common ns;
struct mount * root;
@@ -20,12 +26,18 @@ struct mnt_namespace {
wait_queue_head_t poll;
struct rcu_head mnt_ns_rcu;
};
+ u64 seq_origin; /* Sequence number of origin mount namespace */
u64 event;
+#ifdef CONFIG_FSNOTIFY
+ __u32 n_fsnotify_mask;
+ struct fsnotify_mark_connector __rcu *n_fsnotify_marks;
+#endif
unsigned int nr_mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
struct rb_node mnt_ns_tree_node; /* node in the mnt_ns_tree */
struct list_head mnt_ns_list; /* entry in the sequential list of mounts namespace */
refcount_t passive; /* number references not pinning @mounts */
+ mntns_flags_t mntns_flags;
} __randomize_layout;
struct mnt_pcp {
@@ -76,6 +88,8 @@ struct mount {
#ifdef CONFIG_FSNOTIFY
struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
+ struct list_head to_notify; /* need to queue notification */
+ struct mnt_namespace *prev_ns; /* previous namespace (NULL if none) */
#endif
int mnt_id; /* mount identifier, reused */
u64 mnt_id_unique; /* mount ID unique until reboot */
@@ -156,6 +170,11 @@ static inline bool mnt_ns_attached(const struct mount *mnt)
return !RB_EMPTY_NODE(&mnt->mnt_node);
}
+static inline bool mnt_ns_empty(const struct mnt_namespace *ns)
+{
+ return RB_EMPTY_ROOT(&ns->mounts);
+}
+
static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
{
struct mnt_namespace *ns = mnt->mnt_ns;
@@ -177,3 +196,21 @@ static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
return container_of(ns, struct mnt_namespace, ns);
}
+
+#ifdef CONFIG_FSNOTIFY
+static inline void mnt_notify_add(struct mount *m)
+{
+ /* Optimize the case where there are no watches */
+ if ((m->mnt_ns && m->mnt_ns->n_fsnotify_marks) ||
+ (m->prev_ns && m->prev_ns->n_fsnotify_marks))
+ list_add_tail(&m->to_notify, &notify_list);
+ else
+ m->prev_ns = m->mnt_ns;
+}
+#else
+static inline void mnt_notify_add(struct mount *m)
+{
+}
+#endif
+
+struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry);
diff --git a/fs/mpage.c b/fs/mpage.c
index 82aecf372743..ad7844de87c3 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -107,7 +107,7 @@ static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
* don't make any buffers if there is only one buffer on
* the folio and the folio just needs to be set up to date
*/
- if (inode->i_blkbits == PAGE_SHIFT &&
+ if (inode->i_blkbits == folio_shift(folio) &&
buffer_uptodate(bh)) {
folio_mark_uptodate(folio);
return;
@@ -153,7 +153,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
struct folio *folio = args->folio;
struct inode *inode = folio->mapping->host;
const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+ const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
const unsigned blocksize = 1 << blkbits;
struct buffer_head *map_bh = &args->map_bh;
sector_t block_in_file;
@@ -161,7 +161,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
sector_t last_block_in_file;
sector_t first_block;
unsigned page_block;
- unsigned first_hole = blocks_per_page;
+ unsigned first_hole = blocks_per_folio;
struct block_device *bdev = NULL;
int length;
int fully_mapped = 1;
@@ -170,9 +170,6 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
unsigned relative_block;
gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
- /* MAX_BUF_PER_PAGE, for example */
- VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-
if (args->is_readahead) {
opf |= REQ_RAHEAD;
gfp |= __GFP_NORETRY | __GFP_NOWARN;
@@ -181,8 +178,8 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
if (folio_buffers(folio))
goto confused;
- block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
- last_block = block_in_file + args->nr_pages * blocks_per_page;
+ block_in_file = folio_pos(folio) >> blkbits;
+ last_block = block_in_file + ((args->nr_pages * PAGE_SIZE) >> blkbits);
last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
last_block = last_block_in_file;
@@ -204,7 +201,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
clear_buffer_mapped(map_bh);
break;
}
- if (page_block == blocks_per_page)
+ if (page_block == blocks_per_folio)
break;
page_block++;
block_in_file++;
@@ -216,7 +213,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
* Then do more get_blocks calls until we are done with this folio.
*/
map_bh->b_folio = folio;
- while (page_block < blocks_per_page) {
+ while (page_block < blocks_per_folio) {
map_bh->b_state = 0;
map_bh->b_size = 0;
@@ -229,7 +226,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
if (!buffer_mapped(map_bh)) {
fully_mapped = 0;
- if (first_hole == blocks_per_page)
+ if (first_hole == blocks_per_folio)
first_hole = page_block;
page_block++;
block_in_file++;
@@ -247,7 +244,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
goto confused;
}
- if (first_hole != blocks_per_page)
+ if (first_hole != blocks_per_folio)
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
@@ -260,7 +257,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
if (relative_block == nblocks) {
clear_buffer_mapped(map_bh);
break;
- } else if (page_block == blocks_per_page)
+ } else if (page_block == blocks_per_folio)
break;
page_block++;
block_in_file++;
@@ -268,8 +265,8 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
bdev = map_bh->b_bdev;
}
- if (first_hole != blocks_per_page) {
- folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
+ if (first_hole != blocks_per_folio) {
+ folio_zero_segment(folio, first_hole << blkbits, folio_size(folio));
if (first_hole == 0) {
folio_mark_uptodate(folio);
folio_unlock(folio);
@@ -303,10 +300,10 @@ alloc_new:
relative_block = block_in_file - args->first_logical_block;
nblocks = map_bh->b_size >> blkbits;
if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
- (first_hole != blocks_per_page))
+ (first_hole != blocks_per_folio))
args->bio = mpage_bio_submit_read(args->bio);
else
- args->last_block_in_bio = first_block + blocks_per_page - 1;
+ args->last_block_in_bio = first_block + blocks_per_folio - 1;
out:
return args->bio;
@@ -385,7 +382,7 @@ int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
struct mpage_readpage_args args = {
.folio = folio,
- .nr_pages = 1,
+ .nr_pages = folio_nr_pages(folio),
.get_block = get_block,
};
@@ -456,12 +453,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
struct address_space *mapping = folio->mapping;
struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
- const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+ const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
sector_t last_block;
sector_t block_in_file;
sector_t first_block;
unsigned page_block;
- unsigned first_unmapped = blocks_per_page;
+ unsigned first_unmapped = blocks_per_folio;
struct block_device *bdev = NULL;
int boundary = 0;
sector_t boundary_block = 0;
@@ -486,12 +483,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
*/
if (buffer_dirty(bh))
goto confused;
- if (first_unmapped == blocks_per_page)
+ if (first_unmapped == blocks_per_folio)
first_unmapped = page_block;
continue;
}
- if (first_unmapped != blocks_per_page)
+ if (first_unmapped != blocks_per_folio)
goto confused; /* hole -> non-hole */
if (!buffer_dirty(bh) || !buffer_uptodate(bh))
@@ -527,7 +524,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
* The page has no buffers: map it to disk
*/
BUG_ON(!folio_test_uptodate(folio));
- block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
+ block_in_file = folio_pos(folio) >> blkbits;
/*
* Whole page beyond EOF? Skip allocating blocks to avoid leaking
* space.
@@ -536,7 +533,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
goto page_is_mapped;
last_block = (i_size - 1) >> blkbits;
map_bh.b_folio = folio;
- for (page_block = 0; page_block < blocks_per_page; ) {
+ for (page_block = 0; page_block < blocks_per_folio; ) {
map_bh.b_state = 0;
map_bh.b_size = 1 << blkbits;
@@ -618,14 +615,14 @@ alloc_new:
BUG_ON(folio_test_writeback(folio));
folio_start_writeback(folio);
folio_unlock(folio);
- if (boundary || (first_unmapped != blocks_per_page)) {
+ if (boundary || (first_unmapped != blocks_per_folio)) {
bio = mpage_bio_submit_write(bio);
if (boundary_block) {
write_boundary_block(boundary_bdev,
boundary_block, 1 << blkbits);
}
} else {
- mpd->last_block_in_bio = first_block + blocks_per_page - 1;
+ mpd->last_block_in_bio = first_block + blocks_per_folio - 1;
}
goto out;
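
The arithmetic change is the crux of the large-folio conversion here: folio_pos(folio) >> blkbits equals the old (sector_t)folio->index << (PAGE_SHIFT - blkbits) for order-0 folios, but going through the byte offset first stays correct once folio_size(folio) exceeds PAGE_SIZE, and blocks_per_folio is likewise derived from the folio size:

	sector_t block_in_file = folio_pos(folio) >> inode->i_blkbits;
	unsigned int blocks_per_folio = folio_size(folio) >> inode->i_blkbits;
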
diff --git a/fs/namei.c b/fs/namei.c
index ecb7b95c2ca3..360a86ca1f02 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -125,6 +125,13 @@
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
+static inline void initname(struct filename *name)
+{
+ name->uptr = NULL;
+ name->aname = NULL;
+ atomic_set(&name->refcnt, 1);
+}
+
struct filename *
getname_flags(const char __user *filename, int flags)
{
@@ -203,10 +210,7 @@ getname_flags(const char __user *filename, int flags)
return ERR_PTR(-ENAMETOOLONG);
}
}
-
- atomic_set(&result->refcnt, 1);
- result->uptr = filename;
- result->aname = NULL;
+ initname(result);
audit_getname(result);
return result;
}
@@ -218,11 +222,6 @@ struct filename *getname_uflags(const char __user *filename, int uflags)
return getname_flags(filename, flags);
}
-struct filename *getname(const char __user * filename)
-{
- return getname_flags(filename, 0);
-}
-
struct filename *__getname_maybe_null(const char __user *pathname)
{
struct filename *name;
@@ -269,25 +268,27 @@ struct filename *getname_kernel(const char * filename)
return ERR_PTR(-ENAMETOOLONG);
}
memcpy((char *)result->name, filename, len);
- result->uptr = NULL;
- result->aname = NULL;
- atomic_set(&result->refcnt, 1);
+ initname(result);
audit_getname(result);
-
return result;
}
EXPORT_SYMBOL(getname_kernel);
void putname(struct filename *name)
{
+ int refcnt;
+
if (IS_ERR_OR_NULL(name))
return;
- if (WARN_ON_ONCE(!atomic_read(&name->refcnt)))
- return;
+ refcnt = atomic_read(&name->refcnt);
+ if (refcnt != 1) {
+ if (WARN_ON_ONCE(!refcnt))
+ return;
- if (!atomic_dec_and_test(&name->refcnt))
- return;
+ if (!atomic_dec_and_test(&name->refcnt))
+ return;
+ }
if (name->name != name->iname) {
__putname(name->name);
@@ -1670,6 +1671,8 @@ static struct dentry *lookup_dcache(const struct qstr *name,
* dentries - as the matter of fact, this only gets called
* when directory is guaranteed to have no in-lookup children
* at all.
+ * Will return -ENOENT if name isn't found and LOOKUP_CREATE wasn't passed.
+ * Will return -EEXIST if name is found and LOOKUP_EXCL was passed.
*/
struct dentry *lookup_one_qstr_excl(const struct qstr *name,
struct dentry *base,
@@ -1680,7 +1683,7 @@ struct dentry *lookup_one_qstr_excl(const struct qstr *name,
struct inode *dir = base->d_inode;
if (dentry)
- return dentry;
+ goto found;
/* Don't create child dentry for a dead directory. */
if (unlikely(IS_DEADDIR(dir)))
@@ -1695,6 +1698,17 @@ struct dentry *lookup_one_qstr_excl(const struct qstr *name,
dput(dentry);
dentry = old;
}
+found:
+ if (IS_ERR(dentry))
+ return dentry;
+ if (d_is_negative(dentry) && !(flags & LOOKUP_CREATE)) {
+ dput(dentry);
+ return ERR_PTR(-ENOENT);
+ }
+ if (d_is_positive(dentry) && (flags & LOOKUP_EXCL)) {
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
+ }
return dentry;
}
EXPORT_SYMBOL(lookup_one_qstr_excl);
@@ -2863,15 +2877,14 @@ static int lookup_one_common(struct mnt_idmap *idmap,
* Note that this routine is purely a helper for filesystem usage and should
* not be called by generic code.
*
- * The caller must hold base->i_mutex.
+ * No locks need be held - only a counted reference to @base is needed.
+ *
*/
struct dentry *try_lookup_one_len(const char *name, struct dentry *base, int len)
{
struct qstr this;
int err;
- WARN_ON_ONCE(!inode_is_locked(base->d_inode));
-
err = lookup_one_common(&nop_mnt_idmap, name, base, len, &this);
if (err)
return ERR_PTR(err);
@@ -3415,6 +3428,8 @@ static int may_open(struct mnt_idmap *idmap, const struct path *path,
if ((acc_mode & MAY_EXEC) && path_noexec(path))
return -EACCES;
break;
+ default:
+ VFS_BUG_ON_INODE(1, inode);
}
error = inode_permission(idmap, inode, MAY_OPEN | acc_mode);
@@ -3995,7 +4010,7 @@ static struct file *path_openat(struct nameidata *nd,
WARN_ON(1);
error = -EINVAL;
}
- fput(file);
+ fput_close(file);
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
@@ -4078,27 +4093,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
* '/', and a directory wasn't requested.
*/
if (last.name[last.len] && !want_dir)
- create_flags = 0;
+ create_flags &= ~LOOKUP_CREATE;
inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
dentry = lookup_one_qstr_excl(&last, path->dentry,
reval_flag | create_flags);
if (IS_ERR(dentry))
goto unlock;
- error = -EEXIST;
- if (d_is_positive(dentry))
- goto fail;
-
- /*
- * Special case - lookup gave negative, but... we had foo/bar/
- * From the vfs_mknod() POV we just have a negative dentry -
- * all is fine. Let's be bastards - you had / on the end, you've
- * been asking for (non-existent) directory. -ENOENT for you.
- */
- if (unlikely(!create_flags)) {
- error = -ENOENT;
- goto fail;
- }
if (unlikely(err2)) {
error = err2;
goto fail;
@@ -4129,7 +4130,8 @@ EXPORT_SYMBOL(kern_path_create);
void done_path_create(struct path *path, struct dentry *dentry)
{
- dput(dentry);
+ if (!IS_ERR(dentry))
+ dput(dentry);
inode_unlock(path->dentry->d_inode);
mnt_drop_write(path->mnt);
path_put(path);
@@ -4275,7 +4277,7 @@ SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, d
}
/**
- * vfs_mkdir - create directory
+ * vfs_mkdir - create directory returning correct dentry if possible
* @idmap: idmap of the mount the inode was found from
* @dir: inode of the parent directory
* @dentry: dentry of the child directory
@@ -4288,32 +4290,51 @@ SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, d
* care to map the inode according to @idmap before checking permissions.
* On non-idmapped mounts or if permission checking is to be performed on the
* raw inode simply pass @nop_mnt_idmap.
+ *
+ * In the event that the filesystem does not use @dentry but leaves it
+ * negative or unhashes it and possibly splices a different one returning it,
+ * the original dentry is dput() and the alternate is returned.
+ *
+ * In case of an error the dentry is dput() and an ERR_PTR() is returned.
*/
-int vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+struct dentry *vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int error;
unsigned max_links = dir->i_sb->s_max_links;
+ struct dentry *de;
error = may_create(idmap, dir, dentry);
if (error)
- return error;
+ goto err;
+ error = -EPERM;
if (!dir->i_op->mkdir)
- return -EPERM;
+ goto err;
mode = vfs_prepare_mode(idmap, dir, mode, S_IRWXUGO | S_ISVTX, 0);
error = security_inode_mkdir(dir, dentry, mode);
if (error)
- return error;
+ goto err;
+ error = -EMLINK;
if (max_links && dir->i_nlink >= max_links)
- return -EMLINK;
+ goto err;
- error = dir->i_op->mkdir(idmap, dir, dentry, mode);
- if (!error)
- fsnotify_mkdir(dir, dentry);
- return error;
+ de = dir->i_op->mkdir(idmap, dir, dentry, mode);
+ error = PTR_ERR(de);
+ if (IS_ERR(de))
+ goto err;
+ if (de) {
+ dput(dentry);
+ dentry = de;
+ }
+ fsnotify_mkdir(dir, dentry);
+ return dentry;
+
+err:
+ dput(dentry);
+ return ERR_PTR(error);
}
EXPORT_SYMBOL(vfs_mkdir);
@@ -4333,8 +4354,10 @@ retry:
error = security_path_mkdir(&path, dentry,
mode_strip_umask(path.dentry->d_inode, mode));
if (!error) {
- error = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
+ dentry = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
dentry, mode);
+ if (IS_ERR(dentry))
+ error = PTR_ERR(dentry);
}
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
@@ -4445,10 +4468,6 @@ retry:
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit3;
- if (!dentry->d_inode) {
- error = -ENOENT;
- goto exit4;
- }
error = security_path_rmdir(&path, dentry);
if (error)
goto exit4;
@@ -4579,7 +4598,7 @@ retry_deleg:
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
- if (last.name[last.len] || d_is_negative(dentry))
+ if (last.name[last.len])
goto slashes;
inode = dentry->d_inode;
ihold(inode);
@@ -4613,9 +4632,7 @@ exit1:
return error;
slashes:
- if (d_is_negative(dentry))
- error = -ENOENT;
- else if (d_is_dir(dentry))
+ if (d_is_dir(dentry))
error = -EISDIR;
else
error = -ENOTDIR;
@@ -5115,7 +5132,8 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
struct qstr old_last, new_last;
int old_type, new_type;
struct inode *delegated_inode = NULL;
- unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
+ unsigned int lookup_flags = 0, target_flags =
+ LOOKUP_RENAME_TARGET | LOOKUP_CREATE;
bool should_retry = false;
int error = -EINVAL;
@@ -5128,6 +5146,8 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
if (flags & RENAME_EXCHANGE)
target_flags = 0;
+ if (flags & RENAME_NOREPLACE)
+ target_flags |= LOOKUP_EXCL;
retry:
error = filename_parentat(olddfd, from, lookup_flags, &old_path,
@@ -5169,23 +5189,12 @@ retry_deleg:
error = PTR_ERR(old_dentry);
if (IS_ERR(old_dentry))
goto exit3;
- /* source must exist */
- error = -ENOENT;
- if (d_is_negative(old_dentry))
- goto exit4;
new_dentry = lookup_one_qstr_excl(&new_last, new_path.dentry,
lookup_flags | target_flags);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto exit4;
- error = -EEXIST;
- if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry))
- goto exit5;
if (flags & RENAME_EXCHANGE) {
- error = -ENOENT;
- if (d_is_negative(new_dentry))
- goto exit5;
-
if (!d_is_dir(new_dentry)) {
error = -ENOTDIR;
if (new_last.name[new_last.len])
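
With -ENOENT and -EEXIST folded into lookup_one_qstr_excl(), create-side callers no longer need their own d_is_negative()/d_is_positive() checks; a caller wanting "must not exist yet" semantics reduces to (a sketch, error handling abbreviated):

	dentry = lookup_one_qstr_excl(&last, path->dentry,
				      LOOKUP_CREATE | LOOKUP_EXCL);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);	/* -EEXIST if the name already exists */
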
diff --git a/fs/namespace.c b/fs/namespace.c
index 8f1000f9f3df..6100e5b962a6 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -81,15 +81,23 @@ static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
static DEFINE_SEQLOCK(mnt_ns_tree_lock);
+#ifdef CONFIG_FSNOTIFY
+LIST_HEAD(notify_list); /* protected by namespace_sem */
+#endif
static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */
static LIST_HEAD(mnt_ns_list); /* protected by mnt_ns_tree_lock */
+enum mount_kattr_flags_t {
+ MOUNT_KATTR_RECURSE = (1 << 0),
+ MOUNT_KATTR_IDMAP_REPLACE = (1 << 1),
+};
+
struct mount_kattr {
unsigned int attr_set;
unsigned int attr_clr;
unsigned int propagation;
unsigned int lookup_flags;
- bool recurse;
+ enum mount_kattr_flags_t kflags;
struct user_namespace *mnt_userns;
struct mnt_idmap *mnt_idmap;
};
@@ -163,6 +171,7 @@ static void mnt_ns_release(struct mnt_namespace *ns)
{
/* keep alive for {list,stat}mount() */
if (refcount_dec_and_test(&ns->passive)) {
+ fsnotify_mntns_delete(ns);
put_user_ns(ns->user_ns);
kfree(ns);
}
@@ -998,6 +1007,17 @@ static inline int check_mnt(struct mount *mnt)
return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
+static inline bool check_anonymous_mnt(struct mount *mnt)
+{
+ u64 seq;
+
+ if (!is_anon_ns(mnt->mnt_ns))
+ return false;
+
+ seq = mnt->mnt_ns->seq_origin;
+ return !seq || (seq == current->nsproxy->mnt_ns->seq);
+}
+
/*
* vfsmount lock must be held for write
*/
@@ -1176,6 +1196,8 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
ns->mnt_first_node = &mnt->mnt_node;
rb_link_node(&mnt->mnt_node, parent, link);
rb_insert_color(&mnt->mnt_node, &ns->mounts);
+
+ mnt_notify_add(mnt);
}
/*
@@ -1723,6 +1745,50 @@ int may_umount(struct vfsmount *mnt)
EXPORT_SYMBOL(may_umount);
+#ifdef CONFIG_FSNOTIFY
+static void mnt_notify(struct mount *p)
+{
+ if (!p->prev_ns && p->mnt_ns) {
+ fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
+ } else if (p->prev_ns && !p->mnt_ns) {
+ fsnotify_mnt_detach(p->prev_ns, &p->mnt);
+ } else if (p->prev_ns == p->mnt_ns) {
+ fsnotify_mnt_move(p->mnt_ns, &p->mnt);
+ } else {
+ fsnotify_mnt_detach(p->prev_ns, &p->mnt);
+ fsnotify_mnt_attach(p->mnt_ns, &p->mnt);
+ }
+ p->prev_ns = p->mnt_ns;
+}
+
+static void notify_mnt_list(void)
+{
+ struct mount *m, *tmp;
+ /*
+ * Notify about mounts that were added/reparented/detached/remain
+ * connected after unmount.
+ */
+ list_for_each_entry_safe(m, tmp, &notify_list, to_notify) {
+ mnt_notify(m);
+ list_del_init(&m->to_notify);
+ }
+}
+
+static bool need_notify_mnt_list(void)
+{
+ return !list_empty(&notify_list);
+}
+#else
+static void notify_mnt_list(void)
+{
+}
+
+static bool need_notify_mnt_list(void)
+{
+ return false;
+}
+#endif
+
static void namespace_unlock(void)
{
struct hlist_head head;
@@ -1733,7 +1799,18 @@ static void namespace_unlock(void)
hlist_move_list(&unmounted, &head);
list_splice_init(&ex_mountpoints, &list);
- up_write(&namespace_sem);
+ if (need_notify_mnt_list()) {
+ /*
+ * No point blocking out concurrent readers while notifications
+ * are sent. This will also allow statmount()/listmount() to run
+ * concurrently.
+ */
+ downgrade_write(&namespace_sem);
+ notify_mnt_list();
+ up_read(&namespace_sem);
+ } else {
+ up_write(&namespace_sem);
+ }
shrink_dentry_list(&list);
@@ -1846,6 +1923,19 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
change_mnt_propagation(p, MS_PRIVATE);
if (disconnect)
hlist_add_head(&p->mnt_umount, &unmounted);
+
+ /*
+ * At this point p->mnt_ns is NULL; notification will be queued
+ * only if
+ *
+ * - p->prev_ns is non-NULL *and*
+ * - p->prev_ns->n_fsnotify_marks is non-NULL
+ *
+ * This will preclude queuing the mount if this is a cleanup
+ * after a failed copy_tree() or destruction of an anonymous
+ * namespace, etc.
+ */
+ mnt_notify_add(p);
}
}
@@ -2026,6 +2116,7 @@ static void warn_mandlock(void)
static int can_umount(const struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
+ struct super_block *sb = path->dentry->d_sb;
if (!may_mount())
return -EPERM;
@@ -2035,7 +2126,7 @@ static int can_umount(const struct path *path, int flags)
return -EINVAL;
if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
return -EINVAL;
- if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
+ if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
@@ -2145,16 +2236,24 @@ struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool pr
}
}
+struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry)
+{
+ if (!is_mnt_ns_file(dentry))
+ return NULL;
+
+ return to_mnt_ns(get_proc_ns(dentry->d_inode));
+}
+
static bool mnt_ns_loop(struct dentry *dentry)
{
/* Could bind mounting the mount namespace inode cause a
* mount namespace loop?
*/
- struct mnt_namespace *mnt_ns;
- if (!is_mnt_ns_file(dentry))
+ struct mnt_namespace *mnt_ns = mnt_ns_from_dentry(dentry);
+
+ if (!mnt_ns)
return false;
- mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
@@ -2246,22 +2345,75 @@ struct vfsmount *collect_mounts(const struct path *path)
static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);
+static inline bool must_dissolve(struct mnt_namespace *mnt_ns)
+{
+ /*
+ * This mount belonged to an anonymous mount namespace
+ * but was moved to a non-anonymous mount namespace and
+ * then unmounted.
+ */
+ if (unlikely(!mnt_ns))
+ return false;
+
+ /*
+ * This mount belongs to a non-anonymous mount namespace
+ * and we know that such a mount can never transition to
+ * an anonymous mount namespace again.
+ */
+ if (!is_anon_ns(mnt_ns)) {
+ /*
+ * A detached mount either belongs to an anonymous mount
+ * namespace or a non-anonymous mount namespace. It
+ * should never belong to something purely internal.
+ */
+ VFS_WARN_ON_ONCE(mnt_ns == MNT_NS_INTERNAL);
+ return false;
+ }
+
+ return true;
+}
+
void dissolve_on_fput(struct vfsmount *mnt)
{
struct mnt_namespace *ns;
- namespace_lock();
- lock_mount_hash();
- ns = real_mount(mnt)->mnt_ns;
- if (ns) {
- if (is_anon_ns(ns))
- umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
- else
- ns = NULL;
+ struct mount *m = real_mount(mnt);
+
+ scoped_guard(rcu) {
+ if (!must_dissolve(READ_ONCE(m->mnt_ns)))
+ return;
}
- unlock_mount_hash();
- namespace_unlock();
- if (ns)
- free_mnt_ns(ns);
+
+ scoped_guard(rwsem_write, &namespace_sem) {
+ ns = m->mnt_ns;
+ if (!must_dissolve(ns))
+ return;
+
+ /*
+ * After must_dissolve() we know that this is a detached
+ * mount in an anonymous mount namespace.
+ *
+ * Now when mnt_has_parent() reports that this mount
+ * tree has a parent, we know that this anonymous mount
+ * tree has been moved to another anonymous mount
+ * namespace.
+ *
+ * So when closing this file we cannot unmount the mount
+ * tree. This will be done when the file referring to
+ * the root of the anonymous mount namespace will be
+ * closed (It could already be closed but it would sync
+ * on @namespace_sem and wait for us to finish.).
+ */
+ if (mnt_has_parent(m))
+ return;
+
+ lock_mount_hash();
+ umount_tree(m, UMOUNT_CONNECTED);
+ unlock_mount_hash();
+ }
+
+ /* Make sure we notice when we leak mounts. */
+ VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
+ free_mnt_ns(ns);
}
void drop_collected_mounts(struct vfsmount *mnt)
@@ -2287,6 +2439,28 @@ bool has_locked_children(struct mount *mnt, struct dentry *dentry)
return false;
}
+/*
+ * Check that there aren't references to earlier/same mount namespaces in the
+ * specified subtree. Such references can act as pins for mount namespaces
+ * that aren't checked by the mount-cycle checking code, thereby allowing
+ * cycles to be made.
+ */
+static bool check_for_nsfs_mounts(struct mount *subtree)
+{
+ struct mount *p;
+ bool ret = false;
+
+ lock_mount_hash();
+ for (p = subtree; p; p = next_mnt(p, subtree))
+ if (mnt_ns_loop(p->mnt.mnt_root))
+ goto out;
+
+ ret = true;
+out:
+ unlock_mount_hash();
+ return ret;
+}
+
/**
* clone_private_mount - create a private clone of a path
* @path: path to clone
@@ -2295,6 +2469,8 @@ bool has_locked_children(struct mount *mnt, struct dentry *dentry)
* will not be attached anywhere in the namespace and will be private (i.e.
* changes to the originating mount won't be propagated into this).
*
+ * This assumes caller has called or done the equivalent of may_mount().
+ *
* Release with mntput().
*/
struct vfsmount *clone_private_mount(const struct path *path)
@@ -2302,30 +2478,36 @@ struct vfsmount *clone_private_mount(const struct path *path)
struct mount *old_mnt = real_mount(path->mnt);
struct mount *new_mnt;
- down_read(&namespace_sem);
+ scoped_guard(rwsem_read, &namespace_sem)
if (IS_MNT_UNBINDABLE(old_mnt))
- goto invalid;
+ return ERR_PTR(-EINVAL);
- if (!check_mnt(old_mnt))
- goto invalid;
+ if (mnt_has_parent(old_mnt)) {
+ if (!check_mnt(old_mnt))
+ return ERR_PTR(-EINVAL);
+ } else {
+ if (!is_mounted(&old_mnt->mnt))
+ return ERR_PTR(-EINVAL);
+
+ /* Make sure this isn't something purely kernel internal. */
+ if (!is_anon_ns(old_mnt->mnt_ns))
+ return ERR_PTR(-EINVAL);
+
+ /* Make sure we don't create mount namespace loops. */
+ if (!check_for_nsfs_mounts(old_mnt))
+ return ERR_PTR(-EINVAL);
+ }
if (has_locked_children(old_mnt, path->dentry))
- goto invalid;
+ return ERR_PTR(-EINVAL);
new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
- up_read(&namespace_sem);
-
if (IS_ERR(new_mnt))
- return ERR_CAST(new_mnt);
+ return ERR_PTR(-EINVAL);
/* Longterm mount to be removed by kern_unmount*() */
new_mnt->mnt_ns = MNT_NS_INTERNAL;
-
return &new_mnt->mnt;
-
-invalid:
- up_read(&namespace_sem);
- return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(clone_private_mount);
@@ -2424,6 +2606,7 @@ int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
enum mnt_tree_flags_t {
MNT_TREE_MOVE = BIT(0),
MNT_TREE_BENEATH = BIT(1),
+ MNT_TREE_PROPAGATION = BIT(2),
};
/**
@@ -2547,6 +2730,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
dest_mp = smp;
unhash_mnt(source_mnt);
attach_mnt(source_mnt, top_mnt, dest_mp, beneath);
+ mnt_notify_add(source_mnt);
touch_mnt_namespace(source_mnt->mnt_ns);
} else {
if (source_mnt->mnt_ns) {
@@ -2773,6 +2957,71 @@ static int do_change_type(struct path *path, int ms_flags)
return err;
}
+/* may_copy_tree() - check if a mount tree can be copied
+ * @path: path to the mount tree to be copied
+ *
+ * This helper checks if the caller may copy the mount tree starting
+ * from @path->mnt. The caller may copy the mount tree under the
+ * following circumstances:
+ *
+ * (1) The caller is located in the mount namespace of the mount tree.
+ * This also implies that the mount does not belong to an anonymous
+ * mount namespace.
+ * (2) The caller tries to copy an nsfs mount referring to a mount
+ * namespace, i.e., the caller is trying to copy a mount namespace
+ * entry from nsfs.
+ * (3) The caller tries to copy a pidfs mount referring to a pidfd.
+ * (4) The caller is trying to copy a mount tree that belongs to an
+ * anonymous mount namespace.
+ *
+ * For that to be safe, this helper enforces that the origin mount
+ * namespace the anonymous mount namespace was created from is the
+ * same as the caller's mount namespace by comparing the sequence
+ * numbers.
+ *
+ * This is not strictly necessary. The current semantics of the new
+ * mount api enforce that the caller must be located in the same
+ * mount namespace as the mount tree it interacts with. Using the
+ * origin sequence number preserves these semantics even for
+ * anonymous mount namespaces. However, one could envision extending
+ * the api to directly operate across mount namespaces if needed.
+ *
+ * The ownership of a non-anonymous mount namespace such as the
+ * caller's cannot change.
+ * => We know that the caller's mount namespace is stable.
+ *
+ * If the origin sequence number of the anonymous mount namespace is
+ * the same as the sequence number of the caller's mount namespace.
+ * => The owning namespaces are the same.
+ *
+ * ==> The earlier capability check on the owning namespace of the
+ * caller's mount namespace ensures that the caller has the
+ * ability to copy the mount tree.
+ *
+ * Returns true if the mount tree can be copied, false otherwise.
+ */
+static inline bool may_copy_tree(struct path *path)
+{
+ struct mount *mnt = real_mount(path->mnt);
+ const struct dentry_operations *d_op;
+
+ if (check_mnt(mnt))
+ return true;
+
+ d_op = path->dentry->d_op;
+ if (d_op == &ns_dentry_operations)
+ return true;
+
+ if (d_op == &pidfs_dentry_operations)
+ return true;
+
+ if (!is_mounted(path->mnt))
+ return false;
+
+ return check_anonymous_mnt(mnt);
+}
+
+
static struct mount *__do_loopback(struct path *old_path, int recurse)
{
struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
@@ -2780,13 +3029,8 @@ static struct mount *__do_loopback(struct path *old_path, int recurse)
if (IS_MNT_UNBINDABLE(old))
return mnt;
- if (!check_mnt(old)) {
- const struct dentry_operations *d_op = old_path->dentry->d_op;
-
- if (d_op != &ns_dentry_operations &&
- d_op != &pidfs_dentry_operations)
- return mnt;
- }
+ if (!may_copy_tree(old_path))
+ return mnt;
if (!recurse && has_locked_children(old, old_path->dentry))
return mnt;
@@ -2853,15 +3097,30 @@ out:
static struct file *open_detached_copy(struct path *path, bool recursive)
{
- struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
- struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
+ struct mnt_namespace *ns, *mnt_ns = current->nsproxy->mnt_ns, *src_mnt_ns;
+ struct user_namespace *user_ns = mnt_ns->user_ns;
struct mount *mnt, *p;
struct file *file;
+ ns = alloc_mnt_ns(user_ns, true);
if (IS_ERR(ns))
return ERR_CAST(ns);
namespace_lock();
+
+ /*
+ * Record the sequence number of the source mount namespace.
+ * This needs to hold namespace_sem to ensure that the mount
+ * doesn't get attached.
+ */
+ if (is_mounted(path->mnt)) {
+ src_mnt_ns = real_mount(path->mnt)->mnt_ns;
+ if (is_anon_ns(src_mnt_ns))
+ ns->seq_origin = src_mnt_ns->seq_origin;
+ else
+ ns->seq_origin = src_mnt_ns->seq;
+ }
+
mnt = __do_loopback(path, recursive);
if (IS_ERR(mnt)) {
namespace_unlock();
@@ -2889,24 +3148,22 @@ static struct file *open_detached_copy(struct path *path, bool recursive)
return file;
}
-SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
+static struct file *vfs_open_tree(int dfd, const char __user *filename, unsigned int flags)
{
- struct file *file;
- struct path path;
+ int ret;
+ struct path path __free(path_put) = {};
int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
bool detached = flags & OPEN_TREE_CLONE;
- int error;
- int fd;
BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
OPEN_TREE_CLOEXEC))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (flags & AT_NO_AUTOMOUNT)
lookup_flags &= ~LOOKUP_AUTOMOUNT;
@@ -2916,27 +3173,32 @@ SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, fl
lookup_flags |= LOOKUP_EMPTY;
if (detached && !may_mount())
- return -EPERM;
+ return ERR_PTR(-EPERM);
+
+ ret = user_path_at(dfd, filename, lookup_flags, &path);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+
+ if (detached)
+ return open_detached_copy(&path, flags & AT_RECURSIVE);
+
+ return dentry_open(&path, O_PATH, current_cred());
+}
+
+SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
+{
+ int fd;
+ struct file *file __free(fput) = NULL;
+
+ file = vfs_open_tree(dfd, filename, flags);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
fd = get_unused_fd_flags(flags & O_CLOEXEC);
if (fd < 0)
return fd;
- error = user_path_at(dfd, filename, lookup_flags, &path);
- if (unlikely(error)) {
- file = ERR_PTR(error);
- } else {
- if (detached)
- file = open_detached_copy(&path, flags & AT_RECURSIVE);
- else
- file = dentry_open(&path, O_PATH, current_cred());
- path_put(&path);
- }
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- return PTR_ERR(file);
- }
- fd_install(fd, file);
+ fd_install(fd, no_free_ptr(file));
return fd;
}
@@ -3123,28 +3385,6 @@ static inline int tree_contains_unbindable(struct mount *mnt)
return 0;
}
-/*
- * Check that there aren't references to earlier/same mount namespaces in the
- * specified subtree. Such references can act as pins for mount namespaces
- * that aren't checked by the mount-cycle checking code, thereby allowing
- * cycles to be made.
- */
-static bool check_for_nsfs_mounts(struct mount *subtree)
-{
- struct mount *p;
- bool ret = false;
-
- lock_mount_hash();
- for (p = subtree; p; p = next_mnt(p, subtree))
- if (mnt_ns_loop(p->mnt.mnt_root))
- goto out;
-
- ret = true;
-out:
- unlock_mount_hash();
- return ret;
-}
-
static int do_set_group(struct path *from_path, struct path *to_path)
{
struct mount *from, *to;
@@ -3320,8 +3560,56 @@ static int can_move_mount_beneath(const struct path *from,
return 0;
}
-static int do_move_mount(struct path *old_path, struct path *new_path,
- bool beneath)
+/* may_use_mount() - check if a mount tree can be used
+ * @mnt: vfsmount to be used
+ *
+ * This helper checks if the caller may use the mount tree starting
+ * from @path->mnt. The caller may use the mount tree under the
+ * following circumstances:
+ *
+ * (1) The caller is located in the mount namespace of the mount tree.
+ * This also implies that the mount does not belong to an anonymous
+ * mount namespace.
+ * (2) The caller is trying to use a mount tree that belongs to an
+ * anonymous mount namespace.
+ *
+ * For that to be safe, this helper enforces that the origin mount
+ * namespace the anonymous mount namespace was created from is the
+ * same as the caller's mount namespace by comparing the sequence
+ * numbers.
+ *
+ * The ownership of a non-anonymous mount namespace such as the
+ * caller's cannot change.
+ * => We know that the caller's mount namespace is stable.
+ *
+ * If the origin sequence number of the anonymous mount namespace is
+ * the same as the sequence number of the caller's mount namespace.
+ * => The owning namespaces are the same.
+ *
+ * ==> The earlier capability check on the owning namespace of the
+ * caller's mount namespace ensures that the caller has the
+ * ability to use the mount tree.
+ *
+ * Returns true if the mount tree can be used, false otherwise.
+ */
+static inline bool may_use_mount(struct mount *mnt)
+{
+ if (check_mnt(mnt))
+ return true;
+
+ /*
+ * Make sure that no one unmounted the target path or somehow
+ * managed to get their hands on something purely kernel
+ * internal.
+ */
+ if (!is_mounted(&mnt->mnt))
+ return false;
+
+ return check_anonymous_mnt(mnt);
+}
+
+static int do_move_mount(struct path *old_path,
+ struct path *new_path, enum mnt_tree_flags_t flags)
{
struct mnt_namespace *ns;
struct mount *p;
@@ -3329,8 +3617,7 @@ static int do_move_mount(struct path *old_path, struct path *new_path,
struct mount *parent;
struct mountpoint *mp, *old_mp;
int err;
- bool attached;
- enum mnt_tree_flags_t flags = 0;
+ bool attached, beneath = flags & MNT_TREE_BENEATH;
mp = do_lock_mount(new_path, beneath);
if (IS_ERR(mp))
@@ -3346,8 +3633,7 @@ static int do_move_mount(struct path *old_path, struct path *new_path,
ns = old->mnt_ns;
err = -EINVAL;
- /* The mountpoint must be in our namespace. */
- if (!check_mnt(p))
+ if (!may_use_mount(p))
goto out;
/* The thing moved must be mounted... */
@@ -3358,6 +3644,32 @@ static int do_move_mount(struct path *old_path, struct path *new_path,
if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
goto out;
+ if (is_anon_ns(ns)) {
+ /*
+ * Ending up with two files referring to the root of the
+ * same anonymous mount namespace would cause an error
+ * as this would mean trying to move the same mount
+ * twice into the mount tree which would be rejected
+ * later. But be explicit about it right here.
+ */
+ if (is_anon_ns(p->mnt_ns) && ns == p->mnt_ns)
+ goto out;
+
+ /*
+ * If this is an anonymous mount tree ensure that mount
+ * propagation can detect mounts that were just
+ * propagated to the target mount tree so we don't
+ * propagate onto them.
+ */
+ ns->mntns_flags |= MNTNS_PROPAGATING;
+ } else if (is_anon_ns(p->mnt_ns)) {
+ /*
+ * Don't allow moving an attached mount tree to an
+ * anonymous mount tree.
+ */
+ goto out;
+ }
+
if (old->mnt.mnt_flags & MNT_LOCKED)
goto out;
@@ -3400,6 +3712,9 @@ static int do_move_mount(struct path *old_path, struct path *new_path,
if (err)
goto out;
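+ /*
+ * The move succeeded and propagation into the target tree is
+ * complete, so the MNTNS_PROPAGATING marker set above can be
+ * cleared again.
+ */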
+ if (is_anon_ns(ns))
+ ns->mntns_flags &= ~MNTNS_PROPAGATING;
+
/* if the mount is moved, it should no longer expire
* automatically */
list_del_init(&old->mnt_expire);
@@ -3408,10 +3723,13 @@ static int do_move_mount(struct path *old_path, struct path *new_path,
out:
unlock_mount(mp);
if (!err) {
- if (attached)
+ if (attached) {
mntput_no_expire(parent);
- else
+ } else {
+ /* Make sure we notice when we leak mounts. */
+ VFS_WARN_ON_ONCE(!mnt_ns_empty(ns));
free_mnt_ns(ns);
+ }
}
return err;
}
@@ -3428,7 +3746,7 @@ static int do_move_mount_old(struct path *path, const char *old_name)
if (err)
return err;
- err = do_move_mount(&old_path, path, false);
+ err = do_move_mount(&old_path, path, 0);
path_put(&old_path);
return err;
}
@@ -4269,6 +4587,21 @@ err_unlock:
return ret;
}
+static inline int vfs_move_mount(struct path *from_path, struct path *to_path,
+ enum mnt_tree_flags_t mflags)
+{
+ int ret;
+
+ ret = security_move_mount(from_path, to_path);
+ if (ret)
+ return ret;
+
+ if (mflags & MNT_TREE_PROPAGATION)
+ return do_set_group(from_path, to_path);
+
+ return do_move_mount(from_path, to_path, mflags);
+}
+
/*
* Move a mount from one place to another. In combination with
* fsopen()/fsmount() this is used to install a new mount and in combination
@@ -4282,8 +4615,12 @@ SYSCALL_DEFINE5(move_mount,
int, to_dfd, const char __user *, to_pathname,
unsigned int, flags)
{
- struct path from_path, to_path;
- unsigned int lflags;
+ struct path to_path __free(path_put) = {};
+ struct path from_path __free(path_put) = {};
+ struct filename *to_name __free(putname) = NULL;
+ struct filename *from_name __free(putname) = NULL;
+ unsigned int lflags, uflags;
+ enum mnt_tree_flags_t mflags = 0;
int ret = 0;
if (!may_mount())
@@ -4296,43 +4633,53 @@ SYSCALL_DEFINE5(move_mount,
(MOVE_MOUNT_BENEATH | MOVE_MOUNT_SET_GROUP))
return -EINVAL;
- /* If someone gives a pathname, they aren't permitted to move
- * from an fd that requires unmount as we can't get at the flag
- * to clear it afterwards.
- */
+ if (flags & MOVE_MOUNT_SET_GROUP) mflags |= MNT_TREE_PROPAGATION;
+ if (flags & MOVE_MOUNT_BENEATH) mflags |= MNT_TREE_BENEATH;
+
lflags = 0;
if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
- if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
-
- ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
- if (ret < 0)
- return ret;
+ uflags = 0;
+ if (flags & MOVE_MOUNT_F_EMPTY_PATH) uflags = AT_EMPTY_PATH;
+ from_name = getname_maybe_null(from_pathname, uflags);
+ if (IS_ERR(from_name))
+ return PTR_ERR(from_name);
lflags = 0;
if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
- if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
+ uflags = 0;
+ if (flags & MOVE_MOUNT_T_EMPTY_PATH) uflags = AT_EMPTY_PATH;
+ to_name = getname_maybe_null(to_pathname, uflags);
+ if (IS_ERR(to_name))
+ return PTR_ERR(to_name);
+
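+ /*
+ * With AT_EMPTY_PATH and a NULL pathname the dfd itself names the
+ * target: take the file's path directly instead of doing a lookup.
+ */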
+ if (!to_name && to_dfd >= 0) {
+ CLASS(fd_raw, f_to)(to_dfd);
+ if (fd_empty(f_to))
+ return -EBADF;
+
+ to_path = fd_file(f_to)->f_path;
+ path_get(&to_path);
+ } else {
+ ret = filename_lookup(to_dfd, to_name, lflags, &to_path, NULL);
+ if (ret)
+ return ret;
+ }
- ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
- if (ret < 0)
- goto out_from;
+ if (!from_name && from_dfd >= 0) {
+ CLASS(fd_raw, f_from)(from_dfd);
+ if (fd_empty(f_from))
+ return -EBADF;
- ret = security_move_mount(&from_path, &to_path);
- if (ret < 0)
- goto out_to;
+ return vfs_move_mount(&fd_file(f_from)->f_path, &to_path, mflags);
+ }
- if (flags & MOVE_MOUNT_SET_GROUP)
- ret = do_set_group(&from_path, &to_path);
- else
- ret = do_move_mount(&from_path, &to_path,
- (flags & MOVE_MOUNT_BENEATH));
+ ret = filename_lookup(from_dfd, from_name, lflags, &from_path, NULL);
+ if (ret)
+ return ret;
-out_to:
- path_put(&to_path);
-out_from:
- path_put(&from_path);
- return ret;
+ return vfs_move_mount(&from_path, &to_path, mflags);
}
/*
@@ -4468,6 +4815,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
list_del_init(&new_mnt->mnt_expire);
put_mountpoint(root_mp);
unlock_mount_hash();
+ mnt_notify_add(root_mnt);
+ mnt_notify_add(new_mnt);
chroot_fs_refs(&root, &new);
error = 0;
out4:
@@ -4512,11 +4861,10 @@ static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
return -EINVAL;
/*
- * Once a mount has been idmapped we don't allow it to change its
- * mapping. It makes things simpler and callers can just create
- * another bind-mount they can idmap if they want to.
+ * We only allow a mount to change its idmapping if it has
+ * never been accessible to userspace.
*/
- if (is_idmapped_mnt(m))
+ if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE) && is_idmapped_mnt(m))
return -EPERM;
/* The underlying filesystem doesn't support idmapped mounts yet. */
@@ -4576,7 +4924,7 @@ static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
break;
}
- if (!kattr->recurse)
+ if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
return 0;
}
@@ -4606,18 +4954,16 @@ static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
{
+ struct mnt_idmap *old_idmap;
+
if (!kattr->mnt_idmap)
return;
- /*
- * Pairs with smp_load_acquire() in mnt_idmap().
- *
- * Since we only allow a mount to change the idmapping once and
- * verified this in can_idmap_mount() we know that the mount has
- * @nop_mnt_idmap attached to it. So there's no need to drop any
- * references.
- */
+ old_idmap = mnt_idmap(&mnt->mnt);
+
+ /* Pairs with smp_load_acquire() in mnt_idmap(). */
smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
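+ /*
+ * Drop the reference to the previous idmapping. With
+ * MOUNT_KATTR_IDMAP_REPLACE this may be a real idmapping rather
+ * than just nop_mnt_idmap.
+ */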
+ mnt_idmap_put(old_idmap);
}
static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
@@ -4637,7 +4983,7 @@ static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
if (kattr->propagation)
change_mnt_propagation(m, kattr->propagation);
- if (!kattr->recurse)
+ if (!(kattr->kflags & MOUNT_KATTR_RECURSE))
break;
}
touch_mnt_namespace(mnt->mnt_ns);
@@ -4667,7 +5013,7 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
*/
namespace_lock();
if (kattr->propagation == MS_SHARED) {
- err = invent_group_ids(mnt, kattr->recurse);
+ err = invent_group_ids(mnt, kattr->kflags & MOUNT_KATTR_RECURSE);
if (err) {
namespace_unlock();
return err;
@@ -4718,7 +5064,7 @@ out:
}
static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
- struct mount_kattr *kattr, unsigned int flags)
+ struct mount_kattr *kattr)
{
struct ns_common *ns;
struct user_namespace *mnt_userns;
@@ -4726,13 +5072,23 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
return 0;
- /*
- * We currently do not support clearing an idmapped mount. If this ever
- * is a use-case we can revisit this but for now let's keep it simple
- * and not allow it.
- */
- if (attr->attr_clr & MOUNT_ATTR_IDMAP)
- return -EINVAL;
+ if (attr->attr_clr & MOUNT_ATTR_IDMAP) {
+ /*
+ * We can only remove an idmapping if it's never been
+ * exposed to userspace.
+ */
+ if (!(kattr->kflags & MOUNT_KATTR_IDMAP_REPLACE))
+ return -EINVAL;
+
+ /*
+ * Removal of idmappings is equivalent to setting
+ * nop_mnt_idmap.
+ */
+ if (!(attr->attr_set & MOUNT_ATTR_IDMAP)) {
+ kattr->mnt_idmap = &nop_mnt_idmap;
+ return 0;
+ }
+ }
if (attr->userns_fd > INT_MAX)
return -EINVAL;
@@ -4769,22 +5125,8 @@ static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
}
static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
- struct mount_kattr *kattr, unsigned int flags)
+ struct mount_kattr *kattr)
{
- unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
-
- if (flags & AT_NO_AUTOMOUNT)
- lookup_flags &= ~LOOKUP_AUTOMOUNT;
- if (flags & AT_SYMLINK_NOFOLLOW)
- lookup_flags &= ~LOOKUP_FOLLOW;
- if (flags & AT_EMPTY_PATH)
- lookup_flags |= LOOKUP_EMPTY;
-
- *kattr = (struct mount_kattr) {
- .lookup_flags = lookup_flags,
- .recurse = !!(flags & AT_RECURSIVE),
- };
-
if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
return -EINVAL;
if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
@@ -4832,35 +5174,28 @@ static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
return -EINVAL;
}
- return build_mount_idmapped(attr, usize, kattr, flags);
+ return build_mount_idmapped(attr, usize, kattr);
}
static void finish_mount_kattr(struct mount_kattr *kattr)
{
- put_user_ns(kattr->mnt_userns);
- kattr->mnt_userns = NULL;
+ if (kattr->mnt_userns) {
+ put_user_ns(kattr->mnt_userns);
+ kattr->mnt_userns = NULL;
+ }
if (kattr->mnt_idmap)
mnt_idmap_put(kattr->mnt_idmap);
}
-SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
- unsigned int, flags, struct mount_attr __user *, uattr,
- size_t, usize)
+static int copy_mount_setattr(struct mount_attr __user *uattr, size_t usize,
+ struct mount_kattr *kattr)
{
- int err;
- struct path target;
+ int ret;
struct mount_attr attr;
- struct mount_kattr kattr;
BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
- if (flags & ~(AT_EMPTY_PATH |
- AT_RECURSIVE |
- AT_SYMLINK_NOFOLLOW |
- AT_NO_AUTOMOUNT))
- return -EINVAL;
-
if (unlikely(usize > PAGE_SIZE))
return -E2BIG;
if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
@@ -4869,9 +5204,9 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
if (!may_mount())
return -EPERM;
- err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
- if (err)
- return err;
+ ret = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
+ if (ret)
+ return ret;
/* Don't bother walking through the mounts if this is a nop. */
if (attr.attr_set == 0 &&
@@ -4879,7 +5214,39 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
attr.propagation == 0)
return 0;
- err = build_mount_kattr(&attr, usize, &kattr, flags);
+ return build_mount_kattr(&attr, usize, kattr);
+}
+
+SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
+ unsigned int, flags, struct mount_attr __user *, uattr,
+ size_t, usize)
+{
+ int err;
+ struct path target;
+ struct mount_kattr kattr;
+ unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
+
+ if (flags & ~(AT_EMPTY_PATH |
+ AT_RECURSIVE |
+ AT_SYMLINK_NOFOLLOW |
+ AT_NO_AUTOMOUNT))
+ return -EINVAL;
+
+ if (flags & AT_NO_AUTOMOUNT)
+ lookup_flags &= ~LOOKUP_AUTOMOUNT;
+ if (flags & AT_SYMLINK_NOFOLLOW)
+ lookup_flags &= ~LOOKUP_FOLLOW;
+ if (flags & AT_EMPTY_PATH)
+ lookup_flags |= LOOKUP_EMPTY;
+
+ kattr = (struct mount_kattr) {
+ .lookup_flags = lookup_flags,
+ };
+
+ if (flags & AT_RECURSIVE)
+ kattr.kflags |= MOUNT_KATTR_RECURSE;
+
+ err = copy_mount_setattr(uattr, usize, &kattr);
if (err)
return err;
@@ -4892,6 +5259,47 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
return err;
}
+SYSCALL_DEFINE5(open_tree_attr, int, dfd, const char __user *, filename,
+ unsigned, flags, struct mount_attr __user *, uattr,
+ size_t, usize)
+{
+ struct file __free(fput) *file = NULL;
+ int fd;
+
+ if (!uattr && usize)
+ return -EINVAL;
+
+ file = vfs_open_tree(dfd, filename, flags);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
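+ /*
+ * Apply the requested attributes before the file descriptor is
+ * installed so userspace never observes a half-configured mount.
+ */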
+ if (uattr) {
+ int ret;
+ struct mount_kattr kattr = {};
+
+ kattr.kflags = MOUNT_KATTR_IDMAP_REPLACE;
+ if (flags & AT_RECURSIVE)
+ kattr.kflags |= MOUNT_KATTR_RECURSE;
+
+ ret = copy_mount_setattr(uattr, usize, &kattr);
+ if (ret)
+ return ret;
+
+ ret = do_mount_setattr(&file->f_path, &kattr);
+ if (ret)
+ return ret;
+
+ finish_mount_kattr(&kattr);
+ }
+
+ fd = get_unused_fd_flags(flags & O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ fd_install(fd, no_free_ptr(file));
+ return fd;
+}
+
int show_path(struct seq_file *m, struct dentry *root)
{
if (root->d_sb->s_op->show_path)
@@ -4915,6 +5323,7 @@ struct kstatmount {
struct statmount __user *buf;
size_t bufsize;
struct vfsmount *mnt;
+ struct mnt_idmap *idmap;
u64 mask;
struct path root;
struct statmount sm;
@@ -5184,6 +5593,46 @@ static int statmount_opt_sec_array(struct kstatmount *s, struct seq_file *seq)
return 0;
}
+static inline int statmount_mnt_uidmap(struct kstatmount *s, struct seq_file *seq)
+{
+ int ret;
+
+ ret = statmount_mnt_idmap(s->idmap, seq, true);
+ if (ret < 0)
+ return ret;
+
+ s->sm.mnt_uidmap_num = ret;
+ /*
+ * Always raise STATMOUNT_MNT_UIDMAP even if there are no valid
+ * mappings. This allows userspace to distinguish between a
+ * non-idmapped mount and an idmapped mount where none of the
+ * individual mappings are valid in the caller's idmapping.
+ */
+ if (is_valid_mnt_idmap(s->idmap))
+ s->sm.mask |= STATMOUNT_MNT_UIDMAP;
+ return 0;
+}
+
+static inline int statmount_mnt_gidmap(struct kstatmount *s, struct seq_file *seq)
+{
+ int ret;
+
+ ret = statmount_mnt_idmap(s->idmap, seq, false);
+ if (ret < 0)
+ return ret;
+
+ s->sm.mnt_gidmap_num = ret;
+ /*
+ * Always raise STATMOUNT_MNT_GIDMAP even if there are no valid
+ * mappings. This allows userspace to distinguish between a
+ * non-idmapped mount and an idmapped mount where none of the
+ * individual mappings are valid in the caller's idmapping.
+ */
+ if (is_valid_mnt_idmap(s->idmap))
+ s->sm.mask |= STATMOUNT_MNT_GIDMAP;
+ return 0;
+}
+
static int statmount_string(struct kstatmount *s, u64 flag)
{
int ret = 0;
@@ -5231,6 +5680,14 @@ static int statmount_string(struct kstatmount *s, u64 flag)
offp = &sm->sb_source;
ret = statmount_sb_source(s, seq);
break;
+ case STATMOUNT_MNT_UIDMAP:
+ sm->mnt_uidmap = start;
+ ret = statmount_mnt_uidmap(s, seq);
+ break;
+ case STATMOUNT_MNT_GIDMAP:
+ sm->mnt_gidmap = start;
+ ret = statmount_mnt_gidmap(s, seq);
+ break;
default:
WARN_ON_ONCE(true);
return -EINVAL;
@@ -5306,7 +5763,7 @@ static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
* We have to find the first mount in our ns and use that, however it
* may not exist, so handle that properly.
*/
- if (RB_EMPTY_ROOT(&ns->mounts))
+ if (mnt_ns_empty(ns))
return -ENOENT;
first = child = ns->root;
@@ -5323,6 +5780,21 @@ static int grab_requested_root(struct mnt_namespace *ns, struct path *root)
return 0;
}
+/* This must be updated whenever a new flag is added */
+#define STATMOUNT_SUPPORTED (STATMOUNT_SB_BASIC | \
+ STATMOUNT_MNT_BASIC | \
+ STATMOUNT_PROPAGATE_FROM | \
+ STATMOUNT_MNT_ROOT | \
+ STATMOUNT_MNT_POINT | \
+ STATMOUNT_FS_TYPE | \
+ STATMOUNT_MNT_NS_ID | \
+ STATMOUNT_MNT_OPTS | \
+ STATMOUNT_FS_SUBTYPE | \
+ STATMOUNT_SB_SOURCE | \
+ STATMOUNT_OPT_ARRAY | \
+ STATMOUNT_OPT_SEC_ARRAY | \
+ STATMOUNT_SUPPORTED_MASK)
+
static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
struct mnt_namespace *ns)
{
@@ -5331,7 +5803,7 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
int err;
/* Has the namespace already been emptied? */
- if (mnt_ns_id && RB_EMPTY_ROOT(&ns->mounts))
+ if (mnt_ns_id && mnt_ns_empty(ns))
return -ENOENT;
s->mnt = lookup_mnt_in_ns(mnt_id, ns);
@@ -5356,6 +5828,7 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
return err;
s->root = root;
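+ /* Cache the mount's idmapping for the uid/gid map helpers below. */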
+ s->idmap = mnt_idmap(s->mnt);
if (s->mask & STATMOUNT_SB_BASIC)
statmount_sb_basic(s);
@@ -5389,12 +5862,26 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
if (!err && s->mask & STATMOUNT_SB_SOURCE)
err = statmount_string(s, STATMOUNT_SB_SOURCE);
+ if (!err && s->mask & STATMOUNT_MNT_UIDMAP)
+ err = statmount_string(s, STATMOUNT_MNT_UIDMAP);
+
+ if (!err && s->mask & STATMOUNT_MNT_GIDMAP)
+ err = statmount_string(s, STATMOUNT_MNT_GIDMAP);
+
if (!err && s->mask & STATMOUNT_MNT_NS_ID)
statmount_mnt_ns_id(s, ns);
+ if (!err && s->mask & STATMOUNT_SUPPORTED_MASK) {
+ s->sm.mask |= STATMOUNT_SUPPORTED_MASK;
+ s->sm.supported_mask = STATMOUNT_SUPPORTED;
+ }
+
if (err)
return err;
+ /* Are there bits in the return mask not present in STATMOUNT_SUPPORTED? */
+ WARN_ON_ONCE(~STATMOUNT_SUPPORTED & s->sm.mask);
+
return 0;
}
@@ -5412,7 +5899,8 @@ static inline bool retry_statmount(const long ret, size_t *seq_size)
#define STATMOUNT_STRING_REQ (STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT | \
STATMOUNT_FS_TYPE | STATMOUNT_MNT_OPTS | \
STATMOUNT_FS_SUBTYPE | STATMOUNT_SB_SOURCE | \
- STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY)
+ STATMOUNT_OPT_ARRAY | STATMOUNT_OPT_SEC_ARRAY | \
+ STATMOUNT_MNT_UIDMAP | STATMOUNT_MNT_GIDMAP)
static int prepare_kstatmount(struct kstatmount *ks, struct mnt_id_req *kreq,
struct statmount __user *buf, size_t bufsize,
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2b04038b0e40..bc957487f6ec 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1532,7 +1532,8 @@ static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags)
{
if (NFS_PROTO(dir)->version == 2)
return 0;
- return flags & LOOKUP_EXCL;
+ return (flags & (LOOKUP_CREATE | LOOKUP_EXCL)) ==
+ (LOOKUP_CREATE | LOOKUP_EXCL);
}
/*
@@ -2421,11 +2422,11 @@ EXPORT_SYMBOL_GPL(nfs_mknod);
/*
* See comments for nfs_proc_create regarding failed operations.
*/
-int nfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+struct dentry *nfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct iattr attr;
- int error;
+ struct dentry *ret;
dfprintk(VFS, "NFS: mkdir(%s/%lu), %pd\n",
dir->i_sb->s_id, dir->i_ino, dentry);
@@ -2434,14 +2435,9 @@ int nfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
attr.ia_mode = mode | S_IFDIR;
trace_nfs_mkdir_enter(dir, dentry);
- error = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
- trace_nfs_mkdir_exit(dir, dentry, error);
- if (error != 0)
- goto out_err;
- return 0;
-out_err:
- d_drop(dentry);
- return error;
+ ret = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
+ trace_nfs_mkdir_exit(dir, dentry, PTR_ERR_OR_ZERO(ret));
+ return ret;
}
EXPORT_SYMBOL_GPL(nfs_mkdir);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index fae2c7ae4acc..1ac1d3eec517 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -400,8 +400,8 @@ struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
void nfs_d_prune_case_insensitive_aliases(struct inode *inode);
int nfs_create(struct mnt_idmap *, struct inode *, struct dentry *,
umode_t, bool);
-int nfs_mkdir(struct mnt_idmap *, struct inode *, struct dentry *,
- umode_t);
+struct dentry *nfs_mkdir(struct mnt_idmap *, struct inode *, struct dentry *,
+ umode_t);
int nfs_rmdir(struct inode *, struct dentry *);
int nfs_unlink(struct inode *, struct dentry *);
int nfs_symlink(struct mnt_idmap *, struct inode *, struct dentry *,
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 0c3bc98cd999..755ed3c37051 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -578,13 +578,13 @@ out:
return status;
}
-static int
+static struct dentry *
nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct posix_acl *default_acl, *acl;
struct nfs3_createdata *data;
- struct dentry *d_alias;
- int status = -ENOMEM;
+ struct dentry *ret = ERR_PTR(-ENOMEM);
+ int status;
dprintk("NFS call mkdir %pd\n", dentry);
@@ -592,8 +592,9 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
if (data == NULL)
goto out;
- status = posix_acl_create(dir, &sattr->ia_mode, &default_acl, &acl);
- if (status)
+ ret = ERR_PTR(posix_acl_create(dir, &sattr->ia_mode,
+ &default_acl, &acl));
+ if (IS_ERR(ret))
goto out;
data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_MKDIR];
@@ -602,25 +603,27 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
data->arg.mkdir.len = dentry->d_name.len;
data->arg.mkdir.sattr = sattr;
- d_alias = nfs3_do_create(dir, dentry, data);
- status = PTR_ERR_OR_ZERO(d_alias);
+ ret = nfs3_do_create(dir, dentry, data);
- if (status != 0)
+ if (IS_ERR(ret))
goto out_release_acls;
- if (d_alias)
- dentry = d_alias;
+ if (ret)
+ dentry = ret;
status = nfs3_proc_setacls(d_inode(dentry), acl, default_acl);
+ if (status) {
+ dput(ret);
+ ret = ERR_PTR(status);
+ }
- dput(d_alias);
out_release_acls:
posix_acl_release(acl);
posix_acl_release(default_acl);
out:
nfs3_free_createdata(data);
- dprintk("NFS reply mkdir: %d\n", status);
- return status;
+ dprintk("NFS reply mkdir: %d\n", PTR_ERR_OR_ZERO(ret));
+ return ret;
}
static int
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6e95db6c17e9..70c8ea943019 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3154,9 +3154,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
if (d_really_is_negative(dentry)) {
struct dentry *alias;
d_drop(dentry);
- alias = d_exact_alias(dentry, state->inode);
- if (!alias)
- alias = d_splice_alias(igrab(state->inode), dentry);
+ alias = d_splice_alias(igrab(state->inode), dentry);
/* d_splice_alias() can't fail here - it's a non-directory */
if (alias) {
dput(ctx->dentry);
@@ -5139,9 +5137,6 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
spin_lock(&dir->i_lock);
- /* Creating a directory bumps nlink in the parent */
- if (data->arg.ftype == NF4DIR)
- nfs4_inc_nlink_locked(dir);
nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
data->res.fattr->time_start,
NFS_INO_INVALID_DATA);
@@ -5151,6 +5146,25 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
return status;
}
+static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry,
+ struct nfs4_createdata *data)
+{
+ int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
+ &data->arg.seq_args, &data->res.seq_res, 1);
+
+ if (status)
+ return ERR_PTR(status);
+
+ spin_lock(&dir->i_lock);
+ /* Creating a directory bumps nlink in the parent */
+ nfs4_inc_nlink_locked(dir);
+ nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
+ data->res.fattr->time_start,
+ NFS_INO_INVALID_DATA);
+ spin_unlock(&dir->i_lock);
+ return nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr);
+}
+
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
nfs4_label_free(data->fattr.label);
@@ -5207,32 +5221,34 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
return err;
}
-static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
- struct iattr *sattr, struct nfs4_label *label)
+static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr,
+ struct nfs4_label *label)
{
struct nfs4_createdata *data;
- int status = -ENOMEM;
+ struct dentry *ret = ERR_PTR(-ENOMEM);
data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
if (data == NULL)
goto out;
data->arg.label = label;
- status = nfs4_do_create(dir, dentry, data);
+ ret = nfs4_do_mkdir(dir, dentry, data);
nfs4_free_createdata(data);
out:
- return status;
+ return ret;
}
-static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
- struct iattr *sattr)
+static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_exception exception = {
.interruptible = true,
};
struct nfs4_label l, *label;
+ struct dentry *alias;
int err;
label = nfs4_label_init_security(dir, dentry, sattr, &l);
@@ -5240,14 +5256,15 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
sattr->ia_mode &= ~current_umask();
do {
- err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
+ alias = _nfs4_proc_mkdir(dir, dentry, sattr, label);
+ err = PTR_ERR_OR_ZERO(alias);
trace_nfs4_mkdir(dir, &dentry->d_name, err);
err = nfs4_handle_exception(NFS_SERVER(dir), err,
&exception);
} while (exception.retry);
nfs4_label_release_security(label);
- return err;
+ return alias;
}
static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 77920a2e3cef..63e71310b9f6 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -446,13 +446,14 @@ out:
return status;
}
-static int
+static struct dentry *
nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
{
struct nfs_createdata *data;
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_MKDIR],
};
+ struct dentry *alias = NULL;
int status = -ENOMEM;
dprintk("NFS call mkdir %pd\n", dentry);
@@ -464,12 +465,15 @@ nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
nfs_mark_for_revalidate(dir);
- if (status == 0)
- status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ if (status == 0) {
+ alias = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr);
+ status = PTR_ERR_OR_ZERO(alias);
+ } else {
+ alias = ERR_PTR(status);
+ }
nfs_free_createdata(data);
out:
dprintk("NFS reply mkdir: %d\n", status);
- return status;
+ return alias;
}
static int
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 28f4d5311c40..c1d9bd07285f 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -233,9 +233,12 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
* as well be forgiving and just succeed silently.
*/
goto out_put;
- status = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), dentry, S_IRWXU);
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), dentry, S_IRWXU);
+ if (IS_ERR(dentry))
+ status = PTR_ERR(dentry);
out_put:
- dput(dentry);
+ if (!status)
+ dput(dentry);
out_unlock:
inode_unlock(d_inode(dir));
if (status == 0) {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 29cb7b812d71..34d7aa531662 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1461,7 +1461,7 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct inode *dirp;
struct iattr *iap = attrs->na_iattr;
__be32 err;
- int host_err;
+ int host_err = 0;
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
@@ -1488,28 +1488,15 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
nfsd_check_ignore_resizing(iap);
break;
case S_IFDIR:
- host_err = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode);
- if (!host_err && unlikely(d_unhashed(dchild))) {
- struct dentry *d;
- d = lookup_one_len(dchild->d_name.name,
- dchild->d_parent,
- dchild->d_name.len);
- if (IS_ERR(d)) {
- host_err = PTR_ERR(d);
- break;
- }
- if (unlikely(d_is_negative(d))) {
- dput(d);
- err = nfserr_serverfault;
- goto out;
- }
+ dchild = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode);
+ if (IS_ERR(dchild)) {
+ host_err = PTR_ERR(dchild);
+ } else if (d_is_negative(dchild)) {
+ err = nfserr_serverfault;
+ goto out;
+ } else if (unlikely(dchild != resfhp->fh_dentry)) {
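+ /* vfs_mkdir() returned a different alias; make the fh point at it. */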
dput(resfhp->fh_dentry);
- resfhp->fh_dentry = dget(d);
- err = fh_update(resfhp);
- dput(dchild);
- dchild = d;
- if (err)
- goto out;
+ resfhp->fh_dentry = dget(dchild);
}
break;
case S_IFCHR:
@@ -1530,7 +1517,8 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
err = nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
out:
- dput(dchild);
+ if (!IS_ERR(dchild))
+ dput(dchild);
return err;
out_nfserr:
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 953fbd5f0851..40f4b1a28705 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -218,8 +218,8 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
return err;
}
-static int nilfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *nilfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct nilfs_transaction_info ti;
@@ -227,7 +227,7 @@ static int nilfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
if (err)
- return err;
+ return ERR_PTR(err);
inc_nlink(dir);
@@ -258,7 +258,7 @@ out:
else
nilfs_transaction_abort(dir->i_sb);
- return err;
+ return ERR_PTR(err);
out_fail:
drop_nlink(inode);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 95646f7c46ca..6d386080faf2 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -166,6 +166,8 @@ static bool fanotify_should_merge(struct fanotify_event *old,
case FANOTIFY_EVENT_TYPE_FS_ERROR:
return fanotify_error_event_equal(FANOTIFY_EE(old),
FANOTIFY_EE(new));
+ case FANOTIFY_EVENT_TYPE_MNT:
+ return false;
default:
WARN_ON_ONCE(1);
}
@@ -312,7 +314,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
__func__, iter_info->report_mask, event_mask, data, data_type);
- if (!fid_mode) {
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_MNT)) {
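+ /* Groups created with FAN_REPORT_MNT only consume mount events. */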
+ if (data_type != FSNOTIFY_EVENT_MNT)
+ return 0;
+ } else if (!fid_mode) {
/* Do we have path to open a file descriptor? */
if (!path)
return 0;
@@ -557,6 +562,20 @@ static struct fanotify_event *fanotify_alloc_path_event(const struct path *path,
return &pevent->fae;
}
+static struct fanotify_event *fanotify_alloc_mnt_event(u64 mnt_id, gfp_t gfp)
+{
+ struct fanotify_mnt_event *pevent;
+
+ pevent = kmem_cache_alloc(fanotify_mnt_event_cachep, gfp);
+ if (!pevent)
+ return NULL;
+
+ pevent->fae.type = FANOTIFY_EVENT_TYPE_MNT;
+ pevent->mnt_id = mnt_id;
+
+ return &pevent->fae;
+}
+
static struct fanotify_event *fanotify_alloc_perm_event(const void *data,
int data_type,
gfp_t gfp)
@@ -731,6 +750,7 @@ static struct fanotify_event *fanotify_alloc_event(
fid_mode);
struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir);
const struct path *path = fsnotify_data_path(data, data_type);
+ u64 mnt_id = fsnotify_data_mnt_id(data, data_type);
struct mem_cgroup *old_memcg;
struct dentry *moved = NULL;
struct inode *child = NULL;
@@ -826,8 +846,12 @@ static struct fanotify_event *fanotify_alloc_event(
moved, &hash, gfp);
} else if (fid_mode) {
event = fanotify_alloc_fid_event(id, fsid, &hash, gfp);
- } else {
+ } else if (path) {
event = fanotify_alloc_path_event(path, &hash, gfp);
+ } else if (mnt_id) {
+ event = fanotify_alloc_mnt_event(mnt_id, gfp);
+ } else {
+ WARN_ON_ONCE(1);
}
if (!event)
@@ -927,7 +951,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
BUILD_BUG_ON(FAN_RENAME != FS_RENAME);
BUILD_BUG_ON(FAN_PRE_ACCESS != FS_PRE_ACCESS);
- BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 22);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 24);
mask = fanotify_group_event_mask(group, iter_info, &match_mask,
mask, data, data_type, dir);
@@ -1028,6 +1052,11 @@ static void fanotify_free_error_event(struct fsnotify_group *group,
mempool_free(fee, &group->fanotify_data.error_events_pool);
}
+static void fanotify_free_mnt_event(struct fanotify_event *event)
+{
+ kmem_cache_free(fanotify_mnt_event_cachep, FANOTIFY_ME(event));
+}
+
static void fanotify_free_event(struct fsnotify_group *group,
struct fsnotify_event *fsn_event)
{
@@ -1054,6 +1083,9 @@ static void fanotify_free_event(struct fsnotify_group *group,
case FANOTIFY_EVENT_TYPE_FS_ERROR:
fanotify_free_error_event(group, event);
break;
+ case FANOTIFY_EVENT_TYPE_MNT:
+ fanotify_free_mnt_event(event);
+ break;
default:
WARN_ON_ONCE(1);
}
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index c12cbc270539..b44e70e44be6 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -9,6 +9,7 @@ extern struct kmem_cache *fanotify_mark_cache;
extern struct kmem_cache *fanotify_fid_event_cachep;
extern struct kmem_cache *fanotify_path_event_cachep;
extern struct kmem_cache *fanotify_perm_event_cachep;
+extern struct kmem_cache *fanotify_mnt_event_cachep;
/* Possible states of the permission event */
enum {
@@ -244,6 +245,7 @@ enum fanotify_event_type {
FANOTIFY_EVENT_TYPE_PATH_PERM,
FANOTIFY_EVENT_TYPE_OVERFLOW, /* struct fanotify_event */
FANOTIFY_EVENT_TYPE_FS_ERROR, /* struct fanotify_error_event */
+ FANOTIFY_EVENT_TYPE_MNT,
__FANOTIFY_EVENT_TYPE_NUM
};
@@ -409,12 +411,23 @@ struct fanotify_path_event {
struct path path;
};
+struct fanotify_mnt_event {
+ struct fanotify_event fae;
+ u64 mnt_id;
+};
+
static inline struct fanotify_path_event *
FANOTIFY_PE(struct fanotify_event *event)
{
return container_of(event, struct fanotify_path_event, fae);
}
+static inline struct fanotify_mnt_event *
+FANOTIFY_ME(struct fanotify_event *event)
+{
+ return container_of(event, struct fanotify_mnt_event, fae);
+}
+
/*
* Structure for permission fanotify events. It gets allocated and freed in
* fanotify_handle_event() since we wait there for user response. When the
@@ -466,6 +479,11 @@ static inline bool fanotify_is_error_event(u32 mask)
return mask & FAN_FS_ERROR;
}
+static inline bool fanotify_is_mnt_event(u32 mask)
+{
+ return mask & (FAN_MNT_ATTACH | FAN_MNT_DETACH);
+}
+
static inline const struct path *fanotify_event_path(struct fanotify_event *event)
{
if (event->type == FANOTIFY_EVENT_TYPE_PATH)
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index ba3e2d09eb44..f2d840ae4ded 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -113,6 +113,7 @@ struct kmem_cache *fanotify_mark_cache __ro_after_init;
struct kmem_cache *fanotify_fid_event_cachep __ro_after_init;
struct kmem_cache *fanotify_path_event_cachep __ro_after_init;
struct kmem_cache *fanotify_perm_event_cachep __ro_after_init;
+struct kmem_cache *fanotify_mnt_event_cachep __ro_after_init;
#define FANOTIFY_EVENT_ALIGN 4
#define FANOTIFY_FID_INFO_HDR_LEN \
@@ -123,6 +124,8 @@ struct kmem_cache *fanotify_perm_event_cachep __ro_after_init;
(sizeof(struct fanotify_event_info_error))
#define FANOTIFY_RANGE_INFO_LEN \
(sizeof(struct fanotify_event_info_range))
+#define FANOTIFY_MNT_INFO_LEN \
+ (sizeof(struct fanotify_event_info_mnt))
static int fanotify_fid_info_len(int fh_len, int name_len)
{
@@ -178,6 +181,8 @@ static size_t fanotify_event_len(unsigned int info_mode,
fh_len = fanotify_event_object_fh_len(event);
event_len += fanotify_fid_info_len(fh_len, dot_len);
}
+ if (fanotify_is_mnt_event(event->mask))
+ event_len += FANOTIFY_MNT_INFO_LEN;
if (info_mode & FAN_REPORT_PIDFD)
event_len += FANOTIFY_PIDFD_INFO_LEN;
@@ -405,6 +410,25 @@ static int process_access_response(struct fsnotify_group *group,
return -ENOENT;
}
+static size_t copy_mnt_info_to_user(struct fanotify_event *event,
+ char __user *buf, int count)
+{
+ struct fanotify_event_info_mnt info = { };
+
+ info.hdr.info_type = FAN_EVENT_INFO_TYPE_MNT;
+ info.hdr.len = FANOTIFY_MNT_INFO_LEN;
+
+ if (WARN_ON(count < info.hdr.len))
+ return -EFAULT;
+
+ info.mnt_id = FANOTIFY_ME(event)->mnt_id;
+
+ if (copy_to_user(buf, &info, sizeof(info)))
+ return -EFAULT;
+
+ return info.hdr.len;
+}
+
static size_t copy_error_info_to_user(struct fanotify_event *event,
char __user *buf, int count)
{
@@ -700,6 +724,15 @@ static int copy_info_records_to_user(struct fanotify_event *event,
total_bytes += ret;
}
+ if (fanotify_is_mnt_event(event->mask)) {
+ ret = copy_mnt_info_to_user(event, buf, count);
+ if (ret < 0)
+ return ret;
+ buf += ret;
+ count -= ret;
+ total_bytes += ret;
+ }
+
return total_bytes;
}
@@ -1508,6 +1541,14 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
if ((flags & FAN_REPORT_PIDFD) && (flags & FAN_REPORT_TID))
return -EINVAL;
+ /* Don't allow mixing mnt events with inode events for now */
+ if (flags & FAN_REPORT_MNT) {
+ if (class != FAN_CLASS_NOTIF)
+ return -EINVAL;
+ if (flags & (FANOTIFY_FID_BITS | FAN_REPORT_FD_ERROR))
+ return -EINVAL;
+ }
+
if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
return -EINVAL;
@@ -1767,7 +1808,6 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
int dfd, const char __user *pathname)
{
struct inode *inode = NULL;
- struct vfsmount *mnt = NULL;
struct fsnotify_group *group;
struct path path;
struct fan_fsid __fsid, *fsid = NULL;
@@ -1776,7 +1816,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
unsigned int mark_cmd = flags & FANOTIFY_MARK_CMD_BITS;
unsigned int ignore = flags & FANOTIFY_MARK_IGNORE_BITS;
unsigned int obj_type, fid_mode;
- void *obj;
+ void *obj = NULL;
u32 umask = 0;
int ret;
@@ -1800,6 +1840,9 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
case FAN_MARK_FILESYSTEM:
obj_type = FSNOTIFY_OBJ_TYPE_SB;
break;
+ case FAN_MARK_MNTNS:
+ obj_type = FSNOTIFY_OBJ_TYPE_MNTNS;
+ break;
default:
return -EINVAL;
}
@@ -1847,6 +1890,19 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
return -EINVAL;
group = fd_file(f)->private_data;
+ /* Mount events may only be reported for mount namespace marks */
+ if (FAN_GROUP_FLAG(group, FAN_REPORT_MNT)) {
+ if (mask & ~FANOTIFY_MOUNT_EVENTS)
+ return -EINVAL;
+ if (mark_type != FAN_MARK_MNTNS)
+ return -EINVAL;
+ } else {
+ if (mask & FANOTIFY_MOUNT_EVENTS)
+ return -EINVAL;
+ if (mark_type == FAN_MARK_MNTNS)
+ return -EINVAL;
+ }
+
/*
* An unprivileged user is not allowed to setup mount nor filesystem
* marks. This also includes setting up such marks by a group that
@@ -1888,7 +1944,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
* point.
*/
fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
- if (mask & ~(FANOTIFY_FD_EVENTS|FANOTIFY_EVENT_FLAGS) &&
+ if (mask & ~(FANOTIFY_FD_EVENTS|FANOTIFY_MOUNT_EVENTS|FANOTIFY_EVENT_FLAGS) &&
(!fid_mode || mark_type == FAN_MARK_MOUNT))
return -EINVAL;
@@ -1938,17 +1994,21 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
}
/* inode held in place by reference to path; group by fget on fd */
- if (mark_type == FAN_MARK_INODE) {
+ if (obj_type == FSNOTIFY_OBJ_TYPE_INODE) {
inode = path.dentry->d_inode;
obj = inode;
- } else {
- mnt = path.mnt;
- if (mark_type == FAN_MARK_MOUNT)
- obj = mnt;
- else
- obj = mnt->mnt_sb;
+ } else if (obj_type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
+ obj = path.mnt;
+ } else if (obj_type == FSNOTIFY_OBJ_TYPE_SB) {
+ obj = path.mnt->mnt_sb;
+ } else if (obj_type == FSNOTIFY_OBJ_TYPE_MNTNS) {
+ obj = mnt_ns_from_dentry(path.dentry);
}
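+ /* NULL obj means the path does not resolve to a markable object. */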
+ ret = -EINVAL;
+ if (!obj)
+ goto path_put_and_out;
+
/*
* If some other task has this inode open for write we should not add
* an ignore mask, unless that ignore mask is supposed to survive
@@ -1956,10 +2016,10 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
*/
if (mark_cmd == FAN_MARK_ADD && (flags & FANOTIFY_MARK_IGNORE_BITS) &&
!(flags & FAN_MARK_IGNORED_SURV_MODIFY)) {
- ret = mnt ? -EINVAL : -EISDIR;
+ ret = !inode ? -EINVAL : -EISDIR;
/* FAN_MARK_IGNORE requires SURV_MODIFY for sb/mount/dir marks */
if (ignore == FAN_MARK_IGNORE &&
- (mnt || S_ISDIR(inode->i_mode)))
+ (!inode || S_ISDIR(inode->i_mode)))
goto path_put_and_out;
ret = 0;
@@ -1968,7 +2028,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
}
/* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */
- if (mnt || !S_ISDIR(inode->i_mode)) {
+ if (!inode || !S_ISDIR(inode->i_mode)) {
mask &= ~FAN_EVENT_ON_CHILD;
umask = FAN_EVENT_ON_CHILD;
/*
@@ -2042,7 +2102,7 @@ static int __init fanotify_user_setup(void)
FANOTIFY_DEFAULT_MAX_USER_MARKS);
BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
- BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 13);
+ BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 14);
BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 11);
fanotify_mark_cache = KMEM_CACHE(fanotify_mark,
@@ -2055,6 +2115,7 @@ static int __init fanotify_user_setup(void)
fanotify_perm_event_cachep =
KMEM_CACHE(fanotify_perm_event, SLAB_PANIC);
}
+ fanotify_mnt_event_cachep = KMEM_CACHE(fanotify_mnt_event, SLAB_PANIC);
fanotify_max_queued_events = FANOTIFY_DEFAULT_MAX_EVENTS;
init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS] =
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index e933f9c65d90..1161eabf11ee 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -121,6 +121,11 @@ static void fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark)
seq_printf(m, "fanotify sdev:%x mflags:%x mask:%x ignored_mask:%x\n",
sb->s_dev, mflags, mark->mask, mark->ignore_mask);
+ } else if (mark->connector->type == FSNOTIFY_OBJ_TYPE_MNTNS) {
+ struct mnt_namespace *mnt_ns = fsnotify_conn_mntns(mark->connector);
+
+ seq_printf(m, "fanotify mnt_ns:%u mflags:%x mask:%x ignored_mask:%x\n",
+ mnt_ns->ns.inum, mflags, mark->mask, mark->ignore_mask);
}
}
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index fae1b6d397ea..e2b4f17a48bb 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -28,6 +28,11 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
fsnotify_clear_marks_by_mount(mnt);
}
+void __fsnotify_mntns_delete(struct mnt_namespace *mntns)
+{
+ fsnotify_clear_marks_by_mntns(mntns);
+}
+
/**
* fsnotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
* @sb: superblock being unmounted.
@@ -420,7 +425,7 @@ static int send_to_group(__u32 mask, const void *data, int data_type,
file_name, cookie, iter_info);
}
-static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp)
+static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector *const *connp)
{
struct fsnotify_mark_connector *conn;
struct hlist_node *node = NULL;
@@ -538,14 +543,15 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
{
const struct path *path = fsnotify_data_path(data, data_type);
struct super_block *sb = fsnotify_data_sb(data, data_type);
- struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb);
+ const struct fsnotify_mnt *mnt_data = fsnotify_data_mnt(data, data_type);
+ struct fsnotify_sb_info *sbinfo = sb ? fsnotify_sb_info(sb) : NULL;
struct fsnotify_iter_info iter_info = {};
struct mount *mnt = NULL;
struct inode *inode2 = NULL;
struct dentry *moved;
int inode2_type;
int ret = 0;
- __u32 test_mask, marks_mask;
+ __u32 test_mask, marks_mask = 0;
if (path)
mnt = real_mount(path->mnt);
@@ -578,17 +584,20 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
if ((!sbinfo || !sbinfo->sb_marks) &&
(!mnt || !mnt->mnt_fsnotify_marks) &&
(!inode || !inode->i_fsnotify_marks) &&
- (!inode2 || !inode2->i_fsnotify_marks))
+ (!inode2 || !inode2->i_fsnotify_marks) &&
+ (!mnt_data || !mnt_data->ns->n_fsnotify_marks))
return 0;
- marks_mask = READ_ONCE(sb->s_fsnotify_mask);
+ if (sb)
+ marks_mask |= READ_ONCE(sb->s_fsnotify_mask);
if (mnt)
marks_mask |= READ_ONCE(mnt->mnt_fsnotify_mask);
if (inode)
marks_mask |= READ_ONCE(inode->i_fsnotify_mask);
if (inode2)
marks_mask |= READ_ONCE(inode2->i_fsnotify_mask);
-
+ if (mnt_data)
+ marks_mask |= READ_ONCE(mnt_data->ns->n_fsnotify_mask);
/*
* If this is a modify event we may need to clear some ignore masks.
@@ -618,6 +627,10 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
iter_info.marks[inode2_type] =
fsnotify_first_mark(&inode2->i_fsnotify_marks);
}
+ if (mnt_data) {
+ iter_info.marks[FSNOTIFY_ITER_TYPE_MNTNS] =
+ fsnotify_first_mark(&mnt_data->ns->n_fsnotify_marks);
+ }
/*
* We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark
@@ -708,11 +721,31 @@ void file_set_fsnotify_mode_from_watchers(struct file *file)
}
#endif
+void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ struct fsnotify_mnt data = {
+ .ns = ns,
+ .mnt_id = real_mount(mnt)->mnt_id_unique,
+ };
+
+ if (WARN_ON_ONCE(!ns))
+ return;
+
+ /*
+ * This is an optimization as well as making sure fsnotify_init() has
+ * been called.
+ */
+ if (!ns->n_fsnotify_marks)
+ return;
+
+ fsnotify(mask, &data, FSNOTIFY_EVENT_MNT, NULL, NULL, NULL, 0);
+}
+
static __init int fsnotify_init(void)
{
int ret;
- BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 24);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 26);
ret = init_srcu_struct(&fsnotify_mark_srcu);
if (ret)
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 663759ed6fbc..5950c7a67f41 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -33,6 +33,12 @@ static inline struct super_block *fsnotify_conn_sb(
return conn->obj;
}
+static inline struct mnt_namespace *fsnotify_conn_mntns(
+ struct fsnotify_mark_connector *conn)
+{
+ return conn->obj;
+}
+
static inline struct super_block *fsnotify_object_sb(void *obj,
enum fsnotify_obj_type obj_type)
{
@@ -89,6 +95,11 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
fsnotify_destroy_marks(fsnotify_sb_marks(sb));
}
+static inline void fsnotify_clear_marks_by_mntns(struct mnt_namespace *mntns)
+{
+ fsnotify_destroy_marks(&mntns->n_fsnotify_marks);
+}
+
/*
* update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children.
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 4981439e6209..798340db69d7 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -107,6 +107,8 @@ static fsnotify_connp_t *fsnotify_object_connp(void *obj,
return &real_mount(obj)->mnt_fsnotify_marks;
case FSNOTIFY_OBJ_TYPE_SB:
return fsnotify_sb_marks(obj);
+ case FSNOTIFY_OBJ_TYPE_MNTNS:
+ return &((struct mnt_namespace *)obj)->n_fsnotify_marks;
default:
return NULL;
}
@@ -120,6 +122,8 @@ static __u32 *fsnotify_conn_mask_p(struct fsnotify_mark_connector *conn)
return &fsnotify_conn_mount(conn)->mnt_fsnotify_mask;
else if (conn->type == FSNOTIFY_OBJ_TYPE_SB)
return &fsnotify_conn_sb(conn)->s_fsnotify_mask;
+ else if (conn->type == FSNOTIFY_OBJ_TYPE_MNTNS)
+ return &fsnotify_conn_mntns(conn)->n_fsnotify_mask;
return NULL;
}
@@ -346,12 +350,15 @@ static void *fsnotify_detach_connector_from_object(
fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
} else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) {
fsnotify_conn_sb(conn)->s_fsnotify_mask = 0;
+ } else if (conn->type == FSNOTIFY_OBJ_TYPE_MNTNS) {
+ fsnotify_conn_mntns(conn)->n_fsnotify_mask = 0;
}
rcu_assign_pointer(*connp, NULL);
conn->obj = NULL;
conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
- fsnotify_update_sb_watchers(sb, conn);
+ if (sb)
+ fsnotify_update_sb_watchers(sb, conn);
return inode;
}
@@ -724,7 +731,7 @@ static int fsnotify_add_mark_list(struct fsnotify_mark *mark, void *obj,
* Attach the sb info before attaching a connector to any object on sb.
* The sb info will remain attached as long as sb lives.
*/
- if (!fsnotify_sb_info(sb)) {
+ if (sb && !fsnotify_sb_info(sb)) {
err = fsnotify_attach_info_to_sb(sb);
if (err)
return err;
@@ -770,7 +777,8 @@ restart:
/* mark should be the last entry. last is the current last entry */
hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
added:
- fsnotify_update_sb_watchers(sb, conn);
+ if (sb)
+ fsnotify_update_sb_watchers(sb, conn);
/*
* Since connector is attached to object using cmpxchg() we are
* guaranteed that connector initialization is fully visible by anyone
diff --git a/fs/nsfs.c b/fs/nsfs.c
index f7fddf8ecf73..59aa801347a7 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -151,19 +151,49 @@ static int copy_ns_info_to_user(const struct mnt_namespace *mnt_ns,
return 0;
}
+static bool nsfs_ioctl_valid(unsigned int cmd)
+{
+ switch (cmd) {
+ case NS_GET_USERNS:
+ case NS_GET_PARENT:
+ case NS_GET_NSTYPE:
+ case NS_GET_OWNER_UID:
+ case NS_GET_MNTNS_ID:
+ case NS_GET_PID_FROM_PIDNS:
+ case NS_GET_TGID_FROM_PIDNS:
+ case NS_GET_PID_IN_PIDNS:
+ case NS_GET_TGID_IN_PIDNS:
+ return true;
+ }
+
+ /* Extensible ioctls require some extra handling. */
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(NS_MNT_GET_INFO):
+ case _IOC_NR(NS_MNT_GET_NEXT):
+ case _IOC_NR(NS_MNT_GET_PREV):
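+ /*
+ * Match on the ioctl type only; the size of extensible
+ * structures is encoded in the command number and may grow.
+ */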
+ return (_IOC_TYPE(cmd) == _IOC_TYPE(NS_MNT_GET_INFO));
+ }
+
+ return false;
+}
+
static long ns_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
struct user_namespace *user_ns;
struct pid_namespace *pid_ns;
struct task_struct *tsk;
- struct ns_common *ns = get_proc_ns(file_inode(filp));
+ struct ns_common *ns;
struct mnt_namespace *mnt_ns;
bool previous = false;
uid_t __user *argp;
uid_t uid;
int ret;
+ if (!nsfs_ioctl_valid(ioctl))
+ return -ENOIOCTLCMD;
+
+ ns = get_proc_ns(file_inode(filp));
switch (ioctl) {
case NS_GET_USERNS:
return open_related_ns(ns, ns_get_owner);
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index abf7e81584a9..652735a0b0c4 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -201,11 +201,11 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
/*
* ntfs_mkdir- inode_operations::mkdir
*/
-static int ntfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ntfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return ntfs_create_inode(idmap, dir, dentry, NULL, S_IFDIR | mode, 0,
- NULL, 0, NULL);
+ return ERR_PTR(ntfs_create_inode(idmap, dir, dentry, NULL, S_IFDIR | mode, 0,
+ NULL, 0, NULL));
}
/*
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 2a7f36643895..5130ec44e5e1 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -402,10 +402,10 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
* File creation. Allocate an inode, and we're done..
*/
/* SMP-safe */
-static int dlmfs_mkdir(struct mnt_idmap * idmap,
- struct inode * dir,
- struct dentry * dentry,
- umode_t mode)
+static struct dentry *dlmfs_mkdir(struct mnt_idmap * idmap,
+ struct inode * dir,
+ struct dentry * dentry,
+ umode_t mode)
{
int status;
struct inode *inode = NULL;
@@ -448,7 +448,7 @@ static int dlmfs_mkdir(struct mnt_idmap * idmap,
bail:
if (status < 0)
iput(inode);
- return status;
+ return ERR_PTR(status);
}
static int dlmfs_create(struct mnt_idmap *idmap,
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 0ec63a1a94b8..99278c8f0e24 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -644,10 +644,10 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
suballoc_loc, suballoc_bit);
}
-static int ocfs2_mkdir(struct mnt_idmap *idmap,
- struct inode *dir,
- struct dentry *dentry,
- umode_t mode)
+static struct dentry *ocfs2_mkdir(struct mnt_idmap *idmap,
+ struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
{
int ret;
@@ -657,7 +657,7 @@ static int ocfs2_mkdir(struct mnt_idmap *idmap,
if (ret)
mlog_errno(ret);
- return ret;
+ return ERR_PTR(ret);
}
static int ocfs2_create(struct mnt_idmap *idmap,
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index 6bda275826d6..2ed541fccf33 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -279,10 +279,10 @@ out_free_inode:
return err;
}
-static int omfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *omfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return omfs_add_node(dir, dentry, mode | S_IFDIR);
+ return ERR_PTR(omfs_add_node(dir, dentry, mode | S_IFDIR));
}
static int omfs_create(struct mnt_idmap *idmap, struct inode *dir,
diff --git a/fs/open.c b/fs/open.c
index 1be20de9f283..a9063cca9911 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -67,11 +67,11 @@ int do_truncate(struct mnt_idmap *idmap, struct dentry *dentry,
return ret;
}
-long vfs_truncate(const struct path *path, loff_t length)
+int vfs_truncate(const struct path *path, loff_t length)
{
struct mnt_idmap *idmap;
struct inode *inode;
- long error;
+ int error;
inode = path->dentry->d_inode;
@@ -123,7 +123,7 @@ mnt_drop_write_and_out:
}
EXPORT_SYMBOL_GPL(vfs_truncate);
-long do_sys_truncate(const char __user *pathname, loff_t length)
+int do_sys_truncate(const char __user *pathname, loff_t length)
{
unsigned int lookup_flags = LOOKUP_FOLLOW;
struct path path;
@@ -157,7 +157,7 @@ COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length
}
#endif
-long do_ftruncate(struct file *file, loff_t length, int small)
+int do_ftruncate(struct file *file, loff_t length, int small)
{
struct inode *inode;
struct dentry *dentry;
@@ -196,7 +196,7 @@ long do_ftruncate(struct file *file, loff_t length, int small)
return error;
}
-long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
+int do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
if (length < 0)
return -EINVAL;
@@ -251,7 +251,7 @@ COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd,
int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
- long ret;
+ int ret;
loff_t sum;
if (offset < 0 || len <= 0)
@@ -460,7 +460,7 @@ static const struct cred *access_override_creds(void)
return override_creds(override_cred);
}
-static long do_faccessat(int dfd, const char __user *filename, int mode, int flags)
+static int do_faccessat(int dfd, const char __user *filename, int mode, int flags)
{
struct path path;
struct inode *inode;
@@ -1409,22 +1409,23 @@ struct file *file_open_root(const struct path *root,
}
EXPORT_SYMBOL(file_open_root);
-static long do_sys_openat2(int dfd, const char __user *filename,
- struct open_how *how)
+static int do_sys_openat2(int dfd, const char __user *filename,
+ struct open_how *how)
{
struct open_flags op;
- int fd = build_open_flags(how, &op);
struct filename *tmp;
+ int err, fd;
- if (fd)
- return fd;
+ err = build_open_flags(how, &op);
+ if (unlikely(err))
+ return err;
tmp = getname(filename);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
fd = get_unused_fd_flags(how->flags);
- if (fd >= 0) {
+ if (likely(fd >= 0)) {
struct file *f = do_filp_open(dfd, tmp, &op);
if (IS_ERR(f)) {
put_unused_fd(fd);
@@ -1437,7 +1438,7 @@ static long do_sys_openat2(int dfd, const char __user *filename,
return fd;
}
-long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
+int do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_how how = build_open_how(flags, mode);
return do_sys_openat2(dfd, filename, &how);
@@ -1551,7 +1552,7 @@ int filp_close(struct file *filp, fl_owner_t id)
int retval;
retval = filp_flush(filp, id);
- fput(filp);
+ fput_close(filp);
return retval;
}
@@ -1577,13 +1578,16 @@ SYSCALL_DEFINE1(close, unsigned int, fd)
* We're returning to user space. Don't bother
* with any delayed fput() cases.
*/
- __fput_sync(file);
+ fput_close_sync(file);
+
+ if (likely(retval == 0))
+ return 0;
/* can't restart close syscall because file table entry was cleared */
- if (unlikely(retval == -ERESTARTSYS ||
- retval == -ERESTARTNOINTR ||
- retval == -ERESTARTNOHAND ||
- retval == -ERESTART_RESTARTBLOCK))
+ if (retval == -ERESTARTSYS ||
+ retval == -ERESTARTNOINTR ||
+ retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK)
retval = -EINTR;
return retval;
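Note: the fs/open.c hunks narrow a set of historically long return types to int (these paths only ever carry 0 or a negative errno) and untangle do_sys_openat2() so the build_open_flags() error is no longer smuggled through the fd variable. For context, a hedged userspace sketch of the openat2(2) entry point being serviced here; the raw syscall is used because older glibc ships no wrapper:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <linux/openat2.h>
#include <sys/syscall.h>

int main(void)
{
	struct open_how how = {
		.flags   = O_RDONLY | O_CLOEXEC,
		.resolve = RESOLVE_NO_SYMLINKS,	/* refuse any symlink in the path */
	};
	int fd = syscall(SYS_openat2, AT_FDCWD, "/etc/hostname", &how, sizeof(how));

	if (fd < 0)
		return 1;
	close(fd);
	return 0;
}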
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index d68372241b30..90c49c0de243 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -57,8 +57,8 @@ ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
int buffer_index;
ssize_t ret;
size_t copy_amount;
- int open_for_read;
- int open_for_write;
+ bool open_for_read;
+ bool open_for_write;
new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
if (!new_op)
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index aae6d2b8767d..5ac743c6bc2e 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -16,22 +16,22 @@
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"
-static int orangefs_writepage_locked(struct page *page,
- struct writeback_control *wbc)
+static int orangefs_writepage_locked(struct folio *folio,
+ struct writeback_control *wbc)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct orangefs_write_range *wr = NULL;
struct iov_iter iter;
struct bio_vec bv;
- size_t len, wlen;
+ size_t wlen;
ssize_t ret;
- loff_t off;
+ loff_t len, off;
- set_page_writeback(page);
+ folio_start_writeback(folio);
len = i_size_read(inode);
- if (PagePrivate(page)) {
- wr = (struct orangefs_write_range *)page_private(page);
+ if (folio->private) {
+ wr = folio->private;
WARN_ON(wr->pos >= len);
off = wr->pos;
if (off + wr->len > len)
@@ -40,36 +40,27 @@ static int orangefs_writepage_locked(struct page *page,
wlen = wr->len;
} else {
WARN_ON(1);
- off = page_offset(page);
- if (off + PAGE_SIZE > len)
+ off = folio_pos(folio);
+ wlen = folio_size(folio);
+
+ if (wlen > len - off)
wlen = len - off;
- else
- wlen = PAGE_SIZE;
}
/* Should've been handled in orangefs_invalidate_folio. */
WARN_ON(off == len || off + wlen > len);
WARN_ON(wlen == 0);
- bvec_set_page(&bv, page, wlen, off % PAGE_SIZE);
+ bvec_set_folio(&bv, folio, wlen, offset_in_folio(folio, off));
iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, wlen);
ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
len, wr, NULL, NULL);
if (ret < 0) {
- mapping_set_error(page->mapping, ret);
+ mapping_set_error(folio->mapping, ret);
} else {
ret = 0;
}
- kfree(detach_page_private(page));
- return ret;
-}
-
-static int orangefs_writepage(struct page *page, struct writeback_control *wbc)
-{
- int ret;
- ret = orangefs_writepage_locked(page, wbc);
- unlock_page(page);
- end_page_writeback(page);
+ kfree(folio_detach_private(folio));
return ret;
}
@@ -79,33 +70,33 @@ struct orangefs_writepages {
kuid_t uid;
kgid_t gid;
int maxpages;
- int npages;
- struct page **pages;
+ int nfolios;
+ struct address_space *mapping;
+ struct folio **folios;
struct bio_vec *bv;
};
static int orangefs_writepages_work(struct orangefs_writepages *ow,
- struct writeback_control *wbc)
+ struct writeback_control *wbc)
{
- struct inode *inode = ow->pages[0]->mapping->host;
+ struct inode *inode = ow->mapping->host;
struct orangefs_write_range *wrp, wr;
struct iov_iter iter;
ssize_t ret;
- size_t len;
- loff_t off;
+ size_t start;
+ loff_t len, off;
int i;
len = i_size_read(inode);
- for (i = 0; i < ow->npages; i++) {
- set_page_writeback(ow->pages[i]);
- bvec_set_page(&ow->bv[i], ow->pages[i],
- min(page_offset(ow->pages[i]) + PAGE_SIZE,
- ow->off + ow->len) -
- max(ow->off, page_offset(ow->pages[i])),
- i == 0 ? ow->off - page_offset(ow->pages[i]) : 0);
+ start = offset_in_folio(ow->folios[0], ow->off);
+ for (i = 0; i < ow->nfolios; i++) {
+ folio_start_writeback(ow->folios[i]);
+ bvec_set_folio(&ow->bv[i], ow->folios[i],
+ folio_size(ow->folios[i]) - start, start);
+ start = 0;
}
- iov_iter_bvec(&iter, ITER_SOURCE, ow->bv, ow->npages, ow->len);
+ iov_iter_bvec(&iter, ITER_SOURCE, ow->bv, ow->nfolios, ow->len);
WARN_ON(ow->off >= len);
if (ow->off + ow->len > len)
@@ -116,40 +107,24 @@ static int orangefs_writepages_work(struct orangefs_writepages *ow,
wr.gid = ow->gid;
ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
0, &wr, NULL, NULL);
- if (ret < 0) {
- for (i = 0; i < ow->npages; i++) {
- mapping_set_error(ow->pages[i]->mapping, ret);
- if (PagePrivate(ow->pages[i])) {
- wrp = (struct orangefs_write_range *)
- page_private(ow->pages[i]);
- ClearPagePrivate(ow->pages[i]);
- put_page(ow->pages[i]);
- kfree(wrp);
- }
- end_page_writeback(ow->pages[i]);
- unlock_page(ow->pages[i]);
- }
- } else {
+ if (ret < 0)
+ mapping_set_error(ow->mapping, ret);
+ else
ret = 0;
- for (i = 0; i < ow->npages; i++) {
- if (PagePrivate(ow->pages[i])) {
- wrp = (struct orangefs_write_range *)
- page_private(ow->pages[i]);
- ClearPagePrivate(ow->pages[i]);
- put_page(ow->pages[i]);
- kfree(wrp);
- }
- end_page_writeback(ow->pages[i]);
- unlock_page(ow->pages[i]);
- }
+
+ for (i = 0; i < ow->nfolios; i++) {
+ wrp = folio_detach_private(ow->folios[i]);
+ kfree(wrp);
+ folio_end_writeback(ow->folios[i]);
+ folio_unlock(ow->folios[i]);
}
+
return ret;
}
static int orangefs_writepages_callback(struct folio *folio,
- struct writeback_control *wbc, void *data)
+ struct writeback_control *wbc, struct orangefs_writepages *ow)
{
- struct orangefs_writepages *ow = data;
struct orangefs_write_range *wr = folio->private;
int ret;
@@ -162,41 +137,41 @@ static int orangefs_writepages_callback(struct folio *folio,
}
ret = -1;
- if (ow->npages == 0) {
+ if (ow->nfolios == 0) {
ow->off = wr->pos;
ow->len = wr->len;
ow->uid = wr->uid;
ow->gid = wr->gid;
- ow->pages[ow->npages++] = &folio->page;
+ ow->folios[ow->nfolios++] = folio;
ret = 0;
goto done;
}
if (!uid_eq(ow->uid, wr->uid) || !gid_eq(ow->gid, wr->gid)) {
orangefs_writepages_work(ow, wbc);
- ow->npages = 0;
+ ow->nfolios = 0;
ret = -1;
goto done;
}
if (ow->off + ow->len == wr->pos) {
ow->len += wr->len;
- ow->pages[ow->npages++] = &folio->page;
+ ow->folios[ow->nfolios++] = folio;
ret = 0;
goto done;
}
done:
if (ret == -1) {
- if (ow->npages) {
+ if (ow->nfolios) {
orangefs_writepages_work(ow, wbc);
- ow->npages = 0;
+ ow->nfolios = 0;
}
- ret = orangefs_writepage_locked(&folio->page, wbc);
+ ret = orangefs_writepage_locked(folio, wbc);
mapping_set_error(folio->mapping, ret);
folio_unlock(folio);
folio_end_writeback(folio);
} else {
- if (ow->npages == ow->maxpages) {
+ if (ow->nfolios == ow->maxpages) {
orangefs_writepages_work(ow, wbc);
- ow->npages = 0;
+ ow->nfolios = 0;
}
}
return ret;
@@ -207,31 +182,35 @@ static int orangefs_writepages(struct address_space *mapping,
{
struct orangefs_writepages *ow;
struct blk_plug plug;
- int ret;
+ int error;
+ struct folio *folio = NULL;
+
ow = kzalloc(sizeof(struct orangefs_writepages), GFP_KERNEL);
if (!ow)
return -ENOMEM;
ow->maxpages = orangefs_bufmap_size_query()/PAGE_SIZE;
- ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL);
- if (!ow->pages) {
+ ow->folios = kcalloc(ow->maxpages, sizeof(struct folio *), GFP_KERNEL);
+ if (!ow->folios) {
kfree(ow);
return -ENOMEM;
}
ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL);
if (!ow->bv) {
- kfree(ow->pages);
+ kfree(ow->folios);
kfree(ow);
return -ENOMEM;
}
+ ow->mapping = mapping;
blk_start_plug(&plug);
- ret = write_cache_pages(mapping, wbc, orangefs_writepages_callback, ow);
- if (ow->npages)
- ret = orangefs_writepages_work(ow, wbc);
+ while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+ error = orangefs_writepages_callback(folio, wbc, ow);
+ if (ow->nfolios)
+ error = orangefs_writepages_work(ow, wbc);
blk_finish_plug(&plug);
- kfree(ow->pages);
+ kfree(ow->folios);
kfree(ow->bv);
kfree(ow);
- return ret;
+ return error;
}
static int orangefs_launder_folio(struct folio *);
@@ -484,7 +463,7 @@ static int orangefs_launder_folio(struct folio *folio)
};
folio_wait_writeback(folio);
if (folio_clear_dirty_for_io(folio)) {
- r = orangefs_writepage_locked(&folio->page, &wbc);
+ r = orangefs_writepage_locked(folio, &wbc);
folio_end_writeback(folio);
}
return r;
@@ -606,7 +585,6 @@ out:
/** ORANGEFS2 implementation of address space operations */
static const struct address_space_operations orangefs_address_operations = {
- .writepage = orangefs_writepage,
.readahead = orangefs_readahead,
.read_folio = orangefs_read_folio,
.writepages = orangefs_writepages,
@@ -616,6 +594,7 @@ static const struct address_space_operations orangefs_address_operations = {
.invalidate_folio = orangefs_invalidate_folio,
.release_folio = orangefs_release_folio,
.free_folio = orangefs_free_folio,
+ .migrate_folio = filemap_migrate_folio,
.launder_folio = orangefs_launder_folio,
.direct_IO = orangefs_direct_IO,
};
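Note: with .writepage removed from orangefs_address_operations, orangefs_writepages() drives writeback itself through writeback_iter(). A sketch of the canonical iterator pattern, assuming its documented contract (each returned folio is locked and already selected for writeback, and the error is carried through the last argument); write_one_folio() is a hypothetical per-folio writer:

static int foofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		/* each folio arrives locked; the callee unlocks when done */
		error = write_one_folio(folio, wbc);	/* hypothetical writer */
	}
	return error;
}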
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 200558ec72f0..82395fe2b956 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -300,8 +300,8 @@ out:
return ret;
}
-static int orangefs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *orangefs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct orangefs_inode_s *parent = ORANGEFS_I(dir);
struct orangefs_kernel_op_s *new_op;
@@ -312,7 +312,7 @@ static int orangefs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
new_op = op_alloc(ORANGEFS_VFS_OP_MKDIR);
if (!new_op)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
new_op->upcall.req.mkdir.parent_refn = parent->refn;
@@ -366,7 +366,7 @@ static int orangefs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
__orangefs_setattr(dir, &iattr);
out:
op_release(new_op);
- return ret;
+ return ERR_PTR(ret);
}
static int orangefs_rename(struct mnt_idmap *idmap,
diff --git a/fs/orangefs/orangefs-debug.h b/fs/orangefs/orangefs-debug.h
index 6e079d4230d0..d4463534cec6 100644
--- a/fs/orangefs/orangefs-debug.h
+++ b/fs/orangefs/orangefs-debug.h
@@ -43,47 +43,4 @@
#define GOSSIP_MAX_NR 16
#define GOSSIP_MAX_DEBUG (((__u64)1 << GOSSIP_MAX_NR) - 1)
-/* a private internal type */
-struct __keyword_mask_s {
- const char *keyword;
- __u64 mask_val;
-};
-
-/*
- * Map all kmod keywords to kmod debug masks here. Keep this
- * structure "packed":
- *
- * "all" is always last...
- *
- * keyword mask_val index
- * foo 1 0
- * bar 2 1
- * baz 4 2
- * qux 8 3
- * . . .
- */
-static struct __keyword_mask_s s_kmod_keyword_mask_map[] = {
- {"super", GOSSIP_SUPER_DEBUG},
- {"inode", GOSSIP_INODE_DEBUG},
- {"file", GOSSIP_FILE_DEBUG},
- {"dir", GOSSIP_DIR_DEBUG},
- {"utils", GOSSIP_UTILS_DEBUG},
- {"wait", GOSSIP_WAIT_DEBUG},
- {"acl", GOSSIP_ACL_DEBUG},
- {"dcache", GOSSIP_DCACHE_DEBUG},
- {"dev", GOSSIP_DEV_DEBUG},
- {"name", GOSSIP_NAME_DEBUG},
- {"bufmap", GOSSIP_BUFMAP_DEBUG},
- {"cache", GOSSIP_CACHE_DEBUG},
- {"debugfs", GOSSIP_DEBUGFS_DEBUG},
- {"xattr", GOSSIP_XATTR_DEBUG},
- {"init", GOSSIP_INIT_DEBUG},
- {"sysfs", GOSSIP_SYSFS_DEBUG},
- {"none", GOSSIP_NO_DEBUG},
- {"all", GOSSIP_MAX_DEBUG}
-};
-
-static const int num_kmod_keyword_mask_map = (int)
- (ARRAY_SIZE(s_kmod_keyword_mask_map));
-
#endif /* __ORANGEFS_DEBUG_H */
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index f52073022fae..f7095c91660c 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -44,6 +44,49 @@
#include "protocol.h"
#include "orangefs-kernel.h"
+/* a private internal type */
+struct __keyword_mask_s {
+ const char *keyword;
+ __u64 mask_val;
+};
+
+/*
+ * Map all kmod keywords to kmod debug masks here. Keep this
+ * structure "packed":
+ *
+ * "all" is always last...
+ *
+ * keyword mask_val index
+ * foo 1 0
+ * bar 2 1
+ * baz 4 2
+ * qux 8 3
+ * . . .
+ */
+static struct __keyword_mask_s s_kmod_keyword_mask_map[] = {
+ {"super", GOSSIP_SUPER_DEBUG},
+ {"inode", GOSSIP_INODE_DEBUG},
+ {"file", GOSSIP_FILE_DEBUG},
+ {"dir", GOSSIP_DIR_DEBUG},
+ {"utils", GOSSIP_UTILS_DEBUG},
+ {"wait", GOSSIP_WAIT_DEBUG},
+ {"acl", GOSSIP_ACL_DEBUG},
+ {"dcache", GOSSIP_DCACHE_DEBUG},
+ {"dev", GOSSIP_DEV_DEBUG},
+ {"name", GOSSIP_NAME_DEBUG},
+ {"bufmap", GOSSIP_BUFMAP_DEBUG},
+ {"cache", GOSSIP_CACHE_DEBUG},
+ {"debugfs", GOSSIP_DEBUGFS_DEBUG},
+ {"xattr", GOSSIP_XATTR_DEBUG},
+ {"init", GOSSIP_INIT_DEBUG},
+ {"sysfs", GOSSIP_SYSFS_DEBUG},
+ {"none", GOSSIP_NO_DEBUG},
+ {"all", GOSSIP_MAX_DEBUG}
+};
+
+static const int num_kmod_keyword_mask_map = (int)
+ (ARRAY_SIZE(s_kmod_keyword_mask_map));
+
#define DEBUG_HELP_STRING_SIZE 4096
#define HELP_STRING_UNINITIALIZED \
"Client Debug Keywords are unknown until the first time\n" \
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index c9993ff66fc2..fe493f3ed6b6 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -138,37 +138,6 @@ kill_whiteout:
goto out;
}
-int ovl_mkdir_real(struct ovl_fs *ofs, struct inode *dir,
- struct dentry **newdentry, umode_t mode)
-{
- int err;
- struct dentry *d, *dentry = *newdentry;
-
- err = ovl_do_mkdir(ofs, dir, dentry, mode);
- if (err)
- return err;
-
- if (likely(!d_unhashed(dentry)))
- return 0;
-
- /*
- * vfs_mkdir() may succeed and leave the dentry passed
- * to it unhashed and negative. If that happens, try to
- * lookup a new hashed and positive dentry.
- */
- d = ovl_lookup_upper(ofs, dentry->d_name.name, dentry->d_parent,
- dentry->d_name.len);
- if (IS_ERR(d)) {
- pr_warn("failed lookup after mkdir (%pd2, err=%i).\n",
- dentry, err);
- return PTR_ERR(d);
- }
- dput(dentry);
- *newdentry = d;
-
- return 0;
-}
-
struct dentry *ovl_create_real(struct ovl_fs *ofs, struct inode *dir,
struct dentry *newdentry, struct ovl_cattr *attr)
{
@@ -191,7 +160,8 @@ struct dentry *ovl_create_real(struct ovl_fs *ofs, struct inode *dir,
case S_IFDIR:
/* mkdir is special... */
- err = ovl_mkdir_real(ofs, dir, &newdentry, attr->mode);
+ newdentry = ovl_do_mkdir(ofs, dir, newdentry, attr->mode);
+ err = PTR_ERR_OR_ZERO(newdentry);
break;
case S_IFCHR:
@@ -219,7 +189,8 @@ struct dentry *ovl_create_real(struct ovl_fs *ofs, struct inode *dir,
}
out:
if (err) {
- dput(newdentry);
+ if (!IS_ERR(newdentry))
+ dput(newdentry);
return ERR_PTR(err);
}
return newdentry;
@@ -282,7 +253,8 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
* XXX: if we ever use ovl_obtain_alias() to decode directory
* file handles, need to use ovl_get_inode_locked() and
* d_instantiate_new() here to prevent from creating two
- * hashed directory inode aliases.
+ * hashed directory inode aliases. We then need to return
+ * the obtained alias to ovl_mkdir().
*/
inode = ovl_get_inode(dentry->d_sb, &oip);
if (IS_ERR(inode))
@@ -687,10 +659,10 @@ static int ovl_create(struct mnt_idmap *idmap, struct inode *dir,
return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL);
}
-static int ovl_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ovl_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL);
+ return ERR_PTR(ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL));
}
static int ovl_mknod(struct mnt_idmap *idmap, struct inode *dir,
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 0021e2025020..6f2f8f4cfbbc 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -241,13 +241,14 @@ static inline int ovl_do_create(struct ovl_fs *ofs,
return err;
}
-static inline int ovl_do_mkdir(struct ovl_fs *ofs,
- struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static inline struct dentry *ovl_do_mkdir(struct ovl_fs *ofs,
+ struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
{
- int err = vfs_mkdir(ovl_upper_mnt_idmap(ofs), dir, dentry, mode);
- pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, err);
- return err;
+ dentry = vfs_mkdir(ovl_upper_mnt_idmap(ofs), dir, dentry, mode);
+ pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, PTR_ERR_OR_ZERO(dentry));
+ return dentry;
}
static inline int ovl_do_mknod(struct ovl_fs *ofs,
@@ -838,8 +839,6 @@ struct ovl_cattr {
#define OVL_CATTR(m) (&(struct ovl_cattr) { .mode = (m) })
-int ovl_mkdir_real(struct ovl_fs *ofs, struct inode *dir,
- struct dentry **newdentry, umode_t mode);
struct dentry *ovl_create_real(struct ovl_fs *ofs,
struct inode *dir, struct dentry *newdentry,
struct ovl_cattr *attr);
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index 1115c22deca0..6759f7d040c8 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -59,6 +59,7 @@ enum ovl_opt {
Opt_metacopy,
Opt_verity,
Opt_volatile,
+ Opt_override_creds,
};
static const struct constant_table ovl_parameter_bool[] = {
@@ -155,6 +156,7 @@ const struct fs_parameter_spec ovl_parameter_spec[] = {
fsparam_enum("metacopy", Opt_metacopy, ovl_parameter_bool),
fsparam_enum("verity", Opt_verity, ovl_parameter_verity),
fsparam_flag("volatile", Opt_volatile),
+ fsparam_flag_no("override_creds", Opt_override_creds),
{}
};
@@ -662,6 +664,29 @@ static int ovl_parse_param(struct fs_context *fc, struct fs_parameter *param)
case Opt_userxattr:
config->userxattr = true;
break;
+ case Opt_override_creds: {
+ const struct cred *cred = NULL;
+
+ if (result.negated) {
+ swap(cred, ofs->creator_cred);
+ put_cred(cred);
+ break;
+ }
+
+ if (!current_in_userns(fc->user_ns)) {
+ err = -EINVAL;
+ break;
+ }
+
+ cred = prepare_creds();
+ if (cred)
+ swap(cred, ofs->creator_cred);
+ else
+ err = -ENOMEM;
+
+ put_cred(cred);
+ break;
+ }
default:
pr_err("unrecognized mount option \"%s\" or missing value\n",
param->key);
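Note: fsparam_flag_no() registers both polarities, so "override_creds" and "nooverride_creds" land in the same switch case and are told apart via result.negated; the negated form drops any previously prepared creator credentials. A hedged sketch of driving the new flag from userspace through the new mount API (fsopen()/fsconfig()/fsmount() have glibc wrappers since 2.36; error handling omitted, paths illustrative):

#include <sys/mount.h>

static int mount_overlay_no_override(void)
{
	int fsfd = fsopen("overlay", FSOPEN_CLOEXEC);

	fsconfig(fsfd, FSCONFIG_SET_STRING, "lowerdir", "/lower", 0);
	fsconfig(fsfd, FSCONFIG_SET_STRING, "upperdir", "/upper", 0);
	fsconfig(fsfd, FSCONFIG_SET_STRING, "workdir", "/work", 0);
	/* the negated spelling generated by fsparam_flag_no() */
	fsconfig(fsfd, FSCONFIG_SET_FLAG, "nooverride_creds", NULL, 0);
	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	return fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
}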
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 86ae6f6da36b..b63474d1b064 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -327,9 +327,10 @@ retry:
goto retry;
}
- err = ovl_mkdir_real(ofs, dir, &work, attr.ia_mode);
- if (err)
- goto out_dput;
+ work = ovl_do_mkdir(ofs, dir, work, attr.ia_mode);
+ err = PTR_ERR(work);
+ if (IS_ERR(work))
+ goto out_err;
/* Weird filesystem returning with hashed negative (kernfs)? */
err = -EINVAL;
@@ -1305,6 +1306,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct ovl_fs *ofs = sb->s_fs_info;
struct ovl_fs_context *ctx = fc->fs_private;
+ const struct cred *old_cred = NULL;
struct dentry *root_dentry;
struct ovl_entry *oe;
struct ovl_layer *layers;
@@ -1318,10 +1320,15 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_d_op = &ovl_dentry_operations;
err = -ENOMEM;
- ofs->creator_cred = cred = prepare_creds();
+ if (!ofs->creator_cred)
+ ofs->creator_cred = cred = prepare_creds();
+ else
+ cred = (struct cred *)ofs->creator_cred;
if (!cred)
goto out_err;
+ old_cred = ovl_override_creds(sb);
+
err = ovl_fs_params_verify(ctx, &ofs->config);
if (err)
goto out_err;
@@ -1481,11 +1488,19 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_root = root_dentry;
+ ovl_revert_creds(old_cred);
return 0;
out_free_oe:
ovl_free_entry(oe);
out_err:
+ /*
+	 * Revert creds before calling ovl_free_fs(), which calls
+	 * put_cred(); put_cred() requires that the creds it puts
+	 * are not the caller's creds, i.e., current->cred.
+ */
+ if (old_cred)
+ ovl_revert_creds(old_cred);
ovl_free_fs(ofs);
sb->s_fs_info = NULL;
return err;
diff --git a/fs/pidfs.c b/fs/pidfs.c
index c0478b3c55d9..d64a4cbeb0da 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -24,6 +24,28 @@
#include "internal.h"
#include "mount.h"
+static struct kmem_cache *pidfs_cachep __ro_after_init;
+
+/*
+ * Stashes information that userspace needs to access even after the
+ * process has been reaped.
+ */
+struct pidfs_exit_info {
+ __u64 cgroupid;
+ __s32 exit_code;
+};
+
+struct pidfs_inode {
+ struct pidfs_exit_info __pei;
+ struct pidfs_exit_info *exit_info;
+ struct inode vfs_inode;
+};
+
+static inline struct pidfs_inode *pidfs_i(struct inode *inode)
+{
+ return container_of(inode, struct pidfs_inode, vfs_inode);
+}
+
static struct rb_root pidfs_ino_tree = RB_ROOT;
#if BITS_PER_LONG == 32
@@ -188,36 +210,48 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
{
struct pid *pid = pidfd_pid(file);
- bool thread = file->f_flags & PIDFD_THREAD;
struct task_struct *task;
__poll_t poll_flags = 0;
poll_wait(file, &pid->wait_pidfd, pts);
/*
- * Depending on PIDFD_THREAD, inform pollers when the thread
- * or the whole thread-group exits.
+ * Don't wake waiters if the thread-group leader exited
+ * prematurely. They either get notified when the last subthread
+ * exits or not at all if one of the remaining subthreads execs
+ * and assumes the struct pid of the old thread-group leader.
*/
guard(rcu)();
task = pid_task(pid, PIDTYPE_PID);
if (!task)
poll_flags = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
- else if (task->exit_state && (thread || thread_group_empty(task)))
+ else if (task->exit_state && !delay_group_leader(task))
poll_flags = EPOLLIN | EPOLLRDNORM;
return poll_flags;
}
-static long pidfd_info(struct task_struct *task, unsigned int cmd, unsigned long arg)
+static inline bool pid_in_current_pidns(const struct pid *pid)
+{
+ const struct pid_namespace *ns = task_active_pid_ns(current);
+
+ if (ns->level <= pid->level)
+ return pid->numbers[ns->level].ns == ns;
+
+ return false;
+}
+
+static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pidfd_info __user *uinfo = (struct pidfd_info __user *)arg;
+ struct inode *inode = file_inode(file);
+ struct pid *pid = pidfd_pid(file);
size_t usize = _IOC_SIZE(cmd);
struct pidfd_info kinfo = {};
+ struct pidfs_exit_info *exit_info;
struct user_namespace *user_ns;
+ struct task_struct *task;
const struct cred *c;
__u64 mask;
-#ifdef CONFIG_CGROUPS
- struct cgroup *cgrp;
-#endif
if (!uinfo)
return -EINVAL;
@@ -227,6 +261,37 @@ static long pidfd_info(struct task_struct *task, unsigned int cmd, unsigned long
if (copy_from_user(&mask, &uinfo->mask, sizeof(mask)))
return -EFAULT;
+ /*
+ * Restrict information retrieval to tasks within the caller's pid
+ * namespace hierarchy.
+ */
+ if (!pid_in_current_pidns(pid))
+ return -ESRCH;
+
+ if (mask & PIDFD_INFO_EXIT) {
+ exit_info = READ_ONCE(pidfs_i(inode)->exit_info);
+ if (exit_info) {
+ kinfo.mask |= PIDFD_INFO_EXIT;
+#ifdef CONFIG_CGROUPS
+ kinfo.cgroupid = exit_info->cgroupid;
+ kinfo.mask |= PIDFD_INFO_CGROUPID;
+#endif
+ kinfo.exit_code = exit_info->exit_code;
+ }
+ }
+
+ task = get_pid_task(pid, PIDTYPE_PID);
+ if (!task) {
+ /*
+ * If the task has already been reaped, only exit
+		 * information is available.
+ */
+ if (!(mask & PIDFD_INFO_EXIT))
+ return -ESRCH;
+
+ goto copy_out;
+ }
+
c = get_task_cred(task);
if (!c)
return -ESRCH;
@@ -246,11 +311,15 @@ static long pidfd_info(struct task_struct *task, unsigned int cmd, unsigned long
put_cred(c);
#ifdef CONFIG_CGROUPS
- rcu_read_lock();
- cgrp = task_dfl_cgroup(task);
- kinfo.cgroupid = cgroup_id(cgrp);
- kinfo.mask |= PIDFD_INFO_CGROUPID;
- rcu_read_unlock();
+ if (!kinfo.cgroupid) {
+ struct cgroup *cgrp;
+
+ rcu_read_lock();
+ cgrp = task_dfl_cgroup(task);
+ kinfo.cgroupid = cgroup_id(cgrp);
+ kinfo.mask |= PIDFD_INFO_CGROUPID;
+ rcu_read_unlock();
+ }
#endif
/*
@@ -270,16 +339,14 @@ static long pidfd_info(struct task_struct *task, unsigned int cmd, unsigned long
if (kinfo.pid == 0 || kinfo.tgid == 0 || (kinfo.ppid == 0 && kinfo.pid != 1))
return -ESRCH;
+copy_out:
/*
* If userspace and the kernel have the same struct size it can just
* be copied. If userspace provides an older struct, only the bits that
* userspace knows about will be copied. If userspace provides a new
* struct, only the bits that the kernel knows about will be copied.
*/
- if (copy_to_user(uinfo, &kinfo, min(usize, sizeof(kinfo))))
- return -EFAULT;
-
- return 0;
+ return copy_struct_to_user(uinfo, usize, &kinfo, sizeof(kinfo), NULL);
}
static bool pidfs_ioctl_valid(unsigned int cmd)
@@ -317,7 +384,6 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct task_struct *task __free(put_task) = NULL;
struct nsproxy *nsp __free(put_nsproxy) = NULL;
- struct pid *pid = pidfd_pid(file);
struct ns_common *ns_common = NULL;
struct pid_namespace *pid_ns;
@@ -332,13 +398,13 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return put_user(file_inode(file)->i_generation, argp);
}
- task = get_pid_task(pid, PIDTYPE_PID);
- if (!task)
- return -ESRCH;
-
/* Extensible IOCTL that does not open namespace FDs, take a shortcut */
if (_IOC_NR(cmd) == _IOC_NR(PIDFD_GET_INFO))
- return pidfd_info(task, cmd, arg);
+ return pidfd_info(file, cmd, arg);
+
+ task = get_pid_task(pidfd_pid(file), PIDTYPE_PID);
+ if (!task)
+ return -ESRCH;
if (arg)
return -EINVAL;
@@ -450,6 +516,49 @@ struct pid *pidfd_pid(const struct file *file)
return file_inode(file)->i_private;
}
+/*
+ * We're called from release_task(). We know there's at least one
+ * reference to struct pid being held that won't be released until the
+ * reference to struct pid being held that won't be released until the
+ * task has been reaped, which cannot happen until we're out of
+ *
+ * If this struct pid is referred to by a pidfd then
+ * stashed_dentry_get() will return the dentry and inode for that struct
+ * pid. Since we've taken a reference on it there's now an additional
+ * reference from the exit path on it. Which is fine. We're going to put
+ * it again in a second and we know that the pid is kept alive anyway.
+ *
+ * Worst case is that we've filled in the info and immediately free the
+ * dentry and inode afterwards since the pidfd has been closed. Since
+ * pidfs_exit() currently is placed after exit_task_work() we know that
+ * it cannot be us aka the exiting task holding a pidfd to ourselves.
+ */
+void pidfs_exit(struct task_struct *tsk)
+{
+ struct dentry *dentry;
+
+ might_sleep();
+
+ dentry = stashed_dentry_get(&task_pid(tsk)->stashed);
+ if (dentry) {
+ struct inode *inode = d_inode(dentry);
+ struct pidfs_exit_info *exit_info = &pidfs_i(inode)->__pei;
+#ifdef CONFIG_CGROUPS
+ struct cgroup *cgrp;
+
+ rcu_read_lock();
+ cgrp = task_dfl_cgroup(tsk);
+ exit_info->cgroupid = cgroup_id(cgrp);
+ rcu_read_unlock();
+#endif
+ exit_info->exit_code = tsk->exit_code;
+
+ /* Ensure that PIDFD_GET_INFO sees either all or nothing. */
+ smp_store_release(&pidfs_i(inode)->exit_info, &pidfs_i(inode)->__pei);
+ dput(dentry);
+ }
+}
+
static struct vfsmount *pidfs_mnt __ro_after_init;
/*
@@ -505,9 +614,30 @@ static void pidfs_evict_inode(struct inode *inode)
put_pid(pid);
}
+static struct inode *pidfs_alloc_inode(struct super_block *sb)
+{
+ struct pidfs_inode *pi;
+
+ pi = alloc_inode_sb(sb, pidfs_cachep, GFP_KERNEL);
+ if (!pi)
+ return NULL;
+
+ memset(&pi->__pei, 0, sizeof(pi->__pei));
+ pi->exit_info = NULL;
+
+ return &pi->vfs_inode;
+}
+
+static void pidfs_free_inode(struct inode *inode)
+{
+ kmem_cache_free(pidfs_cachep, pidfs_i(inode));
+}
+
static const struct super_operations pidfs_sops = {
+ .alloc_inode = pidfs_alloc_inode,
.drop_inode = generic_delete_inode,
.evict_inode = pidfs_evict_inode,
+ .free_inode = pidfs_free_inode,
.statfs = simple_statfs,
};
@@ -633,8 +763,49 @@ static int pidfs_export_permission(struct handle_to_path_ctx *ctx,
return 0;
}
+static inline bool pidfs_pid_valid(struct pid *pid, const struct path *path,
+ unsigned int flags)
+{
+ enum pid_type type;
+
+ if (flags & PIDFD_CLONE)
+ return true;
+
+ /*
+	 * Make sure that if a pidfd is created, PIDFD_INFO_EXIT
+	 * information will be available. So after an inode for the
+	 * pidfd has been allocated, perform another check that the
+	 * pid is still alive. If it is, exit information will be
+	 * available even if the task gets reaped before the pidfd
+	 * is returned to userspace. The only exception is
+	 * PIDFD_CLONE, where no task linkage has been established
+	 * for @pid yet and the kernel is in the middle of process
+	 * creation, so there is nothing for pidfs to miss.
+ */
+ if (flags & PIDFD_THREAD)
+ type = PIDTYPE_PID;
+ else
+ type = PIDTYPE_TGID;
+
+ /*
+ * Since pidfs_exit() is called before struct pid's task linkage
+ * is removed the case where the task got reaped but a dentry
+ * was already attached to struct pid and exit information was
+ * recorded and published can be handled correctly.
+ */
+ if (unlikely(!pid_has_task(pid, type))) {
+ struct inode *inode = d_inode(path->dentry);
+ return !!READ_ONCE(pidfs_i(inode)->exit_info);
+ }
+
+ return true;
+}
+
static struct file *pidfs_export_open(struct path *path, unsigned int oflags)
{
+ if (!pidfs_pid_valid(d_inode(path->dentry)->i_private, path, oflags))
+ return ERR_PTR(-ESRCH);
+
/*
* Clear O_LARGEFILE as open_by_handle_at() forces it and raise
* O_RDWR as pidfds always are.
@@ -698,22 +869,46 @@ static struct file_system_type pidfs_type = {
struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags)
{
-
struct file *pidfd_file;
- struct path path;
+ struct path path __free(path_put) = {};
int ret;
+ /*
+ * Ensure that PIDFD_CLONE can be passed as a flag without
+ * overloading other uapi pidfd flags.
+ */
+ BUILD_BUG_ON(PIDFD_CLONE == PIDFD_THREAD);
+ BUILD_BUG_ON(PIDFD_CLONE == PIDFD_NONBLOCK);
+
ret = path_from_stashed(&pid->stashed, pidfs_mnt, get_pid(pid), &path);
if (ret < 0)
return ERR_PTR(ret);
+ if (!pidfs_pid_valid(pid, &path, flags))
+ return ERR_PTR(-ESRCH);
+
+ flags &= ~PIDFD_CLONE;
pidfd_file = dentry_open(&path, flags, current_cred());
- path_put(&path);
+ /* Raise PIDFD_THREAD explicitly as do_dentry_open() strips it. */
+ if (!IS_ERR(pidfd_file))
+ pidfd_file->f_flags |= (flags & PIDFD_THREAD);
+
return pidfd_file;
}
+static void pidfs_inode_init_once(void *data)
+{
+ struct pidfs_inode *pi = data;
+
+ inode_init_once(&pi->vfs_inode);
+}
+
void __init pidfs_init(void)
{
+ pidfs_cachep = kmem_cache_create("pidfs_cache", sizeof(struct pidfs_inode), 0,
+ (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT |
+ SLAB_ACCOUNT | SLAB_PANIC),
+ pidfs_inode_init_once);
pidfs_mnt = kern_mount(&pidfs_type);
if (IS_ERR(pidfs_mnt))
panic("Failed to mount pidfs pseudo filesystem");
diff --git a/fs/pipe.c b/fs/pipe.c
index 4d0799e4e719..da45edd68c41 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -112,20 +112,40 @@ void pipe_double_lock(struct pipe_inode_info *pipe1,
pipe_lock(pipe2);
}
+static struct page *anon_pipe_get_page(struct pipe_inode_info *pipe)
+{
+ for (int i = 0; i < ARRAY_SIZE(pipe->tmp_page); i++) {
+ if (pipe->tmp_page[i]) {
+ struct page *page = pipe->tmp_page[i];
+ pipe->tmp_page[i] = NULL;
+ return page;
+ }
+ }
+
+ return alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
+}
+
+static void anon_pipe_put_page(struct pipe_inode_info *pipe,
+ struct page *page)
+{
+ if (page_count(page) == 1) {
+ for (int i = 0; i < ARRAY_SIZE(pipe->tmp_page); i++) {
+ if (!pipe->tmp_page[i]) {
+ pipe->tmp_page[i] = page;
+ return;
+ }
+ }
+ }
+
+ put_page(page);
+}
+
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
- /*
- * If nobody else uses this page, and we don't already have a
- * temporary page, let's keep track of it as a one-deep
- * allocation cache. (Otherwise just release our reference to it)
- */
- if (page_count(page) == 1 && !pipe->tmp_page)
- pipe->tmp_page = page;
- else
- put_page(page);
+ anon_pipe_put_page(pipe, page);
}
static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
@@ -247,7 +267,7 @@ static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
}
static ssize_t
-pipe_read(struct kiocb *iocb, struct iov_iter *to)
+anon_pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
size_t total_len = iov_iter_count(to);
struct file *filp = iocb->ki_filp;
@@ -274,7 +294,6 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
/* Read ->head with a barrier vs post_one_notification() */
unsigned int head = smp_load_acquire(&pipe->head);
unsigned int tail = pipe->tail;
- unsigned int mask = pipe->ring_size - 1;
#ifdef CONFIG_WATCH_QUEUE
if (pipe->note_loss) {
@@ -301,7 +320,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
#endif
if (!pipe_empty(head, tail)) {
- struct pipe_buffer *buf = &pipe->bufs[tail & mask];
+ struct pipe_buffer *buf = pipe_buf(pipe, tail);
size_t chars = buf->len;
size_t written;
int error;
@@ -359,29 +378,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
break;
}
mutex_unlock(&pipe->mutex);
-
/*
* We only get here if we didn't actually read anything.
*
- * However, we could have seen (and removed) a zero-sized
- * pipe buffer, and might have made space in the buffers
- * that way.
- *
- * You can't make zero-sized pipe buffers by doing an empty
- * write (not even in packet mode), but they can happen if
- * the writer gets an EFAULT when trying to fill a buffer
- * that already got allocated and inserted in the buffer
- * array.
- *
- * So we still need to wake up any pending writers in the
- * _very_ unlikely case that the pipe was full, but we got
- * no data.
- */
- if (unlikely(wake_writer))
- wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
- kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
-
- /*
* But because we didn't read anything, at this point we can
* just return directly with -ERESTARTSYS if we're interrupted,
* since we've done any required wakeups and there's no need
@@ -390,7 +389,6 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
return -ERESTARTSYS;
- wake_writer = false;
wake_next_reader = true;
mutex_lock(&pipe->mutex);
}
@@ -403,8 +401,15 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
if (wake_next_reader)
wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
+ return ret;
+}
+
+static ssize_t
+fifo_pipe_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ int ret = anon_pipe_read(iocb, to);
if (ret > 0)
- file_accessed(filp);
+ file_accessed(iocb->ki_filp);
return ret;
}
@@ -424,7 +429,7 @@ static inline bool pipe_writable(const struct pipe_inode_info *pipe)
}
static ssize_t
-pipe_write(struct kiocb *iocb, struct iov_iter *from)
+anon_pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
@@ -471,8 +476,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
was_empty = pipe_empty(head, pipe->tail);
chars = total_len & (PAGE_SIZE-1);
if (chars && !was_empty) {
- unsigned int mask = pipe->ring_size - 1;
- struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
+ struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
int offset = buf->offset + buf->len;
if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
@@ -503,54 +507,44 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
head = pipe->head;
if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
- unsigned int mask = pipe->ring_size - 1;
struct pipe_buffer *buf;
- struct page *page = pipe->tmp_page;
+ struct page *page;
int copied;
- if (!page) {
- page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
- if (unlikely(!page)) {
- ret = ret ? : -ENOMEM;
- break;
- }
- pipe->tmp_page = page;
+ page = anon_pipe_get_page(pipe);
+ if (unlikely(!page)) {
+ if (!ret)
+ ret = -ENOMEM;
+ break;
}
- /* Allocate a slot in the ring in advance and attach an
- * empty buffer. If we fault or otherwise fail to use
- * it, either the reader will consume it or it'll still
- * be there for the next write.
- */
- pipe->head = head + 1;
+ copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
+ if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
+ anon_pipe_put_page(pipe, page);
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+ pipe->head = head + 1;
/* Insert it into the buffer array */
- buf = &pipe->bufs[head & mask];
+ buf = pipe_buf(pipe, head);
buf->page = page;
buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
- buf->len = 0;
if (is_packetized(filp))
buf->flags = PIPE_BUF_FLAG_PACKET;
else
buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
- pipe->tmp_page = NULL;
- copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
- if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
- ret += copied;
buf->len = copied;
+ ret += copied;
if (!iov_iter_count(from))
break;
- }
- if (!pipe_full(head, pipe->tail, pipe->max_usage))
continue;
+ }
/* Wait for buffer space to become available. */
if ((filp->f_flags & O_NONBLOCK) ||
@@ -602,11 +596,21 @@ out:
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
if (wake_next_writer)
wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
- if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
- int err = file_update_time(filp);
- if (err)
- ret = err;
- sb_end_write(file_inode(filp)->i_sb);
+ return ret;
+}
+
+static ssize_t
+fifo_pipe_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ int ret = anon_pipe_write(iocb, from);
+ if (ret > 0) {
+ struct file *filp = iocb->ki_filp;
+ if (sb_start_write_trylock(file_inode(filp)->i_sb)) {
+ int err = file_update_time(filp);
+ if (err)
+ ret = err;
+ sb_end_write(file_inode(filp)->i_sb);
+ }
}
return ret;
}
@@ -853,8 +857,10 @@ void free_pipe_info(struct pipe_inode_info *pipe)
if (pipe->watch_queue)
put_watch_queue(pipe->watch_queue);
#endif
- if (pipe->tmp_page)
- __free_page(pipe->tmp_page);
+ for (i = 0; i < ARRAY_SIZE(pipe->tmp_page); i++) {
+ if (pipe->tmp_page[i])
+ __free_page(pipe->tmp_page[i]);
+ }
kfree(pipe->bufs);
kfree(pipe);
}
@@ -874,6 +880,8 @@ static const struct dentry_operations pipefs_dentry_operations = {
.d_dname = pipefs_dname,
};
+static const struct file_operations pipeanon_fops;
+
static struct inode * get_pipe_inode(void)
{
struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
@@ -891,7 +899,7 @@ static struct inode * get_pipe_inode(void)
inode->i_pipe = pipe;
pipe->files = 2;
pipe->readers = pipe->writers = 1;
- inode->i_fop = &pipefifo_fops;
+ inode->i_fop = &pipeanon_fops;
/*
* Mark the inode dirty from the very beginning,
@@ -934,7 +942,7 @@ int create_pipe_files(struct file **res, int flags)
f = alloc_file_pseudo(inode, pipe_mnt, "",
O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
- &pipefifo_fops);
+ &pipeanon_fops);
if (IS_ERR(f)) {
free_pipe_info(inode->i_pipe);
iput(inode);
@@ -945,7 +953,7 @@ int create_pipe_files(struct file **res, int flags)
f->f_pipe = 0;
res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
- &pipefifo_fops);
+ &pipeanon_fops);
if (IS_ERR(res[0])) {
put_pipe_info(inode, inode->i_pipe);
fput(f);
@@ -1109,8 +1117,8 @@ static void wake_up_partner(struct pipe_inode_info *pipe)
static int fifo_open(struct inode *inode, struct file *filp)
{
+ bool is_pipe = inode->i_fop == &pipeanon_fops;
struct pipe_inode_info *pipe;
- bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
int ret;
filp->f_pipe = 0;
@@ -1234,8 +1242,19 @@ err:
const struct file_operations pipefifo_fops = {
.open = fifo_open,
- .read_iter = pipe_read,
- .write_iter = pipe_write,
+ .read_iter = fifo_pipe_read,
+ .write_iter = fifo_pipe_write,
+ .poll = pipe_poll,
+ .unlocked_ioctl = pipe_ioctl,
+ .release = pipe_release,
+ .fasync = pipe_fasync,
+ .splice_write = iter_file_splice_write,
+};
+
+static const struct file_operations pipeanon_fops = {
+ .open = fifo_open,
+ .read_iter = anon_pipe_read,
+ .write_iter = anon_pipe_write,
.poll = pipe_poll,
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
@@ -1271,6 +1290,10 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
struct pipe_buffer *bufs;
unsigned int head, tail, mask, n;
+ /* nr_slots larger than limits of pipe->{head,tail} */
+ if (unlikely(nr_slots > (pipe_index_t)-1u))
+ return -EINVAL;
+
bufs = kcalloc(nr_slots, sizeof(*bufs),
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (unlikely(!bufs))
@@ -1390,7 +1413,9 @@ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
{
struct pipe_inode_info *pipe = file->private_data;
- if (file->f_op != &pipefifo_fops || !pipe)
+ if (!pipe)
+ return NULL;
+ if (file->f_op != &pipefifo_fops && file->f_op != &pipeanon_fops)
return NULL;
if (for_splice && pipe_has_watch_queue(pipe))
return NULL;
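Note: two themes run through the pipe changes. First, the one-deep tmp_page cache grows into a small per-pipe array managed by anon_pipe_get_page()/anon_pipe_put_page(). Second, the open-coded bufs[index & (ring_size - 1)] indexing is replaced by a pipe_buf() accessor; that helper is not shown in this diff, but is presumably equivalent to:

static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
					   unsigned int slot)
{
	return &pipe->bufs[slot & (pipe->ring_size - 1)];
}

Splitting pipeanon_fops out of pipefifo_fops lets anonymous pipes skip the atime and sb-write-time accounting that FIFOs still need; fifo_pipe_read()/fifo_pipe_write() wrap the anon variants just for that.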
diff --git a/fs/pnode.c b/fs/pnode.c
index ef048f008bdd..7a062a5de10e 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -150,7 +150,7 @@ static struct mount *propagation_next(struct mount *m,
struct mount *origin)
{
/* are there any slaves of this mount? */
- if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ if (!IS_MNT_PROPAGATED(m) && !list_empty(&m->mnt_slave_list))
return first_slave(m);
while (1) {
@@ -174,7 +174,7 @@ static struct mount *skip_propagation_subtree(struct mount *m,
* Advance m such that propagation_next will not return
* the slaves of m.
*/
- if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ if (!IS_MNT_PROPAGATED(m) && !list_empty(&m->mnt_slave_list))
m = last_slave(m);
return m;
@@ -185,7 +185,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
while (1) {
while (1) {
struct mount *next;
- if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ if (!IS_MNT_PROPAGATED(m) && !list_empty(&m->mnt_slave_list))
return first_slave(m);
next = next_peer(m);
if (m->mnt_group_id == origin->mnt_group_id) {
@@ -226,7 +226,7 @@ static int propagate_one(struct mount *m, struct mountpoint *dest_mp)
struct mount *child;
int type;
/* skip ones added by this propagate_mnt() */
- if (IS_MNT_NEW(m))
+ if (IS_MNT_PROPAGATED(m))
return 0;
/* skip if mountpoint isn't covered by it */
if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root))
@@ -380,7 +380,7 @@ bool propagation_would_overmount(const struct mount *from,
if (!IS_MNT_SHARED(from))
return false;
- if (IS_MNT_NEW(to))
+ if (IS_MNT_PROPAGATED(to))
return false;
if (to->mnt.mnt_root != mp->m_dentry)
@@ -549,8 +549,10 @@ static void restore_mounts(struct list_head *to_restore)
mp = parent->mnt_mp;
parent = parent->mnt_parent;
}
- if (parent != mnt->mnt_parent)
+ if (parent != mnt->mnt_parent) {
mnt_change_mountpoint(parent, mp, mnt);
+ mnt_notify_add(mnt);
+ }
}
}
diff --git a/fs/pnode.h b/fs/pnode.h
index 0b02a6393891..ddafe0d087ca 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -12,7 +12,7 @@
#define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED)
#define IS_MNT_SLAVE(m) ((m)->mnt_master)
-#define IS_MNT_NEW(m) (!(m)->mnt_ns || is_anon_ns((m)->mnt_ns))
+#define IS_MNT_PROPAGATED(m) (!(m)->mnt_ns || ((m)->mnt_ns->mntns_flags & MNTNS_PROPAGATING))
#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 1cb33771bf9f..728630b10fdf 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -34,8 +34,6 @@
#include <asm/sections.h>
#include "internal.h"
-#define CORE_STR "CORE"
-
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif
@@ -122,7 +120,9 @@ static void update_kcore_size(void)
kcore_phdrs_len = kcore_nphdr * sizeof(struct elf_phdr);
kcore_notes_len = (4 * sizeof(struct elf_note) +
- 3 * ALIGN(sizeof(CORE_STR), 4) +
+ ALIGN(sizeof(NN_PRSTATUS), 4) +
+ ALIGN(sizeof(NN_PRPSINFO), 4) +
+ ALIGN(sizeof(NN_TASKSTRUCT), 4) +
VMCOREINFO_NOTE_NAME_BYTES +
ALIGN(sizeof(struct elf_prstatus), 4) +
ALIGN(sizeof(struct elf_prpsinfo), 4) +
@@ -443,11 +443,11 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
goto out;
}
- append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
+ append_kcore_note(notes, &i, NN_PRSTATUS, NT_PRSTATUS, &prstatus,
sizeof(prstatus));
- append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
+ append_kcore_note(notes, &i, NN_PRPSINFO, NT_PRPSINFO, &prpsinfo,
sizeof(prpsinfo));
- append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
+ append_kcore_note(notes, &i, NN_TASKSTRUCT, NT_TASKSTRUCT, current,
arch_task_struct_size);
/*
* vmcoreinfo_size is mostly constant after init time, but it
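Note: the per-note NN_* name macros replace the single CORE_STR define, pairing each note's name string with its NT_* type so the note sizes can be computed from the actual names. For background, an ELF note record is a header followed by a 4-byte-padded name and a 4-byte-padded descriptor, which is why every name's size is ALIGNed to 4 above; a sketch using elf.h's Elf64_Nhdr:

#include <elf.h>
#include <string.h>

/* one ELF note: header, then 4-byte-padded name, then 4-byte-padded desc */
static size_t elf_note_size(const char *name, size_t desc_size)
{
	size_t namesz = strlen(name) + 1;	/* the NUL is counted */

	return sizeof(Elf64_Nhdr)
	       + ((namesz + 3) & ~(size_t)3)	/* ALIGN(namesz, 4) */
	       + ((desc_size + 3) & ~(size_t)3);
}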
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 56815799ce79..bb3b769edc71 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -14,10 +14,10 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/string.h>
-#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/ramfs.h>
-#include <linux/parser.h>
+#include <linux/fs_parser.h>
+#include <linux/fs_context.h>
#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/pstore.h>
@@ -226,37 +226,38 @@ static struct inode *pstore_get_inode(struct super_block *sb)
}
enum {
- Opt_kmsg_bytes, Opt_err
+ Opt_kmsg_bytes
};
-static const match_table_t tokens = {
- {Opt_kmsg_bytes, "kmsg_bytes=%u"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec pstore_param_spec[] = {
+ fsparam_u32 ("kmsg_bytes", Opt_kmsg_bytes),
+ {}
};
-static void parse_options(char *options)
-{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
-
- if (!options)
- return;
+struct pstore_context {
+ unsigned int kmsg_bytes;
+};
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
+static int pstore_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct pstore_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
- if (!*p)
- continue;
+ opt = fs_parse(fc, pstore_param_spec, param, &result);
+ /* pstore has historically ignored invalid kmsg_bytes param */
+ if (opt < 0)
+ return 0;
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_kmsg_bytes:
- if (!match_int(&args[0], &option))
- pstore_set_kmsg_bytes(option);
- break;
- }
+ switch (opt) {
+ case Opt_kmsg_bytes:
+ ctx->kmsg_bytes = result.uint_32;
+ break;
+ default:
+ return -EINVAL;
}
+
+ return 0;
}
/*
@@ -265,14 +266,16 @@ static void parse_options(char *options)
static int pstore_show_options(struct seq_file *m, struct dentry *root)
{
if (kmsg_bytes != CONFIG_PSTORE_DEFAULT_KMSG_BYTES)
- seq_printf(m, ",kmsg_bytes=%lu", kmsg_bytes);
+ seq_printf(m, ",kmsg_bytes=%u", kmsg_bytes);
return 0;
}
-static int pstore_remount(struct super_block *sb, int *flags, char *data)
+static int pstore_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- parse_options(data);
+ struct pstore_context *ctx = fc->fs_private;
+
+ sync_filesystem(fc->root->d_sb);
+ pstore_set_kmsg_bytes(ctx->kmsg_bytes);
return 0;
}
@@ -281,7 +284,6 @@ static const struct super_operations pstore_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.evict_inode = pstore_evict_inode,
- .remount_fs = pstore_remount,
.show_options = pstore_show_options,
};
@@ -406,8 +408,9 @@ void pstore_get_records(int quiet)
inode_unlock(d_inode(root));
}
-static int pstore_fill_super(struct super_block *sb, void *data, int silent)
+static int pstore_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct pstore_context *ctx = fc->fs_private;
struct inode *inode;
sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -417,7 +420,7 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &pstore_ops;
sb->s_time_gran = 1;
- parse_options(data);
+ pstore_set_kmsg_bytes(ctx->kmsg_bytes);
inode = pstore_get_inode(sb);
if (inode) {
@@ -438,12 +441,26 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct dentry *pstore_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int pstore_get_tree(struct fs_context *fc)
+{
+ if (fc->root)
+ return pstore_reconfigure(fc);
+
+ return get_tree_single(fc, pstore_fill_super);
+}
+
+static void pstore_free_fc(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, pstore_fill_super);
+ kfree(fc->fs_private);
}
+static const struct fs_context_operations pstore_context_ops = {
+ .parse_param = pstore_parse_param,
+ .get_tree = pstore_get_tree,
+ .reconfigure = pstore_reconfigure,
+ .free = pstore_free_fc,
+};
+
static void pstore_kill_sb(struct super_block *sb)
{
guard(mutex)(&pstore_sb_lock);
@@ -456,11 +473,33 @@ static void pstore_kill_sb(struct super_block *sb)
INIT_LIST_HEAD(&records_list);
}
+static int pstore_init_fs_context(struct fs_context *fc)
+{
+ struct pstore_context *ctx;
+
+ ctx = kzalloc(sizeof(struct pstore_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /*
+	 * Global kmsg_bytes is initialized to the default, and updated
+ * every time we (re)mount the single-sb filesystem with the
+ * option specified.
+ */
+ ctx->kmsg_bytes = kmsg_bytes;
+
+ fc->fs_private = ctx;
+ fc->ops = &pstore_context_ops;
+
+ return 0;
+}
+
static struct file_system_type pstore_fs_type = {
.owner = THIS_MODULE,
.name = "pstore",
- .mount = pstore_mount,
.kill_sb = pstore_kill_sb,
+ .init_fs_context = pstore_init_fs_context,
+ .parameters = pstore_param_spec,
};
int __init pstore_init_fs(void)
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 801d6c0b170c..a0fc51196910 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -6,7 +6,7 @@
#include <linux/time.h>
#include <linux/pstore.h>
-extern unsigned long kmsg_bytes;
+extern unsigned int kmsg_bytes;
#ifdef CONFIG_PSTORE_FTRACE
extern void pstore_register_ftrace(void);
@@ -35,7 +35,7 @@ static inline void pstore_unregister_pmsg(void) {}
extern struct pstore_info *psinfo;
-extern void pstore_set_kmsg_bytes(int);
+extern void pstore_set_kmsg_bytes(unsigned int bytes);
extern void pstore_get_records(int);
extern void pstore_get_backend_records(struct pstore_info *psi,
struct dentry *root, int quiet);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index f56b066ab80c..557cf9d40177 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -92,8 +92,8 @@ module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "compression to use");
/* How much of the kernel log to snapshot */
-unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
-module_param(kmsg_bytes, ulong, 0444);
+unsigned int kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
+module_param(kmsg_bytes, uint, 0444);
MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)");
static void *compress_workspace;
@@ -107,9 +107,9 @@ static void *compress_workspace;
static char *big_oops_buf;
static size_t max_compressed_size;
-void pstore_set_kmsg_bytes(int bytes)
+void pstore_set_kmsg_bytes(unsigned int bytes)
{
- kmsg_bytes = bytes;
+ WRITE_ONCE(kmsg_bytes, bytes);
}
/* Tag each group of saved records with a sequence number */
@@ -278,6 +278,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
struct kmsg_dump_detail *detail)
{
struct kmsg_dump_iter iter;
+ unsigned int remaining = READ_ONCE(kmsg_bytes);
unsigned long total = 0;
const char *why;
unsigned int part = 1;
@@ -300,7 +301,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
kmsg_dump_rewind(&iter);
oopscount++;
- while (total < kmsg_bytes) {
+ while (total < remaining) {
char *dst;
size_t dst_size;
int header_size;
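Note: pstore_set_kmsg_bytes() now publishes updates with WRITE_ONCE() and pstore_dump() snapshots the value once into a local through READ_ONCE(), so a concurrent remount cannot move the byte budget mid-dump and the compiler cannot re-read the global on every loop iteration. The pattern in miniature, with hypothetical names:

static unsigned int limit;	/* hypothetical shared tunable */

static void set_limit(unsigned int new_limit)
{
	WRITE_ONCE(limit, new_limit);	/* publish without tearing */
}

static void dump_all(void)
{
	/* one coherent snapshot for the whole dump */
	unsigned int remaining = READ_ONCE(limit);
	unsigned int total = 0;

	while (total < remaining)
		total += emit_chunk(remaining - total);	/* hypothetical, returns bytes */
}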
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 8006faaaf0ec..775fa905fda0 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -119,13 +119,13 @@ out:
return error;
}
-static int ramfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ramfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int retval = ramfs_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFDIR, 0);
if (!retval)
inc_nlink(dir);
- return retval;
+ return ERR_PTR(retval);
}
static int ramfs_create(struct mnt_idmap *idmap, struct inode *dir,
diff --git a/fs/read_write.c b/fs/read_write.c
index a6133241dfb8..bb0ed26a0b3a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -169,11 +169,16 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
if (whence == SEEK_CUR) {
/*
- * f_lock protects against read/modify/write race with
- * other SEEK_CURs. Note that parallel writes and reads
- * behave like SEEK_SET.
+ * If the file requires locking via f_pos_lock we know
+ * that mutual exclusion for SEEK_CUR on the same file
+ * is guaranteed. If the file isn't locked, we take
+ * f_lock to protect against f_pos races with other
+ * SEEK_CURs.
*/
- guard(spinlock)(&file->f_lock);
+ if (file_seek_cur_needs_f_lock(file)) {
+ guard(spinlock)(&file->f_lock);
+ return vfs_setpos(file, file->f_pos + offset, maxsize);
+ }
return vfs_setpos(file, file->f_pos + offset, maxsize);
}
diff --git a/fs/signalfd.c b/fs/signalfd.c
index d1a5f43ce466..d469782f97f4 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -277,15 +277,14 @@ static int do_signalfd4(int ufd, sigset_t *mask, int flags)
return ufd;
}
- file = anon_inode_getfile("[signalfd]", &signalfd_fops, ctx,
- O_RDWR | (flags & O_NONBLOCK));
+ file = anon_inode_getfile_fmode("[signalfd]", &signalfd_fops,
+ ctx, O_RDWR | (flags & O_NONBLOCK),
+ FMODE_NOWAIT);
if (IS_ERR(file)) {
put_unused_fd(ufd);
kfree(ctx);
return PTR_ERR(file);
}
- file->f_mode |= FMODE_NOWAIT;
-
fd_install(ufd, file);
} else {
CLASS(fd, f)(ufd);
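Note: anon_inode_getfile_fmode() folds the extra f_mode bits into file creation, replacing the open-coded file->f_mode |= FMODE_NOWAIT after the fact. It is presumably a thin wrapper along these lines (not shown in this diff):

struct file *anon_inode_getfile_fmode(const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags, fmode_t f_mode)
{
	struct file *file = anon_inode_getfile(name, fops, priv, flags);

	if (!IS_ERR(file))
		file->f_mode |= f_mode;
	return file;
}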
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 831fee962c4d..8dea0cf3a8de 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -59,8 +59,8 @@ extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
extern int cifs_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
umode_t, dev_t);
-extern int cifs_mkdir(struct mnt_idmap *, struct inode *, struct dentry *,
- umode_t);
+extern struct dentry *cifs_mkdir(struct mnt_idmap *, struct inode *, struct dentry *,
+ umode_t);
extern int cifs_rmdir(struct inode *, struct dentry *);
extern int cifs_rename2(struct mnt_idmap *, struct inode *,
struct dentry *, struct inode *, struct dentry *,
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 8582cf61242c..9e4f7378f30f 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -388,7 +388,7 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
spin_unlock(&tcon->tc_lock);
/*
- * BB Add call to invalidate_inodes(sb) for all superblocks mounted
+ * BB Add call to evict_inodes(sb) for all superblocks mounted
* to this tcon.
*/
}
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 616149c7f0a5..3bb21aa58474 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -2207,8 +2207,8 @@ posix_mkdir_get_info:
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
-int cifs_mkdir(struct mnt_idmap *idmap, struct inode *inode,
- struct dentry *direntry, umode_t mode)
+struct dentry *cifs_mkdir(struct mnt_idmap *idmap, struct inode *inode,
+ struct dentry *direntry, umode_t mode)
{
int rc = 0;
unsigned int xid;
@@ -2224,10 +2224,10 @@ int cifs_mkdir(struct mnt_idmap *idmap, struct inode *inode,
cifs_sb = CIFS_SB(inode->i_sb);
if (unlikely(cifs_forced_shutdown(cifs_sb)))
- return -EIO;
+ return ERR_PTR(-EIO);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
- return PTR_ERR(tlink);
+ return ERR_CAST(tlink);
tcon = tlink_tcon(tlink);
xid = get_xid();
@@ -2283,7 +2283,7 @@ mkdir_out:
free_dentry_path(page);
free_xid(xid);
cifs_put_tlink(tlink);
- return rc;
+ return ERR_PTR(rc);
}
int cifs_rmdir(struct inode *inode, struct dentry *direntry)
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 6890016e1923..8554aa5a1059 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -113,11 +113,6 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
if (IS_ERR(d))
goto err_out;
- if (d_is_negative(d)) {
- dput(d);
- goto err_out;
- }
-
path->dentry = d;
path->mnt = mntget(parent_path->mnt);
@@ -211,8 +206,8 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
{
struct mnt_idmap *idmap;
struct path path;
- struct dentry *dentry;
- int err;
+ struct dentry *dentry, *d;
+ int err = 0;
dentry = ksmbd_vfs_kern_path_create(work, name,
LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
@@ -227,27 +222,15 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
idmap = mnt_idmap(path.mnt);
mode |= S_IFDIR;
- err = vfs_mkdir(idmap, d_inode(path.dentry), dentry, mode);
- if (!err && d_unhashed(dentry)) {
- struct dentry *d;
-
- d = lookup_one(idmap, dentry->d_name.name, dentry->d_parent,
- dentry->d_name.len);
- if (IS_ERR(d)) {
- err = PTR_ERR(d);
- goto out_err;
- }
- if (unlikely(d_is_negative(d))) {
- dput(d);
- err = -ENOENT;
- goto out_err;
- }
-
- ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(d));
- dput(d);
- }
+ d = dentry;
+ dentry = vfs_mkdir(idmap, d_inode(path.dentry), dentry, mode);
+ if (IS_ERR(dentry))
+ err = PTR_ERR(dentry);
+ else if (d_is_negative(dentry))
+ err = -ENOENT;
+ if (!err && dentry != d)
+ ksmbd_vfs_inherit_owner(work, d_inode(path.dentry), d_inode(dentry));
-out_err:
done_path_create(&path, dentry);
if (err)
pr_err("mkdir(%s): creation failed (err:%d)\n", name, err);
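
Editor's note: the rework above adapts to the new vfs_mkdir() contract: it now returns a dentry, which may differ from the one passed in (e.g. when the filesystem instantiated its own), or an error pointer. A caller-side sketch of the contract this code assumes; error-path cleanup is omitted here, since the ksmbd code above relies on done_path_create() for that:

    static int example_make_subdir(struct mnt_idmap *idmap, struct inode *dir,
                                   struct dentry *dentry, umode_t mode)
    {
            struct dentry *d;

            d = vfs_mkdir(idmap, dir, dentry, mode);
            if (IS_ERR(d))
                    return PTR_ERR(d);
            if (d_is_negative(d))
                    return -ENOENT;
            /* d may differ from dentry; later steps must use d */
            return 0;
    }
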
@@ -693,6 +676,7 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
struct ksmbd_file *parent_fp;
int new_type;
int err, lookup_flags = LOOKUP_NO_SYMLINKS;
+ int target_lookup_flags = LOOKUP_RENAME_TARGET;
if (ksmbd_override_fsids(work))
return -ENOMEM;
@@ -703,6 +687,14 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
goto revert_fsids;
}
+ /*
+ * explicitly handle file overwrite case, for compatibility with
+ * filesystems that may not support rename flags (e.g. fuse)
+ */
+ if (flags & RENAME_NOREPLACE)
+ target_lookup_flags |= LOOKUP_EXCL;
+ flags &= ~(RENAME_NOREPLACE);
+
retry:
err = vfs_path_parent_lookup(to, lookup_flags | LOOKUP_BENEATH,
&new_path, &new_last, &new_type,
@@ -743,7 +735,7 @@ retry:
}
new_dentry = lookup_one_qstr_excl(&new_last, new_path.dentry,
- lookup_flags | LOOKUP_RENAME_TARGET);
+ lookup_flags | target_lookup_flags);
if (IS_ERR(new_dentry)) {
err = PTR_ERR(new_dentry);
goto out3;
@@ -754,16 +746,6 @@ retry:
goto out4;
}
- /*
- * explicitly handle file overwrite case, for compatibility with
- * filesystems that may not support rename flags (e.g: fuse)
- */
- if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
- err = -EEXIST;
- goto out4;
- }
- flags &= ~(RENAME_NOREPLACE);
-
if (old_child == trap) {
err = -EINVAL;
goto out4;
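
Editor's note: moving the RENAME_NOREPLACE handling ahead of the target lookup lets the lookup itself fail with -EEXIST (via LOOKUP_EXCL) instead of a separate d_is_positive() check afterwards, while still stripping the flag before the rename proper so filesystems without rename-flag support keep working. A hypothetical helper expressing the same mapping:

    static unsigned int rename_target_lookup_flags(unsigned int *rename_flags)
    {
            unsigned int lf = LOOKUP_RENAME_TARGET;

            if (*rename_flags & RENAME_NOREPLACE)
                    lf |= LOOKUP_EXCL;  /* positive target => -EEXIST at lookup */
            *rename_flags &= ~RENAME_NOREPLACE;
            return lf;
    }
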
diff --git a/fs/splice.c b/fs/splice.c
index 23fa5561b944..90d464241f15 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -200,7 +200,6 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
unsigned int spd_pages = spd->nr_pages;
unsigned int tail = pipe->tail;
unsigned int head = pipe->head;
- unsigned int mask = pipe->ring_size - 1;
ssize_t ret = 0;
int page_nr = 0;
@@ -214,7 +213,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
}
while (!pipe_full(head, tail, pipe->max_usage)) {
- struct pipe_buffer *buf = &pipe->bufs[head & mask];
+ struct pipe_buffer *buf = pipe_buf(pipe, head);
buf->page = spd->pages[page_nr];
buf->offset = spd->partial[page_nr].offset;
@@ -247,7 +246,6 @@ ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
unsigned int head = pipe->head;
unsigned int tail = pipe->tail;
- unsigned int mask = pipe->ring_size - 1;
int ret;
if (unlikely(!pipe->readers)) {
@@ -256,7 +254,7 @@ ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
} else if (pipe_full(head, tail, pipe->max_usage)) {
ret = -EAGAIN;
} else {
- pipe->bufs[head & mask] = *buf;
+ *pipe_buf(pipe, head) = *buf;
pipe->head = head + 1;
return buf->len;
}
@@ -447,11 +445,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
{
unsigned int head = pipe->head;
unsigned int tail = pipe->tail;
- unsigned int mask = pipe->ring_size - 1;
int ret;
while (!pipe_empty(head, tail)) {
- struct pipe_buffer *buf = &pipe->bufs[tail & mask];
+ struct pipe_buffer *buf = pipe_buf(pipe, tail);
sd->len = buf->len;
if (sd->len > sd->total_len)
@@ -495,8 +492,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
static inline bool eat_empty_buffer(struct pipe_inode_info *pipe)
{
unsigned int tail = pipe->tail;
- unsigned int mask = pipe->ring_size - 1;
- struct pipe_buffer *buf = &pipe->bufs[tail & mask];
+ struct pipe_buffer *buf = pipe_buf(pipe, tail);
if (unlikely(!buf->len)) {
pipe_buf_release(pipe, buf);
@@ -690,7 +686,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
while (sd.total_len) {
struct kiocb kiocb;
struct iov_iter from;
- unsigned int head, tail, mask;
+ unsigned int head, tail;
size_t left;
int n;
@@ -711,12 +707,11 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
head = pipe->head;
tail = pipe->tail;
- mask = pipe->ring_size - 1;
/* build the vector */
left = sd.total_len;
for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++) {
- struct pipe_buffer *buf = &pipe->bufs[tail & mask];
+ struct pipe_buffer *buf = pipe_buf(pipe, tail);
size_t this_len = buf->len;
/* zero-length bvecs are not supported, skip them */
@@ -752,7 +747,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
/* dismiss the fully eaten buffers, adjust the partial one */
tail = pipe->tail;
while (ret) {
- struct pipe_buffer *buf = &pipe->bufs[tail & mask];
+ struct pipe_buffer *buf = pipe_buf(pipe, tail);
if (ret >= buf->len) {
ret -= buf->len;
buf->len = 0;
@@ -809,7 +804,7 @@ ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
pipe_lock(pipe);
while (len > 0) {
- unsigned int head, tail, mask, bc = 0;
+ unsigned int head, tail, bc = 0;
size_t remain = len;
/*
@@ -846,10 +841,9 @@ ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
head = pipe->head;
tail = pipe->tail;
- mask = pipe->ring_size - 1;
while (!pipe_empty(head, tail)) {
- struct pipe_buffer *buf = &pipe->bufs[tail & mask];
+ struct pipe_buffer *buf = pipe_buf(pipe, tail);
size_t seg;
if (!buf->len) {
@@ -894,7 +888,7 @@ ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
len -= ret;
tail = pipe->tail;
while (ret > 0) {
- struct pipe_buffer *buf = &pipe->bufs[tail & mask];
+ struct pipe_buffer *buf = pipe_buf(pipe, tail);
size_t seg = min_t(size_t, ret, buf->len);
buf->offset += seg;
@@ -1725,7 +1719,6 @@ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_buffer *ibuf, *obuf;
unsigned int i_head, o_head;
unsigned int i_tail, o_tail;
- unsigned int i_mask, o_mask;
int ret = 0;
bool input_wakeup = false;
@@ -1747,9 +1740,7 @@ retry:
pipe_double_lock(ipipe, opipe);
i_tail = ipipe->tail;
- i_mask = ipipe->ring_size - 1;
o_head = opipe->head;
- o_mask = opipe->ring_size - 1;
do {
size_t o_len;
@@ -1792,8 +1783,8 @@ retry:
goto retry;
}
- ibuf = &ipipe->bufs[i_tail & i_mask];
- obuf = &opipe->bufs[o_head & o_mask];
+ ibuf = pipe_buf(ipipe, i_tail);
+ obuf = pipe_buf(opipe, o_head);
if (len >= ibuf->len) {
/*
@@ -1862,7 +1853,6 @@ static ssize_t link_pipe(struct pipe_inode_info *ipipe,
struct pipe_buffer *ibuf, *obuf;
unsigned int i_head, o_head;
unsigned int i_tail, o_tail;
- unsigned int i_mask, o_mask;
ssize_t ret = 0;
/*
@@ -1873,9 +1863,7 @@ static ssize_t link_pipe(struct pipe_inode_info *ipipe,
pipe_double_lock(ipipe, opipe);
i_tail = ipipe->tail;
- i_mask = ipipe->ring_size - 1;
o_head = opipe->head;
- o_mask = opipe->ring_size - 1;
do {
if (!opipe->readers) {
@@ -1896,8 +1884,8 @@ static ssize_t link_pipe(struct pipe_inode_info *ipipe,
pipe_full(o_head, o_tail, opipe->max_usage))
break;
- ibuf = &ipipe->bufs[i_tail & i_mask];
- obuf = &opipe->bufs[o_head & o_mask];
+ ibuf = pipe_buf(ipipe, i_tail);
+ obuf = pipe_buf(opipe, o_head);
/*
* Get a reference to this pipe buffer,
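
Editor's note: every conversion in this file replaces the open-coded &pipe->bufs[idx & mask], with a locally cached mask = pipe->ring_size - 1, by a single accessor. A sketch of that accessor, consistent with all the call sites above (the in-tree definition lives in include/linux/pipe_fs_i.h and may differ in detail):

    static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
                                               unsigned int slot)
    {
            /* ring_size is a power of two, so masking wraps the slot index */
            return &pipe->bufs[slot & (pipe->ring_size - 1)];
    }
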
diff --git a/fs/super.c b/fs/super.c
index 5a7db4a556e3..97a17f9d9023 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1417,7 +1417,7 @@ static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
if (!surprise)
sync_filesystem(sb);
shrink_dcache_sb(sb);
- invalidate_inodes(sb);
+ evict_inodes(sb);
if (sb->s_op->shutdown)
sb->s_op->shutdown(sb);
@@ -1737,61 +1737,6 @@ struct dentry *mount_nodev(struct file_system_type *fs_type,
}
EXPORT_SYMBOL(mount_nodev);
-int reconfigure_single(struct super_block *s,
- int flags, void *data)
-{
- struct fs_context *fc;
- int ret;
-
- /* The caller really need to be passing fc down into mount_single(),
- * then a chunk of this can be removed. [Bollocks -- AV]
- * Better yet, reconfiguration shouldn't happen, but rather the second
- * mount should be rejected if the parameters are not compatible.
- */
- fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
- if (IS_ERR(fc))
- return PTR_ERR(fc);
-
- ret = parse_monolithic_mount_data(fc, data);
- if (ret < 0)
- goto out;
-
- ret = reconfigure_super(fc);
-out:
- put_fs_context(fc);
- return ret;
-}
-
-static int compare_single(struct super_block *s, void *p)
-{
- return 1;
-}
-
-struct dentry *mount_single(struct file_system_type *fs_type,
- int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int))
-{
- struct super_block *s;
- int error;
-
- s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
- if (IS_ERR(s))
- return ERR_CAST(s);
- if (!s->s_root) {
- error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
- if (!error)
- s->s_flags |= SB_ACTIVE;
- } else {
- error = reconfigure_single(s, flags, data);
- }
- if (unlikely(error)) {
- deactivate_locked_super(s);
- return ERR_PTR(error);
- }
- return dget(s->s_root);
-}
-EXPORT_SYMBOL(mount_single);
-
/**
* vfs_get_tree - Get the mountable root
* @fc: The superblock configuration context.
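
Editor's note: with reconfigure_single(), compare_single() and mount_single() removed, single-instance filesystems are expected to use the fs_context API instead. A minimal sketch of the replacement pattern, assuming a hypothetical example_fill_super():

    static int example_fill_super(struct super_block *sb, struct fs_context *fc);

    /* ->get_tree() replacement for a ->mount() that called mount_single() */
    static int example_get_tree(struct fs_context *fc)
    {
            return get_tree_single(fc, example_fill_super);
    }
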
diff --git a/fs/sysv/Kconfig b/fs/sysv/Kconfig
deleted file mode 100644
index 67b3f90afbfd..000000000000
--- a/fs/sysv/Kconfig
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config SYSV_FS
- tristate "System V/Xenix/V7/Coherent file system support"
- depends on BLOCK
- select BUFFER_HEAD
- help
- SCO, Xenix and Coherent are commercial Unix systems for Intel
- machines, and Version 7 was used on the DEC PDP-11. Saying Y
- here would allow you to read from their floppies and hard disk
- partitions.
-
- If you have floppies or hard disk partitions like that, it is likely
- that they contain binaries from those other Unix systems; in order
- to run these binaries, you will want to install linux-abi which is
- a set of kernel modules that lets you run SCO, Xenix, Wyse,
- UnixWare, Dell Unix and System V programs under Linux. It is
- available via FTP (user: ftp) from
- <ftp://ftp.openlinux.org/pub/people/hch/linux-abi/>).
- NOTE: that will work only for binaries from Intel-based systems;
- PDP ones will have to wait until somebody ports Linux to -11 ;-)
-
- If you only intend to mount files from some other Unix over the
- network using NFS, you don't need the System V file system support
- (but you need NFS file system support obviously).
-
- Note that this option is generally not needed for floppies, since a
- good portable way to transport files and directories between unixes
- (and even other operating systems) is given by the tar program ("man
- tar" or preferably "info tar"). Note also that this option has
- nothing whatsoever to do with the option "System V IPC". Read about
- the System V file system in
- <file:Documentation/filesystems/sysv-fs.rst>.
- Saying Y here will enlarge your kernel by about 27 KB.
-
- To compile this as a module, choose M here: the module will be called
- sysv.
-
- If you haven't heard about all of this before, it's safe to say N.
diff --git a/fs/sysv/Makefile b/fs/sysv/Makefile
deleted file mode 100644
index 17d12ba04b18..000000000000
--- a/fs/sysv/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Linux SystemV/Coherent filesystem routines.
-#
-
-obj-$(CONFIG_SYSV_FS) += sysv.o
-
-sysv-objs := ialloc.o balloc.o inode.o itree.o file.o dir.o \
- namei.o super.o
diff --git a/fs/sysv/balloc.c b/fs/sysv/balloc.c
deleted file mode 100644
index 0e69dbdf7277..000000000000
--- a/fs/sysv/balloc.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/sysv/balloc.c
- *
- * minix/bitmap.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * ext/freelists.c
- * Copyright (C) 1992 Remy Card (card@masi.ibp.fr)
- *
- * xenix/alloc.c
- * Copyright (C) 1992 Doug Evans
- *
- * coh/alloc.c
- * Copyright (C) 1993 Pascal Haible, Bruno Haible
- *
- * sysv/balloc.c
- * Copyright (C) 1993 Bruno Haible
- *
- * This file contains code for allocating/freeing blocks.
- */
-
-#include <linux/buffer_head.h>
-#include <linux/string.h>
-#include "sysv.h"
-
-/* We don't trust the value of
- sb->sv_sbd2->s_tfree = *sb->sv_free_blocks
- but we nevertheless keep it up to date. */
-
-static inline sysv_zone_t *get_chunk(struct super_block *sb, struct buffer_head *bh)
-{
- char *bh_data = bh->b_data;
-
- if (SYSV_SB(sb)->s_type == FSTYPE_SYSV4)
- return (sysv_zone_t*)(bh_data+4);
- else
- return (sysv_zone_t*)(bh_data+2);
-}
-
-/* NOTE NOTE NOTE: nr is a block number _as_ _stored_ _on_ _disk_ */
-
-void sysv_free_block(struct super_block * sb, sysv_zone_t nr)
-{
- struct sysv_sb_info * sbi = SYSV_SB(sb);
- struct buffer_head * bh;
- sysv_zone_t *blocks = sbi->s_bcache;
- unsigned count;
- unsigned block = fs32_to_cpu(sbi, nr);
-
- /*
- * This code does not work at all for AFS (it has a bitmap
- * free list). As AFS is supposed to be read-only no one
- * should call this for an AFS filesystem anyway...
- */
- if (sbi->s_type == FSTYPE_AFS)
- return;
-
- if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
- printk("sysv_free_block: trying to free block not in datazone\n");
- return;
- }
-
- mutex_lock(&sbi->s_lock);
- count = fs16_to_cpu(sbi, *sbi->s_bcache_count);
-
- if (count > sbi->s_flc_size) {
- printk("sysv_free_block: flc_count > flc_size\n");
- mutex_unlock(&sbi->s_lock);
- return;
- }
- /* If the free list head in super-block is full, it is copied
- * into this block being freed, ditto if it's completely empty
- * (applies only on Coherent).
- */
- if (count == sbi->s_flc_size || count == 0) {
- block += sbi->s_block_base;
- bh = sb_getblk(sb, block);
- if (!bh) {
- printk("sysv_free_block: getblk() failed\n");
- mutex_unlock(&sbi->s_lock);
- return;
- }
- memset(bh->b_data, 0, sb->s_blocksize);
- *(__fs16*)bh->b_data = cpu_to_fs16(sbi, count);
- memcpy(get_chunk(sb,bh), blocks, count * sizeof(sysv_zone_t));
- mark_buffer_dirty(bh);
- set_buffer_uptodate(bh);
- brelse(bh);
- count = 0;
- }
- sbi->s_bcache[count++] = nr;
-
- *sbi->s_bcache_count = cpu_to_fs16(sbi, count);
- fs32_add(sbi, sbi->s_free_blocks, 1);
- dirty_sb(sb);
- mutex_unlock(&sbi->s_lock);
-}
-
-sysv_zone_t sysv_new_block(struct super_block * sb)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- unsigned int block;
- sysv_zone_t nr;
- struct buffer_head * bh;
- unsigned count;
-
- mutex_lock(&sbi->s_lock);
- count = fs16_to_cpu(sbi, *sbi->s_bcache_count);
-
- if (count == 0) /* Applies only to Coherent FS */
- goto Enospc;
- nr = sbi->s_bcache[--count];
- if (nr == 0) /* Applies only to Xenix FS, SystemV FS */
- goto Enospc;
-
- block = fs32_to_cpu(sbi, nr);
-
- *sbi->s_bcache_count = cpu_to_fs16(sbi, count);
-
- if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
- printk("sysv_new_block: new block %d is not in data zone\n",
- block);
- goto Enospc;
- }
-
- if (count == 0) { /* the last block continues the free list */
- unsigned count;
-
- block += sbi->s_block_base;
- if (!(bh = sb_bread(sb, block))) {
- printk("sysv_new_block: cannot read free-list block\n");
- /* retry this same block next time */
- *sbi->s_bcache_count = cpu_to_fs16(sbi, 1);
- goto Enospc;
- }
- count = fs16_to_cpu(sbi, *(__fs16*)bh->b_data);
- if (count > sbi->s_flc_size) {
- printk("sysv_new_block: free-list block with >flc_size entries\n");
- brelse(bh);
- goto Enospc;
- }
- *sbi->s_bcache_count = cpu_to_fs16(sbi, count);
- memcpy(sbi->s_bcache, get_chunk(sb, bh),
- count * sizeof(sysv_zone_t));
- brelse(bh);
- }
- /* Now the free list head in the superblock is valid again. */
- fs32_add(sbi, sbi->s_free_blocks, -1);
- dirty_sb(sb);
- mutex_unlock(&sbi->s_lock);
- return nr;
-
-Enospc:
- mutex_unlock(&sbi->s_lock);
- return 0;
-}
-
-unsigned long sysv_count_free_blocks(struct super_block * sb)
-{
- struct sysv_sb_info * sbi = SYSV_SB(sb);
- int sb_count;
- int count;
- struct buffer_head * bh = NULL;
- sysv_zone_t *blocks;
- unsigned block;
- int n;
-
- /*
- * This code does not work at all for AFS (it has a bitmap
- * free list). As AFS is supposed to be read-only we just
- * lie and say it has no free block at all.
- */
- if (sbi->s_type == FSTYPE_AFS)
- return 0;
-
- mutex_lock(&sbi->s_lock);
- sb_count = fs32_to_cpu(sbi, *sbi->s_free_blocks);
-
- if (0)
- goto trust_sb;
-
- /* this causes a lot of disk traffic ... */
- count = 0;
- n = fs16_to_cpu(sbi, *sbi->s_bcache_count);
- blocks = sbi->s_bcache;
- while (1) {
- sysv_zone_t zone;
- if (n > sbi->s_flc_size)
- goto E2big;
- zone = 0;
- while (n && (zone = blocks[--n]) != 0)
- count++;
- if (zone == 0)
- break;
-
- block = fs32_to_cpu(sbi, zone);
- if (bh)
- brelse(bh);
-
- if (block < sbi->s_firstdatazone || block >= sbi->s_nzones)
- goto Einval;
- block += sbi->s_block_base;
- bh = sb_bread(sb, block);
- if (!bh)
- goto Eio;
- n = fs16_to_cpu(sbi, *(__fs16*)bh->b_data);
- blocks = get_chunk(sb, bh);
- }
- if (bh)
- brelse(bh);
- if (count != sb_count)
- goto Ecount;
-done:
- mutex_unlock(&sbi->s_lock);
- return count;
-
-Einval:
- printk("sysv_count_free_blocks: new block %d is not in data zone\n",
- block);
- goto trust_sb;
-Eio:
- printk("sysv_count_free_blocks: cannot read free-list block\n");
- goto trust_sb;
-E2big:
- printk("sysv_count_free_blocks: >flc_size entries in free-list block\n");
- if (bh)
- brelse(bh);
-trust_sb:
- count = sb_count;
- goto done;
-Ecount:
- printk("sysv_count_free_blocks: free block count was %d, "
- "correcting to %d\n", sb_count, count);
- if (!sb_rdonly(sb)) {
- *sbi->s_free_blocks = cpu_to_fs32(sbi, count);
- dirty_sb(sb);
- }
- goto done;
-}
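
Editor's note: for reference, the allocator deleted above implements the classic System V chained free list: the superblock caches up to s_flc_size free zone numbers, and when the cache runs out, the last cached zone is read from disk and itself holds the next chunk of the list. An illustrative (not on-disk-named) view of the chunk layout that get_chunk() locates, whose start sits 2 or 4 bytes into the block depending on the variant:

    struct sysv_flc_chunk {             /* illustrative layout only */
            __fs16      count;          /* entries used in free[] */
            sysv_zone_t free[];         /* free[0] chains to the next chunk;
                                           0 terminates the list */
    };
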
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
deleted file mode 100644
index 639307e2ff8c..000000000000
--- a/fs/sysv/dir.c
+++ /dev/null
@@ -1,378 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/sysv/dir.c
- *
- * minix/dir.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * coh/dir.c
- * Copyright (C) 1993 Pascal Haible, Bruno Haible
- *
- * sysv/dir.c
- * Copyright (C) 1993 Bruno Haible
- *
- * SystemV/Coherent directory handling functions
- */
-
-#include <linux/pagemap.h>
-#include <linux/highmem.h>
-#include <linux/swap.h>
-#include "sysv.h"
-
-static int sysv_readdir(struct file *, struct dir_context *);
-
-const struct file_operations sysv_dir_operations = {
- .llseek = generic_file_llseek,
- .read = generic_read_dir,
- .iterate_shared = sysv_readdir,
- .fsync = generic_file_fsync,
-};
-
-static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
-{
- struct address_space *mapping = folio->mapping;
- struct inode *dir = mapping->host;
-
- block_write_end(NULL, mapping, pos, len, len, folio, NULL);
- if (pos+len > dir->i_size) {
- i_size_write(dir, pos+len);
- mark_inode_dirty(dir);
- }
- folio_unlock(folio);
-}
-
-static int sysv_handle_dirsync(struct inode *dir)
-{
- int err;
-
- err = filemap_write_and_wait(dir->i_mapping);
- if (!err)
- err = sync_inode_metadata(dir, 1);
- return err;
-}
-
-/*
- * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the
- * rules documented in mm/highmem.rst.
- *
- * NOTE: sysv_find_entry() and sysv_dotdot() act as calls to dir_get_folio()
- * and must be treated accordingly for nesting purposes.
- */
-static void *dir_get_folio(struct inode *dir, unsigned long n,
- struct folio **foliop)
-{
- struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL);
-
- if (IS_ERR(folio))
- return ERR_CAST(folio);
- *foliop = folio;
- return kmap_local_folio(folio, 0);
-}
-
-static int sysv_readdir(struct file *file, struct dir_context *ctx)
-{
- unsigned long pos = ctx->pos;
- struct inode *inode = file_inode(file);
- struct super_block *sb = inode->i_sb;
- unsigned long npages = dir_pages(inode);
- unsigned offset;
- unsigned long n;
-
- ctx->pos = pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1);
- if (pos >= inode->i_size)
- return 0;
-
- offset = pos & ~PAGE_MASK;
- n = pos >> PAGE_SHIFT;
-
- for ( ; n < npages; n++, offset = 0) {
- char *kaddr, *limit;
- struct sysv_dir_entry *de;
- struct folio *folio;
-
- kaddr = dir_get_folio(inode, n, &folio);
- if (IS_ERR(kaddr))
- continue;
- de = (struct sysv_dir_entry *)(kaddr+offset);
- limit = kaddr + PAGE_SIZE - SYSV_DIRSIZE;
- for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
- char *name = de->name;
-
- if (!de->inode)
- continue;
-
- if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN),
- fs16_to_cpu(SYSV_SB(sb), de->inode),
- DT_UNKNOWN)) {
- folio_release_kmap(folio, kaddr);
- return 0;
- }
- }
- folio_release_kmap(folio, kaddr);
- }
- return 0;
-}
-
-/* compare strings: name[0..len-1] (not zero-terminated) and
- * buffer[0..] (filled with zeroes up to buffer[0..maxlen-1])
- */
-static inline int namecompare(int len, int maxlen,
- const char * name, const char * buffer)
-{
- if (len < maxlen && buffer[len])
- return 0;
- return !memcmp(name, buffer, len);
-}
-
-/*
- * sysv_find_entry()
- *
- * finds an entry in the specified directory with the wanted name.
- * It does NOT read the inode of the
- * entry - you'll have to do that yourself if you want to.
- *
- * On Success folio_release_kmap() should be called on *foliop.
- *
- * sysv_find_entry() acts as a call to dir_get_folio() and must be treated
- * accordingly for nesting purposes.
- */
-struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct folio **foliop)
-{
- const char * name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
- struct inode * dir = d_inode(dentry->d_parent);
- unsigned long start, n;
- unsigned long npages = dir_pages(dir);
- struct sysv_dir_entry *de;
-
- start = SYSV_I(dir)->i_dir_start_lookup;
- if (start >= npages)
- start = 0;
- n = start;
-
- do {
- char *kaddr = dir_get_folio(dir, n, foliop);
-
- if (!IS_ERR(kaddr)) {
- de = (struct sysv_dir_entry *)kaddr;
- kaddr += folio_size(*foliop) - SYSV_DIRSIZE;
- for ( ; (char *) de <= kaddr ; de++) {
- if (!de->inode)
- continue;
- if (namecompare(namelen, SYSV_NAMELEN,
- name, de->name))
- goto found;
- }
- folio_release_kmap(*foliop, kaddr);
- }
-
- if (++n >= npages)
- n = 0;
- } while (n != start);
-
- return NULL;
-
-found:
- SYSV_I(dir)->i_dir_start_lookup = n;
- return de;
-}
-
-int sysv_add_link(struct dentry *dentry, struct inode *inode)
-{
- struct inode *dir = d_inode(dentry->d_parent);
- const char * name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
- struct folio *folio = NULL;
- struct sysv_dir_entry * de;
- unsigned long npages = dir_pages(dir);
- unsigned long n;
- char *kaddr;
- loff_t pos;
- int err;
-
- /* We take care of directory expansion in the same loop */
- for (n = 0; n <= npages; n++) {
- kaddr = dir_get_folio(dir, n, &folio);
- if (IS_ERR(kaddr))
- return PTR_ERR(kaddr);
- de = (struct sysv_dir_entry *)kaddr;
- kaddr += PAGE_SIZE - SYSV_DIRSIZE;
- while ((char *)de <= kaddr) {
- if (!de->inode)
- goto got_it;
- err = -EEXIST;
- if (namecompare(namelen, SYSV_NAMELEN, name, de->name))
- goto out_folio;
- de++;
- }
- folio_release_kmap(folio, kaddr);
- }
- BUG();
- return -EINVAL;
-
-got_it:
- pos = folio_pos(folio) + offset_in_folio(folio, de);
- folio_lock(folio);
- err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
- if (err)
- goto out_unlock;
- memcpy (de->name, name, namelen);
- memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
- de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
- dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
- inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
- mark_inode_dirty(dir);
- err = sysv_handle_dirsync(dir);
-out_folio:
- folio_release_kmap(folio, kaddr);
- return err;
-out_unlock:
- folio_unlock(folio);
- goto out_folio;
-}
-
-int sysv_delete_entry(struct sysv_dir_entry *de, struct folio *folio)
-{
- struct inode *inode = folio->mapping->host;
- loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
- int err;
-
- folio_lock(folio);
- err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
- if (err) {
- folio_unlock(folio);
- return err;
- }
- de->inode = 0;
- dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- mark_inode_dirty(inode);
- return sysv_handle_dirsync(inode);
-}
-
-int sysv_make_empty(struct inode *inode, struct inode *dir)
-{
- struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
- struct sysv_dir_entry * de;
- char *kaddr;
- int err;
-
- if (IS_ERR(folio))
- return PTR_ERR(folio);
- err = sysv_prepare_chunk(folio, 0, 2 * SYSV_DIRSIZE);
- if (err) {
- folio_unlock(folio);
- goto fail;
- }
- kaddr = kmap_local_folio(folio, 0);
- memset(kaddr, 0, folio_size(folio));
-
- de = (struct sysv_dir_entry *)kaddr;
- de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
- strcpy(de->name,".");
- de++;
- de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), dir->i_ino);
- strcpy(de->name,"..");
-
- kunmap_local(kaddr);
- dir_commit_chunk(folio, 0, 2 * SYSV_DIRSIZE);
- err = sysv_handle_dirsync(inode);
-fail:
- folio_put(folio);
- return err;
-}
-
-/*
- * routine to check that the specified directory is empty (for rmdir)
- */
-int sysv_empty_dir(struct inode * inode)
-{
- struct super_block *sb = inode->i_sb;
- struct folio *folio = NULL;
- unsigned long i, npages = dir_pages(inode);
- char *kaddr;
-
- for (i = 0; i < npages; i++) {
- struct sysv_dir_entry *de;
-
- kaddr = dir_get_folio(inode, i, &folio);
- if (IS_ERR(kaddr))
- continue;
-
- de = (struct sysv_dir_entry *)kaddr;
- kaddr += folio_size(folio) - SYSV_DIRSIZE;
-
- for ( ;(char *)de <= kaddr; de++) {
- if (!de->inode)
- continue;
- /* check for . and .. */
- if (de->name[0] != '.')
- goto not_empty;
- if (!de->name[1]) {
- if (de->inode == cpu_to_fs16(SYSV_SB(sb),
- inode->i_ino))
- continue;
- goto not_empty;
- }
- if (de->name[1] != '.' || de->name[2])
- goto not_empty;
- }
- folio_release_kmap(folio, kaddr);
- }
- return 1;
-
-not_empty:
- folio_release_kmap(folio, kaddr);
- return 0;
-}
-
-/* Releases the page */
-int sysv_set_link(struct sysv_dir_entry *de, struct folio *folio,
- struct inode *inode)
-{
- struct inode *dir = folio->mapping->host;
- loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
- int err;
-
- folio_lock(folio);
- err = sysv_prepare_chunk(folio, pos, SYSV_DIRSIZE);
- if (err) {
- folio_unlock(folio);
- return err;
- }
- de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
- dir_commit_chunk(folio, pos, SYSV_DIRSIZE);
- inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
- mark_inode_dirty(dir);
- return sysv_handle_dirsync(inode);
-}
-
-/*
- * Calls to dir_get_folio()/folio_release_kmap() must be nested according to the
- * rules documented in mm/highmem.rst.
- *
- * sysv_dotdot() acts as a call to dir_get_folio() and must be treated
- * accordingly for nesting purposes.
- */
-struct sysv_dir_entry *sysv_dotdot(struct inode *dir, struct folio **foliop)
-{
- struct sysv_dir_entry *de = dir_get_folio(dir, 0, foliop);
-
- if (IS_ERR(de))
- return NULL;
- /* ".." is the second directory entry */
- return de + 1;
-}
-
-ino_t sysv_inode_by_name(struct dentry *dentry)
-{
- struct folio *folio;
- struct sysv_dir_entry *de = sysv_find_entry (dentry, &folio);
- ino_t res = 0;
-
- if (de) {
- res = fs16_to_cpu(SYSV_SB(dentry->d_sb), de->inode);
- folio_release_kmap(folio, de);
- }
- return res;
-}
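
Editor's note: all of the scans above step through directories in SYSV_DIRSIZE increments. Per the (also removed) fs/sysv/sysv.h, an entry is a 2-byte inode number followed by a 14-byte NUL-padded name, which also explains the "SYSV_DIRSIZE - namelen - 2" memset in sysv_add_link(). Reproduced here for reference:

    #define SYSV_NAMELEN 14
    struct sysv_dir_entry {
            sysv_ino_t inode;           /* __fs16; 0 means unused slot */
            char name[SYSV_NAMELEN];    /* NUL-padded, not NUL-terminated */
    };
    #define SYSV_DIRSIZE sizeof(struct sysv_dir_entry)  /* 16 bytes */
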
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
deleted file mode 100644
index c645f60bdb7f..000000000000
--- a/fs/sysv/file.c
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/sysv/file.c
- *
- * minix/file.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * coh/file.c
- * Copyright (C) 1993 Pascal Haible, Bruno Haible
- *
- * sysv/file.c
- * Copyright (C) 1993 Bruno Haible
- *
- * SystemV/Coherent regular file handling primitives
- */
-
-#include "sysv.h"
-
-/*
- * We have mostly NULLs here: the current defaults are OK for
- * the coh filesystem.
- */
-const struct file_operations sysv_file_operations = {
- .llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
- .write_iter = generic_file_write_iter,
- .mmap = generic_file_mmap,
- .fsync = generic_file_fsync,
- .splice_read = filemap_splice_read,
-};
-
-static int sysv_setattr(struct mnt_idmap *idmap,
- struct dentry *dentry, struct iattr *attr)
-{
- struct inode *inode = d_inode(dentry);
- int error;
-
- error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
- if (error)
- return error;
-
- if ((attr->ia_valid & ATTR_SIZE) &&
- attr->ia_size != i_size_read(inode)) {
- error = inode_newsize_ok(inode, attr->ia_size);
- if (error)
- return error;
- truncate_setsize(inode, attr->ia_size);
- sysv_truncate(inode);
- }
-
- setattr_copy(&nop_mnt_idmap, inode, attr);
- mark_inode_dirty(inode);
- return 0;
-}
-
-const struct inode_operations sysv_file_inode_operations = {
- .setattr = sysv_setattr,
- .getattr = sysv_getattr,
-};
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
deleted file mode 100644
index 269df6d49815..000000000000
--- a/fs/sysv/ialloc.c
+++ /dev/null
@@ -1,235 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/sysv/ialloc.c
- *
- * minix/bitmap.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * ext/freelists.c
- * Copyright (C) 1992 Remy Card (card@masi.ibp.fr)
- *
- * xenix/alloc.c
- * Copyright (C) 1992 Doug Evans
- *
- * coh/alloc.c
- * Copyright (C) 1993 Pascal Haible, Bruno Haible
- *
- * sysv/ialloc.c
- * Copyright (C) 1993 Bruno Haible
- *
- * This file contains code for allocating/freeing inodes.
- */
-
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/sched.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/buffer_head.h>
-#include <linux/writeback.h>
-#include "sysv.h"
-
-/* We don't trust the value of
- sb->sv_sbd2->s_tinode = *sb->sv_sb_total_free_inodes
- but we nevertheless keep it up to date. */
-
-/* An inode on disk is considered free if both i_mode == 0 and i_nlink == 0. */
-
-/* return &sb->sv_sb_fic_inodes[i] = &sbd->s_inode[i]; */
-static inline sysv_ino_t *
-sv_sb_fic_inode(struct super_block * sb, unsigned int i)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
-
- if (sbi->s_bh1 == sbi->s_bh2)
- return &sbi->s_sb_fic_inodes[i];
- else {
- /* 512 byte Xenix FS */
- unsigned int offset = offsetof(struct xenix_super_block, s_inode[i]);
- if (offset < 512)
- return (sysv_ino_t*)(sbi->s_sbd1 + offset);
- else
- return (sysv_ino_t*)(sbi->s_sbd2 + offset);
- }
-}
-
-struct sysv_inode *
-sysv_raw_inode(struct super_block *sb, unsigned ino, struct buffer_head **bh)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- struct sysv_inode *res;
- int block = sbi->s_firstinodezone + sbi->s_block_base;
-
- block += (ino-1) >> sbi->s_inodes_per_block_bits;
- *bh = sb_bread(sb, block);
- if (!*bh)
- return NULL;
- res = (struct sysv_inode *)(*bh)->b_data;
- return res + ((ino-1) & sbi->s_inodes_per_block_1);
-}
-
-static int refill_free_cache(struct super_block *sb)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- struct buffer_head * bh;
- struct sysv_inode * raw_inode;
- int i = 0, ino;
-
- ino = SYSV_ROOT_INO+1;
- raw_inode = sysv_raw_inode(sb, ino, &bh);
- if (!raw_inode)
- goto out;
- while (ino <= sbi->s_ninodes) {
- if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0) {
- *sv_sb_fic_inode(sb,i++) = cpu_to_fs16(SYSV_SB(sb), ino);
- if (i == sbi->s_fic_size)
- break;
- }
- if ((ino++ & sbi->s_inodes_per_block_1) == 0) {
- brelse(bh);
- raw_inode = sysv_raw_inode(sb, ino, &bh);
- if (!raw_inode)
- goto out;
- } else
- raw_inode++;
- }
- brelse(bh);
-out:
- return i;
-}
-
-void sysv_free_inode(struct inode * inode)
-{
- struct super_block *sb = inode->i_sb;
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- unsigned int ino;
- struct buffer_head * bh;
- struct sysv_inode * raw_inode;
- unsigned count;
-
- sb = inode->i_sb;
- ino = inode->i_ino;
- if (ino <= SYSV_ROOT_INO || ino > sbi->s_ninodes) {
- printk("sysv_free_inode: inode 0,1,2 or nonexistent inode\n");
- return;
- }
- raw_inode = sysv_raw_inode(sb, ino, &bh);
- if (!raw_inode) {
- printk("sysv_free_inode: unable to read inode block on device "
- "%s\n", inode->i_sb->s_id);
- return;
- }
- mutex_lock(&sbi->s_lock);
- count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count);
- if (count < sbi->s_fic_size) {
- *sv_sb_fic_inode(sb,count++) = cpu_to_fs16(sbi, ino);
- *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count);
- }
- fs16_add(sbi, sbi->s_sb_total_free_inodes, 1);
- dirty_sb(sb);
- memset(raw_inode, 0, sizeof(struct sysv_inode));
- mark_buffer_dirty(bh);
- mutex_unlock(&sbi->s_lock);
- brelse(bh);
-}
-
-struct inode * sysv_new_inode(const struct inode * dir, umode_t mode)
-{
- struct super_block *sb = dir->i_sb;
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- struct inode *inode;
- sysv_ino_t ino;
- unsigned count;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE
- };
-
- inode = new_inode(sb);
- if (!inode)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&sbi->s_lock);
- count = fs16_to_cpu(sbi, *sbi->s_sb_fic_count);
- if (count == 0 || (*sv_sb_fic_inode(sb,count-1) == 0)) {
- count = refill_free_cache(sb);
- if (count == 0) {
- iput(inode);
- mutex_unlock(&sbi->s_lock);
- return ERR_PTR(-ENOSPC);
- }
- }
- /* Now count > 0. */
- ino = *sv_sb_fic_inode(sb,--count);
- *sbi->s_sb_fic_count = cpu_to_fs16(sbi, count);
- fs16_add(sbi, sbi->s_sb_total_free_inodes, -1);
- dirty_sb(sb);
- inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
- inode->i_ino = fs16_to_cpu(sbi, ino);
- simple_inode_init_ts(inode);
- inode->i_blocks = 0;
- memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data));
- SYSV_I(inode)->i_dir_start_lookup = 0;
- insert_inode_hash(inode);
- mark_inode_dirty(inode);
-
- sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */
- mark_inode_dirty(inode); /* cleared by sysv_write_inode() */
- /* That's it. */
- mutex_unlock(&sbi->s_lock);
- return inode;
-}
-
-unsigned long sysv_count_free_inodes(struct super_block * sb)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- struct buffer_head * bh;
- struct sysv_inode * raw_inode;
- int ino, count, sb_count;
-
- mutex_lock(&sbi->s_lock);
-
- sb_count = fs16_to_cpu(sbi, *sbi->s_sb_total_free_inodes);
-
- if (0)
- goto trust_sb;
-
- /* this causes a lot of disk traffic ... */
- count = 0;
- ino = SYSV_ROOT_INO+1;
- raw_inode = sysv_raw_inode(sb, ino, &bh);
- if (!raw_inode)
- goto Eio;
- while (ino <= sbi->s_ninodes) {
- if (raw_inode->i_mode == 0 && raw_inode->i_nlink == 0)
- count++;
- if ((ino++ & sbi->s_inodes_per_block_1) == 0) {
- brelse(bh);
- raw_inode = sysv_raw_inode(sb, ino, &bh);
- if (!raw_inode)
- goto Eio;
- } else
- raw_inode++;
- }
- brelse(bh);
- if (count != sb_count)
- goto Einval;
-out:
- mutex_unlock(&sbi->s_lock);
- return count;
-
-Einval:
- printk("sysv_count_free_inodes: "
- "free inode count was %d, correcting to %d\n",
- sb_count, count);
- if (!sb_rdonly(sb)) {
- *sbi->s_sb_total_free_inodes = cpu_to_fs16(SYSV_SB(sb), count);
- dirty_sb(sb);
- }
- goto out;
-
-Eio:
- printk("sysv_count_free_inodes: unable to read inode table\n");
-trust_sb:
- count = sb_count;
- goto out;
-}
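
Editor's note: as the comment at the top of this file says, sysv has no inode bitmap; an on-disk inode is free exactly when both fields are zero, which is the predicate refill_free_cache() and sysv_count_free_inodes() scan for. Stated as a helper purely for clarity:

    static inline bool sysv_raw_inode_is_free(const struct sysv_inode *raw)
    {
            /* matches the test used by the deleted scans above */
            return raw->i_mode == 0 && raw->i_nlink == 0;
    }
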
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
deleted file mode 100644
index 76bc2d5e75a9..000000000000
--- a/fs/sysv/inode.c
+++ /dev/null
@@ -1,354 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/sysv/inode.c
- *
- * minix/inode.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * xenix/inode.c
- * Copyright (C) 1992 Doug Evans
- *
- * coh/inode.c
- * Copyright (C) 1993 Pascal Haible, Bruno Haible
- *
- * sysv/inode.c
- * Copyright (C) 1993 Paul B. Monday
- *
- * sysv/inode.c
- * Copyright (C) 1993 Bruno Haible
- * Copyright (C) 1997, 1998 Krzysztof G. Baranowski
- *
- * This file contains code for allocating/freeing inodes and for read/writing
- * the superblock.
- */
-
-#include <linux/highuid.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/buffer_head.h>
-#include <linux/vfs.h>
-#include <linux/writeback.h>
-#include <linux/namei.h>
-#include <asm/byteorder.h>
-#include "sysv.h"
-
-static int sysv_sync_fs(struct super_block *sb, int wait)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- u32 time = (u32)ktime_get_real_seconds(), old_time;
-
- mutex_lock(&sbi->s_lock);
-
- /*
- * If we are going to write out the super block,
- * then attach current time stamp.
- * But if the filesystem was marked clean, keep it clean.
- */
- old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
- if (sbi->s_type == FSTYPE_SYSV4) {
- if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38u - old_time))
- *sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38u - time);
- *sbi->s_sb_time = cpu_to_fs32(sbi, time);
- mark_buffer_dirty(sbi->s_bh2);
- }
-
- mutex_unlock(&sbi->s_lock);
-
- return 0;
-}
-
-static int sysv_remount(struct super_block *sb, int *flags, char *data)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
-
- sync_filesystem(sb);
- if (sbi->s_forced_ro)
- *flags |= SB_RDONLY;
- return 0;
-}
-
-static void sysv_put_super(struct super_block *sb)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
-
- if (!sb_rdonly(sb)) {
- /* XXX ext2 also updates the state here */
- mark_buffer_dirty(sbi->s_bh1);
- if (sbi->s_bh1 != sbi->s_bh2)
- mark_buffer_dirty(sbi->s_bh2);
- }
-
- brelse(sbi->s_bh1);
- if (sbi->s_bh1 != sbi->s_bh2)
- brelse(sbi->s_bh2);
-
- kfree(sbi);
-}
-
-static int sysv_statfs(struct dentry *dentry, struct kstatfs *buf)
-{
- struct super_block *sb = dentry->d_sb;
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
-
- buf->f_type = sb->s_magic;
- buf->f_bsize = sb->s_blocksize;
- buf->f_blocks = sbi->s_ndatazones;
- buf->f_bavail = buf->f_bfree = sysv_count_free_blocks(sb);
- buf->f_files = sbi->s_ninodes;
- buf->f_ffree = sysv_count_free_inodes(sb);
- buf->f_namelen = SYSV_NAMELEN;
- buf->f_fsid = u64_to_fsid(id);
- return 0;
-}
-
-/*
- * NXI <-> N0XI for PDP, XIN <-> XIN0 for le32, NIX <-> 0NIX for be32
- */
-static inline void read3byte(struct sysv_sb_info *sbi,
- unsigned char * from, unsigned char * to)
-{
- if (sbi->s_bytesex == BYTESEX_PDP) {
- to[0] = from[0];
- to[1] = 0;
- to[2] = from[1];
- to[3] = from[2];
- } else if (sbi->s_bytesex == BYTESEX_LE) {
- to[0] = from[0];
- to[1] = from[1];
- to[2] = from[2];
- to[3] = 0;
- } else {
- to[0] = 0;
- to[1] = from[0];
- to[2] = from[1];
- to[3] = from[2];
- }
-}
-
-static inline void write3byte(struct sysv_sb_info *sbi,
- unsigned char * from, unsigned char * to)
-{
- if (sbi->s_bytesex == BYTESEX_PDP) {
- to[0] = from[0];
- to[1] = from[2];
- to[2] = from[3];
- } else if (sbi->s_bytesex == BYTESEX_LE) {
- to[0] = from[0];
- to[1] = from[1];
- to[2] = from[2];
- } else {
- to[0] = from[1];
- to[1] = from[2];
- to[2] = from[3];
- }
-}
-
-static const struct inode_operations sysv_symlink_inode_operations = {
- .get_link = page_get_link,
- .getattr = sysv_getattr,
-};
-
-void sysv_set_inode(struct inode *inode, dev_t rdev)
-{
- if (S_ISREG(inode->i_mode)) {
- inode->i_op = &sysv_file_inode_operations;
- inode->i_fop = &sysv_file_operations;
- inode->i_mapping->a_ops = &sysv_aops;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &sysv_dir_inode_operations;
- inode->i_fop = &sysv_dir_operations;
- inode->i_mapping->a_ops = &sysv_aops;
- } else if (S_ISLNK(inode->i_mode)) {
- inode->i_op = &sysv_symlink_inode_operations;
- inode_nohighmem(inode);
- inode->i_mapping->a_ops = &sysv_aops;
- } else
- init_special_inode(inode, inode->i_mode, rdev);
-}
-
-struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
-{
- struct sysv_sb_info * sbi = SYSV_SB(sb);
- struct buffer_head * bh;
- struct sysv_inode * raw_inode;
- struct sysv_inode_info * si;
- struct inode *inode;
- unsigned int block;
-
- if (!ino || ino > sbi->s_ninodes) {
- printk("Bad inode number on dev %s: %d is out of range\n",
- sb->s_id, ino);
- return ERR_PTR(-EIO);
- }
-
- inode = iget_locked(sb, ino);
- if (!inode)
- return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
-
- raw_inode = sysv_raw_inode(sb, ino, &bh);
- if (!raw_inode) {
- printk("Major problem: unable to read inode from dev %s\n",
- inode->i_sb->s_id);
- goto bad_inode;
- }
- /* SystemV FS: kludge permissions if ino==SYSV_ROOT_INO ?? */
- inode->i_mode = fs16_to_cpu(sbi, raw_inode->i_mode);
- i_uid_write(inode, (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid));
- i_gid_write(inode, (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid));
- set_nlink(inode, fs16_to_cpu(sbi, raw_inode->i_nlink));
- inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
- inode_set_atime(inode, fs32_to_cpu(sbi, raw_inode->i_atime), 0);
- inode_set_mtime(inode, fs32_to_cpu(sbi, raw_inode->i_mtime), 0);
- inode_set_ctime(inode, fs32_to_cpu(sbi, raw_inode->i_ctime), 0);
- inode->i_blocks = 0;
-
- si = SYSV_I(inode);
- for (block = 0; block < 10+1+1+1; block++)
- read3byte(sbi, &raw_inode->i_data[3*block],
- (u8 *)&si->i_data[block]);
- brelse(bh);
- si->i_dir_start_lookup = 0;
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- sysv_set_inode(inode,
- old_decode_dev(fs32_to_cpu(sbi, si->i_data[0])));
- else
- sysv_set_inode(inode, 0);
- unlock_new_inode(inode);
- return inode;
-
-bad_inode:
- iget_failed(inode);
- return ERR_PTR(-EIO);
-}
-
-static int __sysv_write_inode(struct inode *inode, int wait)
-{
- struct super_block * sb = inode->i_sb;
- struct sysv_sb_info * sbi = SYSV_SB(sb);
- struct buffer_head * bh;
- struct sysv_inode * raw_inode;
- struct sysv_inode_info * si;
- unsigned int ino, block;
- int err = 0;
-
- ino = inode->i_ino;
- if (!ino || ino > sbi->s_ninodes) {
- printk("Bad inode number on dev %s: %d is out of range\n",
- inode->i_sb->s_id, ino);
- return -EIO;
- }
- raw_inode = sysv_raw_inode(sb, ino, &bh);
- if (!raw_inode) {
- printk("unable to read i-node block\n");
- return -EIO;
- }
-
- raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode);
- raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(i_uid_read(inode)));
- raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(i_gid_read(inode)));
- raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
- raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
- raw_inode->i_atime = cpu_to_fs32(sbi, inode_get_atime_sec(inode));
- raw_inode->i_mtime = cpu_to_fs32(sbi, inode_get_mtime_sec(inode));
- raw_inode->i_ctime = cpu_to_fs32(sbi, inode_get_ctime_sec(inode));
-
- si = SYSV_I(inode);
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- si->i_data[0] = cpu_to_fs32(sbi, old_encode_dev(inode->i_rdev));
- for (block = 0; block < 10+1+1+1; block++)
- write3byte(sbi, (u8 *)&si->i_data[block],
- &raw_inode->i_data[3*block]);
- mark_buffer_dirty(bh);
- if (wait) {
- sync_dirty_buffer(bh);
- if (buffer_req(bh) && !buffer_uptodate(bh)) {
- printk ("IO error syncing sysv inode [%s:%08x]\n",
- sb->s_id, ino);
- err = -EIO;
- }
- }
- brelse(bh);
- return err;
-}
-
-int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
- return __sysv_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
-}
-
-int sysv_sync_inode(struct inode *inode)
-{
- return __sysv_write_inode(inode, 1);
-}
-
-static void sysv_evict_inode(struct inode *inode)
-{
- truncate_inode_pages_final(&inode->i_data);
- if (!inode->i_nlink) {
- inode->i_size = 0;
- sysv_truncate(inode);
- }
- invalidate_inode_buffers(inode);
- clear_inode(inode);
- if (!inode->i_nlink)
- sysv_free_inode(inode);
-}
-
-static struct kmem_cache *sysv_inode_cachep;
-
-static struct inode *sysv_alloc_inode(struct super_block *sb)
-{
- struct sysv_inode_info *si;
-
- si = alloc_inode_sb(sb, sysv_inode_cachep, GFP_KERNEL);
- if (!si)
- return NULL;
- return &si->vfs_inode;
-}
-
-static void sysv_free_in_core_inode(struct inode *inode)
-{
- kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
-}
-
-static void init_once(void *p)
-{
- struct sysv_inode_info *si = (struct sysv_inode_info *)p;
-
- inode_init_once(&si->vfs_inode);
-}
-
-const struct super_operations sysv_sops = {
- .alloc_inode = sysv_alloc_inode,
- .free_inode = sysv_free_in_core_inode,
- .write_inode = sysv_write_inode,
- .evict_inode = sysv_evict_inode,
- .put_super = sysv_put_super,
- .sync_fs = sysv_sync_fs,
- .remount_fs = sysv_remount,
- .statfs = sysv_statfs,
-};
-
-int __init sysv_init_icache(void)
-{
- sysv_inode_cachep = kmem_cache_create("sysv_inode_cache",
- sizeof(struct sysv_inode_info), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
- init_once);
- if (!sysv_inode_cachep)
- return -ENOMEM;
- return 0;
-}
-
-void sysv_destroy_icache(void)
-{
- /*
- * Make sure all delayed rcu free inodes are flushed before we
- * destroy cache.
- */
- rcu_barrier();
- kmem_cache_destroy(sysv_inode_cachep);
-}
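
Editor's note: the read3byte()/write3byte() pair above exists because sysv packs its 13 block pointers as 3-byte numbers in the on-disk inode (hence the 3*block stride in the copy loops), widening them to 32-bit in-core values; the PDP variant is middle-endian. A host-side illustration of the little-endian case (not kernel code):

    #include <stdint.h>

    /* widen three on-disk little-endian bytes to a 32-bit zone number */
    static uint32_t read3byte_le(const unsigned char from[3])
    {
            return (uint32_t)from[0] | ((uint32_t)from[1] << 8) |
                   ((uint32_t)from[2] << 16);
    }
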
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
deleted file mode 100644
index 451e95f474fa..000000000000
--- a/fs/sysv/itree.c
+++ /dev/null
@@ -1,511 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/sysv/itree.c
- *
- * Handling of indirect blocks' trees.
- * AV, Sep--Dec 2000
- */
-
-#include <linux/buffer_head.h>
-#include <linux/mount.h>
-#include <linux/mpage.h>
-#include <linux/string.h>
-#include "sysv.h"
-
-enum {DIRECT = 10, DEPTH = 4}; /* Have triple indirect */
-
-static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
-{
- mark_buffer_dirty_inode(bh, inode);
- if (IS_SYNC(inode))
- sync_dirty_buffer(bh);
-}
-
-static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
-{
- struct super_block *sb = inode->i_sb;
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- int ptrs_bits = sbi->s_ind_per_block_bits;
- unsigned long indirect_blocks = sbi->s_ind_per_block,
- double_blocks = sbi->s_ind_per_block_2;
- int n = 0;
-
- if (block < 0) {
- printk("sysv_block_map: block < 0\n");
- } else if (block < DIRECT) {
- offsets[n++] = block;
- } else if ( (block -= DIRECT) < indirect_blocks) {
- offsets[n++] = DIRECT;
- offsets[n++] = block;
- } else if ((block -= indirect_blocks) < double_blocks) {
- offsets[n++] = DIRECT+1;
- offsets[n++] = block >> ptrs_bits;
- offsets[n++] = block & (indirect_blocks - 1);
- } else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) {
- offsets[n++] = DIRECT+2;
- offsets[n++] = block >> (ptrs_bits * 2);
- offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1);
- offsets[n++] = block & (indirect_blocks - 1);
- } else {
- /* nothing */;
- }
- return n;
-}
-
-static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr)
-{
- return sbi->s_block_base + fs32_to_cpu(sbi, nr);
-}
-
-typedef struct {
- sysv_zone_t *p;
- sysv_zone_t key;
- struct buffer_head *bh;
-} Indirect;
-
-static DEFINE_RWLOCK(pointers_lock);
-
-static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v)
-{
- p->key = *(p->p = v);
- p->bh = bh;
-}
-
-static inline int verify_chain(Indirect *from, Indirect *to)
-{
- while (from <= to && from->key == *from->p)
- from++;
- return (from > to);
-}
-
-static inline sysv_zone_t *block_end(struct buffer_head *bh)
-{
- return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
-}
-
-static Indirect *get_branch(struct inode *inode,
- int depth,
- int offsets[],
- Indirect chain[],
- int *err)
-{
- struct super_block *sb = inode->i_sb;
- Indirect *p = chain;
- struct buffer_head *bh;
-
- *err = 0;
- add_chain(chain, NULL, SYSV_I(inode)->i_data + *offsets);
- if (!p->key)
- goto no_block;
- while (--depth) {
- int block = block_to_cpu(SYSV_SB(sb), p->key);
- bh = sb_bread(sb, block);
- if (!bh)
- goto failure;
- read_lock(&pointers_lock);
- if (!verify_chain(chain, p))
- goto changed;
- add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
- read_unlock(&pointers_lock);
- if (!p->key)
- goto no_block;
- }
- return NULL;
-
-changed:
- read_unlock(&pointers_lock);
- brelse(bh);
- *err = -EAGAIN;
- goto no_block;
-failure:
- *err = -EIO;
-no_block:
- return p;
-}
-
-static int alloc_branch(struct inode *inode,
- int num,
- int *offsets,
- Indirect *branch)
-{
- int blocksize = inode->i_sb->s_blocksize;
- int n = 0;
- int i;
-
- branch[0].key = sysv_new_block(inode->i_sb);
- if (branch[0].key) for (n = 1; n < num; n++) {
- struct buffer_head *bh;
- int parent;
- /* Allocate the next block */
- branch[n].key = sysv_new_block(inode->i_sb);
- if (!branch[n].key)
- break;
- /*
- * Get buffer_head for parent block, zero it out and set
- * the pointer to new one, then send parent to disk.
- */
- parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key);
- bh = sb_getblk(inode->i_sb, parent);
- if (!bh) {
- sysv_free_block(inode->i_sb, branch[n].key);
- break;
- }
- lock_buffer(bh);
- memset(bh->b_data, 0, blocksize);
- branch[n].bh = bh;
- branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n];
- *branch[n].p = branch[n].key;
- set_buffer_uptodate(bh);
- unlock_buffer(bh);
- dirty_indirect(bh, inode);
- }
- if (n == num)
- return 0;
-
- /* Allocation failed, free what we already allocated */
- for (i = 1; i < n; i++)
- bforget(branch[i].bh);
- for (i = 0; i < n; i++)
- sysv_free_block(inode->i_sb, branch[i].key);
- return -ENOSPC;
-}
-
-static inline int splice_branch(struct inode *inode,
- Indirect chain[],
- Indirect *where,
- int num)
-{
- int i;
-
- /* Verify that place we are splicing to is still there and vacant */
- write_lock(&pointers_lock);
- if (!verify_chain(chain, where-1) || *where->p)
- goto changed;
- *where->p = where->key;
- write_unlock(&pointers_lock);
-
- inode_set_ctime_current(inode);
-
- /* had we spliced it onto indirect block? */
- if (where->bh)
- dirty_indirect(where->bh, inode);
-
- if (IS_SYNC(inode))
- sysv_sync_inode(inode);
- else
- mark_inode_dirty(inode);
- return 0;
-
-changed:
- write_unlock(&pointers_lock);
- for (i = 1; i < num; i++)
- bforget(where[i].bh);
- for (i = 0; i < num; i++)
- sysv_free_block(inode->i_sb, where[i].key);
- return -EAGAIN;
-}
-
-static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
-{
- int err = -EIO;
- int offsets[DEPTH];
- Indirect chain[DEPTH];
- struct super_block *sb = inode->i_sb;
- Indirect *partial;
- int left;
- int depth = block_to_path(inode, iblock, offsets);
-
- if (depth == 0)
- goto out;
-
-reread:
- partial = get_branch(inode, depth, offsets, chain, &err);
-
- /* Simplest case - block found, no allocation needed */
- if (!partial) {
-got_it:
- map_bh(bh_result, sb, block_to_cpu(SYSV_SB(sb),
- chain[depth-1].key));
- /* Clean up and exit */
- partial = chain+depth-1; /* the whole chain */
- goto cleanup;
- }
-
- /* Next simple case - plain lookup or failed read of indirect block */
- if (!create || err == -EIO) {
-cleanup:
- while (partial > chain) {
- brelse(partial->bh);
- partial--;
- }
-out:
- return err;
- }
-
- /*
- * Indirect block might be removed by truncate while we were
- * reading it. Handling of that case (forget what we've got and
- * reread) is taken out of the main path.
- */
- if (err == -EAGAIN)
- goto changed;
-
- left = (chain + depth) - partial;
- err = alloc_branch(inode, left, offsets+(partial-chain), partial);
- if (err)
- goto cleanup;
-
- if (splice_branch(inode, chain, partial, left) < 0)
- goto changed;
-
- set_buffer_new(bh_result);
- goto got_it;
-
-changed:
- while (partial > chain) {
- brelse(partial->bh);
- partial--;
- }
- goto reread;
-}
-
-static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
-{
- while (p < q)
- if (*p++)
- return 0;
- return 1;
-}
-
-static Indirect *find_shared(struct inode *inode,
- int depth,
- int offsets[],
- Indirect chain[],
- sysv_zone_t *top)
-{
- Indirect *partial, *p;
- int k, err;
-
- *top = 0;
- for (k = depth; k > 1 && !offsets[k-1]; k--)
- ;
- partial = get_branch(inode, k, offsets, chain, &err);
-
- write_lock(&pointers_lock);
- if (!partial)
- partial = chain + k-1;
- /*
- * If the branch acquired continuation since we've looked at it -
- * fine, it should all survive and (new) top doesn't belong to us.
- */
- if (!partial->key && *partial->p) {
- write_unlock(&pointers_lock);
- goto no_top;
- }
- for (p=partial; p>chain && all_zeroes((sysv_zone_t*)p->bh->b_data,p->p); p--)
- ;
- /*
- * OK, we've found the last block that must survive. The rest of our
- * branch should be detached before unlocking. However, if that rest
- * of branch is all ours and does not grow immediately from the inode
- * it's easier to cheat and just decrement partial->p.
- */
- if (p == chain + k - 1 && p > chain) {
- p->p--;
- } else {
- *top = *p->p;
- *p->p = 0;
- }
- write_unlock(&pointers_lock);
-
- while (partial > p) {
- brelse(partial->bh);
- partial--;
- }
-no_top:
- return partial;
-}
-
-static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
-{
- for ( ; p < q ; p++) {
- sysv_zone_t nr = *p;
- if (nr) {
- *p = 0;
- sysv_free_block(inode->i_sb, nr);
- mark_inode_dirty(inode);
- }
- }
-}
-
-static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
-{
- struct buffer_head * bh;
- struct super_block *sb = inode->i_sb;
-
- if (depth--) {
- for ( ; p < q ; p++) {
- int block;
- sysv_zone_t nr = *p;
- if (!nr)
- continue;
- *p = 0;
- block = block_to_cpu(SYSV_SB(sb), nr);
- bh = sb_bread(sb, block);
- if (!bh)
- continue;
- free_branches(inode, (sysv_zone_t*)bh->b_data,
- block_end(bh), depth);
- bforget(bh);
- sysv_free_block(sb, nr);
- mark_inode_dirty(inode);
- }
- } else
- free_data(inode, p, q);
-}
-
-void sysv_truncate (struct inode * inode)
-{
- sysv_zone_t *i_data = SYSV_I(inode)->i_data;
- int offsets[DEPTH];
- Indirect chain[DEPTH];
- Indirect *partial;
- sysv_zone_t nr = 0;
- int n;
- long iblock;
- unsigned blocksize;
-
- if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)))
- return;
-
- blocksize = inode->i_sb->s_blocksize;
- iblock = (inode->i_size + blocksize-1)
- >> inode->i_sb->s_blocksize_bits;
-
- block_truncate_page(inode->i_mapping, inode->i_size, get_block);
-
- n = block_to_path(inode, iblock, offsets);
- if (n == 0)
- return;
-
- if (n == 1) {
- free_data(inode, i_data+offsets[0], i_data + DIRECT);
- goto do_indirects;
- }
-
- partial = find_shared(inode, n, offsets, chain, &nr);
- /* Kill the top of shared branch (already detached) */
- if (nr) {
- if (partial == chain)
- mark_inode_dirty(inode);
- else
- dirty_indirect(partial->bh, inode);
- free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
- }
- /* Clear the ends of indirect blocks on the shared branch */
- while (partial > chain) {
- free_branches(inode, partial->p + 1, block_end(partial->bh),
- (chain+n-1) - partial);
- dirty_indirect(partial->bh, inode);
- brelse (partial->bh);
- partial--;
- }
-do_indirects:
- /* Kill the remaining (whole) subtrees (== subtrees deeper than...) */
- while (n < DEPTH) {
- nr = i_data[DIRECT + n - 1];
- if (nr) {
- i_data[DIRECT + n - 1] = 0;
- mark_inode_dirty(inode);
- free_branches(inode, &nr, &nr+1, n);
- }
- n++;
- }
- inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- if (IS_SYNC(inode))
- sysv_sync_inode (inode);
- else
- mark_inode_dirty(inode);
-}
-
-static unsigned sysv_nblocks(struct super_block *s, loff_t size)
-{
- struct sysv_sb_info *sbi = SYSV_SB(s);
- int ptrs_bits = sbi->s_ind_per_block_bits;
- unsigned blocks, res, direct = DIRECT, i = DEPTH;
- blocks = (size + s->s_blocksize - 1) >> s->s_blocksize_bits;
- res = blocks;
- while (--i && blocks > direct) {
- blocks = ((blocks - direct - 1) >> ptrs_bits) + 1;
- res += blocks;
- direct = 1;
- }
- return res;
-}
-
-int sysv_getattr(struct mnt_idmap *idmap, const struct path *path,
- struct kstat *stat, u32 request_mask, unsigned int flags)
-{
- struct super_block *s = path->dentry->d_sb;
- generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry),
- stat);
- stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
- stat->blksize = s->s_blocksize;
- return 0;
-}
-
-static int sysv_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- return mpage_writepages(mapping, wbc, get_block);
-}
-
-static int sysv_read_folio(struct file *file, struct folio *folio)
-{
- return block_read_full_folio(folio, get_block);
-}
-
-int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
-{
- return __block_write_begin(folio, pos, len, get_block);
-}
-
-static void sysv_write_failed(struct address_space *mapping, loff_t to)
-{
- struct inode *inode = mapping->host;
-
- if (to > inode->i_size) {
- truncate_pagecache(inode, inode->i_size);
- sysv_truncate(inode);
- }
-}
-
-static int sysv_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct folio **foliop, void **fsdata)
-{
- int ret;
-
- ret = block_write_begin(mapping, pos, len, foliop, get_block);
- if (unlikely(ret))
- sysv_write_failed(mapping, pos + len);
-
- return ret;
-}
-
-static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
-{
- return generic_block_bmap(mapping,block,get_block);
-}
-
-const struct address_space_operations sysv_aops = {
- .dirty_folio = block_dirty_folio,
- .invalidate_folio = block_invalidate_folio,
- .read_folio = sysv_read_folio,
- .writepages = sysv_writepages,
- .write_begin = sysv_write_begin,
- .write_end = generic_write_end,
- .migrate_folio = buffer_migrate_folio,
- .bmap = sysv_bmap
-};
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
deleted file mode 100644
index fb8bd8437872..000000000000
--- a/fs/sysv/namei.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/fs/sysv/namei.c
- *
- * minix/namei.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * coh/namei.c
- * Copyright (C) 1993 Pascal Haible, Bruno Haible
- *
- * sysv/namei.c
- * Copyright (C) 1993 Bruno Haible
- * Copyright (C) 1997, 1998 Krzysztof G. Baranowski
- */
-
-#include <linux/pagemap.h>
-#include "sysv.h"
-
-static int add_nondir(struct dentry *dentry, struct inode *inode)
-{
- int err = sysv_add_link(dentry, inode);
- if (!err) {
- d_instantiate(dentry, inode);
- return 0;
- }
- inode_dec_link_count(inode);
- iput(inode);
- return err;
-}
-
-static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, unsigned int flags)
-{
- struct inode * inode = NULL;
- ino_t ino;
-
- if (dentry->d_name.len > SYSV_NAMELEN)
- return ERR_PTR(-ENAMETOOLONG);
- ino = sysv_inode_by_name(dentry);
- if (ino)
- inode = sysv_iget(dir->i_sb, ino);
- return d_splice_alias(inode, dentry);
-}
-
-static int sysv_mknod(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode, dev_t rdev)
-{
- struct inode * inode;
- int err;
-
- if (!old_valid_dev(rdev))
- return -EINVAL;
-
- inode = sysv_new_inode(dir, mode);
- err = PTR_ERR(inode);
-
- if (!IS_ERR(inode)) {
- sysv_set_inode(inode, rdev);
- mark_inode_dirty(inode);
- err = add_nondir(dentry, inode);
- }
- return err;
-}
-
-static int sysv_create(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode, bool excl)
-{
- return sysv_mknod(&nop_mnt_idmap, dir, dentry, mode, 0);
-}
-
-static int sysv_symlink(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, const char *symname)
-{
- int err = -ENAMETOOLONG;
- int l = strlen(symname)+1;
- struct inode * inode;
-
- if (l > dir->i_sb->s_blocksize)
- goto out;
-
- inode = sysv_new_inode(dir, S_IFLNK|0777);
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out;
-
- sysv_set_inode(inode, 0);
- err = page_symlink(inode, symname, l);
- if (err)
- goto out_fail;
-
- mark_inode_dirty(inode);
- err = add_nondir(dentry, inode);
-out:
- return err;
-
-out_fail:
- inode_dec_link_count(inode);
- iput(inode);
- goto out;
-}
-
-static int sysv_link(struct dentry * old_dentry, struct inode * dir,
- struct dentry * dentry)
-{
- struct inode *inode = d_inode(old_dentry);
-
- inode_set_ctime_current(inode);
- inode_inc_link_count(inode);
- ihold(inode);
-
- return add_nondir(dentry, inode);
-}
-
-static int sysv_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
-{
- struct inode * inode;
- int err;
-
- inode_inc_link_count(dir);
-
- inode = sysv_new_inode(dir, S_IFDIR|mode);
- err = PTR_ERR(inode);
- if (IS_ERR(inode))
- goto out_dir;
-
- sysv_set_inode(inode, 0);
-
- inode_inc_link_count(inode);
-
- err = sysv_make_empty(inode, dir);
- if (err)
- goto out_fail;
-
- err = sysv_add_link(dentry, inode);
- if (err)
- goto out_fail;
-
- d_instantiate(dentry, inode);
-out:
- return err;
-
-out_fail:
- inode_dec_link_count(inode);
- inode_dec_link_count(inode);
- iput(inode);
-out_dir:
- inode_dec_link_count(dir);
- goto out;
-}
-
-static int sysv_unlink(struct inode * dir, struct dentry * dentry)
-{
- struct inode * inode = d_inode(dentry);
- struct folio *folio;
- struct sysv_dir_entry * de;
- int err;
-
- de = sysv_find_entry(dentry, &folio);
- if (!de)
- return -ENOENT;
-
- err = sysv_delete_entry(de, folio);
- if (!err) {
- inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
- inode_dec_link_count(inode);
- }
- folio_release_kmap(folio, de);
- return err;
-}
-
-static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
-{
- struct inode *inode = d_inode(dentry);
- int err = -ENOTEMPTY;
-
- if (sysv_empty_dir(inode)) {
- err = sysv_unlink(dir, dentry);
- if (!err) {
- inode->i_size = 0;
- inode_dec_link_count(inode);
- inode_dec_link_count(dir);
- }
- }
- return err;
-}
-
-/*
- * Anybody can rename anything with this: the permission checks are left to the
- * higher-level routines.
- */
-static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
- struct dentry *old_dentry, struct inode *new_dir,
- struct dentry *new_dentry, unsigned int flags)
-{
- struct inode * old_inode = d_inode(old_dentry);
- struct inode * new_inode = d_inode(new_dentry);
- struct folio *dir_folio;
- struct sysv_dir_entry * dir_de = NULL;
- struct folio *old_folio;
- struct sysv_dir_entry * old_de;
- int err = -ENOENT;
-
- if (flags & ~RENAME_NOREPLACE)
- return -EINVAL;
-
- old_de = sysv_find_entry(old_dentry, &old_folio);
- if (!old_de)
- goto out;
-
- if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
- dir_de = sysv_dotdot(old_inode, &dir_folio);
- if (!dir_de)
- goto out_old;
- }
-
- if (new_inode) {
- struct folio *new_folio;
- struct sysv_dir_entry * new_de;
-
- err = -ENOTEMPTY;
- if (dir_de && !sysv_empty_dir(new_inode))
- goto out_dir;
-
- err = -ENOENT;
- new_de = sysv_find_entry(new_dentry, &new_folio);
- if (!new_de)
- goto out_dir;
- err = sysv_set_link(new_de, new_folio, old_inode);
- folio_release_kmap(new_folio, new_de);
- if (err)
- goto out_dir;
- inode_set_ctime_current(new_inode);
- if (dir_de)
- drop_nlink(new_inode);
- inode_dec_link_count(new_inode);
- } else {
- err = sysv_add_link(new_dentry, old_inode);
- if (err)
- goto out_dir;
- if (dir_de)
- inode_inc_link_count(new_dir);
- }
-
- err = sysv_delete_entry(old_de, old_folio);
- if (err)
- goto out_dir;
-
- mark_inode_dirty(old_inode);
-
- if (dir_de) {
- err = sysv_set_link(dir_de, dir_folio, new_dir);
- if (!err)
- inode_dec_link_count(old_dir);
- }
-
-out_dir:
- if (dir_de)
- folio_release_kmap(dir_folio, dir_de);
-out_old:
- folio_release_kmap(old_folio, old_de);
-out:
- return err;
-}
-
-/*
- * directories can handle most operations...
- */
-const struct inode_operations sysv_dir_inode_operations = {
- .create = sysv_create,
- .lookup = sysv_lookup,
- .link = sysv_link,
- .unlink = sysv_unlink,
- .symlink = sysv_symlink,
- .mkdir = sysv_mkdir,
- .rmdir = sysv_rmdir,
- .mknod = sysv_mknod,
- .rename = sysv_rename,
- .getattr = sysv_getattr,
-};
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
deleted file mode 100644
index 5c0d07ddbda2..000000000000
--- a/fs/sysv/super.c
+++ /dev/null
@@ -1,595 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/fs/sysv/inode.c
- *
- * minix/inode.c
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * xenix/inode.c
- * Copyright (C) 1992 Doug Evans
- *
- * coh/inode.c
- * Copyright (C) 1993 Pascal Haible, Bruno Haible
- *
- * sysv/inode.c
- * Copyright (C) 1993 Paul B. Monday
- *
- * sysv/inode.c
- * Copyright (C) 1993 Bruno Haible
- * Copyright (C) 1997, 1998 Krzysztof G. Baranowski
- *
- * This file contains code for read/parsing the superblock.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/buffer_head.h>
-#include "sysv.h"
-
-/*
- * The following functions try to recognize specific filesystems.
- *
- * We recognize:
- * - Xenix FS by its magic number.
- * - SystemV FS by its magic number.
- * - Coherent FS by its funny fname/fpack field.
- * - SCO AFS by s_nfree == 0xffff
- * - V7 FS has no distinguishing features.
- *
- * We discriminate among SystemV4 and SystemV2 FS by the assumption that
- * the time stamp is not < 01-01-1980.
- */
-
-enum {
- JAN_1_1980 = (10*365 + 2) * 24 * 60 * 60
-};
-
-static void detected_xenix(struct sysv_sb_info *sbi, unsigned *max_links)
-{
- struct buffer_head *bh1 = sbi->s_bh1;
- struct buffer_head *bh2 = sbi->s_bh2;
- struct xenix_super_block * sbd1;
- struct xenix_super_block * sbd2;
-
- if (bh1 != bh2)
- sbd1 = sbd2 = (struct xenix_super_block *) bh1->b_data;
- else {
- /* block size = 512, so bh1 != bh2 */
- sbd1 = (struct xenix_super_block *) bh1->b_data;
- sbd2 = (struct xenix_super_block *) (bh2->b_data - 512);
- }
-
- *max_links = XENIX_LINK_MAX;
- sbi->s_fic_size = XENIX_NICINOD;
- sbi->s_flc_size = XENIX_NICFREE;
- sbi->s_sbd1 = (char *)sbd1;
- sbi->s_sbd2 = (char *)sbd2;
- sbi->s_sb_fic_count = &sbd1->s_ninode;
- sbi->s_sb_fic_inodes = &sbd1->s_inode[0];
- sbi->s_sb_total_free_inodes = &sbd2->s_tinode;
- sbi->s_bcache_count = &sbd1->s_nfree;
- sbi->s_bcache = &sbd1->s_free[0];
- sbi->s_free_blocks = &sbd2->s_tfree;
- sbi->s_sb_time = &sbd2->s_time;
- sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd1->s_isize);
- sbi->s_nzones = fs32_to_cpu(sbi, sbd1->s_fsize);
-}
-
-static void detected_sysv4(struct sysv_sb_info *sbi, unsigned *max_links)
-{
- struct sysv4_super_block * sbd;
- struct buffer_head *bh1 = sbi->s_bh1;
- struct buffer_head *bh2 = sbi->s_bh2;
-
- if (bh1 == bh2)
- sbd = (struct sysv4_super_block *) (bh1->b_data + BLOCK_SIZE/2);
- else
- sbd = (struct sysv4_super_block *) bh2->b_data;
-
- *max_links = SYSV_LINK_MAX;
- sbi->s_fic_size = SYSV_NICINOD;
- sbi->s_flc_size = SYSV_NICFREE;
- sbi->s_sbd1 = (char *)sbd;
- sbi->s_sbd2 = (char *)sbd;
- sbi->s_sb_fic_count = &sbd->s_ninode;
- sbi->s_sb_fic_inodes = &sbd->s_inode[0];
- sbi->s_sb_total_free_inodes = &sbd->s_tinode;
- sbi->s_bcache_count = &sbd->s_nfree;
- sbi->s_bcache = &sbd->s_free[0];
- sbi->s_free_blocks = &sbd->s_tfree;
- sbi->s_sb_time = &sbd->s_time;
- sbi->s_sb_state = &sbd->s_state;
- sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
- sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
-}
-
-static void detected_sysv2(struct sysv_sb_info *sbi, unsigned *max_links)
-{
- struct sysv2_super_block *sbd;
- struct buffer_head *bh1 = sbi->s_bh1;
- struct buffer_head *bh2 = sbi->s_bh2;
-
- if (bh1 == bh2)
- sbd = (struct sysv2_super_block *) (bh1->b_data + BLOCK_SIZE/2);
- else
- sbd = (struct sysv2_super_block *) bh2->b_data;
-
- *max_links = SYSV_LINK_MAX;
- sbi->s_fic_size = SYSV_NICINOD;
- sbi->s_flc_size = SYSV_NICFREE;
- sbi->s_sbd1 = (char *)sbd;
- sbi->s_sbd2 = (char *)sbd;
- sbi->s_sb_fic_count = &sbd->s_ninode;
- sbi->s_sb_fic_inodes = &sbd->s_inode[0];
- sbi->s_sb_total_free_inodes = &sbd->s_tinode;
- sbi->s_bcache_count = &sbd->s_nfree;
- sbi->s_bcache = &sbd->s_free[0];
- sbi->s_free_blocks = &sbd->s_tfree;
- sbi->s_sb_time = &sbd->s_time;
- sbi->s_sb_state = &sbd->s_state;
- sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
- sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
-}
-
-static void detected_coherent(struct sysv_sb_info *sbi, unsigned *max_links)
-{
- struct coh_super_block * sbd;
- struct buffer_head *bh1 = sbi->s_bh1;
-
- sbd = (struct coh_super_block *) bh1->b_data;
-
- *max_links = COH_LINK_MAX;
- sbi->s_fic_size = COH_NICINOD;
- sbi->s_flc_size = COH_NICFREE;
- sbi->s_sbd1 = (char *)sbd;
- sbi->s_sbd2 = (char *)sbd;
- sbi->s_sb_fic_count = &sbd->s_ninode;
- sbi->s_sb_fic_inodes = &sbd->s_inode[0];
- sbi->s_sb_total_free_inodes = &sbd->s_tinode;
- sbi->s_bcache_count = &sbd->s_nfree;
- sbi->s_bcache = &sbd->s_free[0];
- sbi->s_free_blocks = &sbd->s_tfree;
- sbi->s_sb_time = &sbd->s_time;
- sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
- sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
-}
-
-static void detected_v7(struct sysv_sb_info *sbi, unsigned *max_links)
-{
- struct buffer_head *bh2 = sbi->s_bh2;
- struct v7_super_block *sbd = (struct v7_super_block *)bh2->b_data;
-
- *max_links = V7_LINK_MAX;
- sbi->s_fic_size = V7_NICINOD;
- sbi->s_flc_size = V7_NICFREE;
- sbi->s_sbd1 = (char *)sbd;
- sbi->s_sbd2 = (char *)sbd;
- sbi->s_sb_fic_count = &sbd->s_ninode;
- sbi->s_sb_fic_inodes = &sbd->s_inode[0];
- sbi->s_sb_total_free_inodes = &sbd->s_tinode;
- sbi->s_bcache_count = &sbd->s_nfree;
- sbi->s_bcache = &sbd->s_free[0];
- sbi->s_free_blocks = &sbd->s_tfree;
- sbi->s_sb_time = &sbd->s_time;
- sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
- sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
-}
-
-static int detect_xenix(struct sysv_sb_info *sbi, struct buffer_head *bh)
-{
- struct xenix_super_block *sbd = (struct xenix_super_block *)bh->b_data;
- if (*(__le32 *)&sbd->s_magic == cpu_to_le32(0x2b5544))
- sbi->s_bytesex = BYTESEX_LE;
- else if (*(__be32 *)&sbd->s_magic == cpu_to_be32(0x2b5544))
- sbi->s_bytesex = BYTESEX_BE;
- else
- return 0;
- switch (fs32_to_cpu(sbi, sbd->s_type)) {
- case 1:
- sbi->s_type = FSTYPE_XENIX;
- return 1;
- case 2:
- sbi->s_type = FSTYPE_XENIX;
- return 2;
- default:
- return 0;
- }
-}
-
-static int detect_sysv(struct sysv_sb_info *sbi, struct buffer_head *bh)
-{
- struct super_block *sb = sbi->s_sb;
- /* All relevant fields are at the same offsets in R2 and R4 */
- struct sysv4_super_block * sbd;
- u32 type;
-
- sbd = (struct sysv4_super_block *) (bh->b_data + BLOCK_SIZE/2);
- if (*(__le32 *)&sbd->s_magic == cpu_to_le32(0xfd187e20))
- sbi->s_bytesex = BYTESEX_LE;
- else if (*(__be32 *)&sbd->s_magic == cpu_to_be32(0xfd187e20))
- sbi->s_bytesex = BYTESEX_BE;
- else
- return 0;
-
- type = fs32_to_cpu(sbi, sbd->s_type);
-
- if (fs16_to_cpu(sbi, sbd->s_nfree) == 0xffff) {
- sbi->s_type = FSTYPE_AFS;
- sbi->s_forced_ro = 1;
- if (!sb_rdonly(sb)) {
- printk("SysV FS: SCO EAFS on %s detected, "
- "forcing read-only mode.\n",
- sb->s_id);
- }
- return type;
- }
-
- if (fs32_to_cpu(sbi, sbd->s_time) < JAN_1_1980) {
- /* this is likely to happen on SystemV2 FS */
- if (type > 3 || type < 1)
- return 0;
- sbi->s_type = FSTYPE_SYSV2;
- return type;
- }
- if ((type > 3 || type < 1) && (type > 0x30 || type < 0x10))
- return 0;
-
- /* On Interactive Unix (ISC) Version 4.0/3.x s_type field = 0x10,
- 0x20 or 0x30 indicates that symbolic links and the 14-character
- filename limit is gone. Due to lack of information about this
- feature read-only mode seems to be a reasonable approach... -KGB */
-
- if (type >= 0x10) {
- printk("SysV FS: can't handle long file names on %s, "
- "forcing read-only mode.\n", sb->s_id);
- sbi->s_forced_ro = 1;
- }
-
- sbi->s_type = FSTYPE_SYSV4;
- return type >= 0x10 ? type >> 4 : type;
-}
-
-static int detect_coherent(struct sysv_sb_info *sbi, struct buffer_head *bh)
-{
- struct coh_super_block * sbd;
-
- sbd = (struct coh_super_block *) (bh->b_data + BLOCK_SIZE/2);
- if ((memcmp(sbd->s_fname,"noname",6) && memcmp(sbd->s_fname,"xxxxx ",6))
- || (memcmp(sbd->s_fpack,"nopack",6) && memcmp(sbd->s_fpack,"xxxxx\n",6)))
- return 0;
- sbi->s_bytesex = BYTESEX_PDP;
- sbi->s_type = FSTYPE_COH;
- return 1;
-}
-
-static int detect_sysv_odd(struct sysv_sb_info *sbi, struct buffer_head *bh)
-{
- int size = detect_sysv(sbi, bh);
-
- return size>2 ? 0 : size;
-}
-
-static struct {
- int block;
- int (*test)(struct sysv_sb_info *, struct buffer_head *);
-} flavours[] = {
- {1, detect_xenix},
- {0, detect_sysv},
- {0, detect_coherent},
- {9, detect_sysv_odd},
- {15,detect_sysv_odd},
- {18,detect_sysv},
-};
-
-static char *flavour_names[] = {
- [FSTYPE_XENIX] = "Xenix",
- [FSTYPE_SYSV4] = "SystemV",
- [FSTYPE_SYSV2] = "SystemV Release 2",
- [FSTYPE_COH] = "Coherent",
- [FSTYPE_V7] = "V7",
- [FSTYPE_AFS] = "AFS",
-};
-
-static void (*flavour_setup[])(struct sysv_sb_info *, unsigned *) = {
- [FSTYPE_XENIX] = detected_xenix,
- [FSTYPE_SYSV4] = detected_sysv4,
- [FSTYPE_SYSV2] = detected_sysv2,
- [FSTYPE_COH] = detected_coherent,
- [FSTYPE_V7] = detected_v7,
- [FSTYPE_AFS] = detected_sysv4,
-};
-
-static int complete_read_super(struct super_block *sb, int silent, int size)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
- struct inode *root_inode;
- char *found = flavour_names[sbi->s_type];
- u_char n_bits = size+8;
- int bsize = 1 << n_bits;
- int bsize_4 = bsize >> 2;
-
- sbi->s_firstinodezone = 2;
-
- flavour_setup[sbi->s_type](sbi, &sb->s_max_links);
- if (sbi->s_firstdatazone < sbi->s_firstinodezone)
- return 0;
-
- sbi->s_ndatazones = sbi->s_nzones - sbi->s_firstdatazone;
- sbi->s_inodes_per_block = bsize >> 6;
- sbi->s_inodes_per_block_1 = (bsize >> 6)-1;
- sbi->s_inodes_per_block_bits = n_bits-6;
- sbi->s_ind_per_block = bsize_4;
- sbi->s_ind_per_block_2 = bsize_4*bsize_4;
- sbi->s_toobig_block = 10 + bsize_4 * (1 + bsize_4 * (1 + bsize_4));
- sbi->s_ind_per_block_bits = n_bits-2;
-
- sbi->s_ninodes = (sbi->s_firstdatazone - sbi->s_firstinodezone)
- << sbi->s_inodes_per_block_bits;
-
- if (!silent)
- printk("VFS: Found a %s FS (block size = %ld) on device %s\n",
- found, sb->s_blocksize, sb->s_id);
-
- sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type;
- /* set up enough so that it can read an inode */
- sb->s_op = &sysv_sops;
- if (sbi->s_forced_ro)
- sb->s_flags |= SB_RDONLY;
- root_inode = sysv_iget(sb, SYSV_ROOT_INO);
- if (IS_ERR(root_inode)) {
- printk("SysV FS: get root inode failed\n");
- return 0;
- }
- sb->s_root = d_make_root(root_inode);
- if (!sb->s_root) {
- printk("SysV FS: get root dentry failed\n");
- return 0;
- }
- return 1;
-}
-
-static int sysv_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct buffer_head *bh1, *bh = NULL;
- struct sysv_sb_info *sbi;
- unsigned long blocknr;
- int size = 0, i;
-
- BUILD_BUG_ON(1024 != sizeof (struct xenix_super_block));
- BUILD_BUG_ON(512 != sizeof (struct sysv4_super_block));
- BUILD_BUG_ON(512 != sizeof (struct sysv2_super_block));
- BUILD_BUG_ON(500 != sizeof (struct coh_super_block));
- BUILD_BUG_ON(64 != sizeof (struct sysv_inode));
-
- sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
-
- sbi->s_sb = sb;
- sbi->s_block_base = 0;
- mutex_init(&sbi->s_lock);
- sb->s_fs_info = sbi;
- sb->s_time_min = 0;
- sb->s_time_max = U32_MAX;
- sb_set_blocksize(sb, BLOCK_SIZE);
-
- for (i = 0; i < ARRAY_SIZE(flavours) && !size; i++) {
- brelse(bh);
- bh = sb_bread(sb, flavours[i].block);
- if (!bh)
- continue;
- size = flavours[i].test(SYSV_SB(sb), bh);
- }
-
- if (!size)
- goto Eunknown;
-
- switch (size) {
- case 1:
- blocknr = bh->b_blocknr << 1;
- brelse(bh);
- sb_set_blocksize(sb, 512);
- bh1 = sb_bread(sb, blocknr);
- bh = sb_bread(sb, blocknr + 1);
- break;
- case 2:
- bh1 = bh;
- break;
- case 3:
- blocknr = bh->b_blocknr >> 1;
- brelse(bh);
- sb_set_blocksize(sb, 2048);
- bh1 = bh = sb_bread(sb, blocknr);
- break;
- default:
- goto Ebadsize;
- }
-
- if (bh && bh1) {
- sbi->s_bh1 = bh1;
- sbi->s_bh2 = bh;
- if (complete_read_super(sb, silent, size))
- return 0;
- }
-
- brelse(bh1);
- brelse(bh);
- sb_set_blocksize(sb, BLOCK_SIZE);
- printk("oldfs: cannot read superblock\n");
-failed:
- kfree(sbi);
- return -EINVAL;
-
-Eunknown:
- brelse(bh);
- if (!silent)
- printk("VFS: unable to find oldfs superblock on device %s\n",
- sb->s_id);
- goto failed;
-Ebadsize:
- brelse(bh);
- if (!silent)
- printk("VFS: oldfs: unsupported block size (%dKb)\n",
- 1<<(size-2));
- goto failed;
-}
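
Note the encoding of the size codes returned by the detect_*() probes and consumed by the switch above: codes 1, 2 and 3 correspond to a block size of 512 << (size - 1), i.e. 512, 1024 and 2048 bytes, which complete_read_super() recovers as 1 << (size + 8). Any other code lands in Ebadsize, whose message prints the same quantity as 1 << (size - 2) Kb.
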
-
-static int v7_sanity_check(struct super_block *sb, struct buffer_head *bh)
-{
- struct v7_super_block *v7sb;
- struct sysv_inode *v7i;
- struct buffer_head *bh2;
- struct sysv_sb_info *sbi;
-
- sbi = sb->s_fs_info;
-
- /* plausibility check on superblock */
- v7sb = (struct v7_super_block *) bh->b_data;
- if (fs16_to_cpu(sbi, v7sb->s_nfree) > V7_NICFREE ||
- fs16_to_cpu(sbi, v7sb->s_ninode) > V7_NICINOD ||
- fs32_to_cpu(sbi, v7sb->s_fsize) > V7_MAXSIZE)
- return 0;
-
- /* plausibility check on root inode: it is a directory,
- with a nonzero size that is a multiple of 16 */
- bh2 = sb_bread(sb, 2);
- if (bh2 == NULL)
- return 0;
-
- v7i = (struct sysv_inode *)(bh2->b_data + 64);
- if ((fs16_to_cpu(sbi, v7i->i_mode) & ~0777) != S_IFDIR ||
- (fs32_to_cpu(sbi, v7i->i_size) == 0) ||
- (fs32_to_cpu(sbi, v7i->i_size) & 017) ||
- (fs32_to_cpu(sbi, v7i->i_size) > V7_NFILES *
- sizeof(struct sysv_dir_entry))) {
- brelse(bh2);
- return 0;
- }
-
- brelse(bh2);
- return 1;
-}
-
-static int v7_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct sysv_sb_info *sbi;
- struct buffer_head *bh;
-
- BUILD_BUG_ON(sizeof(struct v7_super_block) != 440);
- BUILD_BUG_ON(sizeof(struct sysv_inode) != 64);
-
- sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
-
- sbi->s_sb = sb;
- sbi->s_block_base = 0;
- sbi->s_type = FSTYPE_V7;
- mutex_init(&sbi->s_lock);
- sb->s_fs_info = sbi;
- sb->s_time_min = 0;
- sb->s_time_max = U32_MAX;
-
- sb_set_blocksize(sb, 512);
-
- if ((bh = sb_bread(sb, 1)) == NULL) {
- if (!silent)
- printk("VFS: unable to read V7 FS superblock on "
- "device %s.\n", sb->s_id);
- goto failed;
- }
-
- /* Try PDP-11 UNIX */
- sbi->s_bytesex = BYTESEX_PDP;
- if (v7_sanity_check(sb, bh))
- goto detected;
-
- /* Try PC/IX, v7/x86 */
- sbi->s_bytesex = BYTESEX_LE;
- if (v7_sanity_check(sb, bh))
- goto detected;
-
- goto failed;
-
-detected:
- sbi->s_bh1 = bh;
- sbi->s_bh2 = bh;
- if (complete_read_super(sb, silent, 1))
- return 0;
-
-failed:
- printk(KERN_ERR "VFS: could not find a valid V7 on %s.\n",
- sb->s_id);
- brelse(bh);
- kfree(sbi);
- return -EINVAL;
-}
-
-/* Every kernel module contains stuff like this. */
-
-static struct dentry *sysv_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_bdev(fs_type, flags, dev_name, data, sysv_fill_super);
-}
-
-static struct dentry *v7_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_bdev(fs_type, flags, dev_name, data, v7_fill_super);
-}
-
-static struct file_system_type sysv_fs_type = {
- .owner = THIS_MODULE,
- .name = "sysv",
- .mount = sysv_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
-};
-MODULE_ALIAS_FS("sysv");
-
-static struct file_system_type v7_fs_type = {
- .owner = THIS_MODULE,
- .name = "v7",
- .mount = v7_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
-};
-MODULE_ALIAS_FS("v7");
-MODULE_ALIAS("v7");
-
-static int __init init_sysv_fs(void)
-{
- int error;
-
- error = sysv_init_icache();
- if (error)
- goto out;
- error = register_filesystem(&sysv_fs_type);
- if (error)
- goto destroy_icache;
- error = register_filesystem(&v7_fs_type);
- if (error)
- goto unregister;
- return 0;
-
-unregister:
- unregister_filesystem(&sysv_fs_type);
-destroy_icache:
- sysv_destroy_icache();
-out:
- return error;
-}
-
-static void __exit exit_sysv_fs(void)
-{
- unregister_filesystem(&sysv_fs_type);
- unregister_filesystem(&v7_fs_type);
- sysv_destroy_icache();
-}
-
-module_init(init_sysv_fs)
-module_exit(exit_sysv_fs)
-MODULE_DESCRIPTION("SystemV Filesystem");
-MODULE_LICENSE("GPL");
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
deleted file mode 100644
index 0a48b2e7edb1..000000000000
--- a/fs/sysv/sysv.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SYSV_H
-#define _SYSV_H
-
-#include <linux/buffer_head.h>
-
-typedef __u16 __bitwise __fs16;
-typedef __u32 __bitwise __fs32;
-
-#include <linux/sysv_fs.h>
-
-/*
- * SystemV/V7/Coherent super-block data in memory
- *
- * The SystemV/V7/Coherent superblock contains dynamic data (it gets modified
- * while the system is running). This is in contrast to the Minix and Berkeley
- * filesystems (where the superblock is never modified). This affects the
- * sync() operation: we must keep the superblock in a disk buffer and use this
- * one as our "working copy".
- */
-
-struct sysv_sb_info {
- struct super_block *s_sb; /* VFS superblock */
- int s_type; /* file system type: FSTYPE_{XENIX|SYSV|COH} */
- char s_bytesex; /* bytesex (le/be/pdp) */
- unsigned int s_inodes_per_block; /* number of inodes per block */
- unsigned int s_inodes_per_block_1; /* inodes_per_block - 1 */
- unsigned int s_inodes_per_block_bits; /* log2(inodes_per_block) */
- unsigned int s_ind_per_block; /* number of indirections per block */
- unsigned int s_ind_per_block_bits; /* log2(ind_per_block) */
- unsigned int s_ind_per_block_2; /* ind_per_block ^ 2 */
- unsigned int s_toobig_block; /* 10 + ipb + ipb^2 + ipb^3 */
- unsigned int s_block_base; /* physical block number of block 0 */
- unsigned short s_fic_size; /* free inode cache size, NICINOD */
- unsigned short s_flc_size; /* free block list chunk size, NICFREE */
- /* The superblock is kept in one or two disk buffers: */
- struct buffer_head *s_bh1;
- struct buffer_head *s_bh2;
- /* These are pointers into the disk buffer, to compensate for
- different superblock layout. */
- char * s_sbd1; /* entire superblock data, for part 1 */
- char * s_sbd2; /* entire superblock data, for part 2 */
- __fs16 *s_sb_fic_count; /* pointer to s_sbd->s_ninode */
- sysv_ino_t *s_sb_fic_inodes; /* pointer to s_sbd->s_inode */
- __fs16 *s_sb_total_free_inodes; /* pointer to s_sbd->s_tinode */
- __fs16 *s_bcache_count; /* pointer to s_sbd->s_nfree */
- sysv_zone_t *s_bcache; /* pointer to s_sbd->s_free */
- __fs32 *s_free_blocks; /* pointer to s_sbd->s_tfree */
- __fs32 *s_sb_time; /* pointer to s_sbd->s_time */
- __fs32 *s_sb_state; /* pointer to s_sbd->s_state, only FSTYPE_SYSV */
- /* We keep those superblock entities that don't change here;
- this saves us an indirection and perhaps a conversion. */
- u32 s_firstinodezone; /* index of first inode zone */
- u32 s_firstdatazone; /* same as s_sbd->s_isize */
- u32 s_ninodes; /* total number of inodes */
- u32 s_ndatazones; /* total number of data zones */
- u32 s_nzones; /* same as s_sbd->s_fsize */
- u16 s_namelen; /* max length of dir entry */
- int s_forced_ro;
- struct mutex s_lock;
-};
-
-/*
- * SystemV/V7/Coherent FS inode data in memory
- */
-struct sysv_inode_info {
- __fs32 i_data[13];
- u32 i_dir_start_lookup;
- struct inode vfs_inode;
-};
-
-
-static inline struct sysv_inode_info *SYSV_I(struct inode *inode)
-{
- return container_of(inode, struct sysv_inode_info, vfs_inode);
-}
-
-static inline struct sysv_sb_info *SYSV_SB(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
-
-/* identify the FS in memory */
-enum {
- FSTYPE_NONE = 0,
- FSTYPE_XENIX,
- FSTYPE_SYSV4,
- FSTYPE_SYSV2,
- FSTYPE_COH,
- FSTYPE_V7,
- FSTYPE_AFS,
- FSTYPE_END,
-};
-
-#define SYSV_MAGIC_BASE 0x012FF7B3
-
-#define XENIX_SUPER_MAGIC (SYSV_MAGIC_BASE+FSTYPE_XENIX)
-#define SYSV4_SUPER_MAGIC (SYSV_MAGIC_BASE+FSTYPE_SYSV4)
-#define SYSV2_SUPER_MAGIC (SYSV_MAGIC_BASE+FSTYPE_SYSV2)
-#define COH_SUPER_MAGIC (SYSV_MAGIC_BASE+FSTYPE_COH)
-
-
-/* Admissible values for i_nlink: 0.._LINK_MAX */
-enum {
- XENIX_LINK_MAX = 126, /* ?? */
- SYSV_LINK_MAX = 126, /* 127? 251? */
- V7_LINK_MAX = 126, /* ?? */
- COH_LINK_MAX = 10000,
-};
-
-
-static inline void dirty_sb(struct super_block *sb)
-{
- struct sysv_sb_info *sbi = SYSV_SB(sb);
-
- mark_buffer_dirty(sbi->s_bh1);
- if (sbi->s_bh1 != sbi->s_bh2)
- mark_buffer_dirty(sbi->s_bh2);
-}
-
-
-/* ialloc.c */
-extern struct sysv_inode *sysv_raw_inode(struct super_block *, unsigned,
- struct buffer_head **);
-extern struct inode * sysv_new_inode(const struct inode *, umode_t);
-extern void sysv_free_inode(struct inode *);
-extern unsigned long sysv_count_free_inodes(struct super_block *);
-
-/* balloc.c */
-extern sysv_zone_t sysv_new_block(struct super_block *);
-extern void sysv_free_block(struct super_block *, sysv_zone_t);
-extern unsigned long sysv_count_free_blocks(struct super_block *);
-
-/* itree.c */
-void sysv_truncate(struct inode *);
-int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);
-
-/* inode.c */
-extern struct inode *sysv_iget(struct super_block *, unsigned int);
-extern int sysv_write_inode(struct inode *, struct writeback_control *wbc);
-extern int sysv_sync_inode(struct inode *);
-extern void sysv_set_inode(struct inode *, dev_t);
-extern int sysv_getattr(struct mnt_idmap *, const struct path *,
- struct kstat *, u32, unsigned int);
-extern int sysv_init_icache(void);
-extern void sysv_destroy_icache(void);
-
-
-/* dir.c */
-struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct folio **);
-int sysv_add_link(struct dentry *, struct inode *);
-int sysv_delete_entry(struct sysv_dir_entry *, struct folio *);
-int sysv_make_empty(struct inode *, struct inode *);
-int sysv_empty_dir(struct inode *);
-int sysv_set_link(struct sysv_dir_entry *, struct folio *,
- struct inode *);
-struct sysv_dir_entry *sysv_dotdot(struct inode *, struct folio **);
-ino_t sysv_inode_by_name(struct dentry *);
-
-
-extern const struct inode_operations sysv_file_inode_operations;
-extern const struct inode_operations sysv_dir_inode_operations;
-extern const struct file_operations sysv_file_operations;
-extern const struct file_operations sysv_dir_operations;
-extern const struct address_space_operations sysv_aops;
-extern const struct super_operations sysv_sops;
-
-
-enum {
- BYTESEX_LE,
- BYTESEX_PDP,
- BYTESEX_BE,
-};
-
-static inline u32 PDP_swab(u32 x)
-{
-#ifdef __LITTLE_ENDIAN
- return ((x & 0xffff) << 16) | ((x & 0xffff0000) >> 16);
-#else
-#ifdef __BIG_ENDIAN
- return ((x & 0xff00ff) << 8) | ((x & 0xff00ff00) >> 8);
-#else
-#error BYTESEX
-#endif
-#endif
-}
-
-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
-{
- if (sbi->s_bytesex == BYTESEX_PDP)
- return PDP_swab((__force __u32)n);
- else if (sbi->s_bytesex == BYTESEX_LE)
- return le32_to_cpu((__force __le32)n);
- else
- return be32_to_cpu((__force __be32)n);
-}
-
-static inline __fs32 cpu_to_fs32(struct sysv_sb_info *sbi, __u32 n)
-{
- if (sbi->s_bytesex == BYTESEX_PDP)
- return (__force __fs32)PDP_swab(n);
- else if (sbi->s_bytesex == BYTESEX_LE)
- return (__force __fs32)cpu_to_le32(n);
- else
- return (__force __fs32)cpu_to_be32(n);
-}
-
-static inline __fs32 fs32_add(struct sysv_sb_info *sbi, __fs32 *n, int d)
-{
- if (sbi->s_bytesex == BYTESEX_PDP)
- *(__u32*)n = PDP_swab(PDP_swab(*(__u32*)n)+d);
- else if (sbi->s_bytesex == BYTESEX_LE)
- le32_add_cpu((__le32 *)n, d);
- else
- be32_add_cpu((__be32 *)n, d);
- return *n;
-}
-
-static inline __u16 fs16_to_cpu(struct sysv_sb_info *sbi, __fs16 n)
-{
- if (sbi->s_bytesex != BYTESEX_BE)
- return le16_to_cpu((__force __le16)n);
- else
- return be16_to_cpu((__force __be16)n);
-}
-
-static inline __fs16 cpu_to_fs16(struct sysv_sb_info *sbi, __u16 n)
-{
- if (sbi->s_bytesex != BYTESEX_BE)
- return (__force __fs16)cpu_to_le16(n);
- else
- return (__force __fs16)cpu_to_be16(n);
-}
-
-static inline __fs16 fs16_add(struct sysv_sb_info *sbi, __fs16 *n, int d)
-{
- if (sbi->s_bytesex != BYTESEX_BE)
- le16_add_cpu((__le16 *)n, d);
- else
- be16_add_cpu((__be16 *)n, d);
- return *n;
-}
-
-#endif /* _SYSV_H */
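
Worked example of the PDP-11 middle-endian layout that PDP_swab() above handles: the 32-bit value 0x01020304 is stored on disk as two little-endian 16-bit halves, 0x0102 then 0x0304, giving the byte sequence 02 01 04 03. A little-endian CPU reads those bytes back as 0x03040102, and swapping the 16-bit halves, as the __LITTLE_ENDIAN branch of PDP_swab() does, recovers 0x01020304.
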
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 9f7eb451a60f..753e22e83e0f 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -439,15 +439,15 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
return ufd;
}
- file = anon_inode_getfile("[timerfd]", &timerfd_fops, ctx,
- O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
+ file = anon_inode_getfile_fmode("[timerfd]", &timerfd_fops, ctx,
+ O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS),
+ FMODE_NOWAIT);
if (IS_ERR(file)) {
put_unused_fd(ufd);
kfree(ctx);
return PTR_ERR(file);
}
- file->f_mode |= FMODE_NOWAIT;
fd_install(ufd, file);
return ufd;
}
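
The timerfd hunk above folds the post-creation f_mode fixup into the allocation call. A minimal sketch of the same pattern for any anon-inode user (the demo names are illustrative, not from this patch):

	file = anon_inode_getfile_fmode("[demo]", &demo_fops, priv,
					O_RDWR | O_CLOEXEC, FMODE_NOWAIT);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* file->f_mode already includes FMODE_NOWAIT; no fixup needed */
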
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 53214499e384..cb1af30b49f5 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -109,9 +109,9 @@ static char *get_dname(struct dentry *dentry)
return name;
}
-static int tracefs_syscall_mkdir(struct mnt_idmap *idmap,
- struct inode *inode, struct dentry *dentry,
- umode_t mode)
+static struct dentry *tracefs_syscall_mkdir(struct mnt_idmap *idmap,
+ struct inode *inode, struct dentry *dentry,
+ umode_t mode)
{
struct tracefs_inode *ti;
char *name;
@@ -119,7 +119,7 @@ static int tracefs_syscall_mkdir(struct mnt_idmap *idmap,
name = get_dname(dentry);
if (!name)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
/*
* This is a new directory that does not take the default of
@@ -141,7 +141,7 @@ static int tracefs_syscall_mkdir(struct mnt_idmap *idmap,
kfree(name);
- return ret;
+ return ERR_PTR(ret);
}
static int tracefs_syscall_rmdir(struct inode *inode, struct dentry *dentry)
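
This tracefs change is the first of several below (ubifs, udf, ufs, vboxsf, xfs) converting the ->mkdir inode operation from returning int to returning struct dentry *. Judging from the conversions in this diff, the new contract is: return NULL when the passed-in dentry was instantiated, an ERR_PTR() on failure, and possibly a different instantiated dentry, which the vfs_mkdir() caller in the xfs orphanage hunk duly reuses. A minimal sketch for a filesystem wrapping an old int-returning helper (demo names are hypothetical):

	static struct dentry *demo_mkdir(struct mnt_idmap *idmap,
					 struct inode *dir,
					 struct dentry *dentry, umode_t mode)
	{
		int err = demo_do_mkdir(dir, dentry, mode);

		return err ? ERR_PTR(err) : NULL;
	}
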
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index fda82f3e16e8..3c3d3ad4fa6c 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -1002,8 +1002,8 @@ out_fname:
return err;
}
-static int ubifs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ubifs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct ubifs_inode *dir_ui = ubifs_inode(dir);
@@ -1023,7 +1023,7 @@ static int ubifs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
err = ubifs_budget_space(c, &req);
if (err)
- return err;
+ return ERR_PTR(err);
err = ubifs_prepare_create(dir, dentry, &nm);
if (err)
@@ -1060,7 +1060,7 @@ static int ubifs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
ubifs_release_budget(c, &req);
d_instantiate(dentry, inode);
fscrypt_free_filename(&nm);
- return 0;
+ return NULL;
out_cancel:
dir->i_size -= sz_change;
@@ -1074,7 +1074,7 @@ out_fname:
fscrypt_free_filename(&nm);
out_budg:
ubifs_release_budget(c, &req);
- return err;
+ return ERR_PTR(err);
}
static int ubifs_mknod(struct mnt_idmap *idmap, struct inode *dir,
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 2cb49b6b0716..5f2e9a892bff 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -419,8 +419,8 @@ static int udf_mknod(struct mnt_idmap *idmap, struct inode *dir,
return udf_add_nondir(dentry, inode);
}
-static int udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
struct udf_fileident_iter iter;
@@ -430,7 +430,7 @@ static int udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
inode = udf_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
iinfo = UDF_I(inode);
inode->i_op = &udf_dir_inode_operations;
@@ -439,7 +439,7 @@ static int udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
if (err) {
clear_nlink(inode);
discard_new_inode(inode);
- return err;
+ return ERR_PTR(err);
}
set_nlink(inode, 2);
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -456,7 +456,7 @@ static int udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
if (err) {
clear_nlink(inode);
discard_new_inode(inode);
- return err;
+ return ERR_PTR(err);
}
iter.fi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
iter.fi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
@@ -471,7 +471,7 @@ static int udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
mark_inode_dirty(dir);
d_instantiate_new(dentry, inode);
- return 0;
+ return NULL;
}
static int empty_dir(struct inode *dir)
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 38a024c8cccd..5b3c85c93242 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -166,8 +166,8 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
return error;
}
-static int ufs_mkdir(struct mnt_idmap * idmap, struct inode * dir,
- struct dentry * dentry, umode_t mode)
+static struct dentry *ufs_mkdir(struct mnt_idmap * idmap, struct inode * dir,
+ struct dentry * dentry, umode_t mode)
{
struct inode * inode;
int err;
@@ -194,7 +194,7 @@ static int ufs_mkdir(struct mnt_idmap * idmap, struct inode * dir,
goto out_fail;
d_instantiate_new(dentry, inode);
- return 0;
+ return NULL;
out_fail:
inode_dec_link_count(inode);
@@ -202,7 +202,7 @@ out_fail:
discard_new_inode(inode);
out_dir:
inode_dec_link_count(dir);
- return err;
+ return ERR_PTR(err);
}
static int ufs_unlink(struct inode *dir, struct dentry *dentry)
diff --git a/fs/unicode/Kconfig b/fs/unicode/Kconfig
index da786a687fdc..4ad2c36550f1 100644
--- a/fs/unicode/Kconfig
+++ b/fs/unicode/Kconfig
@@ -10,6 +10,7 @@ config UNICODE
be a separate loadable module that gets requested only when a file
system actually uses it.
-config UNICODE_NORMALIZATION_SELFTEST
+config UNICODE_NORMALIZATION_KUNIT_TEST
tristate "Test UTF-8 normalization support"
- depends on UNICODE
+ depends on UNICODE && KUNIT
+ default KUNIT_ALL_TESTS
diff --git a/fs/unicode/Makefile b/fs/unicode/Makefile
index e309afe2b2bb..d95be7fb9f6b 100644
--- a/fs/unicode/Makefile
+++ b/fs/unicode/Makefile
@@ -4,7 +4,7 @@ ifneq ($(CONFIG_UNICODE),)
obj-y += unicode.o
endif
obj-$(CONFIG_UNICODE) += utf8data.o
-obj-$(CONFIG_UNICODE_NORMALIZATION_SELFTEST) += utf8-selftest.o
+obj-$(CONFIG_UNICODE_NORMALIZATION_KUNIT_TEST) += tests/utf8_kunit.o
unicode-y := utf8-norm.o utf8-core.o
diff --git a/fs/unicode/tests/.kunitconfig b/fs/unicode/tests/.kunitconfig
new file mode 100644
index 000000000000..62dd5c171f9c
--- /dev/null
+++ b/fs/unicode/tests/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_UNICODE=y
+CONFIG_UNICODE_NORMALIZATION_KUNIT_TEST=y
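
With this .kunitconfig in place, the suite should be runnable on its own through the usual KUnit tooling, e.g. ./tools/testing/kunit/kunit.py run --kunitconfig=fs/unicode/tests/.kunitconfig.
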
diff --git a/fs/unicode/utf8-selftest.c b/fs/unicode/tests/utf8_kunit.c
index 5ddaf27b21a6..5063e8138aec 100644
--- a/fs/unicode/utf8-selftest.c
+++ b/fs/unicode/tests/utf8_kunit.c
@@ -1,34 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Kernel module for testing utf-8 support.
+ * KUnit tests for utf-8 support.
*
* Copyright 2017 Collabora Ltd.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/printk.h>
#include <linux/unicode.h>
-#include <linux/dcache.h>
-
-#include "utf8n.h"
-
-static unsigned int failed_tests;
-static unsigned int total_tests;
-
-#define _test(cond, func, line, fmt, ...) do { \
- total_tests++; \
- if (!cond) { \
- failed_tests++; \
- pr_err("test %s:%d Failed: %s%s", \
- func, line, #cond, (fmt?":":".")); \
- if (fmt) \
- pr_err(fmt, ##__VA_ARGS__); \
- } \
- } while (0)
-#define test_f(cond, fmt, ...) _test(cond, __func__, __LINE__, fmt, ##__VA_ARGS__)
-#define test(cond) _test(cond, __func__, __LINE__, "")
+#include <kunit/test.h>
+
+#include "../utf8n.h"
static const struct {
/* UTF-8 strings in this vector _must_ be NULL-terminated. */
@@ -167,69 +147,74 @@ static int utf8cursor(struct utf8cursor *u8c, const struct unicode_map *um,
return utf8ncursor(u8c, um, n, s, (unsigned int)-1);
}
-static void check_utf8_nfdi(struct unicode_map *um)
+static void check_utf8_nfdi(struct kunit *test)
{
int i;
struct utf8cursor u8c;
+ struct unicode_map *um = test->priv;
for (i = 0; i < ARRAY_SIZE(nfdi_test_data); i++) {
int len = strlen(nfdi_test_data[i].str);
int nlen = strlen(nfdi_test_data[i].dec);
int j = 0;
unsigned char c;
+ int ret;
+
+ KUNIT_EXPECT_EQ(test, utf8len(um, UTF8_NFDI, nfdi_test_data[i].str), nlen);
+ KUNIT_EXPECT_EQ(test, utf8nlen(um, UTF8_NFDI, nfdi_test_data[i].str, len),
+ nlen);
- test((utf8len(um, UTF8_NFDI, nfdi_test_data[i].str) == nlen));
- test((utf8nlen(um, UTF8_NFDI, nfdi_test_data[i].str, len) ==
- nlen));
- if (utf8cursor(&u8c, um, UTF8_NFDI, nfdi_test_data[i].str) < 0)
- pr_err("can't create cursor\n");
+ ret = utf8cursor(&u8c, um, UTF8_NFDI, nfdi_test_data[i].str);
+ KUNIT_EXPECT_TRUE_MSG(test, ret >= 0, "Can't create cursor\n");
while ((c = utf8byte(&u8c)) > 0) {
- test_f((c == nfdi_test_data[i].dec[j]),
- "Unexpected byte 0x%x should be 0x%x\n",
- c, nfdi_test_data[i].dec[j]);
+ KUNIT_EXPECT_EQ_MSG(test, c, nfdi_test_data[i].dec[j],
+ "Unexpected byte 0x%x should be 0x%x\n",
+ c, nfdi_test_data[i].dec[j]);
j++;
}
- test((j == nlen));
+ KUNIT_EXPECT_EQ(test, j, nlen);
}
}
-static void check_utf8_nfdicf(struct unicode_map *um)
+static void check_utf8_nfdicf(struct kunit *test)
{
int i;
struct utf8cursor u8c;
+ struct unicode_map *um = test->priv;
for (i = 0; i < ARRAY_SIZE(nfdicf_test_data); i++) {
int len = strlen(nfdicf_test_data[i].str);
int nlen = strlen(nfdicf_test_data[i].ncf);
int j = 0;
+ int ret;
unsigned char c;
- test((utf8len(um, UTF8_NFDICF, nfdicf_test_data[i].str) ==
- nlen));
- test((utf8nlen(um, UTF8_NFDICF, nfdicf_test_data[i].str, len) ==
- nlen));
+ KUNIT_EXPECT_EQ(test, utf8len(um, UTF8_NFDICF, nfdicf_test_data[i].str),
+ nlen);
+ KUNIT_EXPECT_EQ(test, utf8nlen(um, UTF8_NFDICF, nfdicf_test_data[i].str, len),
+ nlen);
- if (utf8cursor(&u8c, um, UTF8_NFDICF,
- nfdicf_test_data[i].str) < 0)
- pr_err("can't create cursor\n");
+ ret = utf8cursor(&u8c, um, UTF8_NFDICF, nfdicf_test_data[i].str);
+ KUNIT_EXPECT_TRUE_MSG(test, ret >= 0, "Can't create cursor\n");
while ((c = utf8byte(&u8c)) > 0) {
- test_f((c == nfdicf_test_data[i].ncf[j]),
- "Unexpected byte 0x%x should be 0x%x\n",
- c, nfdicf_test_data[i].ncf[j]);
+ KUNIT_EXPECT_EQ_MSG(test, c, nfdicf_test_data[i].ncf[j],
+ "Unexpected byte 0x%x should be 0x%x\n",
+ c, nfdicf_test_data[i].ncf[j]);
j++;
}
- test((j == nlen));
+ KUNIT_EXPECT_EQ(test, j, nlen);
}
}
-static void check_utf8_comparisons(struct unicode_map *table)
+static void check_utf8_comparisons(struct kunit *test)
{
int i;
+ struct unicode_map *um = test->priv;
for (i = 0; i < ARRAY_SIZE(nfdi_test_data); i++) {
const struct qstr s1 = {.name = nfdi_test_data[i].str,
@@ -237,8 +222,9 @@ static void check_utf8_comparisons(struct unicode_map *table)
const struct qstr s2 = {.name = nfdi_test_data[i].dec,
.len = sizeof(nfdi_test_data[i].dec)};
- test_f(!utf8_strncmp(table, &s1, &s2),
- "%s %s comparison mismatch\n", s1.name, s2.name);
+ /* strncmp returns 0 when strings are equal */
+ KUNIT_EXPECT_TRUE_MSG(test, utf8_strncmp(um, &s1, &s2) == 0,
+ "%s %s comparison mismatch\n", s1.name, s2.name);
}
for (i = 0; i < ARRAY_SIZE(nfdicf_test_data); i++) {
@@ -247,62 +233,65 @@ static void check_utf8_comparisons(struct unicode_map *table)
const struct qstr s2 = {.name = nfdicf_test_data[i].ncf,
.len = sizeof(nfdicf_test_data[i].ncf)};
- test_f(!utf8_strncasecmp(table, &s1, &s2),
- "%s %s comparison mismatch\n", s1.name, s2.name);
+ /* strncasecmp returns 0 when strings are equal */
+ KUNIT_EXPECT_TRUE_MSG(test, utf8_strncasecmp(um, &s1, &s2) == 0,
+ "%s %s comparison mismatch\n", s1.name, s2.name);
}
}
-static void check_supported_versions(struct unicode_map *um)
+static void check_supported_versions(struct kunit *test)
{
+ struct unicode_map *um = test->priv;
/* Unicode 7.0.0 should be supported. */
- test(utf8version_is_supported(um, UNICODE_AGE(7, 0, 0)));
+ KUNIT_EXPECT_TRUE(test, utf8version_is_supported(um, UNICODE_AGE(7, 0, 0)));
/* Unicode 9.0.0 should be supported. */
- test(utf8version_is_supported(um, UNICODE_AGE(9, 0, 0)));
+ KUNIT_EXPECT_TRUE(test, utf8version_is_supported(um, UNICODE_AGE(9, 0, 0)));
/* Unicode 1x.0.0 (the latest version) should be supported. */
- test(utf8version_is_supported(um, UTF8_LATEST));
+ KUNIT_EXPECT_TRUE(test, utf8version_is_supported(um, UTF8_LATEST));
/* Next versions don't exist. */
- test(!utf8version_is_supported(um, UNICODE_AGE(13, 0, 0)));
- test(!utf8version_is_supported(um, UNICODE_AGE(0, 0, 0)));
- test(!utf8version_is_supported(um, UNICODE_AGE(-1, -1, -1)));
+ KUNIT_EXPECT_FALSE(test, utf8version_is_supported(um, UNICODE_AGE(13, 0, 0)));
+ KUNIT_EXPECT_FALSE(test, utf8version_is_supported(um, UNICODE_AGE(0, 0, 0)));
+ KUNIT_EXPECT_FALSE(test, utf8version_is_supported(um, UNICODE_AGE(-1, -1, -1)));
}
-static int __init init_test_ucd(void)
+static struct kunit_case unicode_normalization_test_cases[] = {
+ KUNIT_CASE(check_supported_versions),
+ KUNIT_CASE(check_utf8_comparisons),
+ KUNIT_CASE(check_utf8_nfdicf),
+ KUNIT_CASE(check_utf8_nfdi),
+ {}
+};
+
+static int init_test_ucd(struct kunit *test)
{
- struct unicode_map *um;
+ struct unicode_map *um = utf8_load(UTF8_LATEST);
- failed_tests = 0;
- total_tests = 0;
+ test->priv = um;
- um = utf8_load(UTF8_LATEST);
- if (IS_ERR(um)) {
- pr_err("%s: Unable to load utf8 table.\n", __func__);
- return PTR_ERR(um);
- }
+ KUNIT_EXPECT_EQ_MSG(test, IS_ERR(um), 0,
+ "%s: Unable to load utf8 table.\n", __func__);
- check_supported_versions(um);
- check_utf8_nfdi(um);
- check_utf8_nfdicf(um);
- check_utf8_comparisons(um);
-
- if (!failed_tests)
- pr_info("All %u tests passed\n", total_tests);
- else
- pr_err("%u out of %u tests failed\n", failed_tests,
- total_tests);
- utf8_unload(um);
return 0;
}
-static void __exit exit_test_ucd(void)
+static void exit_test_ucd(struct kunit *test)
{
+ utf8_unload(test->priv);
}
-module_init(init_test_ucd);
-module_exit(exit_test_ucd);
+static struct kunit_suite unicode_normalization_test_suite = {
+ .name = "unicode_normalization",
+ .test_cases = unicode_normalization_test_cases,
+ .init = init_test_ucd,
+ .exit = exit_test_ucd,
+};
+
+kunit_test_suite(unicode_normalization_test_suite);
+
MODULE_AUTHOR("Gabriel Krisman Bertazi <krisman@collabora.co.uk>");
-MODULE_DESCRIPTION("Kernel module for testing utf-8 support");
+MODULE_DESCRIPTION("KUnit tests for utf-8 support.");
MODULE_LICENSE("GPL");
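
The conversion above replaces the module's init/exit pair with a per-suite fixture: state created in .init reaches each case through test->priv and is released in .exit. A generic sketch of that pattern, with hypothetical names:

	static int demo_suite_init(struct kunit *test)
	{
		/* test-managed allocation; KUnit frees it automatically */
		test->priv = kunit_kzalloc(test, sizeof(long), GFP_KERNEL);
		return test->priv ? 0 : -ENOMEM;
	}

	static void demo_suite_exit(struct kunit *test)
	{
		/* release anything that is not test-managed; nothing here */
	}
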
diff --git a/fs/unicode/utf8-norm.c b/fs/unicode/utf8-norm.c
index 768f8ab448b8..7b998c99c88d 100644
--- a/fs/unicode/utf8-norm.c
+++ b/fs/unicode/utf8-norm.c
@@ -586,7 +586,7 @@ ccc_mismatch:
}
}
-#ifdef CONFIG_UNICODE_NORMALIZATION_SELFTEST_MODULE
+#if IS_MODULE(CONFIG_UNICODE_NORMALIZATION_KUNIT_TEST)
EXPORT_SYMBOL_GPL(utf8version_is_supported);
EXPORT_SYMBOL_GPL(utf8nlen);
EXPORT_SYMBOL_GPL(utf8ncursor);
diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c
index a859ac9b74ba..770e29ec3557 100644
--- a/fs/vboxsf/dir.c
+++ b/fs/vboxsf/dir.c
@@ -303,11 +303,11 @@ static int vboxsf_dir_mkfile(struct mnt_idmap *idmap,
return vboxsf_dir_create(parent, dentry, mode, false, excl, NULL);
}
-static int vboxsf_dir_mkdir(struct mnt_idmap *idmap,
- struct inode *parent, struct dentry *dentry,
- umode_t mode)
+static struct dentry *vboxsf_dir_mkdir(struct mnt_idmap *idmap,
+ struct inode *parent, struct dentry *dentry,
+ umode_t mode)
{
- return vboxsf_dir_create(parent, dentry, mode, true, true, NULL);
+ return ERR_PTR(vboxsf_dir_create(parent, dentry, mode, true, true, NULL));
}
static int vboxsf_dir_atomic_open(struct inode *parent, struct dentry *dentry,
diff --git a/fs/xfs/scrub/orphanage.c b/fs/xfs/scrub/orphanage.c
index c287c755f2c5..3537f3cca6d5 100644
--- a/fs/xfs/scrub/orphanage.c
+++ b/fs/xfs/scrub/orphanage.c
@@ -167,10 +167,11 @@ xrep_orphanage_create(
* directory to control access to a file we put in here.
*/
if (d_really_is_negative(orphanage_dentry)) {
- error = vfs_mkdir(&nop_mnt_idmap, root_inode, orphanage_dentry,
- 0750);
- if (error)
- goto out_dput_orphanage;
+ orphanage_dentry = vfs_mkdir(&nop_mnt_idmap, root_inode,
+ orphanage_dentry, 0750);
+ error = PTR_ERR(orphanage_dentry);
+ if (IS_ERR(orphanage_dentry))
+ goto out_unlock_root;
}
/* Not a directory? Bail out. */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 6d9965b546cb..5077d52a775d 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -115,7 +115,7 @@ xfs_end_ioend(
*/
error = blk_status_to_errno(ioend->io_bio.bi_status);
if (unlikely(error)) {
- if (ioend->io_flags & IOMAP_F_SHARED) {
+ if (ioend->io_flags & IOMAP_IOEND_SHARED) {
xfs_reflink_cancel_cow_range(ip, offset, size, true);
xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, offset,
offset + size);
@@ -126,9 +126,9 @@ xfs_end_ioend(
/*
* Success: commit the COW or unwritten blocks if needed.
*/
- if (ioend->io_flags & IOMAP_F_SHARED)
+ if (ioend->io_flags & IOMAP_IOEND_SHARED)
error = xfs_reflink_end_cow(ip, offset, size);
- else if (ioend->io_type == IOMAP_UNWRITTEN)
+ else if (ioend->io_flags & IOMAP_IOEND_UNWRITTEN)
error = xfs_iomap_write_unwritten(ip, offset, size, false);
if (!error && xfs_ioend_is_append(ioend))
@@ -396,10 +396,11 @@ allocate_blocks:
}
static int
-xfs_prepare_ioend(
- struct iomap_ioend *ioend,
+xfs_submit_ioend(
+ struct iomap_writepage_ctx *wpc,
int status)
{
+ struct iomap_ioend *ioend = wpc->ioend;
unsigned int nofs_flag;
/*
@@ -410,7 +411,7 @@ xfs_prepare_ioend(
nofs_flag = memalloc_nofs_save();
/* Convert CoW extents to regular */
- if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
+ if (!status && (ioend->io_flags & IOMAP_IOEND_SHARED)) {
status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
ioend->io_offset, ioend->io_size);
}
@@ -418,10 +419,14 @@ xfs_prepare_ioend(
memalloc_nofs_restore(nofs_flag);
/* send ioends that might require a transaction to the completion wq */
- if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
- (ioend->io_flags & IOMAP_F_SHARED))
+ if (xfs_ioend_is_append(ioend) ||
+ (ioend->io_flags & (IOMAP_IOEND_UNWRITTEN | IOMAP_IOEND_SHARED)))
ioend->io_bio.bi_end_io = xfs_end_bio;
- return status;
+
+ if (status)
+ return status;
+ submit_bio(&ioend->io_bio);
+ return 0;
}
/*
@@ -463,7 +468,7 @@ xfs_discard_folio(
static const struct iomap_writeback_ops xfs_writeback_ops = {
.map_blocks = xfs_map_blocks,
- .prepare_ioend = xfs_prepare_ioend,
+ .submit_ioend = xfs_submit_ioend,
.discard_folio = xfs_discard_folio,
};
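
The xfs_aops.c hunks track an iomap writeback interface change: ->prepare_ioend becomes ->submit_ioend, the ioend is reached through the iomap_writepage_ctx, and the callback now submits the bio itself on success. Modeled on the xfs conversion above, a filesystem with no completion-time work might reduce to the following sketch (though such a filesystem could presumably leave the callback unset):

	static int demo_submit_ioend(struct iomap_writepage_ctx *wpc, int status)
	{
		if (status)
			return status;
		submit_bio(&wpc->ioend->io_bio);
		return 0;
	}
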
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 9a435b1ff264..85b857805d6d 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1498,7 +1498,8 @@ xfs_write_fault(
if (IS_DAX(inode))
ret = xfs_dax_fault_locked(vmf, order, true);
else
- ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops);
+ ret = iomap_page_mkwrite(vmf, &xfs_buffered_write_iomap_ops,
+ NULL);
xfs_iunlock(ip, lock_mode);
sb_end_pagefault(inode->i_sb);
@@ -1613,7 +1614,8 @@ const struct file_operations xfs_file_operations = {
.fadvise = xfs_file_fadvise,
.remap_file_range = xfs_file_remap_range,
.fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
- FOP_BUFFER_WASYNC | FOP_DIO_PARALLEL_WRITE,
+ FOP_BUFFER_WASYNC | FOP_DIO_PARALLEL_WRITE |
+ FOP_DONTCACHE,
};
const struct file_operations xfs_dir_file_operations = {
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d61460309a78..f631177ac320 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -828,6 +828,10 @@ xfs_direct_write_iomap_begin(
if (offset + length > i_size_read(inode))
iomap_flags |= IOMAP_F_DIRTY;
+ /* HW-offload atomics are always used in this path */
+ if (flags & IOMAP_ATOMIC)
+ iomap_flags |= IOMAP_F_ATOMIC_BIO;
+
/*
* COW writes may allocate delalloc space or convert unwritten COW
* extents, so we need to make sure to take the lock exclusively here.
@@ -1495,7 +1499,7 @@ xfs_zero_range(
return dax_zero_range(inode, pos, len, did_zero,
&xfs_dax_write_iomap_ops);
return iomap_zero_range(inode, pos, len, did_zero,
- &xfs_buffered_write_iomap_ops);
+ &xfs_buffered_write_iomap_ops, NULL);
}
int
@@ -1510,5 +1514,5 @@ xfs_truncate_page(
return dax_truncate_page(inode, pos, did_zero,
&xfs_dax_write_iomap_ops);
return iomap_truncate_page(inode, pos, did_zero,
- &xfs_buffered_write_iomap_ops);
+ &xfs_buffered_write_iomap_ops, NULL);
}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 40289fe6f5b2..a4480098d2bf 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -298,14 +298,14 @@ xfs_vn_create(
return xfs_generic_create(idmap, dir, dentry, mode, 0, NULL);
}
-STATIC int
+STATIC struct dentry *
xfs_vn_mkdir(
struct mnt_idmap *idmap,
struct inode *dir,
struct dentry *dentry,
umode_t mode)
{
- return xfs_generic_create(idmap, dir, dentry, mode | S_IFDIR, 0, NULL);
+ return ERR_PTR(xfs_generic_create(idmap, dir, dentry, mode | S_IFDIR, 0, NULL));
}
STATIC struct dentry *
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 0055066fb1d9..62d04f4843cf 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -2122,7 +2122,8 @@ static struct file_system_type xfs_fs_type = {
.init_fs_context = xfs_init_fs_context,
.parameters = xfs_fs_parameters,
.kill_sb = xfs_kill_sb,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
+ FS_LBS,
};
MODULE_ALIAS_FS("xfs");
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index 35166c92420c..42e2c0065bb3 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -299,7 +299,7 @@ static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
/* Serialize against truncates */
filemap_invalidate_lock_shared(inode->i_mapping);
- ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
+ ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops, NULL);
filemap_invalidate_unlock_shared(inode->i_mapping);
sb_end_pagefault(inode->i_sb);
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index a5cbbf3e26ec..3c61c29ff6ab 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1212,7 +1212,7 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
#ifndef memset_io
/**
- * memset_io Set a range of I/O memory to a constant value
+ * memset_io - Set a range of I/O memory to a constant value
* @addr: The beginning of the I/O-memory range to set
* @val: The value to set the memory to
* @count: The number of bytes to set
@@ -1224,7 +1224,7 @@ void memset_io(volatile void __iomem *addr, int val, size_t count);
#ifndef memcpy_fromio
/**
- * memcpy_fromio Copy a block of data from I/O memory
+ * memcpy_fromio - Copy a block of data from I/O memory
* @dst: The (RAM) destination for the copy
* @src: The (I/O memory) source for the data
* @count: The number of bytes to copy
@@ -1236,7 +1236,7 @@ void memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count);
#ifndef memcpy_toio
/**
- * memcpy_toio Copy a block of data into I/O memory
+ * memcpy_toio - Copy a block of data into I/O memory
* @dst: The (I/O memory) destination for the copy
* @src: The (RAM) source for the data
* @count: The number of bytes to copy
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 60d674af3080..1625c8529e70 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -64,7 +64,7 @@ struct linux_binprm {
const char *fdpath; /* generated filename for execveat */
unsigned interp_flags;
int execfd; /* File descriptor of the executable */
- unsigned long loader, exec;
+ unsigned long exec;
struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d37751789bf5..1c0cf6af392c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -268,10 +268,16 @@ static inline dev_t disk_devt(struct gendisk *disk)
return MKDEV(disk->major, disk->first_minor);
}
+/*
+ * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER)
+ * however we constrain this to what we can validate and test.
+ */
+#define BLK_MAX_BLOCK_SIZE SZ_64K
+
/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{
- if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize))
return -EINVAL;
return 0;
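
With the cap raised from PAGE_SIZE to BLK_MAX_BLOCK_SIZE, blk_validate_block_size() accepts the power-of-two sizes from 512 bytes up to 64 KiB; for example, blk_validate_block_size(SZ_16K) now returns 0 even on a 4 KiB-page system, where the old bound would have rejected it with -EINVAL.
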
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 17960a1e858d..485b651869d9 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -619,9 +619,8 @@ struct cgroup_root {
*/
struct cftype {
/*
- * By convention, the name should begin with the name of the
- * subsystem, followed by a period. Zero length string indicates
- * end of cftype array.
+ * Name of the subsystem is prepended in cgroup_file_name().
+ * Zero length string indicates end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
unsigned long private;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index f8ef47f8a634..28e999f2c642 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -113,6 +113,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);
@@ -689,8 +690,6 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
*/
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_hold(struct cgroup *cgrp);
-void cgroup_rstat_flush_release(struct cgroup *cgrp);
/*
* Basic resource stats.
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 155385754824..6db889c9efcc 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -206,9 +206,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#define __must_be_byte_array(a) __BUILD_BUG_ON_ZERO_MSG(!__is_byte_array(a), \
"must be byte array")
+/*
+ * If the "nonstring" attribute isn't available, we have to return true
+ * so the __must_*() checks pass when "nonstring" isn't supported.
+ */
+#if __has_attribute(__nonstring__) && defined(__annotated)
+#define __is_cstr(a) (!__annotated(a, nonstring))
+#define __is_noncstr(a) (__annotated(a, nonstring))
+#else
+#define __is_cstr(a) (true)
+#define __is_noncstr(a) (true)
+#endif
+
/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
#define __must_be_cstr(p) \
- __BUILD_BUG_ON_ZERO_MSG(__annotated(p, nonstring), "must be cstr (NUL-terminated)")
+ __BUILD_BUG_ON_ZERO_MSG(!__is_cstr(p), \
+ "must be C-string (NUL-terminated)")
+#define __must_be_noncstr(p) \
+ __BUILD_BUG_ON_ZERO_MSG(!__is_noncstr(p), \
+ "must be non-C-string (not NUL-terminated)")
#endif /* __KERNEL__ */
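/*
 * Editor's sketch (not part of the patch): with a compiler providing both
 * __builtin_has_attribute and the "nonstring" attribute, the checks above
 * catch misuse at build time; "struct hdr" (and a pointer h to it) is
 * hypothetical.
 */
struct hdr {
	char magic[4] __nonstring;	/* fixed-width, not NUL-terminated */
	char name[16];			/* NUL-terminated */
};
/*
 * __must_be_cstr(h->magic)   -> build error ("must be C-string")
 * __must_be_noncstr(h->name) -> build error ("must be non-C-string")
 * __must_be_cstr(h->name)    -> OK, evaluates to 0
 */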
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 981cc3d7e3aa..e09d323be845 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -349,6 +349,18 @@ struct ftrace_likely_data {
#endif
/*
+ * Optional: only supported since gcc >= 15
+ * Optional: not supported by Clang
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=117178
+ */
+#ifdef CONFIG_CC_HAS_MULTIDIMENSIONAL_NONSTRING
+# define __nonstring_array __attribute__((__nonstring__))
+#else
+# define __nonstring_array
+#endif
+
+/*
* Apply __counted_by() when the Endianness matches to increase test coverage.
*/
#ifdef __LITTLE_ENDIAN
@@ -360,7 +372,7 @@ struct ftrace_likely_data {
#endif
/* Do not trap wrapping arithmetic within an annotated function. */
-#ifdef CONFIG_UBSAN_SIGNED_WRAP
+#ifdef CONFIG_UBSAN_INTEGER_WRAP
# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
#else
# define __signed_wrap
@@ -446,11 +458,14 @@ struct ftrace_likely_data {
#define __member_size(p) __builtin_object_size(p, 1)
#endif
-/* Determine if an attribute has been applied to a variable. */
+/*
+ * Determine if an attribute has been applied to a variable.
+ * Using __annotated needs to check for __annotated being available,
+ * or negative tests may fail when annotation cannot be checked. For
+ * example, see the definition of __is_cstr().
+ */
#if __has_builtin(__builtin_has_attribute)
#define __annotated(var, attr) __builtin_has_attribute(var, attr)
-#else
-#define __annotated(var, attr) (false)
#endif
/*
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 4afb60365675..45bff10d3773 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -203,34 +203,34 @@ struct dentry_operations {
#define DCACHE_NFSFS_RENAMED BIT(12)
/* this dentry has been "silly renamed" and has to be deleted on the last
* dput() */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED BIT(14)
+#define DCACHE_FSNOTIFY_PARENT_WATCHED BIT(13)
/* Parent inode is watched by some fsnotify listener */
-#define DCACHE_DENTRY_KILLED BIT(15)
+#define DCACHE_DENTRY_KILLED BIT(14)
-#define DCACHE_MOUNTED BIT(16) /* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT BIT(17) /* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT BIT(18) /* manage transit from this dirent */
+#define DCACHE_MOUNTED BIT(15) /* is a mountpoint */
+#define DCACHE_NEED_AUTOMOUNT BIT(16) /* handle automount on this dir */
+#define DCACHE_MANAGE_TRANSIT BIT(17) /* manage transit from this dirent */
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
-#define DCACHE_LRU_LIST BIT(19)
+#define DCACHE_LRU_LIST BIT(18)
-#define DCACHE_ENTRY_TYPE (7 << 20) /* bits 20..22 are for storing type: */
-#define DCACHE_MISS_TYPE (0 << 20) /* Negative dentry */
-#define DCACHE_WHITEOUT_TYPE (1 << 20) /* Whiteout dentry (stop pathwalk) */
-#define DCACHE_DIRECTORY_TYPE (2 << 20) /* Normal directory */
-#define DCACHE_AUTODIR_TYPE (3 << 20) /* Lookupless directory (presumed automount) */
-#define DCACHE_REGULAR_TYPE (4 << 20) /* Regular file type */
-#define DCACHE_SPECIAL_TYPE (5 << 20) /* Other file type */
-#define DCACHE_SYMLINK_TYPE (6 << 20) /* Symlink */
+#define DCACHE_ENTRY_TYPE (7 << 19) /* bits 19..21 are for storing type: */
+#define DCACHE_MISS_TYPE (0 << 19) /* Negative dentry */
+#define DCACHE_WHITEOUT_TYPE (1 << 19) /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE (2 << 19) /* Normal directory */
+#define DCACHE_AUTODIR_TYPE (3 << 19) /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE (4 << 19) /* Regular file type */
+#define DCACHE_SPECIAL_TYPE (5 << 19) /* Other file type */
+#define DCACHE_SYMLINK_TYPE (6 << 19) /* Symlink */
-#define DCACHE_NOKEY_NAME BIT(25) /* Encrypted name encoded without key */
-#define DCACHE_OP_REAL BIT(26)
+#define DCACHE_NOKEY_NAME BIT(22) /* Encrypted name encoded without key */
+#define DCACHE_OP_REAL BIT(23)
-#define DCACHE_PAR_LOOKUP BIT(28) /* being looked up (with parent locked shared) */
-#define DCACHE_DENTRY_CURSOR BIT(29)
-#define DCACHE_NORCU BIT(30) /* No RCU delay for freeing */
+#define DCACHE_PAR_LOOKUP BIT(24) /* being looked up (with parent locked shared) */
+#define DCACHE_DENTRY_CURSOR BIT(25)
+#define DCACHE_NORCU BIT(26) /* No RCU delay for freeing */
extern seqlock_t rename_lock;
@@ -253,7 +253,6 @@ extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
const struct qstr *name);
-extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 0c0d00fcd131..ccb478eb174b 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -25,6 +25,10 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long t
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
+/* Copy ready events to userspace */
+int epoll_sendevents(struct file *file, struct epoll_event __user *events,
+ int maxevents);
+
/*
* This is called from inside fs/file_table.c:__fput() to unlink files
* from the eventpoll interface. We need to have this facility to cleanup
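/*
 * Editor's sketch (not part of the patch): epoll_sendevents() lets an
 * in-kernel caller copy ready events straight to a user buffer without
 * sleeping; the helper below is hypothetical.
 */
static int harvest_ready(struct file *epfile,
			 struct epoll_event __user *uevents)
{
	/* returns the number of events copied (possibly 0) or -errno */
	return epoll_sendevents(epfile, uevents, 8);
}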
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 78f660ebc318..3c817dc6292e 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -25,7 +25,7 @@
#define FANOTIFY_FID_BITS (FAN_REPORT_DFID_NAME_TARGET)
-#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)
+#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD | FAN_REPORT_MNT)
/*
* fanotify_init() flags that require CAP_SYS_ADMIN.
@@ -38,7 +38,8 @@
FAN_REPORT_PIDFD | \
FAN_REPORT_FD_ERROR | \
FAN_UNLIMITED_QUEUE | \
- FAN_UNLIMITED_MARKS)
+ FAN_UNLIMITED_MARKS | \
+ FAN_REPORT_MNT)
/*
* fanotify_init() flags that are allowed for user without CAP_SYS_ADMIN.
@@ -58,7 +59,7 @@
#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV)
#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
- FAN_MARK_FILESYSTEM)
+ FAN_MARK_FILESYSTEM | FAN_MARK_MNTNS)
#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \
FAN_MARK_FLUSH)
@@ -109,10 +110,13 @@
/* Events that can only be reported with data type FSNOTIFY_EVENT_ERROR */
#define FANOTIFY_ERROR_EVENTS (FAN_FS_ERROR)
+#define FANOTIFY_MOUNT_EVENTS (FAN_MNT_ATTACH | FAN_MNT_DETACH)
+
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \
FANOTIFY_INODE_EVENTS | \
- FANOTIFY_ERROR_EVENTS)
+ FANOTIFY_ERROR_EVENTS | \
+ FANOTIFY_MOUNT_EVENTS)
/* Extra flags that may be reported with event or control handling of events */
#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
diff --git a/include/linux/file_ref.h b/include/linux/file_ref.h
index 9b3a8d9b17ab..7db62fbc0500 100644
--- a/include/linux/file_ref.h
+++ b/include/linux/file_ref.h
@@ -61,6 +61,7 @@ static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
atomic_long_set(&ref->refcnt, cnt - 1);
}
+bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt);
bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
/**
@@ -161,6 +162,39 @@ static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
}
/**
+ * file_ref_put_close - drop a reference expected to transition to FILE_REF_NOREF
+ * @ref: Pointer to the reference count
+ *
+ * Semantically equivalent to file_ref_put(), but it trades worse performance
+ * when other CPUs are concurrently modifying the refcount for better
+ * performance when this happens to be the last reference.
+ *
+ * For the last reference, file_ref_put() issues two atomics: one to drop the
+ * reference and another to transition it to FILE_REF_DEAD. This routine does
+ * the work in one step, but to do so it has to pre-read the counter, which
+ * hurts scalability.
+ *
+ * Use with close() et al; stick to file_ref_put() by default.
+ */
+static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
+{
+ long old, new;
+
+ old = atomic_long_read(&ref->refcnt);
+ do {
+ if (unlikely(old < 0))
+ return __file_ref_put_badval(ref, old);
+
+ if (old == FILE_REF_ONEREF)
+ new = FILE_REF_DEAD;
+ else
+ new = old - 1;
+ } while (!atomic_long_try_cmpxchg(&ref->refcnt, &old, new));
+
+ return new == FILE_REF_DEAD;
+}
+
+/**
* file_ref_read - Read the number of file references
* @ref: Pointer to the reference count
*
@@ -174,4 +208,18 @@ static inline unsigned long file_ref_read(file_ref_t *ref)
return c >= FILE_REF_RELEASED ? 0 : c + 1;
}
+/*
+ * __file_ref_read_raw - Return the value stored in ref->refcnt
+ * @ref: Pointer to the reference count
+ *
+ * Return: The raw value found in the counter
+ *
+ * A hack for file_needs_f_pos_lock(); you probably want
+ * file_ref_read() instead.
+ */
+static inline unsigned long __file_ref_read_raw(file_ref_t *ref)
+{
+ return atomic_long_read(&ref->refcnt);
+}
+
#endif
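/*
 * Editor's sketch (not part of the patch): a close()-style path is likely
 * dropping the last reference, so it can use file_ref_put_close(); the
 * surrounding helper and final-free path are hypothetical.
 */
static void mydrv_close(struct file *f)
{
	if (file_ref_put_close(&f->f_ref))
		mydrv_free_file(f);	/* hypothetical final-free path */
}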
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2788df98080f..1a0e23a5d02d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
+#include <linux/vfsdebug.h>
#include <linux/linkage.h>
#include <linux/wait_bit.h>
#include <linux/kdev_t.h>
@@ -790,19 +791,8 @@ struct inode {
static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen)
{
- int testlen;
-
- /*
- * TODO: patch it into a debug-only check if relevant macros show up.
- * In the meantime, since we are suffering strlen even on production kernels
- * to find the right length, do a fixup if the wrong value got passed.
- */
- testlen = strlen(link);
- if (testlen != linklen) {
- WARN_ONCE(1, "bad length passed for symlink [%s] (got %d, expected %d)",
- link, linklen, testlen);
- linklen = testlen;
- }
+ VFS_WARN_ON_INODE(strlen(link) != linklen, inode);
+ VFS_WARN_ON_INODE(inode->i_opflags & IOP_CACHED_LINK, inode);
inode->i_link = link;
inode->i_linklen = linklen;
inode->i_opflags |= IOP_CACHED_LINK;
@@ -1067,7 +1057,6 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
/**
* struct file - Represents a file
- * @f_ref: reference count
* @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
* @f_mode: FMODE_* flags often used in hotpaths
* @f_op: file operations
@@ -1077,12 +1066,12 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
* @f_flags: file flags
* @f_iocb_flags: iocb flags
* @f_cred: stashed credentials of creator/opener
+ * @f_owner: file owner
* @f_path: path of the file
* @f_pos_lock: lock protecting file position
* @f_pipe: specific to pipes
* @f_pos: file position
* @f_security: LSM security context of this file
- * @f_owner: file owner
* @f_wb_err: writeback error
* @f_sb_err: per sb writeback errors
* @f_ep: link of all epoll hooks for this file
@@ -1090,9 +1079,9 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
* @f_llist: work queue entrypoint
* @f_ra: file's readahead state
* @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
+ * @f_ref: reference count
*/
struct file {
- file_ref_t f_ref;
spinlock_t f_lock;
fmode_t f_mode;
const struct file_operations *f_op;
@@ -1102,6 +1091,7 @@ struct file {
unsigned int f_flags;
unsigned int f_iocb_flags;
const struct cred *f_cred;
+ struct fown_struct *f_owner;
/* --- cacheline 1 boundary (64 bytes) --- */
struct path f_path;
union {
@@ -1115,7 +1105,6 @@ struct file {
void *f_security;
#endif
/* --- cacheline 2 boundary (128 bytes) --- */
- struct fown_struct *f_owner;
errseq_t f_wb_err;
errseq_t f_sb_err;
#ifdef CONFIG_EPOLL
@@ -1127,6 +1116,7 @@ struct file {
struct file_ra_state f_ra;
freeptr_t f_freeptr;
};
+ file_ref_t f_ref;
/* --- cacheline 3 boundary (192 bytes) --- */
} __randomize_layout
__attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
@@ -1981,8 +1971,8 @@ bool inode_owner_or_capable(struct mnt_idmap *idmap,
*/
int vfs_create(struct mnt_idmap *, struct inode *,
struct dentry *, umode_t, bool);
-int vfs_mkdir(struct mnt_idmap *, struct inode *,
- struct dentry *, umode_t);
+struct dentry *vfs_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
int vfs_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
umode_t, dev_t);
int vfs_symlink(struct mnt_idmap *, struct inode *,
@@ -2039,7 +2029,7 @@ int vfs_fchown(struct file *file, uid_t user, gid_t group);
int vfs_fchmod(struct file *file, umode_t mode);
int vfs_utimes(const struct path *path, struct timespec64 *times);
-extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
@@ -2211,8 +2201,8 @@ struct inode_operations {
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,
const char *);
- int (*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,
- umode_t);
+ struct dentry *(*mkdir) (struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,
umode_t,dev_t);
@@ -2616,6 +2606,7 @@ struct file_system_type {
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
#define FS_MGTIME 64 /* FS uses multigrain timestamps */
+#define FS_LBS 128 /* FS supports LBS */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2653,9 +2644,6 @@ static inline bool is_mgtime(const struct inode *inode)
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
-extern struct dentry *mount_single(struct file_system_type *fs_type,
- int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -2794,13 +2782,13 @@ static inline bool is_idmapped_mnt(const struct vfsmount *mnt)
return mnt_idmap(mnt) != &nop_mnt_idmap;
}
-extern long vfs_truncate(const struct path *, loff_t);
+int vfs_truncate(const struct path *, loff_t);
int do_truncate(struct mnt_idmap *, struct dentry *, loff_t start,
unsigned int time_attrs, struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
-extern long do_sys_open(int dfd, const char __user *filename, int flags,
- umode_t mode);
+int do_sys_open(int dfd, const char __user *filename, int flags,
+ umode_t mode);
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(const struct path *,
@@ -2851,7 +2839,10 @@ extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int);
extern struct filename *getname_uflags(const char __user *, int);
-extern struct filename *getname(const char __user *);
+static inline struct filename *getname(const char __user *name)
+{
+ return getname_flags(name, 0);
+}
extern struct filename *getname_kernel(const char *);
extern struct filename *__getname_maybe_null(const char __user *);
static inline struct filename *getname_maybe_null(const char __user *name, int flags)
@@ -2864,6 +2855,13 @@ static inline struct filename *getname_maybe_null(const char __user *name, int f
return __getname_maybe_null(name);
}
extern void putname(struct filename *name);
+DEFINE_FREE(putname, struct filename *, if (!IS_ERR_OR_NULL(_T)) putname(_T))
+
+static inline struct filename *refname(struct filename *name)
+{
+ atomic_inc(&name->refcnt);
+ return name;
+}
extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
@@ -3297,7 +3295,11 @@ static inline void __iget(struct inode *inode)
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
-extern struct inode *new_inode_pseudo(struct super_block *sb);
+struct inode *alloc_inode(struct super_block *sb);
+static inline struct inode *new_inode_pseudo(struct super_block *sb)
+{
+ return alloc_inode(sb);
+}
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int setattr_should_drop_suidgid(struct mnt_idmap *, struct inode *);
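/*
 * Editor's sketch (not part of the patch): ->mkdir now returns a
 * struct dentry *. Judging by this header, a filesystem that always uses
 * the dentry it was handed would convert roughly like this; the "myfs_*"
 * names are hypothetical, and the NULL-means-same-dentry convention is an
 * assumption based on the new signature.
 */
static struct dentry *myfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
				 struct dentry *dentry, umode_t mode)
{
	int err = myfs_do_mkdir(dir, dentry, mode);	/* hypothetical */

	return err ? ERR_PTR(err) : NULL;
}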
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 4b4bfef6f053..a19e4bd32e4d 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -144,8 +144,6 @@ extern void put_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param_source(struct fs_context *fc,
struct fs_parameter *param);
extern void fc_drop_locked(struct fs_context *fc);
-int reconfigure_single(struct super_block *s,
- int flags, void *data);
extern int get_tree_nodev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 18855cb44b1c..56fad33043d5 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -310,10 +310,8 @@ static inline void fscrypt_prepare_dentry(struct dentry *dentry,
/* crypto.c */
void fscrypt_enqueue_decrypt_work(struct work_struct *);
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags);
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags);
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num, gfp_t gfp_flags);
@@ -480,10 +478,8 @@ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
-static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags)
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 83d3ac97f826..454d8e466958 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -320,6 +320,11 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
__fsnotify_vfsmount_delete(mnt);
}
+static inline void fsnotify_mntns_delete(struct mnt_namespace *mntns)
+{
+ __fsnotify_mntns_delete(mntns);
+}
+
/*
* fsnotify_inoderemove - an inode is going away
*/
@@ -528,4 +533,19 @@ static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode,
NULL, NULL, NULL, 0);
}
+static inline void fsnotify_mnt_attach(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_ATTACH, ns, mnt);
+}
+
+static inline void fsnotify_mnt_detach(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_DETACH, ns, mnt);
+}
+
+static inline void fsnotify_mnt_move(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_MOVE, ns, mnt);
+}
+
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0d24a21a8e60..6cd8d1d28b8b 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -59,6 +59,10 @@
#define FS_PRE_ACCESS 0x00100000 /* Pre-content access hook */
+#define FS_MNT_ATTACH 0x01000000 /* Mount was attached */
+#define FS_MNT_DETACH 0x02000000 /* Mount was detached */
+#define FS_MNT_MOVE (FS_MNT_ATTACH | FS_MNT_DETACH)
+
/*
* Set on inode mark that cares about things that happen to its children.
* Always set for dnotify and inotify.
@@ -80,6 +84,9 @@
*/
#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
+/* Mount namespace events */
+#define FSNOTIFY_MNT_EVENTS (FS_MNT_ATTACH | FS_MNT_DETACH)
+
/* Content events can be used to inspect file content */
#define FSNOTIFY_CONTENT_PERM_EVENTS (FS_OPEN_PERM | FS_OPEN_EXEC_PERM | \
FS_ACCESS_PERM)
@@ -108,6 +115,7 @@
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
+ FSNOTIFY_MNT_EVENTS | \
FS_EVENTS_POSS_ON_CHILD | \
FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
@@ -298,6 +306,7 @@ enum fsnotify_data_type {
FSNOTIFY_EVENT_PATH,
FSNOTIFY_EVENT_INODE,
FSNOTIFY_EVENT_DENTRY,
+ FSNOTIFY_EVENT_MNT,
FSNOTIFY_EVENT_ERROR,
};
@@ -318,6 +327,11 @@ static inline const struct path *file_range_path(const struct file_range *range)
return range->path;
}
+struct fsnotify_mnt {
+ const struct mnt_namespace *ns;
+ u64 mnt_id;
+};
+
static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
{
switch (data_type) {
@@ -383,6 +397,24 @@ static inline struct super_block *fsnotify_data_sb(const void *data,
}
}
+static inline const struct fsnotify_mnt *fsnotify_data_mnt(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_MNT:
+ return data;
+ default:
+ return NULL;
+ }
+}
+
+static inline u64 fsnotify_data_mnt_id(const void *data, int data_type)
+{
+ const struct fsnotify_mnt *mnt_data = fsnotify_data_mnt(data, data_type);
+
+ return mnt_data ? mnt_data->mnt_id : 0;
+}
+
static inline struct fs_error_report *fsnotify_data_error_report(
const void *data,
int data_type)
@@ -420,6 +452,7 @@ enum fsnotify_iter_type {
FSNOTIFY_ITER_TYPE_SB,
FSNOTIFY_ITER_TYPE_PARENT,
FSNOTIFY_ITER_TYPE_INODE2,
+ FSNOTIFY_ITER_TYPE_MNTNS,
FSNOTIFY_ITER_TYPE_COUNT
};
@@ -429,6 +462,7 @@ enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_INODE,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
+ FSNOTIFY_OBJ_TYPE_MNTNS,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
@@ -613,8 +647,10 @@ extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
+extern void __fsnotify_mntns_delete(struct mnt_namespace *mntns);
extern void fsnotify_sb_free(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
+extern void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt);
static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
{
@@ -928,6 +964,9 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
static inline void fsnotify_sb_delete(struct super_block *sb)
{}
+static inline void __fsnotify_mntns_delete(struct mnt_namespace *mntns)
+{}
+
static inline void fsnotify_sb_free(struct super_block *sb)
{}
@@ -942,6 +981,9 @@ static inline u32 fsnotify_get_cookie(void)
static inline void fsnotify_unmount_inodes(struct super_block *sb)
{}
+static inline void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt)
+{}
+
#endif /* CONFIG_FSNOTIFY */
#endif /* __KERNEL__ */
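/*
 * Editor's sketch (not part of the patch): a backend handling
 * FSNOTIFY_EVENT_MNT data can recover the mount ID like this; the handler
 * itself is hypothetical.
 */
static void my_handle_event(__u32 mask, const void *data, int data_type)
{
	u64 mnt_id = fsnotify_data_mnt_id(data, data_type);

	if ((mask & FS_MNT_ATTACH) && mnt_id)
		pr_debug("mount %llu attached\n",
			 (unsigned long long)mnt_id);
}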
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 75bf54e76f3b..02fe001feebb 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -56,6 +56,13 @@ struct vm_fault;
*
* IOMAP_F_BOUNDARY indicates that I/O and I/O completions for this iomap must
* never be merged with the mapping before it.
+ *
+ * IOMAP_F_ANON_WRITE indicates that (write) I/O does not have a target block
+ * assigned to it yet and the file system will do that in the bio submission
+ * handler, splitting the I/O as needed.
+ *
+ * IOMAP_F_ATOMIC_BIO indicates that (write) I/O will be issued as an atomic
+ * bio, i.e. set REQ_ATOMIC.
*/
#define IOMAP_F_NEW (1U << 0)
#define IOMAP_F_DIRTY (1U << 1)
@@ -68,6 +75,8 @@ struct vm_fault;
#endif /* CONFIG_BUFFER_HEAD */
#define IOMAP_F_XATTR (1U << 5)
#define IOMAP_F_BOUNDARY (1U << 6)
+#define IOMAP_F_ANON_WRITE (1U << 7)
+#define IOMAP_F_ATOMIC_BIO (1U << 8)
/*
* Flags set by the core iomap code during operations:
@@ -111,6 +120,8 @@ struct iomap {
static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
+ if (iomap->flags & IOMAP_F_ANON_WRITE)
+ return U64_MAX; /* invalid */
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
@@ -182,7 +193,8 @@ struct iomap_folio_ops {
#else
#define IOMAP_DAX 0
#endif /* CONFIG_FS_DAX */
-#define IOMAP_ATOMIC (1 << 9)
+#define IOMAP_ATOMIC (1 << 9) /* torn-write protection */
+#define IOMAP_DONTCACHE (1 << 10)
struct iomap_ops {
/*
@@ -211,8 +223,10 @@ struct iomap_ops {
* calls to iomap_iter(). Treat as read-only in the body.
* @len: The remaining length of the file segment we're operating on.
* It is updated at the same time as @pos.
- * @processed: The number of bytes processed by the body in the most recent
- * iteration, or a negative errno. 0 causes the iteration to stop.
+ * @iter_start_pos: The original start pos for the current iomap. Used for
+ * incremental iter advance.
+ * @status: Status of the most recent iteration. Zero on success or a negative
+ * errno on error.
* @flags: Zero or more of the iomap_begin flags above.
* @iomap: Map describing the I/O iteration
* @srcmap: Source map for COW operations
@@ -221,7 +235,8 @@ struct iomap_iter {
struct inode *inode;
loff_t pos;
u64 len;
- s64 processed;
+ loff_t iter_start_pos;
+ int status;
unsigned flags;
struct iomap iomap;
struct iomap srcmap;
@@ -229,20 +244,46 @@ struct iomap_iter {
};
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
+int iomap_iter_advance(struct iomap_iter *iter, u64 *count);
/**
- * iomap_length - length of the current iomap iteration
+ * iomap_length_trim - trimmed length of the current iomap iteration
* @iter: iteration structure
+ * @pos: File position to trim from.
+ * @len: Length of the mapping to trim to.
*
- * Returns the length that the operation applies to for the current iteration.
+ * Returns a trimmed length that the operation applies to for the current
+ * iteration.
*/
-static inline u64 iomap_length(const struct iomap_iter *iter)
+static inline u64 iomap_length_trim(const struct iomap_iter *iter, loff_t pos,
+ u64 len)
{
u64 end = iter->iomap.offset + iter->iomap.length;
if (iter->srcmap.type != IOMAP_HOLE)
end = min(end, iter->srcmap.offset + iter->srcmap.length);
- return min(iter->len, end - iter->pos);
+ return min(len, end - pos);
+}
+
+/**
+ * iomap_length - length of the current iomap iteration
+ * @iter: iteration structure
+ *
+ * Returns the length that the operation applies to for the current iteration.
+ */
+static inline u64 iomap_length(const struct iomap_iter *iter)
+{
+ return iomap_length_trim(iter, iter->pos, iter->len);
+}
+
+/**
+ * iomap_iter_advance_full - advance by the full length of the current map
+ * @iter: iteration structure
+ */
+static inline int iomap_iter_advance_full(struct iomap_iter *iter)
+{
+ u64 length = iomap_length(iter);
+
+ return iomap_iter_advance(iter, &length);
}
/**
@@ -306,12 +347,11 @@ bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
- bool *did_zero, const struct iomap_ops *ops);
+ bool *did_zero, const struct iomap_ops *ops, void *private);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops);
-vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
- const struct iomap_ops *ops);
-
+ const struct iomap_ops *ops, void *private);
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
+ void *private);
typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
struct iomap *iomap);
void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
@@ -328,16 +368,42 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
const struct iomap_ops *ops);
/*
+ * Flags for iomap_ioend->io_flags.
+ */
+/* shared COW extent */
+#define IOMAP_IOEND_SHARED (1U << 0)
+/* unwritten extent */
+#define IOMAP_IOEND_UNWRITTEN (1U << 1)
+/* don't merge into previous ioend */
+#define IOMAP_IOEND_BOUNDARY (1U << 2)
+/* is direct I/O */
+#define IOMAP_IOEND_DIRECT (1U << 3)
+
+/*
+ * Flags that if set on either ioend prevent the merge of two ioends.
+ * (IOMAP_IOEND_BOUNDARY also prevents merges, but only one-way)
+ */
+#define IOMAP_IOEND_NOMERGE_FLAGS \
+ (IOMAP_IOEND_SHARED | IOMAP_IOEND_UNWRITTEN | IOMAP_IOEND_DIRECT)
+
+/*
* Structure for writeback I/O completions.
+ *
+ * File systems implementing ->submit_ioend (for buffered I/O) or ->submit_io
+ * (for direct I/O) can split a bio generated by iomap. In that case the
+ * parent ioend it was split from is recorded in ioend->io_parent.
*/
struct iomap_ioend {
struct list_head io_list; /* next ioend in chain */
- u16 io_type;
- u16 io_flags; /* IOMAP_F_* */
+ u16 io_flags; /* IOMAP_IOEND_* */
struct inode *io_inode; /* file being written to */
- size_t io_size; /* size of data within eof */
+ size_t io_size; /* size of the extent */
+ atomic_t io_remaining; /* completion defer count */
+ int io_error; /* stashed away status */
+ struct iomap_ioend *io_parent; /* parent for completions */
loff_t io_offset; /* offset in the file */
sector_t io_sector; /* start sector of ioend */
+ void *io_private; /* file system private data */
struct bio io_bio; /* MUST BE LAST! */
};
@@ -362,12 +428,14 @@ struct iomap_writeback_ops {
loff_t offset, unsigned len);
/*
- * Optional, allows the file systems to perform actions just before
- * submitting the bio and/or override the bio end_io handler for complex
- * operations like copy on write extent manipulation or unwritten extent
- * conversions.
+ * Optional, allows file systems to hook into bio submission,
+ * including overriding the bi_end_io handler.
+ *
+ * Returns 0 if the bio was successfully submitted, or a negative
+ * error code if status was non-zero or another error happened and
+ * the bio could not be submitted.
*/
- int (*prepare_ioend)(struct iomap_ioend *ioend, int status);
+ int (*submit_ioend)(struct iomap_writepage_ctx *wpc, int status);
/*
* Optional, allows the file system to discard state on a page where
@@ -383,6 +451,10 @@ struct iomap_writepage_ctx {
u32 nr_folios; /* folios added to the ioend */
};
+struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio,
+ loff_t file_offset, u16 ioend_flags);
+struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
+ unsigned int max_len, bool is_append);
void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
struct list_head *more_ioends);
@@ -454,4 +526,6 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO)
#endif /* CONFIG_SWAP */
+extern struct bio_set iomap_ioend_bioset;
+
#endif /* LINUX_IOMAP_H */
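/*
 * Editor's sketch (not part of the patch): under the new iterator model
 * the per-mapping body advances the iterator itself and returns a status
 * instead of a byte count; "myop" is hypothetical.
 */
static int myop_iter(struct iomap_iter *iter)
{
	u64 count = iomap_length(iter);	/* bytes covered by this mapping */

	/* ... operate on [iter->pos, iter->pos + count) ... */

	return iomap_iter_advance(iter, &count);
}
/*
 * Hypothetical caller loop:
 *	while ((ret = iomap_iter(&iter, ops)) > 0)
 *		iter.status = myop_iter(&iter);
 */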
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index f0e9f8eda7a3..c840431eadda 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -68,8 +68,6 @@ extern note_buf_t __percpu *crash_notes;
#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
#endif
-#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME
-
/*
* This structure is used to hold the arguments that are used when loading
* kernel binaries.
diff --git a/include/linux/key.h b/include/linux/key.h
index 074dca3222b9..ba05de8579ec 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -236,6 +236,7 @@ struct key {
#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
+#define KEY_FLAG_FINAL_PUT 10 /* set if final put has happened on key */
/* the key type and key description string
* - the desc is used to match a key against search criteria
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
index 7fcf29a4e0de..6ea897222af1 100644
--- a/include/linux/kstrtox.h
+++ b/include/linux/kstrtox.h
@@ -143,6 +143,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
*/
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern unsigned long simple_strntoul(const char *,char **,unsigned int,size_t);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index 49eef10c8e59..4bf261d41a6d 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -60,7 +60,6 @@ struct misc_cg {
struct misc_res res[MISC_CG_RES_TYPES];
};
-u64 misc_cg_res_total_usage(enum misc_res_type type);
int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
@@ -104,11 +103,6 @@ static inline void put_misc_cg(struct misc_cg *cg)
#else /* !CONFIG_CGROUP_MISC */
-static inline u64 misc_cg_res_total_usage(enum misc_res_type type)
-{
- return 0;
-}
-
static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
return 0;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1f80baddacc5..2edb8d14d165 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2555,7 +2555,7 @@ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
struct task_struct *task, bool bypass_rlim);
struct kvec;
-struct page *get_dump_page(unsigned long addr);
+struct page *get_dump_page(unsigned long addr, int *locked);
bool folio_mark_dirty(struct folio *folio);
bool folio_mark_dirty_lock(struct folio *folio);
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index b1b219bc3422..e71a6070a8f8 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -25,6 +25,11 @@ static_assert(sizeof(vfsgid_t) == sizeof(kgid_t));
static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val));
static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val));
+static inline bool is_valid_mnt_idmap(const struct mnt_idmap *idmap)
+{
+ return idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap;
+}
+
#ifdef CONFIG_MULTIUSER
static inline uid_t __vfsuid_val(vfsuid_t uid)
{
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 8ec8fed3bce8..e3042176cdf4 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -18,35 +18,36 @@ enum { MAX_NESTED_LINKS = 8 };
enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
/* pathwalk mode */
-#define LOOKUP_FOLLOW 0x0001 /* follow links at the end */
-#define LOOKUP_DIRECTORY 0x0002 /* require a directory */
-#define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */
-#define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... only] */
-#define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */
-#define LOOKUP_MOUNTPOINT 0x0080 /* follow mounts in the end */
-
-#define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */
-#define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */
+#define LOOKUP_FOLLOW BIT(0) /* follow links at the end */
+#define LOOKUP_DIRECTORY BIT(1) /* require a directory */
+#define LOOKUP_AUTOMOUNT BIT(2) /* force terminal automount */
+#define LOOKUP_EMPTY BIT(3) /* accept empty path [user_... only] */
+#define LOOKUP_LINKAT_EMPTY BIT(4) /* Linkat request with empty path. */
+#define LOOKUP_DOWN BIT(5) /* follow mounts in the starting point */
+#define LOOKUP_MOUNTPOINT BIT(6) /* follow mounts in the end */
+#define LOOKUP_REVAL BIT(7) /* tell ->d_revalidate() to trust no cache */
+#define LOOKUP_RCU BIT(8) /* RCU pathwalk mode; semi-internal */
+#define LOOKUP_CACHED BIT(9) /* Only do cached lookup */
+#define LOOKUP_PARENT BIT(10) /* Looking up final parent in path */
+/* 5 spare bits for pathwalk */
/* These tell filesystem methods that we are dealing with the final component... */
-#define LOOKUP_OPEN 0x0100 /* ... in open */
-#define LOOKUP_CREATE 0x0200 /* ... in object creation */
-#define LOOKUP_EXCL 0x0400 /* ... in exclusive creation */
-#define LOOKUP_RENAME_TARGET 0x0800 /* ... in destination of rename() */
+#define LOOKUP_OPEN BIT(16) /* ... in open */
+#define LOOKUP_CREATE BIT(17) /* ... in object creation */
+#define LOOKUP_EXCL BIT(18) /* ... in target must not exist */
+#define LOOKUP_RENAME_TARGET BIT(19) /* ... in destination of rename() */
-/* internal use only */
-#define LOOKUP_PARENT 0x0010
+/* 4 spare bits for intent */
/* Scoping flags for lookup. */
-#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */
-#define LOOKUP_NO_MAGICLINKS 0x020000 /* No nd_jump_link() crossing. */
-#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */
-#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
-#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
-#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */
-#define LOOKUP_LINKAT_EMPTY 0x400000 /* Linkat request with empty path. */
+#define LOOKUP_NO_SYMLINKS BIT(24) /* No symlink crossing. */
+#define LOOKUP_NO_MAGICLINKS BIT(25) /* No nd_jump_link() crossing. */
+#define LOOKUP_NO_XDEV BIT(26) /* No mountpoint crossing. */
+#define LOOKUP_BENEATH BIT(27) /* No escaping from starting point. */
+#define LOOKUP_IN_ROOT BIT(28) /* Treat dirfd as fs root. */
/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
+/* 3 spare bits for scoping */
extern int path_pts(struct path *path);
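/*
 * Editor's sketch (not part of the patch): the BIT() renumbering groups
 * the flags by role (pathwalk, intent, scoping), so masking out one group
 * stays readable; "lookup_intent" is hypothetical.
 */
static inline unsigned int lookup_intent(unsigned int flags)
{
	return flags & (LOOKUP_OPEN | LOOKUP_CREATE |
			LOOKUP_EXCL | LOOKUP_RENAME_TARGET);
}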
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 9155a6ffc370..d66c61cbbd1d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1802,7 +1802,7 @@ struct nfs_rpc_ops {
int (*link) (struct inode *, struct inode *, const struct qstr *);
int (*symlink) (struct inode *, struct dentry *, struct folio *,
unsigned int, struct iattr *);
- int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
+ struct dentry *(*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
int (*readdir) (struct nfs_readdir_arg *, struct nfs_readdir_res *);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 9fd7a0ce9c1a..f0ac0633366b 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -94,7 +94,6 @@
#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/nodemask_types.h>
-#include <linux/numa.h>
#include <linux/random.h>
extern nodemask_t _unused_nodemask_arg_;
@@ -191,6 +190,13 @@ static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *s
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
+#define nodes_copy(dst, src) __nodes_copy(&(dst), &(src), MAX_NUMNODES)
+static __always_inline void __nodes_copy(nodemask_t *dstp,
+ const nodemask_t *srcp, unsigned int nbits)
+{
+ bitmap_copy(dstp->bits, srcp->bits, nbits);
+}
+
#define nodes_complement(dst, src) \
__nodes_complement(&(dst), &(src), MAX_NUMNODES)
static __always_inline void __nodes_complement(nodemask_t *dstp,
diff --git a/include/linux/nodemask_types.h b/include/linux/nodemask_types.h
index 6b28d97ea6ed..f850a48742f1 100644
--- a/include/linux/nodemask_types.h
+++ b/include/linux/nodemask_types.h
@@ -3,7 +3,16 @@
#define __LINUX_NODEMASK_TYPES_H
#include <linux/bitops.h>
-#include <linux/numa.h>
+
+#ifdef CONFIG_NODES_SHIFT
+#define NODES_SHIFT CONFIG_NODES_SHIFT
+#else
+#define NODES_SHIFT 0
+#endif
+
+#define MAX_NUMNODES (1 << NODES_SHIFT)
+
+#define NUMA_NO_NODE (-1)
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 3567e40329eb..e6baaf6051bc 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -3,16 +3,8 @@
#define _LINUX_NUMA_H
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/nodemask.h>
-#ifdef CONFIG_NODES_SHIFT
-#define NODES_SHIFT CONFIG_NODES_SHIFT
-#else
-#define NODES_SHIFT 0
-#endif
-
-#define MAX_NUMNODES (1 << NODES_SHIFT)
-
-#define NUMA_NO_NODE (-1)
#define NUMA_NO_MEMBLK (-1)
static inline bool numa_valid_node(int nid)
@@ -39,6 +31,8 @@ void __init alloc_offline_node_data(int nid);
/* Generic implementation available */
int numa_nearest_node(int node, unsigned int state);
+int nearest_node_nodemask(int node, nodemask_t *mask);
+
#ifndef memory_add_physaddr_to_nid
int memory_add_physaddr_to_nid(u64 start);
#endif
@@ -55,6 +49,11 @@ static inline int numa_nearest_node(int node, unsigned int state)
return NUMA_NO_NODE;
}
+static inline int nearest_node_nodemask(int node, nodemask_t *mask)
+{
+ return NUMA_NO_NODE;
+}
+
static inline int memory_add_physaddr_to_nid(u64 start)
{
return 0;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 36d283552f80..df9234e5f478 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -925,14 +925,15 @@ FOLIO_FLAG_FALSE(has_hwpoisoned)
enum pagetype {
/* 0x00-0x7f are positive numbers, ie mapcount */
/* Reserve 0x80-0xef for mapcount overflow. */
- PGTY_buddy = 0xf0,
- PGTY_offline = 0xf1,
- PGTY_table = 0xf2,
- PGTY_guard = 0xf3,
- PGTY_hugetlb = 0xf4,
- PGTY_slab = 0xf5,
- PGTY_zsmalloc = 0xf6,
- PGTY_unaccepted = 0xf7,
+ PGTY_buddy = 0xf0,
+ PGTY_offline = 0xf1,
+ PGTY_table = 0xf2,
+ PGTY_guard = 0xf3,
+ PGTY_hugetlb = 0xf4,
+ PGTY_slab = 0xf5,
+ PGTY_zsmalloc = 0xf6,
+ PGTY_unaccepted = 0xf7,
+ PGTY_large_kmalloc = 0xf8,
PGTY_mapcount_underflow = 0xff
};
@@ -1075,6 +1076,7 @@ PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
* Serialized with zone lock.
*/
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
+FOLIO_TYPE_OPS(large_kmalloc, large_kmalloc)
/**
* PageHuge - Determine if the page belongs to hugetlbfs
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 47bfc6b1b632..d2432cb02fa0 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1044,21 +1044,23 @@ static inline pgoff_t page_pgoff(const struct folio *folio,
return folio->index + folio_page_idx(folio, page);
}
-/*
- * Return byte-offset into filesystem object for page.
+/**
+ * folio_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
*/
-static inline loff_t page_offset(struct page *page)
+static inline loff_t folio_pos(const struct folio *folio)
{
- return ((loff_t)page->index) << PAGE_SHIFT;
+ return ((loff_t)folio->index) * PAGE_SIZE;
}
-/**
- * folio_pos - Returns the byte position of this folio in its file.
- * @folio: The folio.
+/*
+ * Return byte-offset into filesystem object for page.
*/
-static inline loff_t folio_pos(struct folio *folio)
+static inline loff_t page_offset(struct page *page)
{
- return page_offset(&folio->page);
+ struct folio *folio = page_folio(page);
+
+ return folio_pos(folio) + folio_page_idx(folio, page) * PAGE_SIZE;
}
/*
@@ -1603,34 +1605,6 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
}
/**
- * page_mkwrite_check_truncate - check if page was truncated
- * @page: the page to check
- * @inode: the inode to check the page against
- *
- * Returns the number of bytes in the page up to EOF,
- * or -EFAULT if the page was truncated.
- */
-static inline int page_mkwrite_check_truncate(struct page *page,
- struct inode *inode)
-{
- loff_t size = i_size_read(inode);
- pgoff_t index = size >> PAGE_SHIFT;
- int offset = offset_in_page(size);
-
- if (page->mapping != inode->i_mapping)
- return -EFAULT;
-
- /* page is wholly inside EOF */
- if (page->index < index)
- return PAGE_SIZE;
- /* page is wholly past EOF */
- if (page->index > index || !offset)
- return -EFAULT;
- /* page is partially inside EOF */
- return offset;
-}
-
-/**
* i_blocks_per_folio - How many blocks fit in this folio.
* @inode: The inode which contains the blocks.
* @folio: The folio.
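/*
 * Editor's sketch (not part of the patch): page_offset() is now derived
 * from the folio, so tail pages of a large folio get correct offsets.
 * Worked example, assuming PAGE_SIZE == 4K: for a folio at index 16
 * (file position 64K), its third page yields
 *
 *	page_offset(page) == folio_pos(folio) + 2 * PAGE_SIZE == 72K
 */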
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 98837a1ff0f3..311ecebd7d56 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -101,9 +101,9 @@ extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
* these helpers must be called with the tasklist_lock write-held.
*/
extern void attach_pid(struct task_struct *task, enum pid_type);
-extern void detach_pid(struct task_struct *task, enum pid_type);
-extern void change_pid(struct task_struct *task, enum pid_type,
- struct pid *pid);
+void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type);
+void change_pid(struct pid **pids, struct task_struct *task, enum pid_type,
+ struct pid *pid);
extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
@@ -129,6 +129,7 @@ extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
size_t set_tid_size);
extern void free_pid(struct pid *pid);
+void free_pids(struct pid **pids);
extern void disable_pid_allocation(struct pid_namespace *ns);
/*
diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h
index 7c830d0dec9a..05e6f8f4a026 100644
--- a/include/linux/pidfs.h
+++ b/include/linux/pidfs.h
@@ -6,6 +6,7 @@ struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags);
void __init pidfs_init(void);
void pidfs_add_pid(struct pid *pid);
void pidfs_remove_pid(struct pid *pid);
+void pidfs_exit(struct task_struct *tsk);
extern const struct dentry_operations pidfs_dentry_operations;
#endif /* _LINUX_PID_FS_H */
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index b698758000f8..9d42d473d201 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -108,7 +108,7 @@ struct pipe_inode_info {
#ifdef CONFIG_WATCH_QUEUE
bool note_loss;
#endif
- struct page *tmp_page;
+ struct page *tmp_page[2];
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 48e5c03df1dd..23bcf71ffb06 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1025,12 +1025,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define RCU_POINTER_INITIALIZER(p, v) \
.p = RCU_INITIALIZER(v)
-/*
- * Does the specified offset indicate that the corresponding rcu_head
- * structure can be handled by kvfree_rcu()?
- */
-#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
-
/**
* kfree_rcu() - kfree an object after a grace period.
* @ptr: pointer to kfree for double-argument invocations.
@@ -1041,11 +1035,11 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* when they are used in a kernel module, that module must invoke the
* high-latency rcu_barrier() function at module-unload time.
*
- * The kfree_rcu() function handles this issue. Rather than encoding a
- * function address in the embedded rcu_head structure, kfree_rcu() instead
- * encodes the offset of the rcu_head structure within the base structure.
- * Because the functions are not allowed in the low-order 4096 bytes of
- * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
+ * The kfree_rcu() function handles this issue. In order to have a universal
+ * callback function handling different offsets of rcu_head, the callback needs
+ * to determine the starting address of the freed object, which can be a large
+ * kmalloc or vmalloc allocation. To allow simply aligning the pointer down
+ * to a page boundary for those, only offsets up to 4095 bytes can be
+ * accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
* be generated in kvfree_rcu_arg_2(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
@@ -1082,14 +1076,23 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
+/*
+ * In mm/slab_common.c, no suitable header to include here.
+ */
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+
+/*
+ * The BUILD_BUG_ON() makes sure the rcu_head offset can be handled. See the
+ * comment of kfree_rcu() for details.
+ */
#define kvfree_rcu_arg_2(ptr, rhf) \
do { \
typeof (ptr) ___p = (ptr); \
\
- if (___p) { \
- BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
- kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
- } \
+ if (___p) { \
+ BUILD_BUG_ON(offsetof(typeof(*(ptr)), rhf) >= 4096); \
+ kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
+ } \
} while (0)
#define kvfree_rcu_arg_1(ptr) \
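/*
 * Editor's sketch (not part of the patch): the BUILD_BUG_ON() above means
 * the rcu_head must sit within the first 4096 bytes of the object;
 * "struct my_obj" is hypothetical.
 */
struct my_obj {
	long data[16];
	struct rcu_head rcu;	/* offset well under 4096: OK */
};

static void my_obj_free(struct my_obj *p)
{
	kvfree_rcu(p, rcu);	/* expands to kvfree_rcu_arg_2(p, rcu) */
}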
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index fe42315f667f..f519cd680228 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -90,41 +90,6 @@ static inline void synchronize_rcu_expedited(void)
synchronize_rcu();
}
-/*
- * Add one more declaration of kvfree() here. It is
- * not so straight forward to just include <linux/mm.h>
- * where it is defined due to getting many compile
- * errors caused by that include.
- */
-extern void kvfree(const void *addr);
-
-static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
- if (head) {
- call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
- return;
- }
-
- // kvfree_rcu(one_arg) call.
- might_sleep();
- synchronize_rcu();
- kvfree(ptr);
-}
-
-static inline void kvfree_rcu_barrier(void)
-{
- rcu_barrier();
-}
-
-#ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, void *ptr);
-#else
-static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
- __kvfree_call_rcu(head, ptr);
-}
-#endif
-
void rcu_qs(void);
static inline void rcu_softirq_qs(void)
@@ -164,7 +129,6 @@ static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_eqs(void) { }
-static inline void kfree_rcu_scheduler_running(void) { }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 27d86d912781..dbe77b5fe06e 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,12 +34,9 @@ static inline void rcu_virt_note_context_switch(void)
}
void synchronize_rcu_expedited(void);
-void kvfree_call_rcu(struct rcu_head *head, void *ptr);
-void kvfree_rcu_barrier(void);
void rcu_barrier(void);
void rcu_momentary_eqs(void);
-void kfree_rcu_scheduler_running(void);
struct rcu_gp_oldstate {
unsigned long rgos_norm;
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index 1d70a9867fb1..f7545430a548 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -146,6 +146,7 @@ struct sched_ext_entity {
u32 weight;
s32 sticky_cpu;
s32 holding_cpu;
+ s32 selected_cpu;
u32 kf_mask; /* see scx_kf_mask above */
struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */
atomic_long_t ops_state;
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index e45531455d3b..9b959972bf4a 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -22,21 +22,17 @@
#include <linux/atomic.h>
#include <asm/seccomp.h>
+extern int __secure_computing(void);
+
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-extern int __secure_computing(const struct seccomp_data *sd);
static inline int secure_computing(void)
{
if (unlikely(test_syscall_work(SECCOMP)))
- return __secure_computing(NULL);
+ return __secure_computing();
return 0;
}
#else
extern void secure_computing_strict(int this_syscall);
-static inline int __secure_computing(const struct seccomp_data *sd)
-{
- secure_computing_strict(sd->nr);
- return 0;
-}
#endif
extern long prctl_get_seccomp(void);
@@ -58,7 +54,7 @@ static inline int secure_computing(void) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
-static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
+static inline int __secure_computing(void) { return 0; }
static inline long prctl_get_seccomp(void)
{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 09eedaecf120..98e07e9e9e58 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -16,6 +16,7 @@
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
+#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
@@ -941,8 +942,6 @@ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t siz
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- if (__builtin_constant_p(n) && __builtin_constant_p(size))
- return kmalloc_noprof(bytes, flags);
return kmalloc_noprof(bytes, flags);
}
#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
@@ -1082,6 +1081,19 @@ extern void kvfree_sensitive(const void *addr, size_t len);
unsigned int kmem_cache_size(struct kmem_cache *s);
+#ifndef CONFIG_KVFREE_RCU_BATCHED
+static inline void kvfree_rcu_barrier(void)
+{
+ rcu_barrier();
+}
+
+static inline void kfree_rcu_scheduler_running(void) { }
+#else
+void kvfree_rcu_barrier(void);
+
+void kfree_rcu_scheduler_running(void);
+#endif
+
/**
* kmalloc_size_roundup - Report allocation bucket size for the given size
*
diff --git a/include/linux/string.h b/include/linux/string.h
index f8e21e80942f..0403a4ca4c11 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -415,8 +415,10 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
*/
#define strtomem_pad(dest, src, pad) do { \
const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_noncstr(dest) + \
ARRAY_SIZE(dest); \
- const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _src_len = __must_be_cstr(src) + \
+ __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
@@ -439,8 +441,10 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
*/
#define strtomem(dest, src) do { \
const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_noncstr(dest) + \
ARRAY_SIZE(dest); \
- const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _src_len = __must_be_cstr(src) + \
+ __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
@@ -459,8 +463,10 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
*/
#define memtostr(dest, src) do { \
const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_cstr(dest) + \
ARRAY_SIZE(dest); \
- const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _src_len = __must_be_noncstr(src) + \
+ __builtin_object_size(src, 1); \
const size_t _src_chars = strnlen(src, _src_len); \
const size_t _copy_len = min(_dest_len - 1, _src_chars); \
\
@@ -485,8 +491,10 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
*/
#define memtostr_pad(dest, src) do { \
const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_cstr(dest) + \
ARRAY_SIZE(dest); \
- const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _src_len = __must_be_noncstr(src) + \
+ __builtin_object_size(src, 1); \
const size_t _src_chars = strnlen(src, _src_len); \
const size_t _copy_len = min(_dest_len - 1, _src_chars); \
\
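/*
 * Editor's sketch (not part of the patch): the added checks tie these
 * helpers to the "nonstring" annotation; "struct rec" is hypothetical.
 */
struct rec {
	char label[8] __nonstring;	/* fixed-width, not NUL-terminated */
};

static void rec_set_label(struct rec *r, const char *src)
{
	strtomem_pad(r->label, src, ' ');	/* OK: dest is __nonstring */
	/*
	 * strtomem(plain_char_array, src) would now fail at build time,
	 * since a destination lacking __nonstring is rejected by
	 * __must_be_noncstr().
	 */
}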
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
index 120ca0f28e95..f3ba4f52ff26 100644
--- a/include/linux/string_choices.h
+++ b/include/linux/string_choices.h
@@ -41,23 +41,23 @@ static inline const char *str_high_low(bool v)
}
#define str_low_high(v) str_high_low(!(v))
-static inline const char *str_read_write(bool v)
-{
- return v ? "read" : "write";
-}
-#define str_write_read(v) str_read_write(!(v))
-
static inline const char *str_on_off(bool v)
{
return v ? "on" : "off";
}
#define str_off_on(v) str_on_off(!(v))
-static inline const char *str_yes_no(bool v)
+static inline const char *str_read_write(bool v)
{
- return v ? "yes" : "no";
+ return v ? "read" : "write";
}
-#define str_no_yes(v) str_yes_no(!(v))
+#define str_write_read(v) str_read_write(!(v))
+
+static inline const char *str_true_false(bool v)
+{
+ return v ? "true" : "false";
+}
+#define str_false_true(v) str_true_false(!(v))
static inline const char *str_up_down(bool v)
{
@@ -65,11 +65,11 @@ static inline const char *str_up_down(bool v)
}
#define str_down_up(v) str_up_down(!(v))
-static inline const char *str_true_false(bool v)
+static inline const char *str_yes_no(bool v)
{
- return v ? "true" : "false";
+ return v ? "yes" : "no";
}
-#define str_false_true(v) str_true_false(!(v))
+#define str_no_yes(v) str_yes_no(!(v))
/**
* str_plural - Return the simple pluralization based on English counts
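
The hunk only reorders the helpers into alphabetical order; behaviour is unchanged. Typical use keeps ternaries out of format strings (a sketch; the boolean flags are hypothetical):

    pr_info("%s request while link is %s\n",
            str_write_read(writing), str_up_down(link_ok));
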
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index c6333204d451..e5603cc91963 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -951,6 +951,10 @@ asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
int flags, uint32_t sig);
asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags);
+asmlinkage long sys_open_tree_attr(int dfd, const char __user *path,
+ unsigned flags,
+ struct mount_attr __user *uattr,
+ size_t usize);
asmlinkage long sys_move_mount(int from_dfd, const char __user *from_path,
int to_dfd, const char __user *to_path,
unsigned int ms_flags);
@@ -1266,14 +1270,14 @@ static inline long ksys_lchown(const char __user *filename, uid_t user,
AT_SYMLINK_NOFOLLOW);
}
-extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
+int do_sys_ftruncate(unsigned int fd, loff_t length, int small);
static inline long ksys_ftruncate(unsigned int fd, loff_t length)
{
return do_sys_ftruncate(fd, length, 1);
}
-extern long do_sys_truncate(const char __user *pathname, loff_t length);
+int do_sys_truncate(const char __user *pathname, loff_t length);
static inline long ksys_truncate(const char __user *pathname, loff_t length)
{
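
From userspace, open_tree_attr() acts like open_tree() with a struct mount_attr applied atomically to the resulting mount fd. A hedged sketch, assuming a kernel that wires up the syscall (the raw number 467 matches the unistd.h hunk later in this patch):

    #include <fcntl.h>
    #include <linux/mount.h>        /* struct mount_attr, OPEN_TREE_CLONE */
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_tree_rdonly(const char *path)
    {
            struct mount_attr attr = { .attr_set = MOUNT_ATTR_RDONLY };

            return syscall(467 /* __NR_open_tree_attr */, AT_FDCWD, path,
                           OPEN_TREE_CLONE, &attr, sizeof(attr));
    }
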
diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h
deleted file mode 100644
index 5cf77dbb8d86..000000000000
--- a/include/linux/sysv_fs.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SYSV_FS_H
-#define _LINUX_SYSV_FS_H
-
-#define __packed2__ __attribute__((packed, aligned(2)))
-
-
-#ifndef __KERNEL__
-typedef u16 __fs16;
-typedef u32 __fs32;
-#endif
-
-/* inode numbers are 16 bit */
-typedef __fs16 sysv_ino_t;
-
-/* Block numbers are 24 bit, sometimes stored in 32 bit.
- On Coherent FS, they are always stored in PDP-11 manner: the least
- significant 16 bits come last. */
-typedef __fs32 sysv_zone_t;
-
-/* 0 is non-existent */
-#define SYSV_BADBL_INO 1 /* inode of bad blocks file */
-#define SYSV_ROOT_INO 2 /* inode of root directory */
-
-
-/* Xenix super-block data on disk */
-#define XENIX_NICINOD 100 /* number of inode cache entries */
-#define XENIX_NICFREE 100 /* number of free block list chunk entries */
-struct xenix_super_block {
- __fs16 s_isize; /* index of first data zone */
- __fs32 s_fsize __packed2__; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= XENIX_NICFREE */
- sysv_zone_t s_free[XENIX_NICFREE]; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= XENIX_NICINOD */
- sysv_ino_t s_inode[XENIX_NICINOD]; /* some free inodes */
- /* locks, not used by Linux: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time __packed2__; /* time of last super block update */
- __fs32 s_tfree __packed2__; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- __fs16 s_dinfo[4]; /* device information ?? */
- char s_fname[6]; /* file system volume name */
- char s_fpack[6]; /* file system pack name */
- char s_clean; /* set to 0x46 when filesystem is properly unmounted */
- char s_fill[371];
- s32 s_magic; /* version of file system */
- __fs32 s_type; /* type of file system: 1 for 512 byte blocks
- 2 for 1024 byte blocks
- 3 for 2048 byte blocks */
-
-};
-
-/*
- * SystemV FS comes in two variants:
- * sysv2: System V Release 2 (e.g. Microport), structure elements aligned(2).
- * sysv4: System V Release 4 (e.g. Consensys), structure elements aligned(4).
- */
-#define SYSV_NICINOD 100 /* number of inode cache entries */
-#define SYSV_NICFREE 50 /* number of free block list chunk entries */
-
-/* SystemV4 super-block data on disk */
-struct sysv4_super_block {
- __fs16 s_isize; /* index of first data zone */
- u16 s_pad0;
- __fs32 s_fsize; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= SYSV_NICFREE */
- u16 s_pad1;
- sysv_zone_t s_free[SYSV_NICFREE]; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= SYSV_NICINOD */
- u16 s_pad2;
- sysv_ino_t s_inode[SYSV_NICINOD]; /* some free inodes */
- /* locks, not used by Linux: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time; /* time of last super block update */
- __fs16 s_dinfo[4]; /* device information ?? */
- __fs32 s_tfree; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- u16 s_pad3;
- char s_fname[6]; /* file system volume name */
- char s_fpack[6]; /* file system pack name */
- s32 s_fill[12];
- __fs32 s_state; /* file system state: 0x7c269d38-s_time means clean */
- s32 s_magic; /* version of file system */
- __fs32 s_type; /* type of file system: 1 for 512 byte blocks
- 2 for 1024 byte blocks */
-};
-
-/* SystemV2 super-block data on disk */
-struct sysv2_super_block {
- __fs16 s_isize; /* index of first data zone */
- __fs32 s_fsize __packed2__; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= SYSV_NICFREE */
- sysv_zone_t s_free[SYSV_NICFREE]; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= SYSV_NICINOD */
- sysv_ino_t s_inode[SYSV_NICINOD]; /* some free inodes */
- /* locks, not used by Linux: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time __packed2__; /* time of last super block update */
- __fs16 s_dinfo[4]; /* device information ?? */
- __fs32 s_tfree __packed2__; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- char s_fname[6]; /* file system volume name */
- char s_fpack[6]; /* file system pack name */
- s32 s_fill[14];
- __fs32 s_state; /* file system state: 0xcb096f43 means clean */
- s32 s_magic; /* version of file system */
- __fs32 s_type; /* type of file system: 1 for 512 byte blocks
- 2 for 1024 byte blocks */
-};
-
-/* V7 super-block data on disk */
-#define V7_NICINOD 100 /* number of inode cache entries */
-#define V7_NICFREE 50 /* number of free block list chunk entries */
-struct v7_super_block {
- __fs16 s_isize; /* index of first data zone */
- __fs32 s_fsize __packed2__; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= V7_NICFREE */
- sysv_zone_t s_free[V7_NICFREE]; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= V7_NICINOD */
- sysv_ino_t s_inode[V7_NICINOD]; /* some free inodes */
- /* locks, not used by Linux or V7: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time __packed2__; /* time of last super block update */
- /* the following fields are not maintained by V7: */
- __fs32 s_tfree __packed2__; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- __fs16 s_m; /* interleave factor */
- __fs16 s_n; /* interleave factor */
- char s_fname[6]; /* file system name */
- char s_fpack[6]; /* file system pack name */
-};
-/* Constants to aid sanity checking */
-/* This is not a hard limit, nor enforced by the v7 kernel. It's actually just
- * the limit used by Seventh Edition's ls, though it is high enough to assume
- * that no reasonable file system would have that many entries in its root
- * directory. Thus, if we see anything higher, we most probably got the
- * endianness wrong. */
-#define V7_NFILES 1024
-/* The disk addresses are three-byte (despite direct block addresses being
- * aligned word-wise in inode). If the most significant byte is non-zero,
- * something is most likely wrong (not a filesystem, bad bytesex). */
-#define V7_MAXSIZE 0x00ffffff
-
-/* Coherent super-block data on disk */
-#define COH_NICINOD 100 /* number of inode cache entries */
-#define COH_NICFREE 64 /* number of free block list chunk entries */
-struct coh_super_block {
- __fs16 s_isize; /* index of first data zone */
- __fs32 s_fsize __packed2__; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= COH_NICFREE */
- sysv_zone_t s_free[COH_NICFREE] __packed2__; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= COH_NICINOD */
- sysv_ino_t s_inode[COH_NICINOD]; /* some free inodes */
- /* locks, not used by Linux: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time __packed2__; /* time of last super block update */
- __fs32 s_tfree __packed2__; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- __fs16 s_interleave_m; /* interleave factor */
- __fs16 s_interleave_n;
- char s_fname[6]; /* file system volume name */
- char s_fpack[6]; /* file system pack name */
- __fs32 s_unique; /* zero, not used */
-};
-
-/* SystemV/Coherent inode data on disk */
-struct sysv_inode {
- __fs16 i_mode;
- __fs16 i_nlink;
- __fs16 i_uid;
- __fs16 i_gid;
- __fs32 i_size;
- u8 i_data[3*(10+1+1+1)];
- u8 i_gen;
- __fs32 i_atime; /* time of last access */
- __fs32 i_mtime; /* time of last modification */
- __fs32 i_ctime; /* time of creation */
-};
-
-/* SystemV/Coherent directory entry on disk */
-#define SYSV_NAMELEN 14 /* max size of name in struct sysv_dir_entry */
-struct sysv_dir_entry {
- sysv_ino_t inode;
- char name[SYSV_NAMELEN]; /* up to 14 characters, the rest are zeroes */
-};
-
-#define SYSV_DIRSIZE sizeof(struct sysv_dir_entry) /* size of every directory entry */
-
-#endif /* _LINUX_SYSV_FS_H */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index cf2446c9c30d..dd925d84fa46 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -217,54 +217,6 @@ static inline int arch_within_stack_frames(const void * const stack,
}
#endif
-#ifdef CONFIG_HARDENED_USERCOPY
-extern void __check_object_size(const void *ptr, unsigned long n,
- bool to_user);
-
-static __always_inline void check_object_size(const void *ptr, unsigned long n,
- bool to_user)
-{
- if (!__builtin_constant_p(n))
- __check_object_size(ptr, n, to_user);
-}
-#else
-static inline void check_object_size(const void *ptr, unsigned long n,
- bool to_user)
-{ }
-#endif /* CONFIG_HARDENED_USERCOPY */
-
-extern void __compiletime_error("copy source size is too small")
-__bad_copy_from(void);
-extern void __compiletime_error("copy destination size is too small")
-__bad_copy_to(void);
-
-void __copy_overflow(int size, unsigned long count);
-
-static inline void copy_overflow(int size, unsigned long count)
-{
- if (IS_ENABLED(CONFIG_BUG))
- __copy_overflow(size, count);
-}
-
-static __always_inline __must_check bool
-check_copy_size(const void *addr, size_t bytes, bool is_source)
-{
- int sz = __builtin_object_size(addr, 0);
- if (unlikely(sz >= 0 && sz < bytes)) {
- if (!__builtin_constant_p(bytes))
- copy_overflow(sz, bytes);
- else if (is_source)
- __bad_copy_from();
- else
- __bad_copy_to();
- return false;
- }
- if (WARN_ON_ONCE(bytes > INT_MAX))
- return false;
- check_object_size(addr, bytes, is_source);
- return true;
-}
-
#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 52f5850730b3..a1815f4395ab 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -262,6 +262,36 @@ sched_numa_hop_mask(unsigned int node, unsigned int hops)
#endif /* CONFIG_NUMA */
/**
+ * for_each_node_numadist() - iterate over nodes in increasing distance
+ * order, starting from a given node
+ * @node: the iteration variable and the starting node.
+ * @unvisited: a nodemask to keep track of the unvisited nodes.
+ *
+ * This macro iterates over NUMA node IDs in increasing distance from the
+ * starting @node and yields MAX_NUMNODES when all the nodes have been
+ * visited.
+ *
+ * Note that by the time the loop completes, the @unvisited nodemask will
+ * be fully cleared, unless the loop exits early.
+ *
+ * The difference between for_each_node() and for_each_node_numadist() is
+ * that the former iterates over nodes in numerical order, whereas the
+ * latter iterates over nodes in increasing order of distance.
+ *
+ * The complexity of this iterator is O(N^2), where N is the number of
+ * nodes, as each iteration involves scanning all nodes to find the one
+ * with the shortest distance.
+ *
+ * Requires rcu_read_lock() to be held.
+ */
+#define for_each_node_numadist(node, unvisited) \
+ for (int __start = (node), \
+ (node) = nearest_node_nodemask((__start), &(unvisited)); \
+ (node) < MAX_NUMNODES; \
+ node_clear((node), (unvisited)), \
+ (node) = nearest_node_nodemask((__start), &(unvisited)))
+
+/**
* for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
* from a given node.
* @mask: the iteration variable.
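
A sketch of the intended use: fall back to increasingly distant nodes until an allocation succeeds (alloc_near() is hypothetical; GFP_NOWAIT keeps the allocation legal under rcu_read_lock()):

    static void *alloc_near(int from_node, size_t size)
    {
            nodemask_t unvisited = node_states[N_ONLINE];
            void *p = NULL;
            int nid = from_node;            /* doubles as the starting node */

            rcu_read_lock();
            for_each_node_numadist(nid, unvisited) {
                    p = kmalloc_node(size, GFP_NOWAIT, nid);
                    if (p)
                            break;
            }
            rcu_read_unlock();
            return p;
    }
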
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index e9c702c1908d..7c06f4795670 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -7,7 +7,7 @@
#include <linux/minmax.h>
#include <linux/nospec.h>
#include <linux/sched.h>
-#include <linux/thread_info.h>
+#include <linux/ucopysize.h>
#include <asm/uaccess.h>
diff --git a/include/linux/ucopysize.h b/include/linux/ucopysize.h
new file mode 100644
index 000000000000..41c2d9720466
--- /dev/null
+++ b/include/linux/ucopysize.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Perform sanity checking for object sizes for uaccess.h and uio.h. */
+#ifndef __LINUX_UCOPYSIZE_H__
+#define __LINUX_UCOPYSIZE_H__
+
+#include <linux/bug.h>
+
+#ifdef CONFIG_HARDENED_USERCOPY
+#include <linux/jump_label.h>
+extern void __check_object_size(const void *ptr, unsigned long n,
+ bool to_user);
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_HARDENED_USERCOPY_DEFAULT_ON,
+ validate_usercopy_range);
+
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ if (!__builtin_constant_p(n) &&
+ static_branch_maybe(CONFIG_HARDENED_USERCOPY_DEFAULT_ON,
+ &validate_usercopy_range)) {
+ __check_object_size(ptr, n, to_user);
+ }
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
+extern void __compiletime_error("copy source size is too small")
+__bad_copy_from(void);
+extern void __compiletime_error("copy destination size is too small")
+__bad_copy_to(void);
+
+void __copy_overflow(int size, unsigned long count);
+
+static inline void copy_overflow(int size, unsigned long count)
+{
+ if (IS_ENABLED(CONFIG_BUG))
+ __copy_overflow(size, count);
+}
+
+static __always_inline __must_check bool
+check_copy_size(const void *addr, size_t bytes, bool is_source)
+{
+ int sz = __builtin_object_size(addr, 0);
+ if (unlikely(sz >= 0 && sz < bytes)) {
+ if (!__builtin_constant_p(bytes))
+ copy_overflow(sz, bytes);
+ else if (is_source)
+ __bad_copy_from();
+ else
+ __bad_copy_to();
+ return false;
+ }
+ if (WARN_ON_ONCE(bytes > INT_MAX))
+ return false;
+ check_object_size(addr, bytes, is_source);
+ return true;
+}
+
+#endif /* __LINUX_UCOPYSIZE_H__ */
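
check_copy_size() is the pattern the uaccess wrappers build on: reject provably-too-small objects at compile time, validate variable sizes at run time, then fall through to the raw copy. A minimal sketch of such a wrapper (my_copy_to_user() is hypothetical):

    static __always_inline unsigned long
    my_copy_to_user(void __user *to, const void *from, unsigned long n)
    {
            if (!check_copy_size(from, n, true))    /* 'from' is the source */
                    return n;                       /* nothing copied */
            return raw_copy_to_user(to, from, n);
    }
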
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index f85ec5613721..2dc767e08f54 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -132,6 +132,7 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
u32 map_id_down(struct uid_gid_map *map, u32 id);
u32 map_id_up(struct uid_gid_map *map, u32 id);
+u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count);
#else
@@ -186,6 +187,11 @@ static inline u32 map_id_down(struct uid_gid_map *map, u32 id)
return id;
}
+static inline u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count)
+{
+ return id;
+}
+
static inline u32 map_id_up(struct uid_gid_map *map, u32 id)
{
return id;
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 8ada84e85447..49ece9e1888f 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -6,8 +6,8 @@
#define __LINUX_UIO_H
#include <linux/kernel.h>
-#include <linux/thread_info.h>
#include <linux/mm_types.h>
+#include <linux/ucopysize.h>
#include <uapi/linux/uio.h>
struct page;
diff --git a/include/linux/vfsdebug.h b/include/linux/vfsdebug.h
new file mode 100644
index 000000000000..9cf22d3eb9dd
--- /dev/null
+++ b/include/linux/vfsdebug.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_VFS_DEBUG_H
+#define LINUX_VFS_DEBUG_H 1
+
+#include <linux/bug.h>
+
+struct inode;
+
+#ifdef CONFIG_DEBUG_VFS
+void dump_inode(struct inode *inode, const char *reason);
+
+#define VFS_BUG_ON(cond) BUG_ON(cond)
+#define VFS_WARN_ON(cond) (void)WARN_ON(cond)
+#define VFS_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define VFS_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
+#define VFS_WARN(cond, format...) (void)WARN(cond, format)
+
+#define VFS_BUG_ON_INODE(cond, inode) ({ \
+ if (unlikely(!!(cond))) { \
+ dump_inode(inode, "VFS_BUG_ON_INODE(" #cond")");\
+ BUG_ON(1); \
+ } \
+})
+
+#define VFS_WARN_ON_INODE(cond, inode) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_inode(inode, "VFS_WARN_ON_INODE(" #cond")");\
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
+#else
+#define VFS_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VFS_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VFS_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VFS_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+#define VFS_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
+
+#define VFS_BUG_ON_INODE(cond, inode) VFS_BUG_ON(cond)
+#define VFS_WARN_ON_INODE(cond, inode) BUILD_BUG_ON_INVALID(cond)
+#endif /* CONFIG_DEBUG_VFS */
+
+#endif
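
With CONFIG_DEBUG_VFS disabled, the conditions are still type-checked through BUILD_BUG_ON_INVALID() but emit no code. A usage sketch (the specific check is hypothetical):

    static void sanity_check(struct inode *inode)
    {
            VFS_WARN_ON_INODE(inode->i_nlink == 0, inode);
    }
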
diff --git a/include/linux/vmcore_info.h b/include/linux/vmcore_info.h
index e1dec1a6a749..37e003ae5262 100644
--- a/include/linux/vmcore_info.h
+++ b/include/linux/vmcore_info.h
@@ -6,9 +6,8 @@
#include <linux/elfcore.h>
#include <linux/elf.h>
-#define CRASH_CORE_NOTE_NAME "CORE"
#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
-#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
+#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(NN_PRSTATUS), 4)
#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
/*
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6d90ad974408..3503fe822e38 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -316,6 +316,9 @@ extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
} \
\
cmd; \
+ \
+ if (condition) \
+ break; \
} \
finish_wait(&wq_head, &__wq_entry); \
__out: __ret; \
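
The added check lets the wait loop exit as soon as the condition turns true during 'cmd' (which wraps the schedule() step) instead of taking another prepare-to-wait pass first. A hedged sketch with wait_event_cmd(), whose cmd1/cmd2 arguments expand into that slot (names hypothetical):

    wait_event_cmd(my_wq, done,
                   spin_unlock(&my_lock),   /* cmd1: run before sleeping */
                   spin_lock(&my_lock));    /* cmd2: run after waking */
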
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 0754c463224a..cf793d18e5df 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -69,6 +69,8 @@ struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer);
struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call);
const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer);
const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer);
+unsigned long rxrpc_kernel_set_peer_data(struct rxrpc_peer *peer, unsigned long app_data);
+unsigned long rxrpc_kernel_get_peer_data(const struct rxrpc_peer *peer);
unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 958a2460330c..8857f5ea77d4 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -127,27 +127,29 @@ enum yfs_cm_operation {
E_(afs_call_trace_work, "QUEUE")
#define afs_server_traces \
- EM(afs_server_trace_alloc, "ALLOC ") \
EM(afs_server_trace_callback, "CALLBACK ") \
EM(afs_server_trace_destroy, "DESTROY ") \
EM(afs_server_trace_free, "FREE ") \
EM(afs_server_trace_gc, "GC ") \
- EM(afs_server_trace_get_by_addr, "GET addr ") \
- EM(afs_server_trace_get_by_uuid, "GET uuid ") \
- EM(afs_server_trace_get_caps, "GET caps ") \
- EM(afs_server_trace_get_install, "GET inst ") \
- EM(afs_server_trace_get_new_cbi, "GET cbi ") \
EM(afs_server_trace_get_probe, "GET probe") \
- EM(afs_server_trace_give_up_cb, "giveup-cb") \
EM(afs_server_trace_purging, "PURGE ") \
- EM(afs_server_trace_put_call, "PUT call ") \
EM(afs_server_trace_put_cbi, "PUT cbi ") \
- EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \
EM(afs_server_trace_put_probe, "PUT probe") \
- EM(afs_server_trace_put_slist, "PUT slist") \
- EM(afs_server_trace_put_slist_isort, "PUT isort") \
- EM(afs_server_trace_put_uuid_rsq, "PUT u-req") \
- E_(afs_server_trace_update, "UPDATE")
+ EM(afs_server_trace_see_destroyer, "SEE destr") \
+ EM(afs_server_trace_see_expired, "SEE expd ") \
+ EM(afs_server_trace_see_purge, "SEE purge") \
+ EM(afs_server_trace_see_timer, "SEE timer") \
+ EM(afs_server_trace_unuse_call, "UNU call ") \
+ EM(afs_server_trace_unuse_create_fail, "UNU cfail") \
+ EM(afs_server_trace_unuse_slist, "UNU slist") \
+ EM(afs_server_trace_unuse_slist_isort, "UNU isort") \
+ EM(afs_server_trace_update, "UPDATE ") \
+ EM(afs_server_trace_use_by_uuid, "USE uuid ") \
+ EM(afs_server_trace_use_cm_call, "USE cm-cl") \
+ EM(afs_server_trace_use_get_caps, "USE gcaps") \
+ EM(afs_server_trace_use_give_up_cb, "USE gvupc") \
+ EM(afs_server_trace_use_install, "USE inst ") \
+ E_(afs_server_trace_wait_create, "WAIT crt ")
#define afs_volume_traces \
EM(afs_volume_trace_alloc, "ALLOC ") \
@@ -169,41 +171,48 @@ enum yfs_cm_operation {
#define afs_cell_traces \
EM(afs_cell_trace_alloc, "ALLOC ") \
+ EM(afs_cell_trace_destroy, "DESTROY ") \
EM(afs_cell_trace_free, "FREE ") \
EM(afs_cell_trace_get_atcell, "GET atcell") \
- EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \
- EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \
- EM(afs_cell_trace_get_queue_new, "GET q-new ") \
EM(afs_cell_trace_get_server, "GET server") \
EM(afs_cell_trace_get_vol, "GET vol ") \
- EM(afs_cell_trace_insert, "INSERT ") \
- EM(afs_cell_trace_manage, "MANAGE ") \
+ EM(afs_cell_trace_purge, "PURGE ") \
EM(afs_cell_trace_put_atcell, "PUT atcell") \
EM(afs_cell_trace_put_candidate, "PUT candid") \
- EM(afs_cell_trace_put_destroy, "PUT destry") \
- EM(afs_cell_trace_put_queue_work, "PUT q-work") \
- EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \
+ EM(afs_cell_trace_put_final, "PUT final ") \
EM(afs_cell_trace_put_server, "PUT server") \
EM(afs_cell_trace_put_vol, "PUT vol ") \
+ EM(afs_cell_trace_queue_again, "QUE again ") \
+ EM(afs_cell_trace_queue_dns, "QUE dns ") \
+ EM(afs_cell_trace_queue_new, "QUE new ") \
+ EM(afs_cell_trace_queue_purge, "QUE purge ") \
+ EM(afs_cell_trace_manage, "MANAGE ") \
+ EM(afs_cell_trace_managed, "MANAGED ") \
EM(afs_cell_trace_see_source, "SEE source") \
- EM(afs_cell_trace_see_ws, "SEE ws ") \
+ EM(afs_cell_trace_see_mgmt_timer, "SEE mtimer") \
EM(afs_cell_trace_unuse_alias, "UNU alias ") \
EM(afs_cell_trace_unuse_check_alias, "UNU chk-al") \
EM(afs_cell_trace_unuse_delete, "UNU delete") \
+ EM(afs_cell_trace_unuse_dynroot_mntpt, "UNU dyn-mp") \
EM(afs_cell_trace_unuse_fc, "UNU fc ") \
- EM(afs_cell_trace_unuse_lookup, "UNU lookup") \
+ EM(afs_cell_trace_unuse_lookup_dynroot, "UNU lu-dyn") \
+ EM(afs_cell_trace_unuse_lookup_error, "UNU lu-err") \
EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \
EM(afs_cell_trace_unuse_parse, "UNU parse ") \
EM(afs_cell_trace_unuse_pin, "UNU pin ") \
- EM(afs_cell_trace_unuse_probe, "UNU probe ") \
EM(afs_cell_trace_unuse_sbi, "UNU sbi ") \
EM(afs_cell_trace_unuse_ws, "UNU ws ") \
EM(afs_cell_trace_use_alias, "USE alias ") \
EM(afs_cell_trace_use_check_alias, "USE chk-al") \
EM(afs_cell_trace_use_fc, "USE fc ") \
EM(afs_cell_trace_use_fc_alias, "USE fc-al ") \
- EM(afs_cell_trace_use_lookup, "USE lookup") \
+ EM(afs_cell_trace_use_lookup_add, "USE lu-add") \
+ EM(afs_cell_trace_use_lookup_canonical, "USE lu-can") \
+ EM(afs_cell_trace_use_lookup_dynroot, "USE lu-dyn") \
+ EM(afs_cell_trace_use_lookup_mntpt, "USE lu-mpt") \
+ EM(afs_cell_trace_use_lookup_mount, "USE lu-mnt") \
+ EM(afs_cell_trace_use_lookup_ws, "USE lu-ws ") \
EM(afs_cell_trace_use_mntpt, "USE mntpt ") \
EM(afs_cell_trace_use_pin, "USE pin ") \
EM(afs_cell_trace_use_probe, "USE probe ") \
@@ -220,7 +229,7 @@ enum yfs_cm_operation {
EM(afs_alist_trace_put_getaddru, "PUT GtAdrU") \
EM(afs_alist_trace_put_parse_empty, "PUT p-empt") \
EM(afs_alist_trace_put_parse_error, "PUT p-err ") \
- EM(afs_alist_trace_put_server_dup, "PUT sv-dup") \
+ EM(afs_alist_trace_put_server_create, "PUT sv-crt") \
EM(afs_alist_trace_put_server_oom, "PUT sv-oom") \
EM(afs_alist_trace_put_server_update, "PUT sv-upd") \
EM(afs_alist_trace_put_vlgetcaps, "PUT vgtcap") \
@@ -1270,7 +1279,7 @@ TRACE_EVENT(afs_bulkstat_error,
);
TRACE_EVENT(afs_cm_no_server,
- TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
+ TP_PROTO(struct afs_call *call, const struct sockaddr_rxrpc *srx),
TP_ARGS(call, srx),
@@ -1529,7 +1538,7 @@ TRACE_EVENT(afs_server,
__entry->reason = reason;
),
- TP_printk("s=%08x %s u=%d a=%d",
+ TP_printk("s=%08x %s r=%d a=%d",
__entry->server,
__print_symbolic(__entry->reason, afs_server_traces),
__entry->ref,
@@ -1537,25 +1546,29 @@ TRACE_EVENT(afs_server,
);
TRACE_EVENT(afs_volume,
- TP_PROTO(afs_volid_t vid, int ref, enum afs_volume_trace reason),
+ TP_PROTO(unsigned int debug_id, afs_volid_t vid, int ref,
+ enum afs_volume_trace reason),
- TP_ARGS(vid, ref, reason),
+ TP_ARGS(debug_id, vid, ref, reason),
TP_STRUCT__entry(
+ __field(unsigned int, debug_id)
__field(afs_volid_t, vid)
__field(int, ref)
__field(enum afs_volume_trace, reason)
),
TP_fast_assign(
- __entry->vid = vid;
- __entry->ref = ref;
- __entry->reason = reason;
+ __entry->debug_id = debug_id;
+ __entry->vid = vid;
+ __entry->ref = ref;
+ __entry->reason = reason;
),
- TP_printk("V=%llx %s ur=%d",
- __entry->vid,
+ TP_printk("V=%08x %s vid=%llx r=%d",
+ __entry->debug_id,
__print_symbolic(__entry->reason, afs_volume_traces),
+ __entry->vid,
__entry->ref)
);
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index e81431deaa50..ac3b28b8939b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -561,40 +561,6 @@ TRACE_EVENT_RCU(rcu_segcb_stats,
);
/*
- * Tracepoint for the registration of a single RCU callback of the special
- * kvfree() form. The first argument is the RCU type, the second argument
- * is a pointer to the RCU callback, the third argument is the offset
- * of the callback within the enclosing RCU-protected data structure,
- * the fourth argument is the number of lazy callbacks queued, and the
- * fifth argument is the total number of callbacks queued.
- */
-TRACE_EVENT_RCU(rcu_kvfree_callback,
-
- TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
- long qlen),
-
- TP_ARGS(rcuname, rhp, offset, qlen),
-
- TP_STRUCT__entry(
- __field(const char *, rcuname)
- __field(void *, rhp)
- __field(unsigned long, offset)
- __field(long, qlen)
- ),
-
- TP_fast_assign(
- __entry->rcuname = rcuname;
- __entry->rhp = rhp;
- __entry->offset = offset;
- __entry->qlen = qlen;
- ),
-
- TP_printk("%s rhp=%p func=%ld %ld",
- __entry->rcuname, __entry->rhp, __entry->offset,
- __entry->qlen)
-);
-
-/*
* Tracepoint for marking the beginning rcu_do_batch, performed to start
* RCU callback invocation. The first argument is the RCU flavor,
* the second is the number of lazy callbacks queued, the third is
diff --git a/include/trace/events/sched_ext.h b/include/trace/events/sched_ext.h
index fe19da7315a9..50e4b712735a 100644
--- a/include/trace/events/sched_ext.h
+++ b/include/trace/events/sched_ext.h
@@ -26,6 +26,25 @@ TRACE_EVENT(sched_ext_dump,
)
);
+TRACE_EVENT(sched_ext_event,
+ TP_PROTO(const char *name, __s64 delta),
+ TP_ARGS(name, delta),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field( __s64, delta )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->delta = delta;
+ ),
+
+ TP_printk("name %s delta %lld",
+ __get_str(name), __entry->delta
+ )
+);
+
#endif /* _TRACE_SCHED_EXT_H */
/* This part must be outside protection */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 88dc393c2bca..2892a45023af 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -849,9 +849,11 @@ __SYSCALL(__NR_getxattrat, sys_getxattrat)
__SYSCALL(__NR_listxattrat, sys_listxattrat)
#define __NR_removexattrat 466
__SYSCALL(__NR_removexattrat, sys_removexattrat)
+#define __NR_open_tree_attr 467
+__SYSCALL(__NR_open_tree_attr, sys_open_tree_attr)
#undef __NR_syscalls
-#define __NR_syscalls 467
+#define __NR_syscalls 468
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index b44069d29cec..8cf4ea9c544c 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -291,8 +291,18 @@ typedef struct elf64_phdr {
#define SHF_WRITE 0x1
#define SHF_ALLOC 0x2
#define SHF_EXECINSTR 0x4
+#define SHF_MERGE 0x10
+#define SHF_STRINGS 0x20
+#define SHF_INFO_LINK 0x40
+#define SHF_LINK_ORDER 0x80
+#define SHF_OS_NONCONFORMING 0x100
+#define SHF_GROUP 0x200
+#define SHF_TLS 0x400
#define SHF_RELA_LIVEPATCH 0x00100000
#define SHF_RO_AFTER_INIT 0x00200000
+#define SHF_ORDERED 0x04000000
+#define SHF_EXCLUDE 0x08000000
+#define SHF_MASKOS 0x0ff00000
#define SHF_MASKPROC 0xf0000000
/* special section indexes */
@@ -368,101 +378,180 @@ typedef struct elf64_shdr {
#define ELF_OSABI ELFOSABI_NONE
#endif
+/* Note definitions: NN_ defines names. NT_ defines types. */
+
+#define NN_GNU_PROPERTY_TYPE_0 "GNU"
+#define NT_GNU_PROPERTY_TYPE_0 5
+
/*
* Notes used in ET_CORE. Architectures export some of the arch register sets
* using the corresponding note types via the PTRACE_GETREGSET and
* PTRACE_SETREGSET requests.
- * The note name for these types is "LINUX", except NT_PRFPREG that is named
- * "CORE".
*/
+#define NN_PRSTATUS "CORE"
#define NT_PRSTATUS 1
+#define NN_PRFPREG "CORE"
#define NT_PRFPREG 2
+#define NN_PRPSINFO "CORE"
#define NT_PRPSINFO 3
+#define NN_TASKSTRUCT "CORE"
#define NT_TASKSTRUCT 4
+#define NN_AUXV "CORE"
#define NT_AUXV 6
/*
* Note to userspace developers: size of NT_SIGINFO note may increase
* in the future to accommodate more fields; don't assume it is fixed!
*/
+#define NN_SIGINFO "CORE"
#define NT_SIGINFO 0x53494749
+#define NN_FILE "CORE"
#define NT_FILE 0x46494c45
+#define NN_PRXFPREG "LINUX"
#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
+#define NN_PPC_VMX "LINUX"
#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
+#define NN_PPC_SPE "LINUX"
#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
+#define NN_PPC_VSX "LINUX"
#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
+#define NN_PPC_TAR "LINUX"
#define NT_PPC_TAR 0x103 /* Target Address Register */
+#define NN_PPC_PPR "LINUX"
#define NT_PPC_PPR 0x104 /* Program Priority Register */
+#define NN_PPC_DSCR "LINUX"
#define NT_PPC_DSCR 0x105 /* Data Stream Control Register */
+#define NN_PPC_EBB "LINUX"
#define NT_PPC_EBB 0x106 /* Event Based Branch Registers */
+#define NN_PPC_PMU "LINUX"
#define NT_PPC_PMU 0x107 /* Performance Monitor Registers */
+#define NN_PPC_TM_CGPR "LINUX"
#define NT_PPC_TM_CGPR 0x108 /* TM checkpointed GPR Registers */
+#define NN_PPC_TM_CFPR "LINUX"
#define NT_PPC_TM_CFPR 0x109 /* TM checkpointed FPR Registers */
+#define NN_PPC_TM_CVMX "LINUX"
#define NT_PPC_TM_CVMX 0x10a /* TM checkpointed VMX Registers */
+#define NN_PPC_TM_CVSX "LINUX"
#define NT_PPC_TM_CVSX 0x10b /* TM checkpointed VSX Registers */
+#define NN_PPC_TM_SPR "LINUX"
#define NT_PPC_TM_SPR 0x10c /* TM Special Purpose Registers */
+#define NN_PPC_TM_CTAR "LINUX"
#define NT_PPC_TM_CTAR 0x10d /* TM checkpointed Target Address Register */
+#define NN_PPC_TM_CPPR "LINUX"
#define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */
+#define NN_PPC_TM_CDSCR "LINUX"
#define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */
+#define NN_PPC_PKEY "LINUX"
#define NT_PPC_PKEY 0x110 /* Memory Protection Keys registers */
+#define NN_PPC_DEXCR "LINUX"
#define NT_PPC_DEXCR 0x111 /* PowerPC DEXCR registers */
+#define NN_PPC_HASHKEYR "LINUX"
#define NT_PPC_HASHKEYR 0x112 /* PowerPC HASHKEYR register */
+#define NN_386_TLS "LINUX"
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
+#define NN_386_IOPERM "LINUX"
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
+#define NN_X86_XSTATE "LINUX"
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
/* Old binutils treats 0x203 as a CET state */
+#define NN_X86_SHSTK "LINUX"
#define NT_X86_SHSTK 0x204 /* x86 SHSTK state */
+#define NN_X86_XSAVE_LAYOUT "LINUX"
#define NT_X86_XSAVE_LAYOUT 0x205 /* XSAVE layout description */
+#define NN_S390_HIGH_GPRS "LINUX"
#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
+#define NN_S390_TIMER "LINUX"
#define NT_S390_TIMER 0x301 /* s390 timer register */
+#define NN_S390_TODCMP "LINUX"
#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
+#define NN_S390_TODPREG "LINUX"
#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
+#define NN_S390_CTRS "LINUX"
#define NT_S390_CTRS 0x304 /* s390 control registers */
+#define NN_S390_PREFIX "LINUX"
#define NT_S390_PREFIX 0x305 /* s390 prefix register */
+#define NN_S390_LAST_BREAK "LINUX"
#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */
+#define NN_S390_SYSTEM_CALL "LINUX"
#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */
+#define NN_S390_TDB "LINUX"
#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
+#define NN_S390_VXRS_LOW "LINUX"
#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 upper half */
+#define NN_S390_VXRS_HIGH "LINUX"
#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
+#define NN_S390_GS_CB "LINUX"
#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
+#define NN_S390_GS_BC "LINUX"
#define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */
+#define NN_S390_RI_CB "LINUX"
#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
+#define NN_S390_PV_CPU_DATA "LINUX"
#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */
+#define NN_ARM_VFP "LINUX"
#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
+#define NN_ARM_TLS "LINUX"
#define NT_ARM_TLS 0x401 /* ARM TLS register */
+#define NN_ARM_HW_BREAK "LINUX"
#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
+#define NN_ARM_HW_WATCH "LINUX"
#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
+#define NN_ARM_SYSTEM_CALL "LINUX"
#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
+#define NN_ARM_SVE "LINUX"
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
+#define NN_ARM_PAC_MASK "LINUX"
#define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */
+#define NN_ARM_PACA_KEYS "LINUX"
#define NT_ARM_PACA_KEYS 0x407 /* ARM pointer authentication address keys */
+#define NN_ARM_PACG_KEYS "LINUX"
#define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */
+#define NN_ARM_TAGGED_ADDR_CTRL "LINUX"
#define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */
+#define NN_ARM_PAC_ENABLED_KEYS "LINUX"
#define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */
+#define NN_ARM_SSVE "LINUX"
#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */
+#define NN_ARM_ZA "LINUX"
#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */
+#define NN_ARM_ZT "LINUX"
#define NT_ARM_ZT 0x40d /* ARM SME ZT registers */
+#define NN_ARM_FPMR "LINUX"
#define NT_ARM_FPMR 0x40e /* ARM floating point mode register */
+#define NN_ARM_POE "LINUX"
#define NT_ARM_POE 0x40f /* ARM POE registers */
+#define NN_ARM_GCS "LINUX"
#define NT_ARM_GCS 0x410 /* ARM GCS state */
+#define NN_ARC_V2 "LINUX"
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
+#define NN_VMCOREDD "LINUX"
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
+#define NN_MIPS_DSP "LINUX"
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
+#define NN_MIPS_FP_MODE "LINUX"
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
+#define NN_MIPS_MSA "LINUX"
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
+#define NN_RISCV_CSR "LINUX"
#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
+#define NN_RISCV_VECTOR "LINUX"
#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
+#define NN_RISCV_TAGGED_ADDR_CTRL "LINUX"
#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged address control (prctl()) */
+#define NN_LOONGARCH_CPUCFG "LINUX"
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
+#define NN_LOONGARCH_CSR "LINUX"
#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
+#define NN_LOONGARCH_LSX "LINUX"
#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
+#define NN_LOONGARCH_LASX "LINUX"
#define NT_LOONGARCH_LASX 0xa03 /* LoongArch Loongson Advanced SIMD Extension registers */
+#define NN_LOONGARCH_LBT "LINUX"
#define NT_LOONGARCH_LBT 0xa04 /* LoongArch Loongson Binary Translation registers */
+#define NN_LOONGARCH_HW_BREAK "LINUX"
#define NT_LOONGARCH_HW_BREAK 0xa05 /* LoongArch hardware breakpoint registers */
+#define NN_LOONGARCH_HW_WATCH "LINUX"
#define NT_LOONGARCH_HW_WATCH 0xa06 /* LoongArch hardware watchpoint registers */
-/* Note types with note name "GNU" */
-#define NT_GNU_PROPERTY_TYPE_0 5
-
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
Elf32_Word n_namesz; /* Name size */
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index bd8167979707..e710967c7c26 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -28,6 +28,8 @@
/* #define FAN_DIR_MODIFY 0x00080000 */ /* Deprecated (reserved) */
#define FAN_PRE_ACCESS 0x00100000 /* Pre-content access hook */
+#define FAN_MNT_ATTACH 0x01000000 /* Mount was attached */
+#define FAN_MNT_DETACH 0x02000000 /* Mount was detached */
#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */
@@ -64,6 +66,7 @@
#define FAN_REPORT_NAME 0x00000800 /* Report events with name */
#define FAN_REPORT_TARGET_FID 0x00001000 /* Report dirent target id */
#define FAN_REPORT_FD_ERROR 0x00002000 /* event->fd can report error */
+#define FAN_REPORT_MNT 0x00004000 /* Report mount events */
/* Convenience macro - FAN_REPORT_NAME requires FAN_REPORT_DIR_FID */
#define FAN_REPORT_DFID_NAME (FAN_REPORT_DIR_FID | FAN_REPORT_NAME)
@@ -94,6 +97,7 @@
#define FAN_MARK_INODE 0x00000000
#define FAN_MARK_MOUNT 0x00000010
#define FAN_MARK_FILESYSTEM 0x00000100
+#define FAN_MARK_MNTNS 0x00000110
/*
* Convenience macro - FAN_MARK_IGNORE requires FAN_MARK_IGNORED_SURV_MODIFY
@@ -147,6 +151,7 @@ struct fanotify_event_metadata {
#define FAN_EVENT_INFO_TYPE_PIDFD 4
#define FAN_EVENT_INFO_TYPE_ERROR 5
#define FAN_EVENT_INFO_TYPE_RANGE 6
+#define FAN_EVENT_INFO_TYPE_MNT 7
/* Special info types for FAN_RENAME */
#define FAN_EVENT_INFO_TYPE_OLD_DFID_NAME 10
@@ -200,6 +205,11 @@ struct fanotify_event_info_range {
__u64 count;
};
+struct fanotify_event_info_mnt {
+ struct fanotify_event_info_header hdr;
+ __u64 mnt_id;
+};
+
/*
* User space may need to record additional information about its decision.
* The extra information type records what kind of information is included.
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index 1f2c9469f921..05e3aa8fa8bc 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -449,7 +449,8 @@ struct fw_cdev_event_phy_packet {
* which the packet arrived. For %FW_CDEV_EVENT_PHY_PACKET_SENT2 and non-ping packet,
* the time stamp of isochronous cycle at which the packet was sent. For ping packet,
* the tick count for round-trip time measured by 1394 OHCI controller.
- * The time stamp of isochronous cycle at which either the response was sent for
+ *
+ * The time stamp of isochronous cycle at which either the response was sent for
* %FW_CDEV_EVENT_PHY_PACKET_SENT2 or the request arrived for
* %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2.
* @data: Incoming data
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index c07008816aca..7fa67c2031a5 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -179,7 +179,12 @@ struct statmount {
__u32 opt_array; /* [str] Array of nul terminated fs options */
__u32 opt_sec_num; /* Number of security options */
__u32 opt_sec_array; /* [str] Array of nul terminated security options */
- __u64 __spare2[46];
+ __u64 supported_mask; /* Mask flags that this kernel supports */
+ __u32 mnt_uidmap_num; /* Number of uid mappings */
+ __u32 mnt_uidmap; /* [str] Array of uid mappings (as seen from caller's namespace) */
+ __u32 mnt_gidmap_num; /* Number of gid mappings */
+ __u32 mnt_gidmap; /* [str] Array of gid mappings (as seen from caller's namespace) */
+ __u64 __spare2[43];
char str[]; /* Variable size part containing strings */
};
@@ -217,6 +222,9 @@ struct mnt_id_req {
#define STATMOUNT_SB_SOURCE 0x00000200U /* Want/got sb_source */
#define STATMOUNT_OPT_ARRAY 0x00000400U /* Want/got opt_... */
#define STATMOUNT_OPT_SEC_ARRAY 0x00000800U /* Want/got opt_sec... */
+#define STATMOUNT_SUPPORTED_MASK 0x00001000U /* Want/got supported mask flags */
+#define STATMOUNT_MNT_UIDMAP 0x00002000U /* Want/got uidmap... */
+#define STATMOUNT_MNT_GIDMAP 0x00004000U /* Want/got gidmap... */
/*
* Special @mnt_id values that can be passed to listmount
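
A hedged userspace sketch of requesting the new fields; error handling and buffer sizing are kept minimal, and __NR_statmount is assumed to be exposed by the installed headers:

    #include <linux/mount.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int count_uid_mappings(__u64 mnt_id, char *buf, size_t bufsize)
    {
            struct mnt_id_req req = {
                    .size = MNT_ID_REQ_SIZE_VER0,
                    .mnt_id = mnt_id,
                    .param = STATMOUNT_MNT_UIDMAP | STATMOUNT_MNT_GIDMAP,
            };
            struct statmount *sm = (struct statmount *)buf;

            if (syscall(__NR_statmount, &req, sm, bufsize, 0) < 0)
                    return -1;
            /* the first mapping string sits at sm->str + sm->mnt_uidmap */
            return sm->mnt_uidmap_num;
    }
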
diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
index c23f91ae5fe8..3196cc44a002 100644
--- a/include/uapi/linux/nilfs2_ondisk.h
+++ b/include/uapi/linux/nilfs2_ondisk.h
@@ -188,7 +188,8 @@ struct nilfs_super_block {
__le16 s_segment_usage_size; /* Size of a segment usage */
/*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
-/*A8*/ char s_volume_name[80]; /* volume name */
+/*A8*/ char s_volume_name[80] /* volume name */
+ __kernel_nonstring;
/*F8*/ __le32 s_c_interval; /* Commit interval of segment */
__le32 s_c_block_max; /*
diff --git a/include/uapi/linux/pidfd.h b/include/uapi/linux/pidfd.h
index 4540f6301b8c..2970ef44655a 100644
--- a/include/uapi/linux/pidfd.h
+++ b/include/uapi/linux/pidfd.h
@@ -10,6 +10,10 @@
/* Flags for pidfd_open(). */
#define PIDFD_NONBLOCK O_NONBLOCK
#define PIDFD_THREAD O_EXCL
+#ifdef __KERNEL__
+#include <linux/sched.h>
+#define PIDFD_CLONE CLONE_PIDFD
+#endif
/* Flags for pidfd_send_signal(). */
#define PIDFD_SIGNAL_THREAD (1UL << 0)
@@ -20,9 +24,34 @@
#define PIDFD_INFO_PID (1UL << 0) /* Always returned, even if not requested */
#define PIDFD_INFO_CREDS (1UL << 1) /* Always returned, even if not requested */
#define PIDFD_INFO_CGROUPID (1UL << 2) /* Always returned if available, even if not requested */
+#define PIDFD_INFO_EXIT (1UL << 3) /* Only returned if requested. */
#define PIDFD_INFO_SIZE_VER0 64 /* sizeof first published struct */
+/*
+ * The concept of process and threads in userland and the kernel is a confusing
+ * one - within the kernel every thread is a 'task' with its own individual PID,
+ * however from userland's point of view threads are grouped by a single PID,
+ * which is that of the 'thread group leader', typically the first thread
+ * spawned.
+ *
+ * To cut the Gordian knot, for internal kernel usage, we use
+ * PIDFD_SELF_THREAD to refer to the current thread (or task, from a kernel
+ * perspective), and PIDFD_SELF_THREAD_GROUP to refer to the current thread
+ * group leader...
+ */
+#define PIDFD_SELF_THREAD -10000 /* Current thread. */
+#define PIDFD_SELF_THREAD_GROUP -20000 /* Current thread group leader. */
+
+/*
+ * ...and for userland we make life simpler - PIDFD_SELF refers to the current
+ * thread, PIDFD_SELF_PROCESS refers to the process thread group leader.
+ *
+ * For nearly all practical uses, a user will want to use PIDFD_SELF.
+ */
+#define PIDFD_SELF PIDFD_SELF_THREAD
+#define PIDFD_SELF_PROCESS PIDFD_SELF_THREAD_GROUP
+
struct pidfd_info {
/*
* This mask is similar to the request_mask in statx(2).
@@ -62,7 +91,7 @@ struct pidfd_info {
__u32 sgid;
__u32 fsuid;
__u32 fsgid;
- __u32 spare0[1];
+ __s32 exit_code;
};
#define PIDFS_IOCTL_MAGIC 0xFF
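
PIDFD_SELF lets a thread use pidfd-taking interfaces on itself without a pidfd_open() round trip. A hedged userspace sketch, assuming a kernel that accepts the sentinel in pidfd_send_signal():

    #include <signal.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef PIDFD_SELF
    #define PIDFD_SELF -10000       /* PIDFD_SELF_THREAD, from the hunk above */
    #endif

    static int poke_self(void)
    {
            return syscall(SYS_pidfd_send_signal, PIDFD_SELF, SIGUSR1, NULL, 0);
    }
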
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index a6fce46aeb37..b87df1b485c2 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -70,4 +70,10 @@
#define __counted_by_be(m)
#endif
+#ifdef __KERNEL__
+#define __kernel_nonstring __nonstring
+#else
+#define __kernel_nonstring
+#endif
+
#endif /* _UAPI_LINUX_STDDEF_H */
diff --git a/init/.kunitconfig b/init/.kunitconfig
new file mode 100644
index 000000000000..acb906b1a5f9
--- /dev/null
+++ b/init/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_TEST=y
diff --git a/init/Kconfig b/init/Kconfig
index 324c2886b2ea..aeea36d11dda 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -129,6 +129,9 @@ config CC_HAS_COUNTED_BY
# https://github.com/llvm/llvm-project/pull/112636
depends on !(CC_IS_CLANG && CLANG_VERSION < 190103)
+config CC_HAS_MULTIDIMENSIONAL_NONSTRING
+ def_bool $(success,echo 'char tag[][4] __attribute__((__nonstring__)) = { };' | $(CC) $(CLANG_FLAGS) -x c - -c -o /dev/null -Werror)
+
config RUSTC_HAS_COERCE_POINTEE
def_bool RUSTC_VERSION >= 108400
@@ -1195,7 +1198,8 @@ config CPUSETS_V1
help
Legacy cgroup v1 cpusets controller which has been deprecated by
cgroup v2 implementation. The v1 is there for legacy applications
- which haven't migrated to the new cgroup v2 interface yet. If you
+ which haven't migrated to the new cgroup v2 interface yet. The legacy
+ interface includes the cpuset filesystem and /proc/<pid>/cpuset. If you
do not have any such application then you are completely fine leaving
this option disabled.
@@ -1203,7 +1207,7 @@ config CPUSETS_V1
config PROC_PID_CPUSET
bool "Include legacy /proc/<pid>/cpuset file"
- depends on CPUSETS
+ depends on CPUSETS_V1
default y
config CGROUP_DEVICE
@@ -1454,6 +1458,13 @@ config INITRAMFS_PRESERVE_MTIME
If unsure, say Y.
+config INITRAMFS_TEST
+ bool "Test initramfs cpio archive extraction" if !KUNIT_ALL_TESTS
+ depends on BLK_DEV_INITRD && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ Build KUnit tests for initramfs. See Documentation/dev-tools/kunit/ for details.
+
choice
prompt "Compiler optimization level"
default CC_OPTIMIZE_FOR_PERFORMANCE
diff --git a/init/Makefile b/init/Makefile
index 10b652d33e87..d6f75d8907e0 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -12,6 +12,7 @@ else
obj-$(CONFIG_BLK_DEV_INITRD) += initramfs.o
endif
obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
+obj-$(CONFIG_INITRAMFS_TEST) += initramfs_test.o
obj-y += init_task.o
diff --git a/init/initramfs.c b/init/initramfs.c
index b2f7583bb1f5..72bad44a1d41 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -20,6 +20,7 @@
#include <linux/security.h>
#include "do_mounts.h"
+#include "initramfs_internal.h"
static __initdata bool csum_present;
static __initdata u32 io_csum;
@@ -75,6 +76,7 @@ static __initdata struct hash {
struct hash *next;
char name[N_ALIGN(PATH_MAX)];
} *head[32];
+static __initdata bool hardlink_seen;
static inline int hash(int major, int minor, int ino)
{
@@ -108,19 +110,21 @@ static char __init *find_link(int major, int minor, int ino,
strcpy(q->name, name);
q->next = NULL;
*p = q;
+ hardlink_seen = true;
return NULL;
}
static void __init free_hash(void)
{
struct hash **p, *q;
- for (p = head; p < head + 32; p++) {
+ for (p = head; hardlink_seen && p < head + 32; p++) {
while (*p) {
q = *p;
*p = q->next;
kfree(q);
}
}
+ hardlink_seen = false;
}
#ifdef CONFIG_INITRAMFS_PRESERVE_MTIME
@@ -143,9 +147,8 @@ struct dir_entry {
char name[];
};
-static void __init dir_add(const char *name, time64_t mtime)
+static void __init dir_add(const char *name, size_t nlen, time64_t mtime)
{
- size_t nlen = strlen(name) + 1;
struct dir_entry *de;
de = kmalloc(sizeof(struct dir_entry) + nlen, GFP_KERNEL);
@@ -169,7 +172,7 @@ static void __init dir_utime(void)
#else
static void __init do_utime(char *filename, time64_t mtime) {}
static void __init do_utime_path(const struct path *path, time64_t mtime) {}
-static void __init dir_add(const char *name, time64_t mtime) {}
+static void __init dir_add(const char *name, size_t nlen, time64_t mtime) {}
static void __init dir_utime(void) {}
#endif
@@ -188,14 +191,11 @@ static __initdata u32 hdr_csum;
static void __init parse_header(char *s)
{
unsigned long parsed[13];
- char buf[9];
int i;
- buf[8] = '\0';
- for (i = 0, s += 6; i < 13; i++, s += 8) {
- memcpy(buf, s, 8);
- parsed[i] = simple_strtoul(buf, NULL, 16);
- }
+ for (i = 0, s += 6; i < 13; i++, s += 8)
+ parsed[i] = simple_strntoul(s, NULL, 16, 8);
+
ino = parsed[0];
mode = parsed[1];
uid = parsed[2];
@@ -256,7 +256,7 @@ static __initdata char *header_buf, *symlink_buf, *name_buf;
static int __init do_start(void)
{
- read_into(header_buf, 110, GotHeader);
+ read_into(header_buf, CPIO_HDRLEN, GotHeader);
return 0;
}
@@ -396,7 +396,7 @@ static int __init do_name(void)
init_mkdir(collected, mode);
init_chown(collected, uid, gid, 0);
init_chmod(collected, mode);
- dir_add(collected, mtime);
+ dir_add(collected, name_len, mtime);
} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
S_ISFIFO(mode) || S_ISSOCK(mode)) {
if (maybe_link() == 0) {
@@ -497,20 +497,33 @@ static unsigned long my_inptr __initdata; /* index of next byte to be processed
#include <linux/decompress/generic.h>
-static char * __init unpack_to_rootfs(char *buf, unsigned long len)
+/**
+ * unpack_to_rootfs - decompress and extract an initramfs archive
+ * @buf: input initramfs archive to extract
+ * @len: length of initramfs data to process
+ *
+ * Returns: NULL for success or an error message string
+ *
+ * This symbol shouldn't be used externally. It's available for unit tests.
+ */
+char * __init unpack_to_rootfs(char *buf, unsigned long len)
{
long written;
decompress_fn decompress;
const char *compress_name;
- static __initdata char msg_buf[64];
+ struct {
+ char header[CPIO_HDRLEN];
+ char symlink[PATH_MAX + N_ALIGN(PATH_MAX) + 1];
+ char name[N_ALIGN(PATH_MAX)];
+ } *bufs = kmalloc(sizeof(*bufs), GFP_KERNEL);
- header_buf = kmalloc(110, GFP_KERNEL);
- symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
- name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
-
- if (!header_buf || !symlink_buf || !name_buf)
+ if (!bufs)
panic_show_mem("can't allocate buffers");
+ header_buf = bufs->header;
+ symlink_buf = bufs->symlink;
+ name_buf = bufs->name;
+
state = Start;
this_header = 0;
message = NULL;
@@ -538,12 +551,9 @@ static char * __init unpack_to_rootfs(char *buf, unsigned long len)
if (res)
error("decompressor failed");
} else if (compress_name) {
- if (!message) {
- snprintf(msg_buf, sizeof msg_buf,
- "compression method %s not configured",
- compress_name);
- message = msg_buf;
- }
+ pr_err("compression method %s not configured\n",
+ compress_name);
+ error("decompressor failed");
} else
error("invalid magic at start of compressed archive");
if (state != Reset)
@@ -553,9 +563,9 @@ static char * __init unpack_to_rootfs(char *buf, unsigned long len)
len -= my_inptr;
}
dir_utime();
- kfree(name_buf);
- kfree(symlink_buf);
- kfree(header_buf);
+ /* free any hardlink state collected without optional TRAILER!!! */
+ free_hash();
+ kfree(bufs);
return message;
}
diff --git a/init/initramfs_internal.h b/init/initramfs_internal.h
new file mode 100644
index 000000000000..233dad16b0a0
--- /dev/null
+++ b/init/initramfs_internal.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __INITRAMFS_INTERNAL_H__
+#define __INITRAMFS_INTERNAL_H__
+
+char *unpack_to_rootfs(char *buf, unsigned long len);
+#define CPIO_HDRLEN 110
+
+#endif
diff --git a/init/initramfs_test.c b/init/initramfs_test.c
new file mode 100644
index 000000000000..517e5e04e5cc
--- /dev/null
+++ b/init/initramfs_test.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init_syscalls.h>
+#include <linux/stringify.h>
+#include <linux/timekeeping.h>
+#include "initramfs_internal.h"
+
+struct initramfs_test_cpio {
+ char *magic;
+ unsigned int ino;
+ unsigned int mode;
+ unsigned int uid;
+ unsigned int gid;
+ unsigned int nlink;
+ unsigned int mtime;
+ unsigned int filesize;
+ unsigned int devmajor;
+ unsigned int devminor;
+ unsigned int rdevmajor;
+ unsigned int rdevminor;
+ unsigned int namesize;
+ unsigned int csum;
+ char *fname;
+ char *data;
+};
+
+static size_t fill_cpio(struct initramfs_test_cpio *cs, size_t csz, char *out)
+{
+ int i;
+ size_t off = 0;
+
+ for (i = 0; i < csz; i++) {
+ char *pos = &out[off];
+ struct initramfs_test_cpio *c = &cs[i];
+ size_t thislen;
+
+ /* +1 to account for nulterm */
+ thislen = sprintf(pos, "%s"
+ "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x"
+ "%s",
+ c->magic, c->ino, c->mode, c->uid, c->gid, c->nlink,
+ c->mtime, c->filesize, c->devmajor, c->devminor,
+ c->rdevmajor, c->rdevminor, c->namesize, c->csum,
+ c->fname) + 1;
+ pr_debug("packing (%zu): %.*s\n", thislen, (int)thislen, pos);
+ off += thislen;
+ while (off & 3)
+ out[off++] = '\0';
+
+ memcpy(&out[off], c->data, c->filesize);
+ off += c->filesize;
+ while (off & 3)
+ out[off++] = '\0';
+ }
+
+ return off;
+}
+
+static void __init initramfs_test_extract(struct kunit *test)
+{
+ char *err, *cpio_srcbuf;
+ size_t len;
+ struct timespec64 ts_before, ts_after;
+ struct kstat st = {};
+ struct initramfs_test_cpio c[] = { {
+ .magic = "070701",
+ .ino = 1,
+ .mode = S_IFREG | 0777,
+ .uid = 12,
+ .gid = 34,
+ .nlink = 1,
+ .mtime = 56,
+ .filesize = 0,
+ .devmajor = 0,
+ .devminor = 1,
+ .rdevmajor = 0,
+ .rdevminor = 0,
+ .namesize = sizeof("initramfs_test_extract"),
+ .csum = 0,
+ .fname = "initramfs_test_extract",
+ }, {
+ .magic = "070701",
+ .ino = 2,
+ .mode = S_IFDIR | 0777,
+ .nlink = 1,
+ .mtime = 57,
+ .devminor = 1,
+ .namesize = sizeof("initramfs_test_extract_dir"),
+ .fname = "initramfs_test_extract_dir",
+ }, {
+ .magic = "070701",
+ .namesize = sizeof("TRAILER!!!"),
+ .fname = "TRAILER!!!",
+ } };
+
+ /* +3 to cater for any 4-byte end-alignment */
+ cpio_srcbuf = kzalloc(ARRAY_SIZE(c) * (CPIO_HDRLEN + PATH_MAX + 3),
+ GFP_KERNEL);
+ len = fill_cpio(c, ARRAY_SIZE(c), cpio_srcbuf);
+
+ ktime_get_real_ts64(&ts_before);
+ err = unpack_to_rootfs(cpio_srcbuf, len);
+ ktime_get_real_ts64(&ts_after);
+ if (err) {
+ KUNIT_FAIL(test, "unpack failed %s", err);
+ goto out;
+ }
+
+ KUNIT_EXPECT_EQ(test, init_stat(c[0].fname, &st, 0), 0);
+ KUNIT_EXPECT_TRUE(test, S_ISREG(st.mode));
+ KUNIT_EXPECT_TRUE(test, uid_eq(st.uid, KUIDT_INIT(c[0].uid)));
+ KUNIT_EXPECT_TRUE(test, gid_eq(st.gid, KGIDT_INIT(c[0].gid)));
+ KUNIT_EXPECT_EQ(test, st.nlink, 1);
+ if (IS_ENABLED(CONFIG_INITRAMFS_PRESERVE_MTIME)) {
+ KUNIT_EXPECT_EQ(test, st.mtime.tv_sec, c[0].mtime);
+ } else {
+ KUNIT_EXPECT_GE(test, st.mtime.tv_sec, ts_before.tv_sec);
+ KUNIT_EXPECT_LE(test, st.mtime.tv_sec, ts_after.tv_sec);
+ }
+ KUNIT_EXPECT_EQ(test, st.blocks, c[0].filesize);
+
+ KUNIT_EXPECT_EQ(test, init_stat(c[1].fname, &st, 0), 0);
+ KUNIT_EXPECT_TRUE(test, S_ISDIR(st.mode));
+ if (IS_ENABLED(CONFIG_INITRAMFS_PRESERVE_MTIME)) {
+ KUNIT_EXPECT_EQ(test, st.mtime.tv_sec, c[1].mtime);
+ } else {
+ KUNIT_EXPECT_GE(test, st.mtime.tv_sec, ts_before.tv_sec);
+ KUNIT_EXPECT_LE(test, st.mtime.tv_sec, ts_after.tv_sec);
+ }
+
+ KUNIT_EXPECT_EQ(test, init_unlink(c[0].fname), 0);
+ KUNIT_EXPECT_EQ(test, init_rmdir(c[1].fname), 0);
+out:
+ kfree(cpio_srcbuf);
+}
+
+/*
+ * Don't terminate filename. Previously, the cpio filename field was passed
+ * directly to filp_open(collected, O_CREAT|..) without nulterm checks. See
+ * https://lore.kernel.org/linux-fsdevel/20241030035509.20194-2-ddiss@suse.de
+ */
+static void __init initramfs_test_fname_overrun(struct kunit *test)
+{
+ char *err, *cpio_srcbuf;
+ size_t len, suffix_off;
+ struct initramfs_test_cpio c[] = { {
+ .magic = "070701",
+ .ino = 1,
+ .mode = S_IFREG | 0777,
+ .uid = 0,
+ .gid = 0,
+ .nlink = 1,
+ .mtime = 1,
+ .filesize = 0,
+ .devmajor = 0,
+ .devminor = 1,
+ .rdevmajor = 0,
+ .rdevminor = 0,
+ .namesize = sizeof("initramfs_test_fname_overrun"),
+ .csum = 0,
+ .fname = "initramfs_test_fname_overrun",
+ } };
+
+ /*
+ * Poison the cpio source buffer so we can detect overruns. The source
+ * buffer is used by read_into() when hdr or fname are already
+ * available (e.g. no compression).
+ */
+ cpio_srcbuf = kmalloc(CPIO_HDRLEN + PATH_MAX + 3, GFP_KERNEL);
+ memset(cpio_srcbuf, 'B', CPIO_HDRLEN + PATH_MAX + 3);
+ /* limit overrun to avoid crashes / filp_open() ENAMETOOLONG */
+ cpio_srcbuf[CPIO_HDRLEN + strlen(c[0].fname) + 20] = '\0';
+
+ len = fill_cpio(c, ARRAY_SIZE(c), cpio_srcbuf);
+ /* overwrite trailing fname terminator and padding */
+ suffix_off = len - 1;
+ while (cpio_srcbuf[suffix_off] == '\0') {
+ cpio_srcbuf[suffix_off] = 'P';
+ suffix_off--;
+ }
+
+ err = unpack_to_rootfs(cpio_srcbuf, len);
+ KUNIT_EXPECT_NOT_NULL(test, err);
+
+ kfree(cpio_srcbuf);
+}
+
+static void __init initramfs_test_data(struct kunit *test)
+{
+ char *err, *cpio_srcbuf;
+ size_t len;
+ struct file *file;
+ struct initramfs_test_cpio c[] = { {
+ .magic = "070701",
+ .ino = 1,
+ .mode = S_IFREG | 0777,
+ .uid = 0,
+ .gid = 0,
+ .nlink = 1,
+ .mtime = 1,
+ .filesize = sizeof("ASDF") - 1,
+ .devmajor = 0,
+ .devminor = 1,
+ .rdevmajor = 0,
+ .rdevminor = 0,
+ .namesize = sizeof("initramfs_test_data"),
+ .csum = 0,
+ .fname = "initramfs_test_data",
+ .data = "ASDF",
+ } };
+
+ /* +6 for max name and data 4-byte padding */
+ cpio_srcbuf = kmalloc(CPIO_HDRLEN + c[0].namesize + c[0].filesize + 6,
+ GFP_KERNEL);
+
+ len = fill_cpio(c, ARRAY_SIZE(c), cpio_srcbuf);
+
+ err = unpack_to_rootfs(cpio_srcbuf, len);
+ KUNIT_EXPECT_NULL(test, err);
+
+ file = filp_open(c[0].fname, O_RDONLY, 0);
+ if (IS_ERR(file)) {
+ KUNIT_FAIL(test, "open failed");
+ goto out;
+ }
+
+ /* read back file contents into @cpio_srcbuf and confirm match */
+ len = kernel_read(file, cpio_srcbuf, c[0].filesize, NULL);
+ KUNIT_EXPECT_EQ(test, len, c[0].filesize);
+ KUNIT_EXPECT_MEMEQ(test, cpio_srcbuf, c[0].data, len);
+
+ fput(file);
+ KUNIT_EXPECT_EQ(test, init_unlink(c[0].fname), 0);
+out:
+ kfree(cpio_srcbuf);
+}
+
+static void __init initramfs_test_csum(struct kunit *test)
+{
+ char *err, *cpio_srcbuf;
+ size_t len;
+ struct initramfs_test_cpio c[] = { {
+ /* 070702 magic indicates a valid csum is present */
+ .magic = "070702",
+ .ino = 1,
+ .mode = S_IFREG | 0777,
+ .nlink = 1,
+ .filesize = sizeof("ASDF") - 1,
+ .devminor = 1,
+ .namesize = sizeof("initramfs_test_csum"),
+ .csum = 'A' + 'S' + 'D' + 'F',
+ .fname = "initramfs_test_csum",
+ .data = "ASDF",
+ }, {
+ /* mix csum entry above with no-csum entry below */
+ .magic = "070701",
+ .ino = 2,
+ .mode = S_IFREG | 0777,
+ .nlink = 1,
+ .filesize = sizeof("ASDF") - 1,
+ .devminor = 1,
+ .namesize = sizeof("initramfs_test_csum_not_here"),
+ /* csum ignored */
+ .csum = 5555,
+ .fname = "initramfs_test_csum_not_here",
+ .data = "ASDF",
+ } };
+
+ cpio_srcbuf = kmalloc(8192, GFP_KERNEL);
+
+ len = fill_cpio(c, ARRAY_SIZE(c), cpio_srcbuf);
+
+ err = unpack_to_rootfs(cpio_srcbuf, len);
+ KUNIT_EXPECT_NULL(test, err);
+
+ KUNIT_EXPECT_EQ(test, init_unlink(c[0].fname), 0);
+ KUNIT_EXPECT_EQ(test, init_unlink(c[1].fname), 0);
+
+ /* mess up the csum and confirm that unpack fails */
+ c[0].csum--;
+ len = fill_cpio(c, ARRAY_SIZE(c), cpio_srcbuf);
+
+ err = unpack_to_rootfs(cpio_srcbuf, len);
+ KUNIT_EXPECT_NOT_NULL(test, err);
+
+ /*
+ * file (with content) is still retained in case of bad-csum abort.
+ * Perhaps we should change this.
+ */
+ KUNIT_EXPECT_EQ(test, init_unlink(c[0].fname), 0);
+ KUNIT_EXPECT_EQ(test, init_unlink(c[1].fname), -ENOENT);
+ kfree(cpio_srcbuf);
+}
+
+/*
+ * hardlink hashtable may leak when the archive omits a trailer:
+ * https://lore.kernel.org/r/20241107002044.16477-10-ddiss@suse.de/
+ */
+static void __init initramfs_test_hardlink(struct kunit *test)
+{
+ char *err, *cpio_srcbuf;
+ size_t len;
+ struct kstat st0, st1;
+ struct initramfs_test_cpio c[] = { {
+ .magic = "070701",
+ .ino = 1,
+ .mode = S_IFREG | 0777,
+ .nlink = 2,
+ .devminor = 1,
+ .namesize = sizeof("initramfs_test_hardlink"),
+ .fname = "initramfs_test_hardlink",
+ }, {
+ /* hardlink data is present in last archive entry */
+ .magic = "070701",
+ .ino = 1,
+ .mode = S_IFREG | 0777,
+ .nlink = 2,
+ .filesize = sizeof("ASDF") - 1,
+ .devminor = 1,
+ .namesize = sizeof("initramfs_test_hardlink_link"),
+ .fname = "initramfs_test_hardlink_link",
+ .data = "ASDF",
+ } };
+
+ cpio_srcbuf = kmalloc(8192, GFP_KERNEL);
+
+ len = fill_cpio(c, ARRAY_SIZE(c), cpio_srcbuf);
+
+ err = unpack_to_rootfs(cpio_srcbuf, len);
+ KUNIT_EXPECT_NULL(test, err);
+
+ KUNIT_EXPECT_EQ(test, init_stat(c[0].fname, &st0, 0), 0);
+ KUNIT_EXPECT_EQ(test, init_stat(c[1].fname, &st1, 0), 0);
+ KUNIT_EXPECT_EQ(test, st0.ino, st1.ino);
+ KUNIT_EXPECT_EQ(test, st0.nlink, 2);
+ KUNIT_EXPECT_EQ(test, st1.nlink, 2);
+
+ KUNIT_EXPECT_EQ(test, init_unlink(c[0].fname), 0);
+ KUNIT_EXPECT_EQ(test, init_unlink(c[1].fname), 0);
+
+ kfree(cpio_srcbuf);
+}
+
+#define INITRAMFS_TEST_MANY_LIMIT 1000
+#define INITRAMFS_TEST_MANY_PATH_MAX (sizeof("initramfs_test_many-") \
+ + sizeof(__stringify(INITRAMFS_TEST_MANY_LIMIT)))
+static void __init initramfs_test_many(struct kunit *test)
+{
+ char *err, *cpio_srcbuf, *p;
+ size_t len = INITRAMFS_TEST_MANY_LIMIT *
+ (CPIO_HDRLEN + INITRAMFS_TEST_MANY_PATH_MAX + 3);
+ char thispath[INITRAMFS_TEST_MANY_PATH_MAX];
+ int i;
+
+ p = cpio_srcbuf = kmalloc(len, GFP_KERNEL);
+
+ for (i = 0; i < INITRAMFS_TEST_MANY_LIMIT; i++) {
+ struct initramfs_test_cpio c = {
+ .magic = "070701",
+ .ino = i,
+ .mode = S_IFREG | 0777,
+ .nlink = 1,
+ .devminor = 1,
+ .fname = thispath,
+ };
+
+ c.namesize = 1 + sprintf(thispath, "initramfs_test_many-%d", i);
+ p += fill_cpio(&c, 1, p);
+ }
+
+ len = p - cpio_srcbuf;
+ err = unpack_to_rootfs(cpio_srcbuf, len);
+ KUNIT_EXPECT_NULL(test, err);
+
+ for (i = 0; i < INITRAMFS_TEST_MANY_LIMIT; i++) {
+ sprintf(thispath, "initramfs_test_many-%d", i);
+ KUNIT_EXPECT_EQ(test, init_unlink(thispath), 0);
+ }
+
+ kfree(cpio_srcbuf);
+}
+
+/*
+ * The kunit_case/_suite structs cannot be marked __initdata, as they are
+ * used in debugfs to retrieve results after the test has run.
+ */
+static struct kunit_case __refdata initramfs_test_cases[] = {
+ KUNIT_CASE(initramfs_test_extract),
+ KUNIT_CASE(initramfs_test_fname_overrun),
+ KUNIT_CASE(initramfs_test_data),
+ KUNIT_CASE(initramfs_test_csum),
+ KUNIT_CASE(initramfs_test_hardlink),
+ KUNIT_CASE(initramfs_test_many),
+ {},
+};
+
+static struct kunit_suite initramfs_test_suite = {
+ .name = "initramfs",
+ .test_cases = initramfs_test_cases,
+};
+kunit_test_init_section_suites(&initramfs_test_suite);
+
+MODULE_DESCRIPTION("Initramfs KUnit test suite");
+MODULE_LICENSE("GPL v2");
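
Aside on the alignment logic in fill_cpio() above: newc archives pad both
the header-plus-name region and the file data to 4-byte boundaries with NUL
bytes. The two `while (off & 3)` loops are equivalent to this hypothetical
helper:

	/* Pad @off up to the next 4-byte boundary, writing NULs into @out. */
	static size_t cpio_pad4(char *out, size_t off)
	{
		while (off & 3)
			out[off++] = '\0';
		return off;
	}
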
diff --git a/io_uring/net.c b/io_uring/net.c
index 5d0b56ff50ee..50e8a3ccc9de 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -148,7 +148,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
io_alloc_cache_kasan(&hdr->free_iov, &hdr->free_iov_nr);
if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
req->async_data = NULL;
- req->flags &= ~REQ_F_ASYNC_DATA;
+ req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
}
}
@@ -441,7 +441,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static void io_req_msg_cleanup(struct io_kiocb *req,
unsigned int issue_flags)
{
- req->flags &= ~REQ_F_NEED_CLEANUP;
io_netmsg_recycle(req, issue_flags);
}
@@ -1441,6 +1440,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
*/
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(zc->notif);
+ zc->notif = NULL;
io_req_msg_cleanup(req, 0);
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
@@ -1501,6 +1501,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
*/
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(sr->notif);
+ sr->notif = NULL;
io_req_msg_cleanup(req, 0);
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 7f358740e958..367eaf2c78b7 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -350,11 +350,10 @@ static int audit_get_nd(struct audit_watch *watch, struct path *parent)
struct dentry *d = kern_path_locked(watch->path, parent);
if (IS_ERR(d))
return PTR_ERR(d);
- if (d_is_positive(d)) {
- /* update watch filter fields */
- watch->dev = d->d_sb->s_dev;
- watch->ino = d_backing_inode(d)->i_ino;
- }
+ /* update watch filter fields */
+ watch->dev = d->d_sb->s_dev;
+ watch->ino = d_backing_inode(d)->i_ino;
+
inode_unlock(d_backing_inode(parent->dentry));
dput(d);
return 0;
@@ -419,10 +418,11 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
/* caller expects mutex locked */
mutex_lock(&audit_filter_mutex);
- if (ret) {
+ if (ret && ret != -ENOENT) {
audit_put_watch(watch);
return ret;
}
+ ret = 0;
/* either find an old parent or attach a new one */
parent = audit_find_parent(d_backing_inode(parent_path.dentry));
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 9c853cde9abe..78fd876a5473 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2207,10 +2207,8 @@ __audit_reusename(const __user char *uptr)
list_for_each_entry(n, &context->names_list, list) {
if (!n->name)
continue;
- if (n->name->uptr == uptr) {
- atomic_inc(&n->name->refcnt);
- return n->name;
- }
+ if (n->name->uptr == uptr)
+ return refname(n->name);
}
return NULL;
}
@@ -2237,7 +2235,7 @@ void __audit_getname(struct filename *name)
n->name = name;
n->name_len = AUDIT_NAME_FULL;
name->aname = n;
- atomic_inc(&name->refcnt);
+ refname(name);
}
static inline int audit_copy_fcaps(struct audit_names *name,
@@ -2369,7 +2367,7 @@ out_alloc:
return;
if (name) {
n->name = name;
- atomic_inc(&name->refcnt);
+ refname(name);
}
out:
@@ -2496,7 +2494,7 @@ void __audit_inode_child(struct inode *parent,
if (found_parent) {
found_child->name = found_parent->name;
found_child->name_len = AUDIT_NAME_FULL;
- atomic_inc(&found_child->name->refcnt);
+ refname(found_child->name);
}
}
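
Note on the auditsc.c hunks above: the open-coded atomic_inc(&name->refcnt)
calls are replaced by refname(), a helper introduced elsewhere in this
series. As a hedged sketch, its behavior is presumably equivalent to:

	/* Presumed shape of refname(): take a reference on a struct filename
	 * and return it, so call sites can grab-and-return in one expression,
	 * e.g. "return refname(n->name);". */
	static inline struct filename *refname(struct filename *name)
	{
		atomic_inc(&name->refcnt);
		return name;
	}
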
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 9aaf5124648b..dc3aa91a6ba0 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -150,14 +150,14 @@ static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
-static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct inode *inode;
inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
if (IS_ERR(inode))
- return PTR_ERR(inode);
+ return ERR_CAST(inode);
inode->i_op = &bpf_dir_iops;
inode->i_fop = &simple_dir_operations;
@@ -166,7 +166,7 @@ static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
inc_nlink(dir);
bpf_dentry_finalize(dentry, inode, dir);
- return 0;
+ return NULL;
}
struct map_iter {
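
Context for the bpf_mkdir() change above: the ->mkdir() inode operation now
returns a struct dentry * rather than an int. Under the convention implied
by this hunk (NULL on success with the passed-in dentry, ERR_CAST()/ERR_PTR()
on failure), a caller-side sketch might look like:

	/* Illustrative handling of the new ->mkdir() return value; assumes
	 * a non-NULL, non-error dentry means the filesystem instantiated a
	 * different dentry than the one passed in. */
	struct dentry *de = dir->i_op->mkdir(idmap, dir, dentry, mode);

	if (IS_ERR(de))
		return PTR_ERR(de);
	if (de)
		dentry = de;
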
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index c964dd7ff967..95ab39e1ec8f 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -168,6 +168,7 @@ struct cgroup_mgctx {
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
+extern bool cgrp_dfl_visible;
/* iterate across the hierarchies */
#define for_each_root(root) \
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index e28d5f0d20ed..11ea8d24ac72 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -673,6 +673,7 @@ struct cftype cgroup1_base_files[] = {
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
struct cgroup_subsys *ss;
+ bool cgrp_v1_visible = false;
int i;
seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
@@ -684,12 +685,18 @@ int proc_cgroupstats_show(struct seq_file *m, void *v)
for_each_subsys(ss, i) {
if (cgroup1_subsys_absent(ss))
continue;
+ cgrp_v1_visible |= ss->root != &cgrp_dfl_root;
+
seq_printf(m, "%s\t%d\t%d\t%d\n",
ss->legacy_name, ss->root->hierarchy_id,
atomic_read(&ss->root->nr_cgrps),
cgroup_ssid_enabled(i));
}
+ if (cgrp_dfl_visible && !cgrp_v1_visible)
+ pr_info_once("/proc/cgroups lists only v1 controllers, use cgroup.controllers of root cgroup for v2 info\n");
+
return 0;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index afc665b7b1fe..f231fe3a0744 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(cgrp_dfl_root);
* The default hierarchy always exists but is hidden until mounted for the
* first time. This is for backward compatibility.
*/
-static bool cgrp_dfl_visible;
+bool cgrp_dfl_visible;
/* some controllers are not supported in the default hierarchy */
static u16 cgrp_dfl_inhibit_ss_mask;
@@ -4447,7 +4447,7 @@ int cgroup_rm_cftypes(struct cftype *cfts)
* function currently returns 0 as long as @cfts registration is successful
* even if some file creation attempts on existing cgroups fail.
*/
-static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
int ret;
@@ -5831,7 +5831,7 @@ int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
}
/*
- * This extra ref will be put in cgroup_free_fn() and guarantees
+ * This extra ref will be put in css_free_rwork_fn() and guarantees
* that @cgrp->kn is always accessible.
*/
kernfs_get(cgrp->kn);
diff --git a/kernel/cgroup/cpuset-v1.c b/kernel/cgroup/cpuset-v1.c
index 25c1d7b77e2f..b69a7db67090 100644
--- a/kernel/cgroup/cpuset-v1.c
+++ b/kernel/cgroup/cpuset-v1.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include "cgroup-internal.h"
#include "cpuset-internal.h"
/*
@@ -175,6 +176,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
switch (type) {
case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+ pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = update_relax_domain_level(cs, val);
break;
default:
@@ -373,6 +375,46 @@ out:
return ret;
}
+#ifdef CONFIG_PROC_PID_CPUSET
+/*
+ * proc_cpuset_show()
+ * - Print task's cpuset path into seq_file.
+ * - Used for /proc/<pid>/cpuset.
+ */
+int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *tsk)
+{
+ char *buf;
+ struct cgroup_subsys_state *css;
+ int retval;
+
+ retval = -ENOMEM;
+ buf = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ rcu_read_lock();
+ spin_lock_irq(&css_set_lock);
+ css = task_css(tsk, cpuset_cgrp_id);
+ retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
+ current->nsproxy->cgroup_ns);
+ spin_unlock_irq(&css_set_lock);
+ rcu_read_unlock();
+
+ if (retval == -E2BIG)
+ retval = -ENAMETOOLONG;
+ if (retval < 0)
+ goto out_free;
+ seq_puts(m, buf);
+ seq_putc(m, '\n');
+ retval = 0;
+out_free:
+ kfree(buf);
+out:
+ return retval;
+}
+#endif /* CONFIG_PROC_PID_CPUSET */
+
static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
{
struct cpuset *cs = css_cs(css);
@@ -424,24 +466,31 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
retval = cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, val);
break;
case FILE_MEM_EXCLUSIVE:
+ pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_MEM_EXCLUSIVE, cs, val);
break;
case FILE_MEM_HARDWALL:
+ pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_MEM_HARDWALL, cs, val);
break;
case FILE_SCHED_LOAD_BALANCE:
+ pr_info_once("cpuset.%s is deprecated, use cpuset.cpus.partition instead\n", cft->name);
retval = cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
break;
case FILE_MEMORY_MIGRATE:
+ pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_MEMORY_MIGRATE, cs, val);
break;
case FILE_MEMORY_PRESSURE_ENABLED:
+ pr_info_once("cpuset.%s is deprecated, use memory.pressure with CONFIG_PSI instead\n", cft->name);
cpuset_memory_pressure_enabled = !!val;
break;
case FILE_SPREAD_PAGE:
+ pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_SPREAD_PAGE, cs, val);
break;
case FILE_SPREAD_SLAB:
+ pr_info_once("cpuset.%s is deprecated\n", cft->name);
retval = cpuset_update_flag(CS_SPREAD_SLAB, cs, val);
break;
default:
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 0f910c828973..5a637292faa2 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -21,7 +21,6 @@
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
-#include "cgroup-internal.h"
#include "cpuset-internal.h"
#include <linux/init.h>
@@ -4244,50 +4243,6 @@ void cpuset_print_current_mems_allowed(void)
rcu_read_unlock();
}
-#ifdef CONFIG_PROC_PID_CPUSET
-/*
- * proc_cpuset_show()
- * - Print tasks cpuset path into seq_file.
- * - Used for /proc/<pid>/cpuset.
- * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
- * doesn't really matter if tsk->cpuset changes after we read it,
- * and we take cpuset_mutex, keeping cpuset_attach() from changing it
- * anyway.
- */
-int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
- struct pid *pid, struct task_struct *tsk)
-{
- char *buf;
- struct cgroup_subsys_state *css;
- int retval;
-
- retval = -ENOMEM;
- buf = kmalloc(PATH_MAX, GFP_KERNEL);
- if (!buf)
- goto out;
-
- rcu_read_lock();
- spin_lock_irq(&css_set_lock);
- css = task_css(tsk, cpuset_cgrp_id);
- retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
- current->nsproxy->cgroup_ns);
- spin_unlock_irq(&css_set_lock);
- rcu_read_unlock();
-
- if (retval == -E2BIG)
- retval = -ENAMETOOLONG;
- if (retval < 0)
- goto out_free;
- seq_puts(m, buf);
- seq_putc(m, '\n');
- retval = 0;
-out_free:
- kfree(buf);
-out:
- return retval;
-}
-#endif /* CONFIG_PROC_PID_CPUSET */
-
/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
index 074653f964c1..039d1eb2f215 100644
--- a/kernel/cgroup/legacy_freezer.c
+++ b/kernel/cgroup/legacy_freezer.c
@@ -430,9 +430,11 @@ static ssize_t freezer_write(struct kernfs_open_file *of,
if (strcmp(buf, freezer_state_strs(0)) == 0)
freeze = false;
- else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0)
+ else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0) {
+ pr_info_once("Freezing with imperfect legacy cgroup freezer. "
+ "See cgroup.freeze of cgroup v2\n");
freeze = true;
- else
+ } else
return -EINVAL;
freezer_change_state(css_freezer(of_css(of)), freeze);
diff --git a/kernel/cgroup/misc.c b/kernel/cgroup/misc.c
index 0e26068995a6..2fa3a4fb2aaf 100644
--- a/kernel/cgroup/misc.c
+++ b/kernel/cgroup/misc.c
@@ -68,22 +68,6 @@ static inline bool valid_type(enum misc_res_type type)
}
/**
- * misc_cg_res_total_usage() - Get the current total usage of the resource.
- * @type: misc res type.
- *
- * Context: Any context.
- * Return: Current total usage of the resource.
- */
-u64 misc_cg_res_total_usage(enum misc_res_type type)
-{
- if (valid_type(type))
- return atomic64_read(&root_cg.res[type].usage);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(misc_cg_res_total_usage);
-
-/**
* misc_cg_set_capacity() - Set the capacity of the misc cgroup res.
* @type: Type of the misc res.
* @capacity: Supported capacity of the misc res on the host.
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index aac91466279f..4bb587d5d34f 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -299,40 +299,6 @@ static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
spin_unlock_irq(&cgroup_rstat_lock);
}
-/* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
- __releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
-{
- int cpu;
-
- lockdep_assert_held(&cgroup_rstat_lock);
-
- for_each_possible_cpu(cpu) {
- struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);
-
- for (; pos; pos = pos->rstat_flush_next) {
- struct cgroup_subsys_state *css;
-
- cgroup_base_stat_flush(pos, cpu);
- bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
-
- rcu_read_lock();
- list_for_each_entry_rcu(css, &pos->rstat_css_list,
- rstat_css_node)
- css->ss->css_rstat_flush(css, cpu);
- rcu_read_unlock();
- }
-
- /* play nice and yield if necessary */
- if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
- __cgroup_rstat_unlock(cgrp, cpu);
- if (!cond_resched())
- cpu_relax();
- __cgroup_rstat_lock(cgrp, cpu);
- }
- }
-}
-
/**
* cgroup_rstat_flush - flush stats in @cgrp's subtree
* @cgrp: target cgroup
@@ -348,38 +314,30 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
*/
__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
+ int cpu;
+
might_sleep();
+ for_each_possible_cpu(cpu) {
+ struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);
- __cgroup_rstat_lock(cgrp, -1);
- cgroup_rstat_flush_locked(cgrp);
- __cgroup_rstat_unlock(cgrp, -1);
-}
+ /* Reacquire for each CPU to avoid keeping IRQs disabled for too long */
+ __cgroup_rstat_lock(cgrp, cpu);
+ for (; pos; pos = pos->rstat_flush_next) {
+ struct cgroup_subsys_state *css;
-/**
- * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
- * @cgrp: target cgroup
- *
- * Flush stats in @cgrp's subtree and prevent further flushes. Must be
- * paired with cgroup_rstat_flush_release().
- *
- * This function may block.
- */
-void cgroup_rstat_flush_hold(struct cgroup *cgrp)
- __acquires(&cgroup_rstat_lock)
-{
- might_sleep();
- __cgroup_rstat_lock(cgrp, -1);
- cgroup_rstat_flush_locked(cgrp);
-}
+ cgroup_base_stat_flush(pos, cpu);
+ bpf_rstat_flush(pos, cgroup_parent(pos), cpu);
-/**
- * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
- * @cgrp: cgroup used by tracepoint
- */
-void cgroup_rstat_flush_release(struct cgroup *cgrp)
- __releases(&cgroup_rstat_lock)
-{
- __cgroup_rstat_unlock(cgrp, -1);
+ rcu_read_lock();
+ list_for_each_entry_rcu(css, &pos->rstat_css_list,
+ rstat_css_node)
+ css->ss->css_rstat_flush(css, cpu);
+ rcu_read_unlock();
+ }
+ __cgroup_rstat_unlock(cgrp, cpu);
+ if (!cond_resched())
+ cpu_relax();
+ }
}
int cgroup_rstat_init(struct cgroup *cgrp)
@@ -612,36 +570,34 @@ static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
- u64 usage, utime, stime, ntime;
+ struct cgroup_base_stat bstat;
if (cgroup_parent(cgrp)) {
- cgroup_rstat_flush_hold(cgrp);
- usage = cgrp->bstat.cputime.sum_exec_runtime;
+ cgroup_rstat_flush(cgrp);
+ __cgroup_rstat_lock(cgrp, -1);
+ bstat = cgrp->bstat;
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
- &utime, &stime);
- ntime = cgrp->bstat.ntime;
- cgroup_rstat_flush_release(cgrp);
+ &bstat.cputime.utime, &bstat.cputime.stime);
+ __cgroup_rstat_unlock(cgrp, -1);
} else {
- /* cgrp->bstat of root is not actually used, reuse it */
- root_cgroup_cputime(&cgrp->bstat);
- usage = cgrp->bstat.cputime.sum_exec_runtime;
- utime = cgrp->bstat.cputime.utime;
- stime = cgrp->bstat.cputime.stime;
- ntime = cgrp->bstat.ntime;
+ root_cgroup_cputime(&bstat);
}
- do_div(usage, NSEC_PER_USEC);
- do_div(utime, NSEC_PER_USEC);
- do_div(stime, NSEC_PER_USEC);
- do_div(ntime, NSEC_PER_USEC);
+ do_div(bstat.cputime.sum_exec_runtime, NSEC_PER_USEC);
+ do_div(bstat.cputime.utime, NSEC_PER_USEC);
+ do_div(bstat.cputime.stime, NSEC_PER_USEC);
+ do_div(bstat.ntime, NSEC_PER_USEC);
seq_printf(seq, "usage_usec %llu\n"
"user_usec %llu\n"
"system_usec %llu\n"
"nice_usec %llu\n",
- usage, utime, stime, ntime);
+ bstat.cputime.sum_exec_runtime,
+ bstat.cputime.utime,
+ bstat.cputime.stime,
+ bstat.ntime);
- cgroup_force_idle_show(seq, &cgrp->bstat);
+ cgroup_force_idle_show(seq, &bstat);
}
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
diff --git a/kernel/configs/hardening.config b/kernel/configs/hardening.config
index 3fabb8f55ef6..dd7c32fb5ac1 100644
--- a/kernel/configs/hardening.config
+++ b/kernel/configs/hardening.config
@@ -46,7 +46,7 @@ CONFIG_UBSAN_BOUNDS=y
# CONFIG_UBSAN_SHIFT is not set
# CONFIG_UBSAN_DIV_ZERO is not set
# CONFIG_UBSAN_UNREACHABLE is not set
-# CONFIG_UBSAN_SIGNED_WRAP is not set
+# CONFIG_UBSAN_INTEGER_WRAP is not set
# CONFIG_UBSAN_BOOL is not set
# CONFIG_UBSAN_ENUM is not set
# CONFIG_UBSAN_ALIGNMENT is not set
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 078fe5bc5a74..335b8425dd4b 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -436,7 +436,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
memset(&prstatus, 0, sizeof(prstatus));
prstatus.common.pr_pid = current->pid;
elf_core_copy_regs(&prstatus.pr_reg, regs);
- buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
+ buf = append_elf_note(buf, NN_PRSTATUS, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
final_note(buf);
}
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index e33691d5adf7..20154572ede9 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -49,7 +49,7 @@ long syscall_trace_enter(struct pt_regs *regs, long syscall,
/* Do seccomp after ptrace, to catch any tracer changes. */
if (work & SYSCALL_WORK_SECCOMP) {
- ret = __secure_computing(NULL);
+ ret = __secure_computing();
if (ret == -1L)
return ret;
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 3485e5fc499e..c2e6c7b7779f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -69,6 +69,7 @@
#include <linux/sysfs.h>
#include <linux/user_events.h>
#include <linux/uaccess.h>
+#include <linux/pidfs.h>
#include <uapi/linux/wait.h>
@@ -122,14 +123,22 @@ static __init int kernel_exit_sysfs_init(void)
late_initcall(kernel_exit_sysfs_init);
#endif
-static void __unhash_process(struct task_struct *p, bool group_dead)
+/*
+ * For things release_task() would like to do *after* tasklist_lock is released.
+ */
+struct release_task_post {
+ struct pid *pids[PIDTYPE_MAX];
+};
+
+static void __unhash_process(struct release_task_post *post, struct task_struct *p,
+ bool group_dead)
{
nr_threads--;
- detach_pid(p, PIDTYPE_PID);
+ detach_pid(post->pids, p, PIDTYPE_PID);
if (group_dead) {
- detach_pid(p, PIDTYPE_TGID);
- detach_pid(p, PIDTYPE_PGID);
- detach_pid(p, PIDTYPE_SID);
+ detach_pid(post->pids, p, PIDTYPE_TGID);
+ detach_pid(post->pids, p, PIDTYPE_PGID);
+ detach_pid(post->pids, p, PIDTYPE_SID);
list_del_rcu(&p->tasks);
list_del_init(&p->sibling);
@@ -141,7 +150,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
/*
* This function expects the tasklist_lock write-locked.
*/
-static void __exit_signal(struct task_struct *tsk)
+static void __exit_signal(struct release_task_post *post, struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
bool group_dead = thread_group_leader(tsk);
@@ -174,9 +183,6 @@ static void __exit_signal(struct task_struct *tsk)
sig->curr_target = next_thread(tsk);
}
- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
- sizeof(unsigned long long));
-
/*
* Accumulate here the counters for all threads as they die. We could
* skip the group leader because it is the last user of signal_struct,
@@ -197,23 +203,15 @@ static void __exit_signal(struct task_struct *tsk)
task_io_accounting_add(&sig->ioac, &tsk->ioac);
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
sig->nr_threads--;
- __unhash_process(tsk, group_dead);
+ __unhash_process(post, tsk, group_dead);
write_sequnlock(&sig->stats_lock);
- /*
- * Do this under ->siglock, we can race with another thread
- * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
- */
- flush_sigqueue(&tsk->pending);
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
__cleanup_sighand(sighand);
- clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
- if (group_dead) {
- flush_sigqueue(&sig->shared_pending);
+ if (group_dead)
tty_kref_put(tty);
- }
}
static void delayed_put_task_struct(struct rcu_head *rhp)
@@ -239,22 +237,27 @@ void __weak release_thread(struct task_struct *dead_task)
void release_task(struct task_struct *p)
{
+ struct release_task_post post;
struct task_struct *leader;
struct pid *thread_pid;
int zap_leader;
repeat:
+ memset(&post, 0, sizeof(post));
+
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
rcu_read_lock();
dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
rcu_read_unlock();
+ pidfs_exit(p);
cgroup_release(p);
+ thread_pid = get_pid(p->thread_pid);
+
write_lock_irq(&tasklist_lock);
ptrace_release_task(p);
- thread_pid = get_pid(p->thread_pid);
- __exit_signal(p);
+ __exit_signal(&post, p);
/*
* If we are the last non-leader member of the thread
@@ -278,7 +281,20 @@ repeat:
write_unlock_irq(&tasklist_lock);
proc_flush_pid(thread_pid);
put_pid(thread_pid);
+ add_device_randomness(&p->se.sum_exec_runtime,
+ sizeof(p->se.sum_exec_runtime));
+ free_pids(post.pids);
release_thread(p);
+ /*
+ * This task was already removed from the process/thread/pid lists
+ * and lock_task_sighand(p) can't succeed. Nobody else can touch
+ * ->pending or, if group dead, signal->shared_pending. We can call
+ * flush_sigqueue() lockless.
+ */
+ flush_sigqueue(&p->pending);
+ if (thread_group_leader(p))
+ flush_sigqueue(&p->signal->shared_pending);
+
put_task_struct_rcu_user(p);
p = leader;
@@ -741,10 +757,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
tsk->exit_state = EXIT_ZOMBIE;
/*
- * sub-thread or delay_group_leader(), wake up the
- * PIDFD_THREAD waiters.
+ * Ignore thread-group leaders that exited before all
+ * subthreads did.
*/
- if (!thread_group_empty(tsk))
+ if (!delay_group_leader(tsk))
do_notify_pidfd(tsk);
if (unlikely(tsk->ptrace)) {
diff --git a/kernel/fork.c b/kernel/fork.c
index 735405a9c5f3..f11ac96b7587 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2032,25 +2032,18 @@ static inline void rcu_copy_process(struct task_struct *p)
*/
static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
{
- int pidfd;
struct file *pidfd_file;
- pidfd = get_unused_fd_flags(O_CLOEXEC);
+ CLASS(get_unused_fd, pidfd)(O_CLOEXEC);
if (pidfd < 0)
return pidfd;
pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR);
- if (IS_ERR(pidfd_file)) {
- put_unused_fd(pidfd);
+ if (IS_ERR(pidfd_file))
return PTR_ERR(pidfd_file);
- }
- /*
- * anon_inode_getfile() ignores everything outside of the
- * O_ACCMODE | O_NONBLOCK mask, set PIDFD_THREAD manually.
- */
- pidfd_file->f_flags |= (flags & PIDFD_THREAD);
+
*ret = pidfd_file;
- return pidfd;
+ return take_fd(pidfd);
}
/**
@@ -2432,8 +2425,11 @@ __latent_entropy struct task_struct *copy_process(
if (clone_flags & CLONE_PIDFD) {
int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0;
- /* Note that no task has been attached to @pid yet. */
- retval = __pidfd_prepare(pid, flags, &pidfile);
+ /*
+ * Note that no task has been attached to @pid yet; indicate
+ * that via PIDFD_CLONE.
+ */
+ retval = __pidfd_prepare(pid, flags | PIDFD_CLONE, &pidfile);
if (retval < 0)
goto bad_fork_free_pid;
pidfd = retval;
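
Note on the __pidfd_prepare() rewrite above: CLASS(get_unused_fd, ...)
reserves a file descriptor that is automatically returned to the free pool
if the function exits without claiming it, and take_fd() performs that
claim. The general shape of the pattern, sketched with a hypothetical
allocation step:

	CLASS(get_unused_fd, fd)(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = alloc_file_that_may_fail();	/* hypothetical */
	if (IS_ERR(file))
		return PTR_ERR(file);		/* fd released automatically */

	*ret = file;
	return take_fd(fd);			/* claim the fd; guard disarmed */
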
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index 2c596851f8a9..7c1a65bd5f8d 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -145,7 +145,7 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
*/
task1 = find_task_by_vpid(pid1);
task2 = find_task_by_vpid(pid2);
- if (!task1 || !task2)
+ if (unlikely(!task1 || !task2))
goto err_no_task;
get_task_struct(task1);
diff --git a/kernel/pid.c b/kernel/pid.c
index 924084713be8..4ac2ce46817f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -88,20 +88,6 @@ struct pid_namespace init_pid_ns = {
};
EXPORT_SYMBOL_GPL(init_pid_ns);
-/*
- * Note: disable interrupts while the pidmap_lock is held as an
- * interrupt might come in and do read_lock(&tasklist_lock).
- *
- * If we don't disable interrupts there is a nasty deadlock between
- * detach_pid()->free_pid() and another cpu that does
- * spin_lock(&pidmap_lock) followed by an interrupt routine that does
- * read_lock(&tasklist_lock);
- *
- * After we clean up the tasklist_lock and know there are no
- * irq handlers that take it we can leave the interrupts enabled.
- * For now it is easier to be safe than to prove it can't happen.
- */
-
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
seqcount_spinlock_t pidmap_lock_seq = SEQCNT_SPINLOCK_ZERO(pidmap_lock_seq, &pidmap_lock);
@@ -128,11 +114,11 @@ static void delayed_put_pid(struct rcu_head *rhp)
void free_pid(struct pid *pid)
{
- /* We can be called with write_lock_irq(&tasklist_lock) held */
int i;
- unsigned long flags;
- spin_lock_irqsave(&pidmap_lock, flags);
+ lockdep_assert_not_held(&tasklist_lock);
+
+ spin_lock(&pidmap_lock);
for (i = 0; i <= pid->level; i++) {
struct upid *upid = pid->numbers + i;
struct pid_namespace *ns = upid->ns;
@@ -155,11 +141,23 @@ void free_pid(struct pid *pid)
idr_remove(&ns->idr, upid->nr);
}
pidfs_remove_pid(pid);
- spin_unlock_irqrestore(&pidmap_lock, flags);
+ spin_unlock(&pidmap_lock);
call_rcu(&pid->rcu, delayed_put_pid);
}
+void free_pids(struct pid **pids)
+{
+ int tmp;
+
+ /*
+ * This can be optimized to take pidmap_lock once for the whole batch.
+ */
+ for (tmp = PIDTYPE_MAX; --tmp >= 0; )
+ if (pids[tmp])
+ free_pid(pids[tmp]);
+}
+
struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
size_t set_tid_size)
{
@@ -211,7 +209,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
}
idr_preload(GFP_KERNEL);
- spin_lock_irq(&pidmap_lock);
+ spin_lock(&pidmap_lock);
if (tid) {
nr = idr_alloc(&tmp->idr, NULL, tid,
@@ -238,7 +236,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
pid_max, GFP_ATOMIC);
}
- spin_unlock_irq(&pidmap_lock);
+ spin_unlock(&pidmap_lock);
idr_preload_end();
if (nr < 0) {
@@ -272,7 +270,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
upid = pid->numbers + ns->level;
idr_preload(GFP_KERNEL);
- spin_lock_irq(&pidmap_lock);
+ spin_lock(&pidmap_lock);
if (!(ns->pid_allocated & PIDNS_ADDING))
goto out_unlock;
pidfs_add_pid(pid);
@@ -281,18 +279,18 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
idr_replace(&upid->ns->idr, pid, upid->nr);
upid->ns->pid_allocated++;
}
- spin_unlock_irq(&pidmap_lock);
+ spin_unlock(&pidmap_lock);
idr_preload_end();
return pid;
out_unlock:
- spin_unlock_irq(&pidmap_lock);
+ spin_unlock(&pidmap_lock);
idr_preload_end();
put_pid_ns(ns);
out_free:
- spin_lock_irq(&pidmap_lock);
+ spin_lock(&pidmap_lock);
while (++i <= ns->level) {
upid = pid->numbers + i;
idr_remove(&upid->ns->idr, upid->nr);
@@ -302,7 +300,7 @@ out_free:
if (ns->pid_allocated == PIDNS_ADDING)
idr_set_cursor(&ns->idr, 0);
- spin_unlock_irq(&pidmap_lock);
+ spin_unlock(&pidmap_lock);
kmem_cache_free(ns->pid_cachep, pid);
return ERR_PTR(retval);
@@ -310,9 +308,9 @@ out_free:
void disable_pid_allocation(struct pid_namespace *ns)
{
- spin_lock_irq(&pidmap_lock);
+ spin_lock(&pidmap_lock);
ns->pid_allocated &= ~PIDNS_ADDING;
- spin_unlock_irq(&pidmap_lock);
+ spin_unlock(&pidmap_lock);
}
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
@@ -339,17 +337,23 @@ static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
*/
void attach_pid(struct task_struct *task, enum pid_type type)
{
- struct pid *pid = *task_pid_ptr(task, type);
+ struct pid *pid;
+
+ lockdep_assert_held_write(&tasklist_lock);
+
+ pid = *task_pid_ptr(task, type);
hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}
-static void __change_pid(struct task_struct *task, enum pid_type type,
- struct pid *new)
+static void __change_pid(struct pid **pids, struct task_struct *task,
+ enum pid_type type, struct pid *new)
{
- struct pid **pid_ptr = task_pid_ptr(task, type);
- struct pid *pid;
+ struct pid **pid_ptr, *pid;
int tmp;
+ lockdep_assert_held_write(&tasklist_lock);
+
+ pid_ptr = task_pid_ptr(task, type);
pid = *pid_ptr;
hlist_del_rcu(&task->pid_links[type]);
@@ -364,18 +368,19 @@ static void __change_pid(struct task_struct *task, enum pid_type type,
if (pid_has_task(pid, tmp))
return;
- free_pid(pid);
+ WARN_ON(pids[type]);
+ pids[type] = pid;
}
-void detach_pid(struct task_struct *task, enum pid_type type)
+void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type type)
{
- __change_pid(task, type, NULL);
+ __change_pid(pids, task, type, NULL);
}
-void change_pid(struct task_struct *task, enum pid_type type,
+void change_pid(struct pid **pids, struct task_struct *task, enum pid_type type,
struct pid *pid)
{
- __change_pid(task, type, pid);
+ __change_pid(pids, task, type, pid);
attach_pid(task, type);
}
@@ -386,6 +391,8 @@ void exchange_tids(struct task_struct *left, struct task_struct *right)
struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];
+ lockdep_assert_held_write(&tasklist_lock);
+
/* Swap the single entry tid lists */
hlists_swap_heads_rcu(head1, head2);
@@ -403,6 +410,7 @@ void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type type)
{
WARN_ON_ONCE(type == PIDTYPE_PID);
+ lockdep_assert_held_write(&tasklist_lock);
hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}
@@ -564,15 +572,29 @@ struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
*/
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags)
{
- unsigned int f_flags;
+ unsigned int f_flags = 0;
struct pid *pid;
struct task_struct *task;
+ enum pid_type type;
- pid = pidfd_get_pid(pidfd, &f_flags);
- if (IS_ERR(pid))
- return ERR_CAST(pid);
+ switch (pidfd) {
+ case PIDFD_SELF_THREAD:
+ type = PIDTYPE_PID;
+ pid = get_task_pid(current, type);
+ break;
+ case PIDFD_SELF_THREAD_GROUP:
+ type = PIDTYPE_TGID;
+ pid = get_task_pid(current, type);
+ break;
+ default:
+ pid = pidfd_get_pid(pidfd, &f_flags);
+ if (IS_ERR(pid))
+ return ERR_CAST(pid);
+ type = PIDTYPE_TGID;
+ break;
+ }
- task = get_pid_task(pid, PIDTYPE_TGID);
+ task = get_pid_task(pid, type);
put_pid(pid);
if (!task)
return ERR_PTR(-ESRCH);
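
The pidfd_get_task() hunk above adds handling for the PIDFD_SELF_THREAD and
PIDFD_SELF_THREAD_GROUP sentinels, which let a caller refer to itself
without opening a pidfd. A hedged userspace sketch, assuming the sentinels
are exposed via <linux/pidfd.h> by this series:

	#include <linux/pidfd.h>	/* PIDFD_SELF_THREAD (assumed) */
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <signal.h>

	/* Hypothetical example: signal the calling thread through the pidfd
	 * API without creating a pidfd. */
	static int signal_self_thread(int sig)
	{
		return syscall(SYS_pidfd_send_signal, PIDFD_SELF_THREAD,
			       sig, NULL, 0);
	}
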
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 4b3f31911465..7a34a99d4664 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -85,15 +85,8 @@ void rcu_sched_clock_irq(int user)
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
rcu_callback_t f;
- unsigned long offset = (unsigned long)head->func;
rcu_lock_acquire(&rcu_callback_map);
- if (__is_kvfree_rcu_offset(offset)) {
- trace_rcu_invoke_kvfree_callback("", head, offset);
- kvfree((void *)head - offset);
- rcu_lock_release(&rcu_callback_map);
- return true;
- }
trace_rcu_invoke_callback("", head);
f = head->func;
@@ -159,10 +152,6 @@ void synchronize_rcu(void)
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
-static void tiny_rcu_leak_callback(struct rcu_head *rhp)
-{
-}
-
/*
* Post an RCU callback to be invoked after the end of an RCU grace
* period. But since we have but one CPU, that would be after any
@@ -178,9 +167,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func)
pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
mem_dump_obj(head);
}
-
- if (!__is_kvfree_rcu_offset((unsigned long)head->func))
- WRITE_ONCE(head->func, tiny_rcu_leak_callback);
return;
}
@@ -246,17 +232,6 @@ bool poll_state_synchronize_rcu(unsigned long oldstate)
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
-#ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
- if (head)
- kasan_record_aux_stack(ptr);
-
- __kvfree_call_rcu(head, ptr);
-}
-EXPORT_SYMBOL_GPL(kvfree_call_rcu);
-#endif
-
void __init rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 475f31deed14..5dbc4189037c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2931,13 +2931,8 @@ static int __init rcu_spawn_core_kthreads(void)
static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func)
{
rcu_segcblist_enqueue(&rdp->cblist, head);
- if (__is_kvfree_rcu_offset((unsigned long)func))
- trace_rcu_kvfree_callback(rcu_state.name, head,
- (unsigned long)func,
- rcu_segcblist_n_cbs(&rdp->cblist));
- else
- trace_rcu_callback(rcu_state.name, head,
- rcu_segcblist_n_cbs(&rdp->cblist));
+ trace_rcu_callback(rcu_state.name, head,
+ rcu_segcblist_n_cbs(&rdp->cblist));
trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
}
diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
index fae1f5c921eb..72d97aa8b726 100644
--- a/kernel/sched/build_policy.c
+++ b/kernel/sched/build_policy.c
@@ -61,6 +61,7 @@
#ifdef CONFIG_SCHED_CLASS_EXT
# include "ext.c"
+# include "ext_idle.c"
#endif
#include "syscalls.c"
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 67189907214d..f6840c33a296 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3922,13 +3922,8 @@ bool cpus_share_resources(int this_cpu, int that_cpu)
static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
{
- /*
- * The BPF scheduler may depend on select_task_rq() being invoked during
- * wakeups. In addition, @p may end up executing on a different CPU
- * regardless of what happens in the wakeup path making the ttwu_queue
- * optimization less meaningful. Skip if on SCX.
- */
- if (task_on_scx(p))
+ /* See SCX_OPS_ALLOW_QUEUED_WAKEUP. */
+ if (!scx_allow_ttwu_queue(p))
return false;
/*
@@ -9016,7 +9011,7 @@ void sched_release_group(struct task_group *tg)
spin_unlock_irqrestore(&task_group_lock, flags);
}
-static struct task_group *sched_get_task_group(struct task_struct *tsk)
+static void sched_change_group(struct task_struct *tsk)
{
struct task_group *tg;
@@ -9028,13 +9023,7 @@ static struct task_group *sched_get_task_group(struct task_struct *tsk)
tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
struct task_group, css);
tg = autogroup_task_group(tsk, tg);
-
- return tg;
-}
-
-static void sched_change_group(struct task_struct *tsk, struct task_group *group)
-{
- tsk->sched_task_group = group;
+ tsk->sched_task_group = tg;
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->task_change_group)
@@ -9055,20 +9044,11 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup)
{
int queued, running, queue_flags =
DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
- struct task_group *group;
struct rq *rq;
CLASS(task_rq_lock, rq_guard)(tsk);
rq = rq_guard.rq;
- /*
- * Esp. with SCHED_AUTOGROUP enabled it is possible to get superfluous
- * group changes.
- */
- group = sched_get_task_group(tsk);
- if (group == tsk->sched_task_group)
- return;
-
update_rq_clock(rq);
running = task_current_donor(rq, tsk);
@@ -9079,7 +9059,7 @@ void sched_move_task(struct task_struct *tsk, bool for_autogroup)
if (running)
put_prev_task(rq, tsk);
- sched_change_group(tsk, group);
+ sched_change_group(tsk);
if (!for_autogroup)
scx_cgroup_move_task(tsk);
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7b9dfee858e7..06561d6717c9 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -6,6 +6,9 @@
* Copyright (c) 2022 Tejun Heo <tj@kernel.org>
* Copyright (c) 2022 David Vernet <dvernet@meta.com>
*/
+#include <linux/btf_ids.h>
+#include "ext_idle.h"
+
#define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
enum scx_consts {
@@ -93,7 +96,7 @@ enum scx_ops_flags {
/*
* Keep built-in idle tracking even if ops.update_idle() is implemented.
*/
- SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
+ SCX_OPS_KEEP_BUILTIN_IDLE = 1LLU << 0,
/*
* By default, if there are no other task to run on the CPU, ext core
@@ -101,7 +104,7 @@ enum scx_ops_flags {
* flag is specified, such tasks are passed to ops.enqueue() with
* %SCX_ENQ_LAST. See the comment above %SCX_ENQ_LAST for more info.
*/
- SCX_OPS_ENQ_LAST = 1LLU << 1,
+ SCX_OPS_ENQ_LAST = 1LLU << 1,
/*
* An exiting task may schedule after PF_EXITING is set. In such cases,
@@ -114,13 +117,13 @@ enum scx_ops_flags {
* depend on pid lookups and wants to handle these tasks directly, the
* following flag can be used.
*/
- SCX_OPS_ENQ_EXITING = 1LLU << 2,
+ SCX_OPS_ENQ_EXITING = 1LLU << 2,
/*
* If set, only tasks with policy set to SCHED_EXT are attached to
* sched_ext. If clear, SCHED_NORMAL tasks are also included.
*/
- SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
+ SCX_OPS_SWITCH_PARTIAL = 1LLU << 3,
/*
* A migration disabled task can only execute on its current CPU. By
@@ -133,7 +136,29 @@ enum scx_ops_flags {
* current CPU while p->nr_cpus_allowed keeps tracking p->user_cpus_ptr
* and thus may disagree with cpumask_weight(p->cpus_ptr).
*/
- SCX_OPS_ENQ_MIGRATION_DISABLED = 1LLU << 4,
+ SCX_OPS_ENQ_MIGRATION_DISABLED = 1LLU << 4,
+
+ /*
+ * Queued wakeup (ttwu_queue) is a wakeup optimization that invokes
+ * ops.enqueue() on the ops.select_cpu() selected or the wakee's
+ * previous CPU via IPI (inter-processor interrupt) to reduce cacheline
+ * transfers. When this optimization is enabled, ops.select_cpu() is
+ * skipped in some cases (when racing against the wakee switching out).
+ * As the BPF scheduler may depend on ops.select_cpu() being invoked
+ * during wakeups, queued wakeup is disabled by default.
+ *
+ * If this ops flag is set, queued wakeup optimization is enabled and
+ * the BPF scheduler must be able to handle ops.enqueue() invoked on the
+ * wakee's CPU without preceding ops.select_cpu() even for tasks which
+ * may be executed on multiple CPUs.
+ */
+ SCX_OPS_ALLOW_QUEUED_WAKEUP = 1LLU << 5,
+
+ /*
+ * If set, enable per-node idle cpumasks. If clear, use a single global
+ * flat idle cpumask.
+ */
+ SCX_OPS_BUILTIN_IDLE_PER_NODE = 1LLU << 6,
/*
* CPU cgroup support flags
@@ -144,7 +169,9 @@ enum scx_ops_flags {
SCX_OPS_ENQ_LAST |
SCX_OPS_ENQ_EXITING |
SCX_OPS_ENQ_MIGRATION_DISABLED |
+ SCX_OPS_ALLOW_QUEUED_WAKEUP |
SCX_OPS_SWITCH_PARTIAL |
+ SCX_OPS_BUILTIN_IDLE_PER_NODE |
SCX_OPS_HAS_CGROUP_WEIGHT,
};
@@ -779,6 +806,7 @@ enum scx_deq_flags {
enum scx_pick_idle_cpu_flags {
SCX_PICK_IDLE_CORE = 1LLU << 0, /* pick a CPU whose SMT siblings are also idle */
+ SCX_PICK_IDLE_IN_NODE = 1LLU << 1, /* pick a CPU in the same target NUMA node */
};
enum scx_kick_flags {
@@ -894,16 +922,11 @@ DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
static struct sched_ext_ops scx_ops;
static bool scx_warned_zero_slice;
+DEFINE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last);
static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting);
static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_migration_disabled);
static DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);
-static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
-
-#ifdef CONFIG_SMP
-static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
-static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
-#endif
static struct static_key_false scx_has_op[SCX_OPI_END] =
{ [0 ... SCX_OPI_END-1] = STATIC_KEY_FALSE_INIT };
@@ -938,21 +961,6 @@ static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
static struct delayed_work scx_watchdog_work;
-/* idle tracking */
-#ifdef CONFIG_SMP
-#ifdef CONFIG_CPUMASK_OFFSTACK
-#define CL_ALIGNED_IF_ONSTACK
-#else
-#define CL_ALIGNED_IF_ONSTACK __cacheline_aligned_in_smp
-#endif
-
-static struct {
- cpumask_var_t cpu;
- cpumask_var_t smt;
-} idle_masks CL_ALIGNED_IF_ONSTACK;
-
-#endif /* CONFIG_SMP */
-
/* for %SCX_KICK_WAIT */
static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
@@ -1473,6 +1481,117 @@ static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
return p;
}
+/*
+ * Collection of event counters. Event types are placed in descending order.
+ */
+struct scx_event_stats {
+ /*
+ * If ops.select_cpu() returns a CPU which can't be used by the task,
+ * the core scheduler code silently picks a fallback CPU.
+ */
+ s64 SCX_EV_SELECT_CPU_FALLBACK;
+
+ /*
+ * When dispatching to a local DSQ, the CPU may have gone offline in
+ * the meantime. In this case, the task is bounced to the global DSQ.
+ */
+ s64 SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE;
+
+ /*
+ * If SCX_OPS_ENQ_LAST is not set, the number of times that a task
+ * continued to run because there were no other tasks on the CPU.
+ */
+ s64 SCX_EV_DISPATCH_KEEP_LAST;
+
+ /*
+ * If SCX_OPS_ENQ_EXITING is not set, the number of times that a task
+ * is dispatched to a local DSQ when exiting.
+ */
+ s64 SCX_EV_ENQ_SKIP_EXITING;
+
+ /*
+ * If SCX_OPS_ENQ_MIGRATION_DISABLED is not set, the number of times a
+ * migration disabled task skips ops.enqueue() and is dispatched to its
+ * local DSQ.
+ */
+ s64 SCX_EV_ENQ_SKIP_MIGRATION_DISABLED;
+
+ /*
+ * The total number of tasks enqueued (or pick_task-ed) with a
+ * default time slice (SCX_SLICE_DFL).
+ */
+ s64 SCX_EV_ENQ_SLICE_DFL;
+
+ /*
+ * The total duration of bypass modes in nanoseconds.
+ */
+ s64 SCX_EV_BYPASS_DURATION;
+
+ /*
+ * The number of tasks dispatched in the bypassing mode.
+ */
+ s64 SCX_EV_BYPASS_DISPATCH;
+
+ /*
+ * The number of times the bypassing mode has been activated.
+ */
+ s64 SCX_EV_BYPASS_ACTIVATE;
+};
+
+/*
+ * Event counters are kept in per-CPU variables to minimize accounting
+ * overhead without requiring synchronization. A system-wide view of the
+ * counters is constructed when requested by scx_bpf_get_event_stat().
+ */
+static DEFINE_PER_CPU(struct scx_event_stats, event_stats_cpu);
+
+/**
+ * scx_add_event - Increase an event counter for 'name' by 'cnt'
+ * @name: an event name defined in struct scx_event_stats
+ * @cnt: the number of times the event occurred
+ *
+ * This can be used even when preemption is enabled.
+ */
+#define scx_add_event(name, cnt) do { \
+ this_cpu_add(event_stats_cpu.name, cnt); \
+ trace_sched_ext_event(#name, cnt); \
+} while (0)
+
+/**
+ * __scx_add_event - Increase an event counter for 'name' by 'cnt'
+ * @name: an event name defined in struct scx_event_stats
+ * @cnt: the number of times the event occurred
+ *
+ * This should be used only when preemption is disabled.
+ */
+#define __scx_add_event(name, cnt) do { \
+ __this_cpu_add(event_stats_cpu.name, cnt); \
+ trace_sched_ext_event(#name, cnt); \
+} while (0)
+
+/**
+ * scx_agg_event - Aggregate an event counter 'kind' from 'src_e' to 'dst_e'
+ * @dst_e: destination event stats
+ * @src_e: source event stats
+ * @kind: a kind of event to be aggregated
+ */
+#define scx_agg_event(dst_e, src_e, kind) do { \
+ (dst_e)->kind += READ_ONCE((src_e)->kind); \
+} while (0)
+
+/**
+ * scx_dump_event - Dump an event 'kind' in 'events' to 's'
+ * @s: output seq_buf
+ * @events: event stats
+ * @kind: a kind of event to dump
+ */
+#define scx_dump_event(s, events, kind) do { \
+ dump_line(&(s), "%40s: %16lld", #kind, (events)->kind); \
+} while (0)
+
+static void scx_bpf_events(struct scx_event_stats *events, size_t events__sz);
+
static enum scx_ops_enable_state scx_ops_enable_state(void)
{
return atomic_read(&scx_ops_enable_state_var);
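
To illustrate how the per-CPU counters defined above can be combined (the
actual scx_bpf_events() body is outside this hunk), a minimal aggregation
sketch built on the scx_agg_event() macro:

	/* Hypothetical snapshot helper: fold the per-CPU event_stats_cpu
	 * counters into one system-wide view; only a few counters shown. */
	static void scx_events_snapshot(struct scx_event_stats *events)
	{
		int cpu;

		memset(events, 0, sizeof(*events));
		for_each_possible_cpu(cpu) {
			struct scx_event_stats *e =
				per_cpu_ptr(&event_stats_cpu, cpu);

			scx_agg_event(events, e, SCX_EV_SELECT_CPU_FALLBACK);
			scx_agg_event(events, e, SCX_EV_BYPASS_DISPATCH);
			scx_agg_event(events, e, SCX_EV_ENQ_SLICE_DFL);
		}
	}
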
@@ -2018,21 +2137,27 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
if (!scx_rq_online(rq))
goto local;
- if (scx_rq_bypassing(rq))
+ if (scx_rq_bypassing(rq)) {
+ __scx_add_event(SCX_EV_BYPASS_DISPATCH, 1);
goto global;
+ }
if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
goto direct;
/* see %SCX_OPS_ENQ_EXITING */
if (!static_branch_unlikely(&scx_ops_enq_exiting) &&
- unlikely(p->flags & PF_EXITING))
+ unlikely(p->flags & PF_EXITING)) {
+ __scx_add_event(SCX_EV_ENQ_SKIP_EXITING, 1);
goto local;
+ }
/* see %SCX_OPS_ENQ_MIGRATION_DISABLED */
if (!static_branch_unlikely(&scx_ops_enq_migration_disabled) &&
- is_migration_disabled(p))
+ is_migration_disabled(p)) {
+ __scx_add_event(SCX_EV_ENQ_SKIP_MIGRATION_DISABLED, 1);
goto local;
+ }
if (!SCX_HAS_OP(enqueue))
goto global;
@@ -2072,6 +2197,7 @@ local:
*/
touch_core_sched(rq, p);
p->scx.slice = SCX_SLICE_DFL;
+ __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
local_norefill:
dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags);
return;
@@ -2079,6 +2205,7 @@ local_norefill:
global:
touch_core_sched(rq, p); /* see the comment in local: */
p->scx.slice = SCX_SLICE_DFL;
+ __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
dispatch_enqueue(find_global_dsq(p), p, enq_flags);
}
@@ -2150,6 +2277,10 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags
do_enqueue_task(rq, p, enq_flags, sticky_cpu);
out:
rq->scx.flags &= ~SCX_RQ_IN_WAKEUP;
+
+ if ((enq_flags & SCX_ENQ_CPU_SELECTED) &&
+ unlikely(cpu_of(rq) != p->scx.selected_cpu))
+ __scx_add_event(SCX_EV_SELECT_CPU_FALLBACK, 1);
}
static void ops_dequeue(struct task_struct *p, u64 deq_flags)
@@ -2337,7 +2468,7 @@ static void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
* The caller must ensure that @p and @rq are on different CPUs.
*/
static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
- bool trigger_error)
+ bool enforce)
{
int cpu = cpu_of(rq);
@@ -2356,7 +2487,7 @@ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
* easily be masked if task_allowed_on_cpu() is done first.
*/
if (unlikely(is_migration_disabled(p))) {
- if (trigger_error)
+ if (enforce)
scx_ops_error("SCX_DSQ_LOCAL[_ON] cannot move migration disabled %s[%d] from CPU %d to %d",
p->comm, p->pid, task_cpu(p), cpu);
return false;
@@ -2369,14 +2500,17 @@ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
* picked CPU is outside the allowed mask.
*/
if (!task_allowed_on_cpu(p, cpu)) {
- if (trigger_error)
+ if (enforce)
scx_ops_error("SCX_DSQ_LOCAL[_ON] target CPU %d not allowed for %s[%d]",
cpu, p->comm, p->pid);
return false;
}
- if (!scx_rq_online(rq))
+ if (!scx_rq_online(rq)) {
+ if (enforce)
+ __scx_add_event(SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE, 1);
return false;
+ }
return true;
}
@@ -2446,7 +2580,7 @@ static bool consume_remote_task(struct rq *this_rq, struct task_struct *p,
}
#else /* CONFIG_SMP */
static inline void move_remote_task_to_local_dsq(struct task_struct *p, u64 enq_flags, struct rq *src_rq, struct rq *dst_rq) { WARN_ON_ONCE(1); }
-static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool trigger_error) { return false; }
+static inline bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq, bool enforce) { return false; }
static inline bool consume_remote_task(struct rq *this_rq, struct task_struct *p, struct scx_dispatch_q *dsq, struct rq *task_rq) { return false; }
#endif /* CONFIG_SMP */
@@ -2893,6 +3027,7 @@ no_tasks:
if (prev_on_rq && (!static_branch_unlikely(&scx_ops_enq_last) ||
scx_rq_bypassing(rq))) {
rq->scx.flags |= SCX_RQ_BAL_KEEP;
+ __scx_add_event(SCX_EV_DISPATCH_KEEP_LAST, 1);
goto has_tasks;
}
rq->scx.flags &= ~SCX_RQ_IN_BALANCE;
@@ -3159,8 +3294,10 @@ static struct task_struct *pick_task_scx(struct rq *rq)
*/
if (keep_prev) {
p = prev;
- if (!p->scx.slice)
+ if (!p->scx.slice) {
p->scx.slice = SCX_SLICE_DFL;
+ __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
+ }
} else {
p = first_local_task(rq);
if (!p) {
@@ -3176,6 +3313,7 @@ static struct task_struct *pick_task_scx(struct rq *rq)
scx_warned_zero_slice = true;
}
p->scx.slice = SCX_SLICE_DFL;
+ __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
}
}
@@ -3220,418 +3358,10 @@ bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
#ifdef CONFIG_SMP
-static bool test_and_clear_cpu_idle(int cpu)
-{
-#ifdef CONFIG_SCHED_SMT
- /*
- * SMT mask should be cleared whether we can claim @cpu or not. The SMT
- * cluster is not wholly idle either way. This also prevents
- * scx_pick_idle_cpu() from getting caught in an infinite loop.
- */
- if (sched_smt_active()) {
- const struct cpumask *smt = cpu_smt_mask(cpu);
-
- /*
- * If offline, @cpu is not its own sibling and
- * scx_pick_idle_cpu() can get caught in an infinite loop as
- * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
- * is eventually cleared.
- *
- * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
- * reduce memory writes, which may help alleviate cache
- * coherence pressure.
- */
- if (cpumask_intersects(smt, idle_masks.smt))
- cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
- else if (cpumask_test_cpu(cpu, idle_masks.smt))
- __cpumask_clear_cpu(cpu, idle_masks.smt);
- }
-#endif
- return cpumask_test_and_clear_cpu(cpu, idle_masks.cpu);
-}
-
-static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
-{
- int cpu;
-
-retry:
- if (sched_smt_active()) {
- cpu = cpumask_any_and_distribute(idle_masks.smt, cpus_allowed);
- if (cpu < nr_cpu_ids)
- goto found;
-
- if (flags & SCX_PICK_IDLE_CORE)
- return -EBUSY;
- }
-
- cpu = cpumask_any_and_distribute(idle_masks.cpu, cpus_allowed);
- if (cpu >= nr_cpu_ids)
- return -EBUSY;
-
-found:
- if (test_and_clear_cpu_idle(cpu))
- return cpu;
- else
- goto retry;
-}
-
-/*
- * Return the amount of CPUs in the same LLC domain of @cpu (or zero if the LLC
- * domain is not defined).
- */
-static unsigned int llc_weight(s32 cpu)
-{
- struct sched_domain *sd;
-
- sd = rcu_dereference(per_cpu(sd_llc, cpu));
- if (!sd)
- return 0;
-
- return sd->span_weight;
-}
-
-/*
- * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
- * domain is not defined).
- */
-static struct cpumask *llc_span(s32 cpu)
-{
- struct sched_domain *sd;
-
- sd = rcu_dereference(per_cpu(sd_llc, cpu));
- if (!sd)
- return 0;
-
- return sched_domain_span(sd);
-}
-
-/*
- * Return the amount of CPUs in the same NUMA domain of @cpu (or zero if the
- * NUMA domain is not defined).
- */
-static unsigned int numa_weight(s32 cpu)
-{
- struct sched_domain *sd;
- struct sched_group *sg;
-
- sd = rcu_dereference(per_cpu(sd_numa, cpu));
- if (!sd)
- return 0;
- sg = sd->groups;
- if (!sg)
- return 0;
-
- return sg->group_weight;
-}
-
-/*
- * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
- * domain is not defined).
- */
-static struct cpumask *numa_span(s32 cpu)
-{
- struct sched_domain *sd;
- struct sched_group *sg;
-
- sd = rcu_dereference(per_cpu(sd_numa, cpu));
- if (!sd)
- return NULL;
- sg = sd->groups;
- if (!sg)
- return NULL;
-
- return sched_group_span(sg);
-}
-
-/*
- * Return true if the LLC domains do not perfectly overlap with the NUMA
- * domains, false otherwise.
- */
-static bool llc_numa_mismatch(void)
-{
- int cpu;
-
- /*
- * We need to scan all online CPUs to verify whether their scheduling
- * domains overlap.
- *
- * While it is rare to encounter architectures with asymmetric NUMA
- * topologies, CPU hotplugging or virtualized environments can result
- * in asymmetric configurations.
- *
- * For example:
- *
- * NUMA 0:
- * - LLC 0: cpu0..cpu7
- * - LLC 1: cpu8..cpu15 [offline]
- *
- * NUMA 1:
- * - LLC 0: cpu16..cpu23
- * - LLC 1: cpu24..cpu31
- *
- * In this case, if we only check the first online CPU (cpu0), we might
- * incorrectly assume that the LLC and NUMA domains are fully
- * overlapping, which is incorrect (as NUMA 1 has two distinct LLC
- * domains).
- */
- for_each_online_cpu(cpu)
- if (llc_weight(cpu) != numa_weight(cpu))
- return true;
-
- return false;
-}
-
-/*
- * Initialize topology-aware scheduling.
- *
- * Detect if the system has multiple LLC or multiple NUMA domains and enable
- * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
- * selection policy.
- *
- * Assumption: the kernel's internal topology representation assumes that each
- * CPU belongs to a single LLC domain, and that each LLC domain is entirely
- * contained within a single NUMA node.
- */
-static void update_selcpu_topology(void)
-{
- bool enable_llc = false, enable_numa = false;
- unsigned int nr_cpus;
- s32 cpu = cpumask_first(cpu_online_mask);
-
- /*
- * Enable LLC domain optimization only when there are multiple LLC
- * domains among the online CPUs. If all online CPUs are part of a
- * single LLC domain, the idle CPU selection logic can choose any
- * online CPU without bias.
- *
- * Note that it is sufficient to check the LLC domain of the first
- * online CPU to determine whether a single LLC domain includes all
- * CPUs.
- */
- rcu_read_lock();
- nr_cpus = llc_weight(cpu);
- if (nr_cpus > 0) {
- if (nr_cpus < num_online_cpus())
- enable_llc = true;
- pr_debug("sched_ext: LLC=%*pb weight=%u\n",
- cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
- }
-
- /*
- * Enable NUMA optimization only when there are multiple NUMA domains
- * among the online CPUs and the NUMA domains don't perfectly overlaps
- * with the LLC domains.
- *
- * If all CPUs belong to the same NUMA node and the same LLC domain,
- * enabling both NUMA and LLC optimizations is unnecessary, as checking
- * for an idle CPU in the same domain twice is redundant.
- */
- nr_cpus = numa_weight(cpu);
- if (nr_cpus > 0) {
- if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
- enable_numa = true;
- pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
- cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
- }
- rcu_read_unlock();
-
- pr_debug("sched_ext: LLC idle selection %s\n",
- str_enabled_disabled(enable_llc));
- pr_debug("sched_ext: NUMA idle selection %s\n",
- str_enabled_disabled(enable_numa));
-
- if (enable_llc)
- static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
- else
- static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
- if (enable_numa)
- static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
- else
- static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
-}
-
-/*
- * Built-in CPU idle selection policy:
- *
- * 1. Prioritize full-idle cores:
- * - always prioritize CPUs from fully idle cores (both logical CPUs are
- * idle) to avoid interference caused by SMT.
- *
- * 2. Reuse the same CPU:
- * - prefer the last used CPU to take advantage of cached data (L1, L2) and
- * branch prediction optimizations.
- *
- * 3. Pick a CPU within the same LLC (Last-Level Cache):
- * - if the above conditions aren't met, pick a CPU that shares the same LLC
- * to maintain cache locality.
- *
- * 4. Pick a CPU within the same NUMA node, if enabled:
- * - choose a CPU from the same NUMA node to reduce memory access latency.
- *
- * 5. Pick any idle CPU usable by the task.
- *
- * Step 3 and 4 are performed only if the system has, respectively, multiple
- * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
- * scx_selcpu_topo_numa).
- *
- * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
- * we never call ops.select_cpu() for them, see select_task_rq().
- */
-static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
- u64 wake_flags, bool *found)
-{
- const struct cpumask *llc_cpus = NULL;
- const struct cpumask *numa_cpus = NULL;
- s32 cpu;
-
- *found = false;
-
- /*
- * This is necessary to protect llc_cpus.
- */
- rcu_read_lock();
-
- /*
- * Determine the scheduling domain only if the task is allowed to run
- * on all CPUs.
- *
- * This is done primarily for efficiency, as it avoids the overhead of
- * updating a cpumask every time we need to select an idle CPU (which
- * can be costly in large SMP systems), but it also aligns logically:
- * if a task's scheduling domain is restricted by user-space (through
- * CPU affinity), the task will simply use the flat scheduling domain
- * defined by user-space.
- */
- if (p->nr_cpus_allowed >= num_possible_cpus()) {
- if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
- numa_cpus = numa_span(prev_cpu);
-
- if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
- llc_cpus = llc_span(prev_cpu);
- }
-
- /*
- * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
- */
- if (wake_flags & SCX_WAKE_SYNC) {
- cpu = smp_processor_id();
-
- /*
- * If the waker's CPU is cache affine and prev_cpu is idle,
- * then avoid a migration.
- */
- if (cpus_share_cache(cpu, prev_cpu) &&
- test_and_clear_cpu_idle(prev_cpu)) {
- cpu = prev_cpu;
- goto cpu_found;
- }
-
- /*
- * If the waker's local DSQ is empty, and the system is under
- * utilized, try to wake up @p to the local DSQ of the waker.
- *
- * Checking only for an empty local DSQ is insufficient as it
- * could give the wakee an unfair advantage when the system is
- * oversaturated.
- *
- * Checking only for the presence of idle CPUs is also
- * insufficient as the local DSQ of the waker could have tasks
- * piled up on it even if there is an idle core elsewhere on
- * the system.
- */
- if (!cpumask_empty(idle_masks.cpu) &&
- !(current->flags & PF_EXITING) &&
- cpu_rq(cpu)->scx.local_dsq.nr == 0) {
- if (cpumask_test_cpu(cpu, p->cpus_ptr))
- goto cpu_found;
- }
- }
-
- /*
- * If CPU has SMT, any wholly idle CPU is likely a better pick than
- * partially idle @prev_cpu.
- */
- if (sched_smt_active()) {
- /*
- * Keep using @prev_cpu if it's part of a fully idle core.
- */
- if (cpumask_test_cpu(prev_cpu, idle_masks.smt) &&
- test_and_clear_cpu_idle(prev_cpu)) {
- cpu = prev_cpu;
- goto cpu_found;
- }
-
- /*
- * Search for any fully idle core in the same LLC domain.
- */
- if (llc_cpus) {
- cpu = scx_pick_idle_cpu(llc_cpus, SCX_PICK_IDLE_CORE);
- if (cpu >= 0)
- goto cpu_found;
- }
-
- /*
- * Search for any fully idle core in the same NUMA node.
- */
- if (numa_cpus) {
- cpu = scx_pick_idle_cpu(numa_cpus, SCX_PICK_IDLE_CORE);
- if (cpu >= 0)
- goto cpu_found;
- }
-
- /*
- * Search for any full idle core usable by the task.
- */
- cpu = scx_pick_idle_cpu(p->cpus_ptr, SCX_PICK_IDLE_CORE);
- if (cpu >= 0)
- goto cpu_found;
- }
-
- /*
- * Use @prev_cpu if it's idle.
- */
- if (test_and_clear_cpu_idle(prev_cpu)) {
- cpu = prev_cpu;
- goto cpu_found;
- }
-
- /*
- * Search for any idle CPU in the same LLC domain.
- */
- if (llc_cpus) {
- cpu = scx_pick_idle_cpu(llc_cpus, 0);
- if (cpu >= 0)
- goto cpu_found;
- }
-
- /*
- * Search for any idle CPU in the same NUMA node.
- */
- if (numa_cpus) {
- cpu = scx_pick_idle_cpu(numa_cpus, 0);
- if (cpu >= 0)
- goto cpu_found;
- }
-
- /*
- * Search for any idle CPU usable by the task.
- */
- cpu = scx_pick_idle_cpu(p->cpus_ptr, 0);
- if (cpu >= 0)
- goto cpu_found;
-
- rcu_read_unlock();
- return prev_cpu;
-
-cpu_found:
- rcu_read_unlock();
-
- *found = true;
- return cpu;
-}
-
static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
{
+ bool rq_bypass;
+
/*
* sched_exec() calls with %WF_EXEC when @p is about to exec(2) as it
* can be a good migration opportunity with low cache and memory
@@ -3645,7 +3375,8 @@ static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flag
if (unlikely(wake_flags & WF_EXEC))
return prev_cpu;
- if (SCX_HAS_OP(select_cpu) && !scx_rq_bypassing(task_rq(p))) {
+ rq_bypass = scx_rq_bypassing(task_rq(p));
+ if (SCX_HAS_OP(select_cpu) && !rq_bypass) {
s32 cpu;
struct task_struct **ddsp_taskp;
@@ -3655,20 +3386,27 @@ static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flag
cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
select_cpu, p, prev_cpu, wake_flags);
+ p->scx.selected_cpu = cpu;
*ddsp_taskp = NULL;
if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
return cpu;
else
return prev_cpu;
} else {
- bool found;
s32 cpu;
- cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, &found);
- if (found) {
+ cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+ if (cpu >= 0) {
p->scx.slice = SCX_SLICE_DFL;
p->scx.ddsp_dsq_id = SCX_DSQ_LOCAL;
+ __scx_add_event(SCX_EV_ENQ_SLICE_DFL, 1);
+ } else {
+ cpu = prev_cpu;
}
+ p->scx.selected_cpu = cpu;
+
+ if (rq_bypass)
+ __scx_add_event(SCX_EV_BYPASS_DISPATCH, 1);
return cpu;
}
}
@@ -3696,90 +3434,6 @@ static void set_cpus_allowed_scx(struct task_struct *p,
(struct cpumask *)p->cpus_ptr);
}
-static void reset_idle_masks(void)
-{
- /*
- * Consider all online cpus idle. Should converge to the actual state
- * quickly.
- */
- cpumask_copy(idle_masks.cpu, cpu_online_mask);
- cpumask_copy(idle_masks.smt, cpu_online_mask);
-}
-
-static void update_builtin_idle(int cpu, bool idle)
-{
- assign_cpu(cpu, idle_masks.cpu, idle);
-
-#ifdef CONFIG_SCHED_SMT
- if (sched_smt_active()) {
- const struct cpumask *smt = cpu_smt_mask(cpu);
-
- if (idle) {
- /*
- * idle_masks.smt handling is racy but that's fine as
- * it's only for optimization and self-correcting.
- */
- if (!cpumask_subset(smt, idle_masks.cpu))
- return;
- cpumask_or(idle_masks.smt, idle_masks.smt, smt);
- } else {
- cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
- }
- }
-#endif
-}
-
-/*
- * Update the idle state of a CPU to @idle.
- *
- * If @do_notify is true, ops.update_idle() is invoked to notify the scx
- * scheduler of an actual idle state transition (idle to busy or vice
- * versa). If @do_notify is false, only the idle state in the idle masks is
- * refreshed without invoking ops.update_idle().
- *
- * This distinction is necessary, because an idle CPU can be "reserved" and
- * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
- * busy even if no tasks are dispatched. In this case, the CPU may return
- * to idle without a true state transition. Refreshing the idle masks
- * without invoking ops.update_idle() ensures accurate idle state tracking
- * while avoiding unnecessary updates and maintaining balanced state
- * transitions.
- */
-void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
-{
- int cpu = cpu_of(rq);
-
- lockdep_assert_rq_held(rq);
-
- /*
- * Trigger ops.update_idle() only when transitioning from a task to
- * the idle thread and vice versa.
- *
- * Idle transitions are indicated by do_notify being set to true,
- * managed by put_prev_task_idle()/set_next_task_idle().
- */
- if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
- SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
-
- /*
- * Update the idle masks:
- * - for real idle transitions (do_notify == true)
- * - for idle-to-idle transitions (indicated by the previous task
- * being the idle thread, managed by pick_task_idle())
- *
- * Skip updating idle masks if the previous task is not the idle
- * thread, since set_next_task_idle() has already handled it when
- * transitioning from a task to the idle thread (calling this
- * function with do_notify == true).
- *
- * In this way we can avoid updating the idle masks twice,
- * unnecessarily.
- */
- if (static_branch_likely(&scx_builtin_idle_enabled))
- if (do_notify || is_idle_task(rq->curr))
- update_builtin_idle(cpu, idle);
-}
-
static void handle_hotplug(struct rq *rq, bool online)
{
int cpu = cpu_of(rq);
@@ -3787,7 +3441,7 @@ static void handle_hotplug(struct rq *rq, bool online)
atomic_long_inc(&scx_hotplug_seq);
if (scx_enabled())
- update_selcpu_topology();
+ scx_idle_update_selcpu_topology(&scx_ops);
if (online && SCX_HAS_OP(cpu_online))
SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_online, cpu);
@@ -3819,12 +3473,6 @@ static void rq_offline_scx(struct rq *rq)
rq->scx.flags &= ~SCX_RQ_ONLINE;
}
-#else /* CONFIG_SMP */
-
-static bool test_and_clear_cpu_idle(int cpu) { return false; }
-static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { return -EBUSY; }
-static void reset_idle_masks(void) {}
-
#endif /* CONFIG_SMP */
static bool check_rq_for_timeouts(struct rq *rq)
@@ -4749,8 +4397,33 @@ static ssize_t scx_attr_ops_show(struct kobject *kobj,
}
SCX_ATTR(ops);
+#define scx_attr_event_show(buf, at, events, kind) ({ \
+ sysfs_emit_at(buf, at, "%s %llu\n", #kind, (events)->kind); \
+})
+
+static ssize_t scx_attr_events_show(struct kobject *kobj,
+ struct kobj_attribute *ka, char *buf)
+{
+ struct scx_event_stats events;
+ int at = 0;
+
+ scx_bpf_events(&events, sizeof(events));
+ at += scx_attr_event_show(buf, at, &events, SCX_EV_SELECT_CPU_FALLBACK);
+ at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
+ at += scx_attr_event_show(buf, at, &events, SCX_EV_DISPATCH_KEEP_LAST);
+ at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_EXITING);
+ at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
+ at += scx_attr_event_show(buf, at, &events, SCX_EV_ENQ_SLICE_DFL);
+ at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DURATION);
+ at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_DISPATCH);
+ at += scx_attr_event_show(buf, at, &events, SCX_EV_BYPASS_ACTIVATE);
+ return at;
+}
+SCX_ATTR(events);
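For reference, scx_attr_events_show() emits one "<event> <count>" pair per line, so reading the new events attribute from the sched_ext sysfs hierarchy yields output along these lines (counter values are hypothetical):

    SCX_EV_SELECT_CPU_FALLBACK 12
    SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE 0
    SCX_EV_DISPATCH_KEEP_LAST 345
    SCX_EV_ENQ_SKIP_EXITING 7
    SCX_EV_ENQ_SKIP_MIGRATION_DISABLED 2
    SCX_EV_ENQ_SLICE_DFL 9821
    SCX_EV_BYPASS_DURATION 183000000
    SCX_EV_BYPASS_DISPATCH 0
    SCX_EV_BYPASS_ACTIVATE 1

Since SCX_EV_BYPASS_DURATION accumulates nanoseconds spent in bypass mode while SCX_EV_BYPASS_ACTIVATE counts activations, their ratio gives the mean bypass duration.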
+
static struct attribute *scx_sched_attrs[] = {
&scx_attr_ops.attr,
+ &scx_attr_events.attr,
NULL,
};
ATTRIBUTE_GROUPS(scx_sched);
@@ -4862,6 +4535,8 @@ static void scx_clear_softlockup(void)
static void scx_ops_bypass(bool bypass)
{
static DEFINE_RAW_SPINLOCK(bypass_lock);
+ static unsigned long bypass_timestamp;
+
int cpu;
unsigned long flags;
@@ -4871,11 +4546,15 @@ static void scx_ops_bypass(bool bypass)
WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
if (scx_ops_bypass_depth != 1)
goto unlock;
+ bypass_timestamp = ktime_get_ns();
+ scx_add_event(SCX_EV_BYPASS_ACTIVATE, 1);
} else {
scx_ops_bypass_depth--;
WARN_ON_ONCE(scx_ops_bypass_depth < 0);
if (scx_ops_bypass_depth != 0)
goto unlock;
+ scx_add_event(SCX_EV_BYPASS_DURATION,
+ ktime_get_ns() - bypass_timestamp);
}
atomic_inc(&scx_ops_breather_depth);
@@ -5095,11 +4774,12 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
static_branch_disable(&__scx_ops_enabled);
for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++)
static_branch_disable(&scx_has_op[i]);
+ static_branch_disable(&scx_ops_allow_queued_wakeup);
static_branch_disable(&scx_ops_enq_last);
static_branch_disable(&scx_ops_enq_exiting);
static_branch_disable(&scx_ops_enq_migration_disabled);
static_branch_disable(&scx_ops_cpu_preempt);
- static_branch_disable(&scx_builtin_idle_enabled);
+ scx_idle_disable();
synchronize_rcu();
if (ei->kind >= SCX_EXIT_ERROR) {
@@ -5349,6 +5029,7 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
.at_jiffies = jiffies,
};
struct seq_buf s;
+ struct scx_event_stats events;
unsigned long flags;
char *buf;
int cpu;
@@ -5457,6 +5138,21 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
rq_unlock(rq, &rf);
}
+ dump_newline(&s);
+ dump_line(&s, "Event counters");
+ dump_line(&s, "--------------");
+
+ scx_bpf_events(&events, sizeof(events));
+ scx_dump_event(s, &events, SCX_EV_SELECT_CPU_FALLBACK);
+ scx_dump_event(s, &events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
+ scx_dump_event(s, &events, SCX_EV_DISPATCH_KEEP_LAST);
+ scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_EXITING);
+ scx_dump_event(s, &events, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
+ scx_dump_event(s, &events, SCX_EV_ENQ_SLICE_DFL);
+ scx_dump_event(s, &events, SCX_EV_BYPASS_DURATION);
+ scx_dump_event(s, &events, SCX_EV_BYPASS_DISPATCH);
+ scx_dump_event(s, &events, SCX_EV_BYPASS_ACTIVATE);
+
if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
memcpy(ei->dump + dump_len - sizeof(trunc_marker),
trunc_marker, sizeof(trunc_marker));
@@ -5546,6 +5242,16 @@ static int validate_ops(const struct sched_ext_ops *ops)
return -EINVAL;
}
+ /*
+ * SCX_OPS_BUILTIN_IDLE_PER_NODE requires the built-in CPU idle
+ * selection policy to be enabled.
+ */
+ if ((ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE) &&
+ (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))) {
+ scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE requires CPU idle selection enabled");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -5564,6 +5270,15 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
mutex_lock(&scx_ops_enable_mutex);
+ /*
+ * Clear event counters so that a new scx scheduler starts with
+ * fresh event counters.
+ */
+ for_each_possible_cpu(cpu) {
+ struct scx_event_stats *e = per_cpu_ptr(&event_stats_cpu, cpu);
+ memset(e, 0, sizeof(*e));
+ }
+
if (!scx_ops_helper) {
WRITE_ONCE(scx_ops_helper,
scx_create_rt_helper("sched_ext_ops_helper"));
@@ -5661,9 +5376,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
static_branch_enable_cpuslocked(&scx_has_op[i]);
check_hotplug_seq(ops);
-#ifdef CONFIG_SMP
- update_selcpu_topology();
-#endif
+ scx_idle_update_selcpu_topology(ops);
+
cpus_read_unlock();
ret = validate_ops(ops);
@@ -5702,9 +5416,10 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
if (((void (**)(void))ops)[i])
static_branch_enable(&scx_has_op[i]);
+ if (ops->flags & SCX_OPS_ALLOW_QUEUED_WAKEUP)
+ static_branch_enable(&scx_ops_allow_queued_wakeup);
if (ops->flags & SCX_OPS_ENQ_LAST)
static_branch_enable(&scx_ops_enq_last);
-
if (ops->flags & SCX_OPS_ENQ_EXITING)
static_branch_enable(&scx_ops_enq_exiting);
if (ops->flags & SCX_OPS_ENQ_MIGRATION_DISABLED)
@@ -5712,12 +5427,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
if (scx_ops.cpu_acquire || scx_ops.cpu_release)
static_branch_enable(&scx_ops_cpu_preempt);
- if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
- reset_idle_masks();
- static_branch_enable(&scx_builtin_idle_enabled);
- } else {
- static_branch_disable(&scx_builtin_idle_enabled);
- }
+ scx_idle_enable(ops);
/*
* Lock out forks, cgroup on/offlining and moves before opening the
@@ -6356,10 +6066,8 @@ void __init init_sched_ext_class(void)
SCX_TG_ONLINE);
BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params));
-#ifdef CONFIG_SMP
- BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
- BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL));
-#endif
+ scx_idle_init_masks();
+
scx_kick_cpus_pnt_seqs =
__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
__alignof__(scx_kick_cpus_pnt_seqs[0]));
@@ -6367,15 +6075,16 @@ void __init init_sched_ext_class(void)
for_each_possible_cpu(cpu) {
struct rq *rq = cpu_rq(cpu);
+ int n = cpu_to_node(cpu);
init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL);
INIT_LIST_HEAD(&rq->scx.runnable_list);
INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals);
- BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL));
- BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL));
- BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL));
- BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick, GFP_KERNEL, n));
+ BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_kick_if_idle, GFP_KERNEL, n));
+ BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_preempt, GFP_KERNEL, n));
+ BUG_ON(!zalloc_cpumask_var_node(&rq->scx.cpus_to_wait, GFP_KERNEL, n));
init_irq_work(&rq->scx.deferred_irq_work, deferred_irq_workfn);
init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn);
@@ -6392,65 +6101,6 @@ void __init init_sched_ext_class(void)
/********************************************************************************
* Helpers that can be called from the BPF scheduler.
*/
-#include <linux/btf_ids.h>
-
-__bpf_kfunc_start_defs();
-
-static bool check_builtin_idle_enabled(void)
-{
- if (static_branch_likely(&scx_builtin_idle_enabled))
- return true;
-
- scx_ops_error("built-in idle tracking is disabled");
- return false;
-}
-
-/**
- * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
- * @p: task_struct to select a CPU for
- * @prev_cpu: CPU @p was on previously
- * @wake_flags: %SCX_WAKE_* flags
- * @is_idle: out parameter indicating whether the returned CPU is idle
- *
- * Can only be called from ops.select_cpu() if the built-in CPU selection is
- * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
- * @p, @prev_cpu and @wake_flags match ops.select_cpu().
- *
- * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
- * currently idle and thus a good candidate for direct dispatching.
- */
-__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
- u64 wake_flags, bool *is_idle)
-{
- if (!ops_cpu_valid(prev_cpu, NULL))
- goto prev_cpu;
-
- if (!check_builtin_idle_enabled())
- goto prev_cpu;
-
- if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
- goto prev_cpu;
-
-#ifdef CONFIG_SMP
- return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
-#endif
-
-prev_cpu:
- *is_idle = false;
- return prev_cpu;
-}
-
-__bpf_kfunc_end_defs();
-
-BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
-BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
-BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
-
-static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
- .owner = THIS_MODULE,
- .set = &scx_kfunc_ids_select_cpu,
-};
-
static bool scx_dsq_insert_preamble(struct task_struct *p, u64 enq_flags)
{
if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
@@ -6972,8 +6622,12 @@ __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
* CPUs disagree, they use %ENQUEUE_RESTORE which is bypassed to
* the current local DSQ for running tasks and thus are not
* visible to the BPF scheduler.
+ *
+ * Also skip re-enqueueing tasks that can only run on this
+ * CPU, as they would just be re-added to the same local
+ * DSQ without any benefit.
*/
- if (p->migration_pending)
+ if (p->migration_pending || is_migration_disabled(p) || p->nr_cpus_allowed == 1)
continue;
dispatch_dequeue(rq, p);
@@ -7470,6 +7124,16 @@ __bpf_kfunc void scx_bpf_cpuperf_set(s32 cpu, u32 perf)
}
/**
+ * scx_bpf_nr_node_ids - Return the number of possible node IDs
+ *
+ * All valid node IDs in the system are smaller than the returned value.
+ */
+__bpf_kfunc u32 scx_bpf_nr_node_ids(void)
+{
+ return nr_node_ids;
+}
+
+/**
* scx_bpf_nr_cpu_ids - Return the number of possible CPU IDs
*
* All valid CPU IDs in the system are smaller than the returned value.
@@ -7510,142 +7174,6 @@ __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
}
/**
- * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
- * per-CPU cpumask.
- *
- * Returns NULL if idle tracking is not enabled, or running on a UP kernel.
- */
-__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
-{
- if (!check_builtin_idle_enabled())
- return cpu_none_mask;
-
-#ifdef CONFIG_SMP
- return idle_masks.cpu;
-#else
- return cpu_none_mask;
-#endif
-}
-
-/**
- * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
- * per-physical-core cpumask. Can be used to determine if an entire physical
- * core is free.
- *
- * Returns NULL if idle tracking is not enabled, or running on a UP kernel.
- */
-__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
-{
- if (!check_builtin_idle_enabled())
- return cpu_none_mask;
-
-#ifdef CONFIG_SMP
- if (sched_smt_active())
- return idle_masks.smt;
- else
- return idle_masks.cpu;
-#else
- return cpu_none_mask;
-#endif
-}
-
-/**
- * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
- * either the percpu, or SMT idle-tracking cpumask.
- * @idle_mask: &cpumask to use
- */
-__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
-{
- /*
- * Empty function body because we aren't actually acquiring or releasing
- * a reference to a global idle cpumask, which is read-only in the
- * caller and is never released. The acquire / release semantics here
- * are just used to make the cpumask a trusted pointer in the caller.
- */
-}
-
-/**
- * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
- * @cpu: cpu to test and clear idle for
- *
- * Returns %true if @cpu was idle and its idle state was successfully cleared.
- * %false otherwise.
- *
- * Unavailable if ops.update_idle() is implemented and
- * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
- */
-__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
-{
- if (!check_builtin_idle_enabled())
- return false;
-
- if (ops_cpu_valid(cpu, NULL))
- return test_and_clear_cpu_idle(cpu);
- else
- return false;
-}
-
-/**
- * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
- * @cpus_allowed: Allowed cpumask
- * @flags: %SCX_PICK_IDLE_CPU_* flags
- *
- * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
- * number on success. -%EBUSY if no matching cpu was found.
- *
- * Idle CPU tracking may race against CPU scheduling state transitions. For
- * example, this function may return -%EBUSY as CPUs are transitioning into the
- * idle state. If the caller then assumes that there will be dispatch events on
- * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
- * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
- * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
- * event in the near future.
- *
- * Unavailable if ops.update_idle() is implemented and
- * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
- */
-__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
- u64 flags)
-{
- if (!check_builtin_idle_enabled())
- return -EBUSY;
-
- return scx_pick_idle_cpu(cpus_allowed, flags);
-}
-
-/**
- * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
- * @cpus_allowed: Allowed cpumask
- * @flags: %SCX_PICK_IDLE_CPU_* flags
- *
- * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
- * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked idle cpu
- * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
- * empty.
- *
- * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
- * set, this function can't tell which CPUs are idle and will always pick any
- * CPU.
- */
-__bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
- u64 flags)
-{
- s32 cpu;
-
- if (static_branch_likely(&scx_builtin_idle_enabled)) {
- cpu = scx_pick_idle_cpu(cpus_allowed, flags);
- if (cpu >= 0)
- return cpu;
- }
-
- cpu = cpumask_any_distribute(cpus_allowed);
- if (cpu < nr_cpu_ids)
- return cpu;
- else
- return -EBUSY;
-}
-
-/**
* scx_bpf_task_running - Is task currently running?
* @p: task of interest
*/
@@ -7765,6 +7293,43 @@ __bpf_kfunc u64 scx_bpf_now(void)
return clock;
}
+/*
+ * scx_bpf_events - Copy the aggregated system-wide event counters to @events
+ * @events: output buffer from a BPF program
+ * @events__sz: @events len, must end in '__sz' for the verifier
+ */
+__bpf_kfunc void scx_bpf_events(struct scx_event_stats *events,
+ size_t events__sz)
+{
+ struct scx_event_stats e_sys, *e_cpu;
+ int cpu;
+
+ /* Aggregate per-CPU event counters into the system-wide counters. */
+ memset(&e_sys, 0, sizeof(e_sys));
+ for_each_possible_cpu(cpu) {
+ e_cpu = per_cpu_ptr(&event_stats_cpu, cpu);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_SELECT_CPU_FALLBACK);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_DISPATCH_KEEP_LAST);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SKIP_EXITING);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SKIP_MIGRATION_DISABLED);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_ENQ_SLICE_DFL);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_DURATION);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_DISPATCH);
+ scx_agg_event(&e_sys, e_cpu, SCX_EV_BYPASS_ACTIVATE);
+ }
+
+ /*
+ * We cannot entirely trust a BPF-provided size since a BPF program
+ * might be compiled against a different vmlinux.h, in which
+ * scx_event_stats could be larger (a newer vmlinux.h) or smaller
+ * (an older vmlinux.h). Hence, we use the smaller of the two
+ * sizes to avoid memory corruption.
+ */
+ events__sz = min(events__sz, sizeof(*events));
+ memcpy(events, &e_sys, events__sz);
+}
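A minimal BPF-side usage sketch (the program scaffolding and the vmlinux.h definition of struct scx_event_stats are assumed); because the copy size is clamped as described above, a program built against a slightly older or newer header still gets a safe copy:

	struct scx_event_stats events;

	scx_bpf_events(&events, sizeof(events));
	bpf_printk("slice_dfl=%lld select_cpu_fallback=%lld",
		   events.SCX_EV_ENQ_SLICE_DFL,
		   events.SCX_EV_SELECT_CPU_FALLBACK);

Note that the struct members are named after the event identifiers themselves, which is what makes the #kind stringification in scx_attr_event_show() and the scx_agg_event() accesses work.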
+
__bpf_kfunc_end_defs();
BTF_KFUNCS_START(scx_kfunc_ids_any)
@@ -7780,6 +7345,7 @@ BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur)
BTF_ID_FLAGS(func, scx_bpf_cpuperf_set)
+BTF_ID_FLAGS(func, scx_bpf_nr_node_ids)
BTF_ID_FLAGS(func, scx_bpf_nr_cpu_ids)
BTF_ID_FLAGS(func, scx_bpf_get_possible_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_online_cpumask, KF_ACQUIRE)
@@ -7797,6 +7363,7 @@ BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
#endif
BTF_ID_FLAGS(func, scx_bpf_now)
+BTF_ID_FLAGS(func, scx_bpf_events, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(scx_kfunc_ids_any)
static const struct btf_kfunc_id_set scx_kfunc_set_any = {
@@ -7820,8 +7387,6 @@ static int __init scx_init(void)
* check using scx_kf_allowed().
*/
if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
- &scx_kfunc_set_select_cpu)) ||
- (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
&scx_kfunc_set_enqueue_dispatch)) ||
(ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
&scx_kfunc_set_dispatch)) ||
@@ -7841,6 +7406,12 @@ static int __init scx_init(void)
return ret;
}
+ ret = scx_idle_init();
+ if (ret) {
+ pr_err("sched_ext: Failed to initialize idle tracking (%d)\n", ret);
+ return ret;
+ }
+
ret = register_bpf_struct_ops(&bpf_sched_ext_ops, sched_ext_ops);
if (ret) {
pr_err("sched_ext: Failed to register struct_ops (%d)\n", ret);
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index 1079b56b0f7a..1bda96b19a1b 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -8,6 +8,8 @@
*/
#ifdef CONFIG_SCHED_CLASS_EXT
+DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
+
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
@@ -34,6 +36,13 @@ static inline bool task_on_scx(const struct task_struct *p)
return scx_enabled() && p->sched_class == &ext_sched_class;
}
+static inline bool scx_allow_ttwu_queue(const struct task_struct *p)
+{
+ return !scx_enabled() ||
+ static_branch_likely(&scx_ops_allow_queued_wakeup) ||
+ p->sched_class != &ext_sched_class;
+}
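For context, a simplified sketch of the intended call site in the wakeup path (modeled on ttwu_queue_cond() in kernel/sched/core.c; the surrounding checks are elided):

	static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
	{
		/*
		 * Remote queued wakeups are skipped for sched_ext tasks
		 * unless the loaded scheduler opted in via
		 * SCX_OPS_ALLOW_QUEUED_WAKEUP.
		 */
		if (!scx_allow_ttwu_queue(p))
			return false;

		/* ... existing cache-affinity and idleness checks ... */
		return true;
	}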
+
#ifdef CONFIG_SCHED_CORE
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
bool in_fi);
@@ -52,6 +61,7 @@ static inline void scx_rq_activate(struct rq *rq) {}
static inline void scx_rq_deactivate(struct rq *rq) {}
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
+static inline bool scx_allow_ttwu_queue(const struct task_struct *p) { return true; }
static inline void init_sched_ext_class(void) {}
#endif /* CONFIG_SCHED_CLASS_EXT */
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
new file mode 100644
index 000000000000..52c36a70a3d0
--- /dev/null
+++ b/kernel/sched/ext_idle.c
@@ -0,0 +1,1171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Built-in idle CPU tracking policy.
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2024 Andrea Righi <arighi@nvidia.com>
+ */
+#include "ext_idle.h"
+
+/* Enable/disable built-in idle CPU selection policy */
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+
+/* Enable/disable per-node idle cpumasks */
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
+
+#ifdef CONFIG_SMP
+/* Enable/disable LLC aware optimizations */
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
+
+/* Enable/disable NUMA aware optimizations */
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
+
+/*
+ * cpumasks to track idle CPUs within each NUMA node.
+ *
+ * If SCX_OPS_BUILTIN_IDLE_PER_NODE is not enabled, a single global cpumask
+ * is used to track all the idle CPUs in the system.
+ */
+struct scx_idle_cpus {
+ cpumask_var_t cpu;
+ cpumask_var_t smt;
+};
+
+/*
+ * Global host-wide idle cpumasks (used when SCX_OPS_BUILTIN_IDLE_PER_NODE
+ * is not enabled).
+ */
+static struct scx_idle_cpus scx_idle_global_masks;
+
+/*
+ * Per-node idle cpumasks.
+ */
+static struct scx_idle_cpus **scx_idle_node_masks;
+
+/*
+ * Return the idle masks associated with a target @node.
+ *
+ * NUMA_NO_NODE identifies the global idle cpumask.
+ */
+static struct scx_idle_cpus *idle_cpumask(int node)
+{
+ return node == NUMA_NO_NODE ? &scx_idle_global_masks : scx_idle_node_masks[node];
+}
+
+/*
+ * Returns the NUMA node ID associated with @cpu, or NUMA_NO_NODE if
+ * per-node idle cpumasks are disabled.
+ */
+static int scx_cpu_node_if_enabled(int cpu)
+{
+ if (!static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node))
+ return NUMA_NO_NODE;
+
+ return cpu_to_node(cpu);
+}
+
+bool scx_idle_test_and_clear_cpu(int cpu)
+{
+ int node = scx_cpu_node_if_enabled(cpu);
+ struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
+
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * SMT mask should be cleared whether we can claim @cpu or not. The SMT
+ * cluster is not wholly idle either way. This also prevents
+ * scx_pick_idle_cpu() from getting caught in an infinite loop.
+ */
+ if (sched_smt_active()) {
+ const struct cpumask *smt = cpu_smt_mask(cpu);
+ struct cpumask *idle_smts = idle_cpumask(node)->smt;
+
+ /*
+ * If offline, @cpu is not its own sibling and
+ * scx_pick_idle_cpu() can get caught in an infinite loop as
+ * @cpu is never cleared from the idle SMT mask. Ensure that
+ * @cpu is eventually cleared.
+ *
+ * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
+ * reduce memory writes, which may help alleviate cache
+ * coherence pressure.
+ */
+ if (cpumask_intersects(smt, idle_smts))
+ cpumask_andnot(idle_smts, idle_smts, smt);
+ else if (cpumask_test_cpu(cpu, idle_smts))
+ __cpumask_clear_cpu(cpu, idle_smts);
+ }
+#endif
+
+ return cpumask_test_and_clear_cpu(cpu, idle_cpus);
+}
+
+/*
+ * Pick an idle CPU in a specific NUMA node.
+ */
+static s32 pick_idle_cpu_in_node(const struct cpumask *cpus_allowed, int node, u64 flags)
+{
+ int cpu;
+
+retry:
+ if (sched_smt_active()) {
+ cpu = cpumask_any_and_distribute(idle_cpumask(node)->smt, cpus_allowed);
+ if (cpu < nr_cpu_ids)
+ goto found;
+
+ if (flags & SCX_PICK_IDLE_CORE)
+ return -EBUSY;
+ }
+
+ cpu = cpumask_any_and_distribute(idle_cpumask(node)->cpu, cpus_allowed);
+ if (cpu >= nr_cpu_ids)
+ return -EBUSY;
+
+found:
+ if (scx_idle_test_and_clear_cpu(cpu))
+ return cpu;
+ else
+ goto retry;
+}
+
+/*
+ * Tracks nodes that have not yet been visited when searching for an idle
+ * CPU across all available nodes.
+ */
+static DEFINE_PER_CPU(nodemask_t, per_cpu_unvisited);
+
+/*
+ * Search for an idle CPU across all nodes, excluding @node.
+ */
+static s32 pick_idle_cpu_from_online_nodes(const struct cpumask *cpus_allowed, int node, u64 flags)
+{
+ nodemask_t *unvisited;
+ s32 cpu = -EBUSY;
+
+ preempt_disable();
+ unvisited = this_cpu_ptr(&per_cpu_unvisited);
+
+ /*
+ * Restrict the search to the online nodes (excluding the current
+ * node that has been visited already).
+ */
+ nodes_copy(*unvisited, node_states[N_ONLINE]);
+ node_clear(node, *unvisited);
+
+ /*
+ * Traverse all nodes in order of increasing distance, starting
+ * from @node.
+ *
+ * This loop is O(N^2), with N being the number of NUMA nodes,
+ * which might be quite expensive on large NUMA systems. However,
+ * this complexity comes into play only when a scheduler enables
+ * SCX_OPS_BUILTIN_IDLE_PER_NODE and requests an idle CPU without
+ * specifying a target NUMA node, so it shouldn't be a bottleneck
+ * in most cases.
+ *
+ * As a future optimization we may want to cache the list of nodes
+ * in a per-node array, instead of actually traversing them every
+ * time.
+ */
+ for_each_node_numadist(node, *unvisited) {
+ cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
+ if (cpu >= 0)
+ break;
+ }
+ preempt_enable();
+
+ return cpu;
+}
+
+/*
+ * Find an idle CPU in the system, starting from @node.
+ */
+s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
+{
+ s32 cpu;
+
+ /*
+ * Always search in the starting node first (this is an
+ * optimization that can save some cycles even when the search is
+ * not limited to a single node).
+ */
+ cpu = pick_idle_cpu_in_node(cpus_allowed, node, flags);
+ if (cpu >= 0)
+ return cpu;
+
+ /*
+ * Stop the search if we are using only a single global cpumask
+ * (NUMA_NO_NODE) or if the search is restricted to the first node
+ * only.
+ */
+ if (node == NUMA_NO_NODE || flags & SCX_PICK_IDLE_IN_NODE)
+ return -EBUSY;
+
+ /*
+ * Extend the search to the other online nodes.
+ */
+ return pick_idle_cpu_from_online_nodes(cpus_allowed, node, flags);
+}
+
+/*
+ * Return the number of CPUs in the same LLC domain as @cpu (or zero if the
+ * LLC domain is not defined).
+ */
+static unsigned int llc_weight(s32 cpu)
+{
+ struct sched_domain *sd;
+
+ sd = rcu_dereference(per_cpu(sd_llc, cpu));
+ if (!sd)
+ return 0;
+
+ return sd->span_weight;
+}
+
+/*
+ * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
+ * domain is not defined).
+ */
+static struct cpumask *llc_span(s32 cpu)
+{
+ struct sched_domain *sd;
+
+ sd = rcu_dereference(per_cpu(sd_llc, cpu));
+ if (!sd)
+ return NULL;
+
+ return sched_domain_span(sd);
+}
+
+/*
+ * Return the number of CPUs in the same NUMA domain as @cpu (or zero if the
+ * NUMA domain is not defined).
+ */
+static unsigned int numa_weight(s32 cpu)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg;
+
+ sd = rcu_dereference(per_cpu(sd_numa, cpu));
+ if (!sd)
+ return 0;
+ sg = sd->groups;
+ if (!sg)
+ return 0;
+
+ return sg->group_weight;
+}
+
+/*
+ * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA
+ * domain is not defined).
+ */
+static struct cpumask *numa_span(s32 cpu)
+{
+ struct sched_domain *sd;
+ struct sched_group *sg;
+
+ sd = rcu_dereference(per_cpu(sd_numa, cpu));
+ if (!sd)
+ return NULL;
+ sg = sd->groups;
+ if (!sg)
+ return NULL;
+
+ return sched_group_span(sg);
+}
+
+/*
+ * Return true if the LLC domains do not perfectly overlap with the NUMA
+ * domains, false otherwise.
+ */
+static bool llc_numa_mismatch(void)
+{
+ int cpu;
+
+ /*
+ * We need to scan all online CPUs to verify whether their scheduling
+ * domains overlap.
+ *
+ * While it is rare to encounter architectures with asymmetric NUMA
+ * topologies, CPU hotplugging or virtualized environments can result
+ * in asymmetric configurations.
+ *
+ * For example:
+ *
+ * NUMA 0:
+ * - LLC 0: cpu0..cpu7
+ * - LLC 1: cpu8..cpu15 [offline]
+ *
+ * NUMA 1:
+ * - LLC 0: cpu16..cpu23
+ * - LLC 1: cpu24..cpu31
+ *
+ * In this case, if we only check the first online CPU (cpu0), we might
+ * incorrectly assume that the LLC and NUMA domains are fully
+ * overlapping, which is incorrect (as NUMA 1 has two distinct LLC
+ * domains).
+ */
+ for_each_online_cpu(cpu)
+ if (llc_weight(cpu) != numa_weight(cpu))
+ return true;
+
+ return false;
+}
+
+/*
+ * Initialize topology-aware scheduling.
+ *
+ * Detect if the system has multiple LLC or multiple NUMA domains and enable
+ * cache-aware / NUMA-aware scheduling optimizations in the default CPU idle
+ * selection policy.
+ *
+ * Assumption: the kernel's internal topology representation assumes that each
+ * CPU belongs to a single LLC domain, and that each LLC domain is entirely
+ * contained within a single NUMA node.
+ */
+void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops)
+{
+ bool enable_llc = false, enable_numa = false;
+ unsigned int nr_cpus;
+ s32 cpu = cpumask_first(cpu_online_mask);
+
+ /*
+ * Enable LLC domain optimization only when there are multiple LLC
+ * domains among the online CPUs. If all online CPUs are part of a
+ * single LLC domain, the idle CPU selection logic can choose any
+ * online CPU without bias.
+ *
+ * Note that it is sufficient to check the LLC domain of the first
+ * online CPU to determine whether a single LLC domain includes all
+ * CPUs.
+ */
+ rcu_read_lock();
+ nr_cpus = llc_weight(cpu);
+ if (nr_cpus > 0) {
+ if (nr_cpus < num_online_cpus())
+ enable_llc = true;
+ pr_debug("sched_ext: LLC=%*pb weight=%u\n",
+ cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
+ }
+
+ /*
+ * Enable NUMA optimization only when there are multiple NUMA domains
+ * among the online CPUs and the NUMA domains don't perfectly overlap
+ * with the LLC domains.
+ *
+ * If all CPUs belong to the same NUMA node and the same LLC domain,
+ * enabling both NUMA and LLC optimizations is unnecessary, as checking
+ * for an idle CPU in the same domain twice is redundant.
+ *
+ * If SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, ignore the NUMA
+ * optimization, as we would naturally select idle CPUs within
+ * specific NUMA nodes by querying the corresponding per-node cpumask.
+ */
+ if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
+ nr_cpus = numa_weight(cpu);
+ if (nr_cpus > 0) {
+ if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
+ enable_numa = true;
+ pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
+ cpumask_pr_args(numa_span(cpu)), nr_cpus);
+ }
+ }
+ rcu_read_unlock();
+
+ pr_debug("sched_ext: LLC idle selection %s\n",
+ str_enabled_disabled(enable_llc));
+ pr_debug("sched_ext: NUMA idle selection %s\n",
+ str_enabled_disabled(enable_numa));
+
+ if (enable_llc)
+ static_branch_enable_cpuslocked(&scx_selcpu_topo_llc);
+ else
+ static_branch_disable_cpuslocked(&scx_selcpu_topo_llc);
+ if (enable_numa)
+ static_branch_enable_cpuslocked(&scx_selcpu_topo_numa);
+ else
+ static_branch_disable_cpuslocked(&scx_selcpu_topo_numa);
+}
+
+/*
+ * Built-in CPU idle selection policy:
+ *
+ * 1. Prioritize full-idle cores:
+ * - always prioritize CPUs from fully idle cores (both logical CPUs are
+ * idle) to avoid interference caused by SMT.
+ *
+ * 2. Reuse the same CPU:
+ * - prefer the last used CPU to take advantage of cached data (L1, L2) and
+ * branch prediction optimizations.
+ *
+ * 3. Pick a CPU within the same LLC (Last-Level Cache):
+ * - if the above conditions aren't met, pick a CPU that shares the same LLC
+ * to maintain cache locality.
+ *
+ * 4. Pick a CPU within the same NUMA node, if enabled:
+ * - choose a CPU from the same NUMA node to reduce memory access latency.
+ *
+ * 5. Pick any idle CPU usable by the task.
+ *
+ * Step 3 and 4 are performed only if the system has, respectively,
+ * multiple LLCs / multiple NUMA nodes (see scx_selcpu_topo_llc and
+ * scx_selcpu_topo_numa) and they don't contain the same subset of CPUs.
+ *
+ * If %SCX_OPS_BUILTIN_IDLE_PER_NODE is enabled, the search will always
+ * begin in @prev_cpu's node and proceed to other nodes in order of
+ * increasing distance.
+ *
+ * Return the picked CPU if idle, or a negative value otherwise.
+ *
+ * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
+ * we never call ops.select_cpu() for them, see select_task_rq().
+ */
+s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags)
+{
+ const struct cpumask *llc_cpus = NULL;
+ const struct cpumask *numa_cpus = NULL;
+ int node = scx_cpu_node_if_enabled(prev_cpu);
+ s32 cpu;
+
+ /*
+ * The RCU read-side critical section protects llc_cpus and
+ * numa_cpus, which are derived from RCU-protected sched_domain data.
+ */
+ rcu_read_lock();
+
+ /*
+ * Determine the scheduling domain only if the task is allowed to run
+ * on all CPUs.
+ *
+ * This is done primarily for efficiency, as it avoids the overhead of
+ * updating a cpumask every time we need to select an idle CPU (which
+ * can be costly in large SMP systems), but it also aligns logically:
+ * if a task's scheduling domain is restricted by user-space (through
+ * CPU affinity), the task will simply use the flat scheduling domain
+ * defined by user-space.
+ */
+ if (p->nr_cpus_allowed >= num_possible_cpus()) {
+ if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
+ numa_cpus = numa_span(prev_cpu);
+
+ if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
+ llc_cpus = llc_span(prev_cpu);
+ }
+
+ /*
+ * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
+ */
+ if (wake_flags & SCX_WAKE_SYNC) {
+ int waker_node;
+
+ /*
+ * If the waker's CPU is cache affine and prev_cpu is idle,
+ * then avoid a migration.
+ */
+ cpu = smp_processor_id();
+ if (cpus_share_cache(cpu, prev_cpu) &&
+ scx_idle_test_and_clear_cpu(prev_cpu)) {
+ cpu = prev_cpu;
+ goto out_unlock;
+ }
+
+ /*
+ * If the waker's local DSQ is empty, and the system is under
+ * utilized, try to wake up @p to the local DSQ of the waker.
+ *
+ * Checking only for an empty local DSQ is insufficient as it
+ * could give the wakee an unfair advantage when the system is
+ * oversaturated.
+ *
+ * Checking only for the presence of idle CPUs is also
+ * insufficient as the local DSQ of the waker could have tasks
+ * piled up on it even if there is an idle core elsewhere on
+ * the system.
+ */
+ waker_node = cpu_to_node(cpu);
+ if (!(current->flags & PF_EXITING) &&
+ cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
+ (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
+ !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
+ if (cpumask_test_cpu(cpu, p->cpus_ptr))
+ goto out_unlock;
+ }
+ }
+
+ /*
+ * If CPU has SMT, any wholly idle CPU is likely a better pick than
+ * partially idle @prev_cpu.
+ */
+ if (sched_smt_active()) {
+ /*
+ * Keep using @prev_cpu if it's part of a fully idle core.
+ */
+ if (cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
+ scx_idle_test_and_clear_cpu(prev_cpu)) {
+ cpu = prev_cpu;
+ goto out_unlock;
+ }
+
+ /*
+ * Search for any fully idle core in the same LLC domain.
+ */
+ if (llc_cpus) {
+ cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
+ if (cpu >= 0)
+ goto out_unlock;
+ }
+
+ /*
+ * Search for any fully idle core in the same NUMA node.
+ */
+ if (numa_cpus) {
+ cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
+ if (cpu >= 0)
+ goto out_unlock;
+ }
+
+ /*
+ * Search for any full-idle core usable by the task.
+ *
+ * If the node-aware idle CPU selection policy is enabled
+ * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always
+ * begin in prev_cpu's node and proceed to other nodes in
+ * order of increasing distance.
+ */
+ cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE);
+ if (cpu >= 0)
+ goto out_unlock;
+
+ /*
+ * Give up if we're strictly looking for a full-idle SMT
+ * core.
+ */
+ if (flags & SCX_PICK_IDLE_CORE) {
+ cpu = prev_cpu;
+ goto out_unlock;
+ }
+ }
+
+ /*
+ * Use @prev_cpu if it's idle.
+ */
+ if (scx_idle_test_and_clear_cpu(prev_cpu)) {
+ cpu = prev_cpu;
+ goto out_unlock;
+ }
+
+ /*
+ * Search for any idle CPU in the same LLC domain.
+ */
+ if (llc_cpus) {
+ cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
+ if (cpu >= 0)
+ goto out_unlock;
+ }
+
+ /*
+ * Search for any idle CPU in the same NUMA node.
+ */
+ if (numa_cpus) {
+ cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
+ if (cpu >= 0)
+ goto out_unlock;
+ }
+
+ /*
+ * Search for any idle CPU usable by the task.
+ *
+ * If the node-aware idle CPU selection policy is enabled
+ * (%SCX_OPS_BUILTIN_IDLE_PER_NODE), the search will always begin
+ * in prev_cpu's node and proceed to other nodes in order of
+ * increasing distance.
+ */
+ cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);
+ if (cpu >= 0)
+ goto out_unlock;
+
+out_unlock:
+ rcu_read_unlock();
+
+ return cpu;
+}
+
+/*
+ * Initialize global and per-node idle cpumasks.
+ */
+void scx_idle_init_masks(void)
+{
+ int node;
+
+ /* Allocate global idle cpumasks */
+ BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.cpu, GFP_KERNEL));
+ BUG_ON(!alloc_cpumask_var(&scx_idle_global_masks.smt, GFP_KERNEL));
+
+ /* Allocate per-node idle cpumasks */
+ scx_idle_node_masks = kcalloc(num_possible_nodes(),
+ sizeof(*scx_idle_node_masks), GFP_KERNEL);
+ BUG_ON(!scx_idle_node_masks);
+
+ for_each_node(node) {
+ scx_idle_node_masks[node] = kzalloc_node(sizeof(**scx_idle_node_masks),
+ GFP_KERNEL, node);
+ BUG_ON(!scx_idle_node_masks[node]);
+
+ BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->cpu, GFP_KERNEL, node));
+ BUG_ON(!alloc_cpumask_var_node(&scx_idle_node_masks[node]->smt, GFP_KERNEL, node));
+ }
+}
+
+static void update_builtin_idle(int cpu, bool idle)
+{
+ int node = scx_cpu_node_if_enabled(cpu);
+ struct cpumask *idle_cpus = idle_cpumask(node)->cpu;
+
+ assign_cpu(cpu, idle_cpus, idle);
+
+#ifdef CONFIG_SCHED_SMT
+ if (sched_smt_active()) {
+ const struct cpumask *smt = cpu_smt_mask(cpu);
+ struct cpumask *idle_smts = idle_cpumask(node)->smt;
+
+ if (idle) {
+ /*
+ * idle_smts handling is racy but that's fine as it's
+ * only an optimization and self-correcting.
+ */
+ if (!cpumask_subset(smt, idle_cpus))
+ return;
+ cpumask_or(idle_smts, idle_smts, smt);
+ } else {
+ cpumask_andnot(idle_smts, idle_smts, smt);
+ }
+ }
+#endif
+}
+
+/*
+ * Update the idle state of a CPU to @idle.
+ *
+ * If @do_notify is true, ops.update_idle() is invoked to notify the scx
+ * scheduler of an actual idle state transition (idle to busy or vice
+ * versa). If @do_notify is false, only the idle state in the idle masks is
+ * refreshed without invoking ops.update_idle().
+ *
+ * This distinction is necessary, because an idle CPU can be "reserved" and
+ * awakened via scx_bpf_pick_idle_cpu() + scx_bpf_kick_cpu(), marking it as
+ * busy even if no tasks are dispatched. In this case, the CPU may return
+ * to idle without a true state transition. Refreshing the idle masks
+ * without invoking ops.update_idle() ensures accurate idle state tracking
+ * while avoiding unnecessary updates and maintaining balanced state
+ * transitions.
+ */
+void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
+{
+ int cpu = cpu_of(rq);
+
+ lockdep_assert_rq_held(rq);
+
+ /*
+ * Trigger ops.update_idle() only when transitioning from a task to
+ * the idle thread and vice versa.
+ *
+ * Idle transitions are indicated by do_notify being set to true,
+ * managed by put_prev_task_idle()/set_next_task_idle().
+ */
+ if (SCX_HAS_OP(update_idle) && do_notify && !scx_rq_bypassing(rq))
+ SCX_CALL_OP(SCX_KF_REST, update_idle, cpu_of(rq), idle);
+
+ /*
+ * Update the idle masks:
+ * - for real idle transitions (do_notify == true)
+ * - for idle-to-idle transitions (indicated by the previous task
+ * being the idle thread, managed by pick_task_idle())
+ *
+ * Skip updating idle masks if the previous task is not the idle
+ * thread, since set_next_task_idle() has already handled it when
+ * transitioning from a task to the idle thread (calling this
+ * function with do_notify == true).
+ *
+ * In this way we can avoid updating the idle masks twice,
+ * unnecessarily.
+ */
+ if (static_branch_likely(&scx_builtin_idle_enabled))
+ if (do_notify || is_idle_task(rq->curr))
+ update_builtin_idle(cpu, idle);
+}
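The "reserved" case described above arises from a BPF-side pattern roughly like the following sketch (the kfuncs and the SCX_KICK_IDLE flag come from the sched_ext BPF API):

	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);

	if (cpu >= 0) {
		/*
		 * @cpu is now marked busy in the idle masks even though
		 * nothing has been dispatched to it yet; kick it so that it
		 * either runs a dispatched task or returns to idle (an
		 * idle-to-idle transition, handled with do_notify == false).
		 */
		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
	}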
+
+static void reset_idle_masks(struct sched_ext_ops *ops)
+{
+ int node;
+
+ /*
+ * Consider all online cpus idle. Should converge to the actual state
+ * quickly.
+ */
+ if (!(ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)) {
+ cpumask_copy(idle_cpumask(NUMA_NO_NODE)->cpu, cpu_online_mask);
+ cpumask_copy(idle_cpumask(NUMA_NO_NODE)->smt, cpu_online_mask);
+ return;
+ }
+
+ for_each_node(node) {
+ const struct cpumask *node_mask = cpumask_of_node(node);
+
+ cpumask_and(idle_cpumask(node)->cpu, cpu_online_mask, node_mask);
+ cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
+ }
+}
+#endif /* CONFIG_SMP */
+
+void scx_idle_enable(struct sched_ext_ops *ops)
+{
+ if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))
+ static_branch_enable(&scx_builtin_idle_enabled);
+ else
+ static_branch_disable(&scx_builtin_idle_enabled);
+
+ if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
+ static_branch_enable(&scx_builtin_idle_per_node);
+ else
+ static_branch_disable(&scx_builtin_idle_per_node);
+
+#ifdef CONFIG_SMP
+ reset_idle_masks(ops);
+#endif
+}
+
+void scx_idle_disable(void)
+{
+ static_branch_disable(&scx_builtin_idle_enabled);
+ static_branch_disable(&scx_builtin_idle_per_node);
+}
+
+/********************************************************************************
+ * Helpers that can be called from the BPF scheduler.
+ */
+
+static int validate_node(int node)
+{
+ if (!static_branch_likely(&scx_builtin_idle_per_node)) {
+ scx_ops_error("per-node idle tracking is disabled");
+ return -EOPNOTSUPP;
+ }
+
+ /* Return no entry for NUMA_NO_NODE (not a critical scx error) */
+ if (node == NUMA_NO_NODE)
+ return -ENOENT;
+
+ /* Make sure node is in a valid range */
+ if (node < 0 || node >= nr_node_ids) {
+ scx_ops_error("invalid node %d", node);
+ return -EINVAL;
+ }
+
+ /* Make sure the node is part of the set of possible nodes */
+ if (!node_possible(node)) {
+ scx_ops_error("unavailable node %d", node);
+ return -EINVAL;
+ }
+
+ return node;
+}
+
+__bpf_kfunc_start_defs();
+
+static bool check_builtin_idle_enabled(void)
+{
+ if (static_branch_likely(&scx_builtin_idle_enabled))
+ return true;
+
+ scx_ops_error("built-in idle tracking is disabled");
+ return false;
+}
+
+/**
+ * scx_bpf_cpu_node - Return the NUMA node the given @cpu belongs to, or
+ * trigger an error if @cpu is invalid
+ * @cpu: target CPU
+ */
+__bpf_kfunc int scx_bpf_cpu_node(s32 cpu)
+{
+#ifdef CONFIG_NUMA
+ if (!ops_cpu_valid(cpu, NULL))
+ return NUMA_NO_NODE;
+
+ return cpu_to_node(cpu);
+#else
+ return 0;
+#endif
+}
+
+/**
+ * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
+ * @p: task_struct to select a CPU for
+ * @prev_cpu: CPU @p was on previously
+ * @wake_flags: %SCX_WAKE_* flags
+ * @is_idle: out parameter indicating whether the returned CPU is idle
+ *
+ * Can only be called from ops.select_cpu() if the built-in CPU selection is
+ * enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
+ * @p, @prev_cpu and @wake_flags match ops.select_cpu().
+ *
+ * Returns the picked CPU with *@is_idle indicating whether the picked CPU is
+ * currently idle and thus a good candidate for direct dispatching.
+ */
+__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
+ u64 wake_flags, bool *is_idle)
+{
+#ifdef CONFIG_SMP
+ s32 cpu;
+#endif
+ if (!ops_cpu_valid(prev_cpu, NULL))
+ goto prev_cpu;
+
+ if (!check_builtin_idle_enabled())
+ goto prev_cpu;
+
+ if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
+ goto prev_cpu;
+
+#ifdef CONFIG_SMP
+ cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+ if (cpu >= 0) {
+ *is_idle = true;
+ return cpu;
+ }
+#endif
+
+prev_cpu:
+ *is_idle = false;
+ return prev_cpu;
+}
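
A minimal sketch of how a BPF scheduler might call this kfunc from its
ops.select_cpu() callback. Only scx_bpf_select_cpu_dfl() is taken from this
patch; BPF_STRUCT_OPS, scx_bpf_dsq_insert(), SCX_DSQ_LOCAL and SCX_SLICE_DFL
are assumed from the wider sched_ext BPF API:

	s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
			   s32 prev_cpu, u64 wake_flags)
	{
		bool is_idle;
		s32 cpu;

		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
		if (is_idle)
			/* Idle CPU claimed: dispatch @p straight to its local DSQ. */
			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

		return cpu;
	}
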
+
+/**
+ * scx_bpf_get_idle_cpumask_node - Get a referenced kptr to the
+ * idle-tracking per-CPU cpumask of a target NUMA node.
+ * @node: target NUMA node
+ *
+ * Returns an empty cpumask if idle tracking is not enabled, if @node is
+ * not valid, or if running on a UP kernel. In these cases the actual error
+ * will be reported to the BPF scheduler via scx_ops_error().
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
+{
+ node = validate_node(node);
+ if (node < 0)
+ return cpu_none_mask;
+
+#ifdef CONFIG_SMP
+ return idle_cpumask(node)->cpu;
+#else
+ return cpu_none_mask;
+#endif
+}
+
+/**
+ * scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
+ * per-CPU cpumask.
+ *
+ * Returns an empty mask if idle tracking is not enabled or if running on
+ * a UP kernel.
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
+{
+ if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
+ scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
+ return cpu_none_mask;
+ }
+
+ if (!check_builtin_idle_enabled())
+ return cpu_none_mask;
+
+#ifdef CONFIG_SMP
+ return idle_cpumask(NUMA_NO_NODE)->cpu;
+#else
+ return cpu_none_mask;
+#endif
+}
+
+/**
+ * scx_bpf_get_idle_smtmask_node - Get a referenced kptr to the
+ * idle-tracking, per-physical-core cpumask of a target NUMA node. Can be
+ * used to determine if an entire physical core is free.
+ * @node: target NUMA node
+ *
+ * Returns an empty cpumask if idle tracking is not enabled, if @node is
+ * not valid, or if running on a UP kernel. In these cases the actual error
+ * will be reported to the BPF scheduler via scx_ops_error().
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
+{
+ node = validate_node(node);
+ if (node < 0)
+ return cpu_none_mask;
+
+#ifdef CONFIG_SMP
+ if (sched_smt_active())
+ return idle_cpumask(node)->smt;
+ else
+ return idle_cpumask(node)->cpu;
+#else
+ return cpu_none_mask;
+#endif
+}
+
+/**
+ * scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
+ * per-physical-core cpumask. Can be used to determine if an entire physical
+ * core is free.
+ *
+ * Returns an empty mask if idle tracking is not enabled or if running on
+ * a UP kernel.
+ */
+__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
+{
+ if (static_branch_unlikely(&scx_builtin_idle_per_node)) {
+ scx_ops_error("SCX_OPS_BUILTIN_IDLE_PER_NODE enabled");
+ return cpu_none_mask;
+ }
+
+ if (!check_builtin_idle_enabled())
+ return cpu_none_mask;
+
+#ifdef CONFIG_SMP
+ if (sched_smt_active())
+ return idle_cpumask(NUMA_NO_NODE)->smt;
+ else
+ return idle_cpumask(NUMA_NO_NODE)->cpu;
+#else
+ return cpu_none_mask;
+#endif
+}
+
+/**
+ * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
+ * either the per-CPU or SMT idle-tracking cpumask.
+ * @idle_mask: &cpumask to use
+ */
+__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
+{
+ /*
+ * Empty function body because we aren't actually acquiring or releasing
+ * a reference to a global idle cpumask, which is read-only in the
+ * caller and is never released. The acquire / release semantics here
+ * are just used to make the cpumask a trusted pointer in the caller.
+ */
+}
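
The acquire/release pairing that these kfuncs enforce looks like this on the
BPF side (a sketch; bpf_cpumask_test_cpu() is assumed from the generic BPF
cpumask kfuncs, and "cpu" stands for any candidate CPU):

	const struct cpumask *idle;
	bool was_idle = false;

	idle = scx_bpf_get_idle_cpumask();
	if (bpf_cpumask_test_cpu(cpu, idle))
		was_idle = true;	/* idle at the time of the check */
	/* Required: omitting the release makes the verifier reject the prog. */
	scx_bpf_put_idle_cpumask(idle);
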
+
+/**
+ * scx_bpf_test_and_clear_cpu_idle - Test and clear @cpu's idle state
+ * @cpu: cpu to test and clear idle for
+ *
+ * Returns %true if @cpu was idle and its idle state was successfully cleared.
+ * %false otherwise.
+ *
+ * Unavailable if ops.update_idle() is implemented and
+ * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
+ */
+__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
+{
+ if (!check_builtin_idle_enabled())
+ return false;
+
+ if (ops_cpu_valid(cpu, NULL))
+ return scx_idle_test_and_clear_cpu(cpu);
+ else
+ return false;
+}
+
+/**
+ * scx_bpf_pick_idle_cpu_node - Pick and claim an idle cpu from @node
+ * @cpus_allowed: Allowed cpumask
+ * @node: target NUMA node
+ * @flags: %SCX_PICK_IDLE_* flags
+ *
+ * Pick and claim an idle cpu in @cpus_allowed from the NUMA node @node.
+ *
+ * Returns the picked idle cpu number on success, or -%EBUSY if no matching
+ * cpu was found.
+ *
+ * The search starts from @node and proceeds to other online NUMA nodes in
+ * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
+ * in which case the search is limited to the target @node).
+ *
+ * Always returns an error if ops.update_idle() is implemented and
+ * %SCX_OPS_KEEP_BUILTIN_IDLE is not set, or if
+ * %SCX_OPS_BUILTIN_IDLE_PER_NODE is not set.
+ */
+__bpf_kfunc s32 scx_bpf_pick_idle_cpu_node(const struct cpumask *cpus_allowed,
+ int node, u64 flags)
+{
+ node = validate_node(node);
+ if (node < 0)
+ return node;
+
+ return scx_pick_idle_cpu(cpus_allowed, node, flags);
+}
+
+/**
+ * scx_bpf_pick_idle_cpu - Pick and claim an idle cpu
+ * @cpus_allowed: Allowed cpumask
+ * @flags: %SCX_PICK_IDLE_* flags
+ *
+ * Pick and claim an idle cpu in @cpus_allowed. Returns the picked idle cpu
+ * number on success. -%EBUSY if no matching cpu was found.
+ *
+ * Idle CPU tracking may race against CPU scheduling state transitions. For
+ * example, this function may return -%EBUSY as CPUs are transitioning into the
+ * idle state. If the caller then assumes that there will be dispatch events on
+ * the CPUs as they were all busy, the scheduler may end up stalling with CPUs
+ * idling while there are pending tasks. Use scx_bpf_pick_any_cpu() and
+ * scx_bpf_kick_cpu() to guarantee that there will be at least one dispatch
+ * event in the near future.
+ *
+ * Unavailable if ops.update_idle() is implemented and
+ * %SCX_OPS_KEEP_BUILTIN_IDLE is not set.
+ *
+ * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
+ * scx_bpf_pick_idle_cpu_node() instead.
+ */
+__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
+ u64 flags)
+{
+ if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
+ scx_ops_error("per-node idle tracking is enabled");
+ return -EBUSY;
+ }
+
+ if (!check_builtin_idle_enabled())
+ return -EBUSY;
+
+ return scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
+}
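
A sketch of the fallback pattern recommended above, so that a racy -EBUSY
cannot leave the scheduler stalled (scx_bpf_kick_cpu() is assumed from the
rest of the sched_ext kfunc set; scx_bpf_pick_any_cpu() is defined below):

	cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
	if (cpu < 0) {
		/* Everything looked busy: settle for any allowed CPU ... */
		cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0);
		if (cpu >= 0)
			/* ... and kick it to guarantee a dispatch event. */
			scx_bpf_kick_cpu(cpu, 0);
	}
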
+
+/**
+ * scx_bpf_pick_any_cpu_node - Pick and claim an idle cpu if available
+ * or pick any CPU from @node
+ * @cpus_allowed: Allowed cpumask
+ * @node: target NUMA node
+ * @flags: %SCX_PICK_IDLE_* flags
+ *
+ * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
+ * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
+ * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
+ * empty.
+ *
+ * The search starts from @node and proceeds to other online NUMA nodes in
+ * order of increasing distance (unless %SCX_PICK_IDLE_IN_NODE is specified,
+ * in which case the search is limited to the target @node, regardless of
+ * the CPU idle state).
+ *
+ * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
+ * set, this function can't tell which CPUs are idle and will always pick any
+ * CPU.
+ */
+__bpf_kfunc s32 scx_bpf_pick_any_cpu_node(const struct cpumask *cpus_allowed,
+ int node, u64 flags)
+{
+ s32 cpu;
+
+ node = validate_node(node);
+ if (node < 0)
+ return node;
+
+ cpu = scx_pick_idle_cpu(cpus_allowed, node, flags);
+ if (cpu >= 0)
+ return cpu;
+
+ if (flags & SCX_PICK_IDLE_IN_NODE)
+ cpu = cpumask_any_and_distribute(cpumask_of_node(node), cpus_allowed);
+ else
+ cpu = cpumask_any_distribute(cpus_allowed);
+ if (cpu < nr_cpu_ids)
+ return cpu;
+ else
+ return -EBUSY;
+}
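
A node-aware sketch pairing scx_bpf_cpu_node() (above) with this kfunc to
keep a wakeup on the previous CPU's NUMA node even when nothing there is
idle:

	int node = scx_bpf_cpu_node(prev_cpu);

	/* Limit the search to @node, idle or not. */
	cpu = scx_bpf_pick_any_cpu_node(p->cpus_ptr, node,
					SCX_PICK_IDLE_IN_NODE);
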
+
+/**
+ * scx_bpf_pick_any_cpu - Pick and claim an idle cpu if available or pick any CPU
+ * @cpus_allowed: Allowed cpumask
+ * @flags: %SCX_PICK_IDLE_* flags
+ *
+ * Pick and claim an idle cpu in @cpus_allowed. If none is available, pick any
+ * CPU in @cpus_allowed. Guaranteed to succeed and returns the picked CPU
+ * number if @cpus_allowed is not empty. -%EBUSY is returned if @cpus_allowed is
+ * empty.
+ *
+ * If ops.update_idle() is implemented and %SCX_OPS_KEEP_BUILTIN_IDLE is not
+ * set, this function can't tell which CPUs are idle and will always pick any
+ * CPU.
+ *
+ * Always returns an error if %SCX_OPS_BUILTIN_IDLE_PER_NODE is set, use
+ * scx_bpf_pick_any_cpu_node() instead.
+ */
+__bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
+ u64 flags)
+{
+ s32 cpu;
+
+ if (static_branch_maybe(CONFIG_NUMA, &scx_builtin_idle_per_node)) {
+ scx_ops_error("per-node idle tracking is enabled");
+ return -EBUSY;
+ }
+
+ if (static_branch_likely(&scx_builtin_idle_enabled)) {
+ cpu = scx_pick_idle_cpu(cpus_allowed, NUMA_NO_NODE, flags);
+ if (cpu >= 0)
+ return cpu;
+ }
+
+ cpu = cpumask_any_distribute(cpus_allowed);
+ if (cpu < nr_cpu_ids)
+ return cpu;
+ else
+ return -EBUSY;
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(scx_kfunc_ids_idle)
+BTF_ID_FLAGS(func, scx_bpf_cpu_node)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask_node, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask_node, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
+BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
+BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
+BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_idle)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_idle,
+};
+
+BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
+BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
+BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
+
+static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
+ .owner = THIS_MODULE,
+ .set = &scx_kfunc_ids_select_cpu,
+};
+
+int scx_idle_init(void)
+{
+ int ret;
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_select_cpu) ||
+ register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_idle) ||
+ register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &scx_kfunc_set_idle) ||
+ register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &scx_kfunc_set_idle);
+
+ return ret;
+}
diff --git a/kernel/sched/ext_idle.h b/kernel/sched/ext_idle.h
new file mode 100644
index 000000000000..511cc2221f7a
--- /dev/null
+++ b/kernel/sched/ext_idle.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2024 Andrea Righi <arighi@nvidia.com>
+ */
+#ifndef _KERNEL_SCHED_EXT_IDLE_H
+#define _KERNEL_SCHED_EXT_IDLE_H
+
+struct sched_ext_ops;
+
+#ifdef CONFIG_SMP
+void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops);
+void scx_idle_init_masks(void);
+bool scx_idle_test_and_clear_cpu(int cpu);
+s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags);
+#else /* !CONFIG_SMP */
+static inline void scx_idle_update_selcpu_topology(struct sched_ext_ops *ops) {}
+static inline void scx_idle_init_masks(void) {}
+static inline bool scx_idle_test_and_clear_cpu(int cpu) { return false; }
+static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
+{
+ return -EBUSY;
+}
+#endif /* CONFIG_SMP */
+
+s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags);
+void scx_idle_enable(struct sched_ext_ops *ops);
+void scx_idle_disable(void);
+int scx_idle_init(void);
+
+#endif /* _KERNEL_SCHED_EXT_IDLE_H */
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 7bbb408431eb..41aa761c7738 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -29,13 +29,11 @@
#include <linux/syscalls.h>
#include <linux/sysctl.h>
+#include <asm/syscall.h>
+
/* Not exposed in headers: strictly internal use only. */
#define SECCOMP_MODE_DEAD (SECCOMP_MODE_FILTER + 1)
-#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-#include <asm/syscall.h>
-#endif
-
#ifdef CONFIG_SECCOMP_FILTER
#include <linux/file.h>
#include <linux/filter.h>
@@ -576,6 +574,9 @@ void seccomp_filter_release(struct task_struct *tsk)
if (WARN_ON((tsk->flags & PF_EXITING) == 0))
return;
+ if (READ_ONCE(tsk->seccomp.filter) == NULL)
+ return;
+
spin_lock_irq(&tsk->sighand->siglock);
orig = tsk->seccomp.filter;
/* Detach task from its filter tree. */
@@ -601,6 +602,13 @@ static inline void seccomp_sync_threads(unsigned long flags)
BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
assert_spin_locked(&current->sighand->siglock);
+ /*
+ * Don't touch any of the threads if the process is being killed.
+ * This allows for a lockless check in seccomp_filter_release.
+ */
+ if (current->signal->flags & SIGNAL_GROUP_EXIT)
+ return;
+
/* Synchronize all threads. */
caller = current;
for_each_thread(caller, thread) {
@@ -1074,6 +1082,13 @@ void secure_computing_strict(int this_syscall)
else
BUG();
}
+int __secure_computing(void)
+{
+ int this_syscall = syscall_get_nr(current, current_pt_regs());
+
+ secure_computing_strict(this_syscall);
+ return 0;
+}
#else
#ifdef CONFIG_SECCOMP_FILTER
@@ -1225,13 +1240,12 @@ out:
return -1;
}
-static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
- const bool recheck_after_trace)
+static int __seccomp_filter(int this_syscall, const bool recheck_after_trace)
{
u32 filter_ret, action;
+ struct seccomp_data sd;
struct seccomp_filter *match = NULL;
int data;
- struct seccomp_data sd_local;
/*
* Make sure that any changes to mode from another thread have
@@ -1239,12 +1253,9 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
*/
smp_rmb();
- if (!sd) {
- populate_seccomp_data(&sd_local);
- sd = &sd_local;
- }
+ populate_seccomp_data(&sd);
- filter_ret = seccomp_run_filters(sd, &match);
+ filter_ret = seccomp_run_filters(&sd, &match);
data = filter_ret & SECCOMP_RET_DATA;
action = filter_ret & SECCOMP_RET_ACTION_FULL;
@@ -1302,13 +1313,13 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
* a reload of all registers. This does not goto skip since
* a skip would have already been reported.
*/
- if (__seccomp_filter(this_syscall, NULL, true))
+ if (__seccomp_filter(this_syscall, true))
return -1;
return 0;
case SECCOMP_RET_USER_NOTIF:
- if (seccomp_do_user_notification(this_syscall, match, sd))
+ if (seccomp_do_user_notification(this_syscall, match, &sd))
goto skip;
return 0;
@@ -1350,8 +1361,7 @@ skip:
return -1;
}
#else
-static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
- const bool recheck_after_trace)
+static int __seccomp_filter(int this_syscall, const bool recheck_after_trace)
{
BUG();
@@ -1359,7 +1369,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
}
#endif
-int __secure_computing(const struct seccomp_data *sd)
+int __secure_computing(void)
{
int mode = current->seccomp.mode;
int this_syscall;
@@ -1368,15 +1378,14 @@ int __secure_computing(const struct seccomp_data *sd)
unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
return 0;
- this_syscall = sd ? sd->nr :
- syscall_get_nr(current, current_pt_regs());
+ this_syscall = syscall_get_nr(current, current_pt_regs());
switch (mode) {
case SECCOMP_MODE_STRICT:
__secure_computing_strict(this_syscall); /* may call do_exit */
return 0;
case SECCOMP_MODE_FILTER:
- return __seccomp_filter(this_syscall, sd, false);
+ return __seccomp_filter(this_syscall, false);
/* Surviving SECCOMP_RET_KILL_* must be proactively impossible. */
case SECCOMP_MODE_DEAD:
WARN_ON_ONCE(1);
diff --git a/kernel/signal.c b/kernel/signal.c
index 875e97f6205a..027ad9e97417 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2180,8 +2180,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
WARN_ON_ONCE(!tsk->ptrace &&
(tsk->group_leader != tsk || !thread_group_empty(tsk)));
/*
- * tsk is a group leader and has no threads, wake up the
- * non-PIDFD_THREAD waiters.
+ * Notify for thread-group leaders without subthreads.
*/
if (thread_group_empty(tsk))
do_notify_pidfd(tsk);
@@ -4009,6 +4008,47 @@ static struct pid *pidfd_to_pid(const struct file *file)
(PIDFD_SIGNAL_THREAD | PIDFD_SIGNAL_THREAD_GROUP | \
PIDFD_SIGNAL_PROCESS_GROUP)
+static int do_pidfd_send_signal(struct pid *pid, int sig, enum pid_type type,
+ siginfo_t __user *info, unsigned int flags)
+{
+ kernel_siginfo_t kinfo;
+
+ switch (flags) {
+ case PIDFD_SIGNAL_THREAD:
+ type = PIDTYPE_PID;
+ break;
+ case PIDFD_SIGNAL_THREAD_GROUP:
+ type = PIDTYPE_TGID;
+ break;
+ case PIDFD_SIGNAL_PROCESS_GROUP:
+ type = PIDTYPE_PGID;
+ break;
+ }
+
+ if (info) {
+ int ret;
+
+ ret = copy_siginfo_from_user_any(&kinfo, info);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(sig != kinfo.si_signo))
+ return -EINVAL;
+
+ /* Only allow sending arbitrary signals to yourself. */
+ if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
+ (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
+ return -EPERM;
+ } else {
+ prepare_kill_siginfo(sig, &kinfo, type);
+ }
+
+ if (type == PIDTYPE_PGID)
+ return kill_pgrp_info(sig, &kinfo, pid);
+
+ return kill_pid_info_type(sig, &kinfo, pid, type);
+}
+
/**
* sys_pidfd_send_signal - Signal a process through a pidfd
* @pidfd: file descriptor of the process
@@ -4026,9 +4066,7 @@ static struct pid *pidfd_to_pid(const struct file *file)
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
siginfo_t __user *, info, unsigned int, flags)
{
- int ret;
struct pid *pid;
- kernel_siginfo_t kinfo;
enum pid_type type;
/* Enforce flags be set to 0 until we add an extension. */
@@ -4039,57 +4077,39 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
if (hweight32(flags & PIDFD_SEND_SIGNAL_FLAGS) > 1)
return -EINVAL;
- CLASS(fd, f)(pidfd);
- if (fd_empty(f))
- return -EBADF;
+ switch (pidfd) {
+ case PIDFD_SELF_THREAD:
+ pid = get_task_pid(current, PIDTYPE_PID);
+ type = PIDTYPE_PID;
+ break;
+ case PIDFD_SELF_THREAD_GROUP:
+ pid = get_task_pid(current, PIDTYPE_TGID);
+ type = PIDTYPE_TGID;
+ break;
+ default: {
+ CLASS(fd, f)(pidfd);
+ if (fd_empty(f))
+ return -EBADF;
- /* Is this a pidfd? */
- pid = pidfd_to_pid(fd_file(f));
- if (IS_ERR(pid))
- return PTR_ERR(pid);
+ /* Is this a pidfd? */
+ pid = pidfd_to_pid(fd_file(f));
+ if (IS_ERR(pid))
+ return PTR_ERR(pid);
- if (!access_pidfd_pidns(pid))
- return -EINVAL;
+ if (!access_pidfd_pidns(pid))
+ return -EINVAL;
- switch (flags) {
- case 0:
/* Infer scope from the type of pidfd. */
if (fd_file(f)->f_flags & PIDFD_THREAD)
type = PIDTYPE_PID;
else
type = PIDTYPE_TGID;
- break;
- case PIDFD_SIGNAL_THREAD:
- type = PIDTYPE_PID;
- break;
- case PIDFD_SIGNAL_THREAD_GROUP:
- type = PIDTYPE_TGID;
- break;
- case PIDFD_SIGNAL_PROCESS_GROUP:
- type = PIDTYPE_PGID;
- break;
- }
- if (info) {
- ret = copy_siginfo_from_user_any(&kinfo, info);
- if (unlikely(ret))
- return ret;
-
- if (unlikely(sig != kinfo.si_signo))
- return -EINVAL;
-
- /* Only allow sending arbitrary signals to yourself. */
- if ((task_pid(current) != pid || type > PIDTYPE_TGID) &&
- (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
- return -EPERM;
- } else {
- prepare_kill_siginfo(sig, &kinfo, type);
+ return do_pidfd_send_signal(pid, sig, type, info, flags);
+ }
}
- if (type == PIDTYPE_PGID)
- return kill_pgrp_info(sig, &kinfo, pid);
- else
- return kill_pid_info_type(sig, &kinfo, pid, type);
+ return do_pidfd_send_signal(pid, sig, type, info, flags);
}
static int
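
With the PIDFD_SELF_THREAD / PIDFD_SELF_THREAD_GROUP sentinels handled
above, a task can now signal itself without allocating a pidfd. A userspace
sketch, assuming a uapi <linux/pidfd.h> that exposes the sentinels and a
libc that defines SYS_pidfd_send_signal:

	#include <signal.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/pidfd.h>	/* PIDFD_SELF_THREAD* sentinels */

	int main(void)
	{
		signal(SIGUSR1, SIG_IGN);	/* keep the demo harmless */

		/* Deliver SIGUSR1 to the calling thread, no fd involved. */
		if (syscall(SYS_pidfd_send_signal, PIDFD_SELF_THREAD,
			    SIGUSR1, NULL, 0) < 0)
			return 1;
		return 0;
	}
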
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 8896d844d738..5d2d0562115b 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -250,6 +250,7 @@ static int multi_cpu_stop(void *data)
* be detected and reported on their side.
*/
touch_nmi_watchdog();
+ /* Also suppress RCU CPU stall warnings. */
rcu_momentary_eqs();
}
} while (curstate != MULTI_STOP_EXIT);
diff --git a/kernel/sys.c b/kernel/sys.c
index cb366ff8703a..4efca8a97d62 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1085,6 +1085,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
struct task_struct *p;
struct task_struct *group_leader = current->group_leader;
+ struct pid *pids[PIDTYPE_MAX] = { 0 };
struct pid *pgrp;
int err;
@@ -1142,13 +1143,14 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
goto out;
if (task_pgrp(p) != pgrp)
- change_pid(p, PIDTYPE_PGID, pgrp);
+ change_pid(pids, p, PIDTYPE_PGID, pgrp);
err = 0;
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
+ free_pids(pids);
return err;
}
@@ -1222,21 +1224,22 @@ out:
return retval;
}
-static void set_special_pids(struct pid *pid)
+static void set_special_pids(struct pid **pids, struct pid *pid)
{
struct task_struct *curr = current->group_leader;
if (task_session(curr) != pid)
- change_pid(curr, PIDTYPE_SID, pid);
+ change_pid(pids, curr, PIDTYPE_SID, pid);
if (task_pgrp(curr) != pid)
- change_pid(curr, PIDTYPE_PGID, pid);
+ change_pid(pids, curr, PIDTYPE_PGID, pid);
}
int ksys_setsid(void)
{
struct task_struct *group_leader = current->group_leader;
struct pid *sid = task_pid(group_leader);
+ struct pid *pids[PIDTYPE_MAX] = { 0 };
pid_t session = pid_vnr(sid);
int err = -EPERM;
@@ -1252,13 +1255,14 @@ int ksys_setsid(void)
goto out;
group_leader->signal->leader = 1;
- set_special_pids(sid);
+ set_special_pids(pids, sid);
proc_clear_tty(group_leader);
err = session;
out:
write_unlock_irq(&tasklist_lock);
+ free_pids(pids);
if (err > 0) {
proc_sid_connector(group_leader);
sched_autogroup_create_attach(group_leader);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index aa0b2e47f2f2..682f40d5632d 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -238,7 +238,7 @@ EXPORT_SYMBOL(__put_user_ns);
struct idmap_key {
bool map_up; /* true -> id from kid; false -> kid from id */
u32 id; /* id to find */
- u32 count; /* == 0 unless used with map_id_range_down() */
+ u32 count;
};
/*
@@ -343,16 +343,19 @@ u32 map_id_down(struct uid_gid_map *map, u32 id)
* UID_GID_MAP_MAX_BASE_EXTENTS.
*/
static struct uid_gid_extent *
-map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
+map_id_range_up_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
unsigned idx;
- u32 first, last;
+ u32 first, last, id2;
+
+ id2 = id + count - 1;
/* Find the matching extent */
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].lower_first;
last = first + map->extent[idx].count - 1;
- if (id >= first && id <= last)
+ if (id >= first && id <= last &&
+ (id2 >= first && id2 <= last))
return &map->extent[idx];
}
return NULL;
@@ -363,28 +366,28 @@ map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
* Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
*/
static struct uid_gid_extent *
-map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
+map_id_range_up_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
struct idmap_key key;
key.map_up = true;
- key.count = 1;
+ key.count = count;
key.id = id;
return bsearch(&key, map->reverse, extents,
sizeof(struct uid_gid_extent), cmp_map_id);
}
-u32 map_id_up(struct uid_gid_map *map, u32 id)
+u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count)
{
struct uid_gid_extent *extent;
unsigned extents = map->nr_extents;
smp_rmb();
if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
- extent = map_id_up_base(extents, map, id);
+ extent = map_id_range_up_base(extents, map, id, count);
else
- extent = map_id_up_max(extents, map, id);
+ extent = map_id_range_up_max(extents, map, id, count);
/* Map the id or note failure */
if (extent)
@@ -395,6 +398,11 @@ u32 map_id_up(struct uid_gid_map *map, u32 id)
return id;
}
+u32 map_id_up(struct uid_gid_map *map, u32 id)
+{
+ return map_id_range_up(map, id, 1);
+}
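
A worked example of the range semantics introduced here (the extent values
are hypothetical; the rule is that the whole range [id, id + count - 1]
must fall inside a single extent, otherwise the lookup fails with (u32)-1):

	/* One extent mapping kernel ids [100000, 100999] to ids [0, 999]. */
	struct uid_gid_extent e = {
		.first		= 0,
		.lower_first	= 100000,
		.count		= 1000,
	};

	map_id_range_up(map, 100000, 1000);	/* -> 0: whole range fits    */
	map_id_range_up(map, 100990, 10);	/* -> 990: still inside      */
	map_id_range_up(map, 100990, 11);	/* -> (u32)-1: crosses limit */
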
+
/**
* make_kuid - Map a user-namespace uid pair into a kuid.
* @ns: User namespace that the uid is in
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index 5267adeaa403..7e45559521af 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -101,12 +101,11 @@ static bool post_one_notification(struct watch_queue *wqueue,
struct pipe_inode_info *pipe = wqueue->pipe;
struct pipe_buffer *buf;
struct page *page;
- unsigned int head, tail, mask, note, offset, len;
+ unsigned int head, tail, note, offset, len;
bool done = false;
spin_lock_irq(&pipe->rd_wait.lock);
- mask = pipe->ring_size - 1;
head = pipe->head;
tail = pipe->tail;
if (pipe_full(head, tail, pipe->ring_size))
@@ -124,7 +123,7 @@ static bool post_one_notification(struct watch_queue *wqueue,
memcpy(p + offset, n, len);
kunmap_atomic(p);
- buf = &pipe->bufs[head & mask];
+ buf = pipe_buf(pipe, head);
buf->page = page;
buf->private = (unsigned long)wqueue;
buf->ops = &watch_queue_pipe_buf_ops;
@@ -147,7 +146,7 @@ out:
return done;
lost:
- buf = &pipe->bufs[(head - 1) & mask];
+ buf = pipe_buf(pipe, head - 1);
buf->flags |= PIPE_BUF_FLAG_LOSS;
goto out;
}
@@ -269,6 +268,15 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
if (ret < 0)
goto error;
+ /*
+ * pipe_resize_ring() does not update nr_accounted for watch_queue
+ * pipes, because the above vastly overprovisions. Set nr_accounted
+ * and max_usage on this pipe to the number that was actually charged
+ * to the user above via account_pipe_buffers().
+ */
+ pipe->max_usage = nr_pages;
+ pipe->nr_accounted = nr_pages;
+
ret = -ENOMEM;
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 35796c290ca3..70c6298c281e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -808,6 +808,15 @@ config ARCH_HAS_DEBUG_VM_PGTABLE
An architecture should select this when it can successfully
build and run DEBUG_VM_PGTABLE.
+config DEBUG_VFS
+ bool "Debug VFS"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on extended checks in the VFS layer that may impact
+ performance.
+
+ If unsure, say N.
+
config DEBUG_VM_IRQSOFF
def_bool DEBUG_VM && !PREEMPT_RT
@@ -2427,6 +2436,24 @@ config ASYNC_RAID6_TEST
config TEST_HEXDUMP
tristate "Test functions located in the hexdump module at runtime"
+config PRINTF_KUNIT_TEST
+ tristate "KUnit test printf() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the printf functions at runtime.
+
+ If unsure, say N.
+
+config SCANF_KUNIT_TEST
+ tristate "KUnit test scanf() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the scanf functions at runtime.
+
+ If unsure, say N.
+
config STRING_KUNIT_TEST
tristate "KUnit test string functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2440,12 +2467,6 @@ config STRING_HELPERS_KUNIT_TEST
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
-config TEST_PRINTF
- tristate "Test printf() family of functions at runtime"
-
-config TEST_SCANF
- tristate "Test scanf() family of functions at runtime"
-
config TEST_BITMAP
tristate "Test bitmap_*() family of functions at runtime"
help
@@ -2691,6 +2712,20 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
+config KFIFO_KUNIT_TEST
+ tristate "KUnit Test for the generic kernel FIFO implementation" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the generic FIFO implementation KUnit test suite.
+ It tests the API and basic functionality of the kfifo type
+ and its associated macros.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config LIST_KUNIT_TEST
tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -3166,7 +3201,7 @@ config TEST_OBJPOOL
If unsure, say N.
-config INT_POW_TEST
+config INT_POW_KUNIT_TEST
tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
@@ -3197,6 +3232,44 @@ config INT_SQRT_KUNIT_TEST
If unsure, say N
+config INT_LOG_KUNIT_TEST
+ tristate "Integer log (int_log) test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the int_log library, which
+ provides two functions, intlog2 and intlog10, that compute the integer
+ logarithm in base 2 and base 10, respectively.
+
+ If unsure, say N.
+
+config GCD_KUNIT_TEST
+ tristate "Greatest common divisor test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the gcd() function,
+ which computes the greatest common divisor of two numbers.
+
+ This test suite verifies the correctness of gcd() across various
+ scenarios, including edge cases.
+
+ If unsure, say N.
+
+config PRIME_NUMBERS_KUNIT_TEST
+ tristate "Prime number generator test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ select PRIME_NUMBERS
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the {is,next}_prime_number
+ functions.
+
+ Enabling this option will include tests that compare the prime number
+ generator functions against a brute force implementation.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 1d4aa7a83b3a..4216b3a4ff21 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -116,21 +116,22 @@ config UBSAN_UNREACHABLE
This option enables -fsanitize=unreachable which checks for control
flow reaching an expected-to-be-unreachable position.
-config UBSAN_SIGNED_WRAP
- bool "Perform checking for signed arithmetic wrap-around"
+config UBSAN_INTEGER_WRAP
+ bool "Perform checking for integer arithmetic wrap-around"
default UBSAN
depends on !COMPILE_TEST
- # The no_sanitize attribute was introduced in GCC with version 8.
- depends on !CC_IS_GCC || GCC_VERSION >= 80000
+ depends on $(cc-option,-fsanitize-undefined-ignore-overflow-pattern=all)
depends on $(cc-option,-fsanitize=signed-integer-overflow)
- help
- This option enables -fsanitize=signed-integer-overflow which checks
- for wrap-around of any arithmetic operations with signed integers.
- This currently performs nearly no instrumentation due to the
- kernel's use of -fno-strict-overflow which converts all would-be
- arithmetic undefined behavior into wrap-around arithmetic. Future
- sanitizer versions will allow for wrap-around checking (rather than
- exclusively undefined behavior).
+ depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
+ depends on $(cc-option,-fsanitize=implicit-signed-integer-truncation)
+ depends on $(cc-option,-fsanitize=implicit-unsigned-integer-truncation)
+ depends on $(cc-option,-fsanitize-ignorelist=/dev/null)
+ help
+ This option enables all of the sanitizers involved in integer overflow
+ (wrap-around) mitigation: signed-integer-overflow, unsigned-integer-overflow,
+ implicit-signed-integer-truncation, and implicit-unsigned-integer-truncation.
+ This is currently limited to the size_t type while testing and
+ compiler development continue.
config UBSAN_BOOL
bool "Perform checking for non-boolean values used as boolean"
diff --git a/lib/Makefile b/lib/Makefile
index d5cfc7afbbb8..89b8a4bce108 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -52,9 +52,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o bitmap-str.o
-obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
obj-y += string_helpers.o
-obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-y += hexdump.o
obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
@@ -65,27 +63,20 @@ obj-$(CONFIG_TEST_DHRY) += test_dhry.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
CFLAGS_test_bitops.o += -Werror
-obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
-obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
-obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
CFLAGS_test_ubsan.o += $(call cc-disable-warning, unused-but-set-variable)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
-obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
-obj-$(CONFIG_TEST_SORT) += test_sort.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
-obj-$(CONFIG_TEST_PRINTF) += test_printf.o
-obj-$(CONFIG_TEST_SCANF) += test_scanf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
@@ -98,7 +89,6 @@ obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
-obj-$(CONFIG_TEST_RUNTIME) += tests/
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
@@ -107,10 +97,7 @@ obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
obj-$(CONFIG_TEST_HMM) += test_hmm.o
obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
-obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
-CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
-obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
obj-$(CONFIG_TEST_OBJPOOL) += test_objpool.o
obj-$(CONFIG_TEST_FPU) += test_fpu.o
@@ -132,7 +119,7 @@ endif
obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
-obj-y += math/ crypto/
+obj-y += math/ crypto/ tests/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
@@ -368,32 +355,6 @@ obj-$(CONFIG_OBJAGG) += objagg.o
# pldmfw library
obj-$(CONFIG_PLDMFW) += pldmfw/
-# KUnit tests
-CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
-obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
-obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
-obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
-obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
-obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
-obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
-obj-$(CONFIG_BITS_TEST) += test_bits.o
-obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
-obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
-obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
-obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
-CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
-obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
-CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
-obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
-CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
-CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
-CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
-CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
-obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
-obj-$(CONFIG_CRC_KUNIT_TEST) += crc_kunit.o
-obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
-obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
-
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
diff --git a/lib/math/Makefile b/lib/math/Makefile
index 853f023ae537..d1caba23baa0 100644
--- a/lib/math/Makefile
+++ b/lib/math/Makefile
@@ -5,8 +5,7 @@ obj-$(CONFIG_CORDIC) += cordic.o
obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
obj-$(CONFIG_RATIONAL) += rational.o
-obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o
-obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
-obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += tests/int_sqrt_kunit.o
\ No newline at end of file
+
+obj-y += tests/
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index 9a17ee9af93a..95a6f7960db9 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -1,16 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "prime numbers: " fmt
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/prime_numbers.h>
#include <linux/slab.h>
-struct primes {
- struct rcu_head rcu;
- unsigned long last, sz;
- unsigned long primes[];
-};
+#include "prime_numbers_private.h"
#if BITS_PER_LONG == 64
static const struct primes small_primes = {
@@ -62,9 +57,25 @@ static const struct primes small_primes = {
static DEFINE_MUTEX(lock);
static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
-static unsigned long selftest_max;
+#if IS_ENABLED(CONFIG_PRIME_NUMBERS_KUNIT_TEST)
+/*
+ * Calls the callback under RCU lock. The callback must not retain
+ * the primes pointer.
+ */
+void with_primes(void *ctx, primes_fn fn)
+{
+ rcu_read_lock();
+ fn(ctx, rcu_dereference(primes));
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(with_primes);
+
+EXPORT_SYMBOL(slow_is_prime_number);
-static bool slow_is_prime_number(unsigned long x)
+#else
+static
+#endif
+bool slow_is_prime_number(unsigned long x)
{
unsigned long y = int_sqrt(x);
@@ -239,77 +250,13 @@ bool is_prime_number(unsigned long x)
}
EXPORT_SYMBOL(is_prime_number);
-static void dump_primes(void)
-{
- const struct primes *p;
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
- rcu_read_lock();
- p = rcu_dereference(primes);
-
- if (buf)
- bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
- pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s\n",
- p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
-
- rcu_read_unlock();
-
- kfree(buf);
-}
-
-static int selftest(unsigned long max)
-{
- unsigned long x, last;
-
- if (!max)
- return 0;
-
- for (last = 0, x = 2; x < max; x++) {
- bool slow = slow_is_prime_number(x);
- bool fast = is_prime_number(x);
-
- if (slow != fast) {
- pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!\n",
- x, slow ? "yes" : "no", fast ? "yes" : "no");
- goto err;
- }
-
- if (!slow)
- continue;
-
- if (next_prime_number(last) != x) {
- pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu\n",
- last, x, next_prime_number(last));
- goto err;
- }
- last = x;
- }
-
- pr_info("%s(%lu) passed, last prime was %lu\n", __func__, x, last);
- return 0;
-
-err:
- dump_primes();
- return -EINVAL;
-}
-
-static int __init primes_init(void)
-{
- return selftest(selftest_max);
-}
-
static void __exit primes_exit(void)
{
free_primes();
}
-module_init(primes_init);
module_exit(primes_exit);
-module_param_named(selftest, selftest_max, ulong, 0400);
-
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Prime number library");
MODULE_LICENSE("GPL");
diff --git a/lib/math/prime_numbers_private.h b/lib/math/prime_numbers_private.h
new file mode 100644
index 000000000000..f3ebf5386e6b
--- /dev/null
+++ b/lib/math/prime_numbers_private.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/types.h>
+
+struct primes {
+ struct rcu_head rcu;
+ unsigned long last, sz;
+ unsigned long primes[];
+};
+
+#if IS_ENABLED(CONFIG_PRIME_NUMBERS_KUNIT_TEST)
+typedef void (*primes_fn)(void *, const struct primes *);
+
+void with_primes(void *ctx, primes_fn fn);
+bool slow_is_prime_number(unsigned long x);
+#endif
diff --git a/lib/math/tests/Makefile b/lib/math/tests/Makefile
index e1a79f093b2d..13dc96e48408 100644
--- a/lib/math/tests/Makefile
+++ b/lib/math/tests/Makefile
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o
-obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o
+obj-$(CONFIG_GCD_KUNIT_TEST) += gcd_kunit.o
+obj-$(CONFIG_INT_LOG_KUNIT_TEST) += int_log_kunit.o
+obj-$(CONFIG_INT_POW_KUNIT_TEST) += int_pow_kunit.o
+obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o
+obj-$(CONFIG_PRIME_NUMBERS_KUNIT_TEST) += prime_numbers_kunit.o
+obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational_kunit.o
diff --git a/lib/math/tests/gcd_kunit.c b/lib/math/tests/gcd_kunit.c
new file mode 100644
index 000000000000..ede1883583b1
--- /dev/null
+++ b/lib/math/tests/gcd_kunit.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/gcd.h>
+#include <linux/limits.h>
+
+struct test_case_params {
+ unsigned long val1;
+ unsigned long val2;
+ unsigned long expected_result;
+ const char *name;
+};
+
+static const struct test_case_params params[] = {
+ { 48, 18, 6, "GCD of 48 and 18" },
+ { 18, 48, 6, "GCD of 18 and 48" },
+ { 56, 98, 14, "GCD of 56 and 98" },
+ { 17, 13, 1, "Coprime numbers" },
+ { 101, 103, 1, "Coprime numbers" },
+ { 270, 192, 6, "GCD of 270 and 192" },
+ { 0, 5, 5, "GCD with zero" },
+ { 7, 0, 7, "GCD with zero reversed" },
+ { 36, 36, 36, "GCD of identical numbers" },
+ { ULONG_MAX, 1, 1, "GCD of max ulong and 1" },
+ { ULONG_MAX, ULONG_MAX, ULONG_MAX, "GCD of max ulong values" },
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(gcd, params, get_desc);
+
+static void gcd_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, gcd(tc->val1, tc->val2));
+}
+
+static struct kunit_case math_gcd_test_cases[] = {
+ KUNIT_CASE_PARAM(gcd_test, gcd_gen_params),
+ {}
+};
+
+static struct kunit_suite gcd_test_suite = {
+ .name = "math-gcd",
+ .test_cases = math_gcd_test_cases,
+};
+
+kunit_test_suite(gcd_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("math.gcd KUnit test suite");
+MODULE_AUTHOR("Yu-Chun Lin <eleanor15x@gmail.com>");
diff --git a/lib/math/tests/int_log_kunit.c b/lib/math/tests/int_log_kunit.c
new file mode 100644
index 000000000000..14e854146cb4
--- /dev/null
+++ b/lib/math/tests/int_log_kunit.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <kunit/test.h>
+#include <linux/int_log.h>
+
+struct test_case_params {
+ u32 value;
+ unsigned int expected_result;
+ const char *name;
+};
+
+
+/* The expected result takes into account the log error */
+static const struct test_case_params intlog2_params[] = {
+ {0, 0, "Log base 2 of 0"},
+ {1, 0, "Log base 2 of 1"},
+ {2, 16777216, "Log base 2 of 2"},
+ {3, 26591232, "Log base 2 of 3"},
+ {4, 33554432, "Log base 2 of 4"},
+ {8, 50331648, "Log base 2 of 8"},
+ {16, 67108864, "Log base 2 of 16"},
+ {32, 83886080, "Log base 2 of 32"},
+ {U32_MAX, 536870911, "Log base 2 of MAX"},
+};
+
+static const struct test_case_params intlog10_params[] = {
+ {0, 0, "Log base 10 of 0"},
+ {1, 0, "Log base 10 of 1"},
+ {6, 13055203, "Log base 10 of 6"},
+ {10, 16777225, "Log base 10 of 10"},
+ {100, 33554450, "Log base 10 of 100"},
+ {1000, 50331675, "Log base 10 of 1000"},
+ {10000, 67108862, "Log base 10 of 10000"},
+ {U32_MAX, 161614247, "Log base 10 of MAX"}
+};
+
+static void get_desc(const struct test_case_params *tc, char *desc)
+{
+ strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+
+KUNIT_ARRAY_PARAM(intlog2, intlog2_params, get_desc);
+
+static void intlog2_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, intlog2(tc->value));
+}
+
+KUNIT_ARRAY_PARAM(intlog10, intlog10_params, get_desc);
+
+static void intlog10_test(struct kunit *test)
+{
+ const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
+
+ KUNIT_EXPECT_EQ(test, tc->expected_result, intlog10(tc->value));
+}
+
+static struct kunit_case math_int_log_test_cases[] = {
+ KUNIT_CASE_PARAM(intlog2_test, intlog2_gen_params),
+ KUNIT_CASE_PARAM(intlog10_test, intlog10_gen_params),
+ {}
+};
+
+static struct kunit_suite int_log_test_suite = {
+ .name = "math-int_log",
+ .test_cases = math_int_log_test_cases,
+};
+
+kunit_test_suites(&int_log_test_suite);
+
+MODULE_DESCRIPTION("math.int_log KUnit test suite");
+MODULE_LICENSE("GPL");
diff --git a/lib/math/tests/prime_numbers_kunit.c b/lib/math/tests/prime_numbers_kunit.c
new file mode 100644
index 000000000000..2f1643208c66
--- /dev/null
+++ b/lib/math/tests/prime_numbers_kunit.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+#include <linux/module.h>
+#include <linux/prime_numbers.h>
+
+#include "../prime_numbers_private.h"
+
+static void dump_primes(void *ctx, const struct primes *p)
+{
+ static char buf[PAGE_SIZE];
+ struct kunit_suite *suite = ctx;
+
+ bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
+ kunit_info(suite, "primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+ p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
+}
+
+static void prime_numbers_test(struct kunit *test)
+{
+ const unsigned long max = 65536;
+ unsigned long x, last, next;
+
+ for (last = 0, x = 2; x < max; x++) {
+ const bool slow = slow_is_prime_number(x);
+ const bool fast = is_prime_number(x);
+
+ KUNIT_ASSERT_EQ_MSG(test, slow, fast, "is-prime(%lu)", x);
+
+ if (!slow)
+ continue;
+
+ next = next_prime_number(last);
+ KUNIT_ASSERT_EQ_MSG(test, next, x, "next-prime(%lu)", last);
+ last = next;
+ }
+}
+
+static void kunit_suite_exit(struct kunit_suite *suite)
+{
+ with_primes(suite, dump_primes);
+}
+
+static struct kunit_case prime_numbers_cases[] = {
+ KUNIT_CASE(prime_numbers_test),
+ {},
+};
+
+static struct kunit_suite prime_numbers_suite = {
+ .name = "math-prime_numbers",
+ .suite_exit = kunit_suite_exit,
+ .test_cases = prime_numbers_cases,
+};
+
+kunit_test_suite(prime_numbers_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Prime number library");
+MODULE_LICENSE("GPL");
diff --git a/lib/math/rational-test.c b/lib/math/tests/rational_kunit.c
index 47486a95f088..47486a95f088 100644
--- a/lib/math/rational-test.c
+++ b/lib/math/tests/rational_kunit.c
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 5d7b10e98610..8772e5edaa4f 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -15,7 +15,7 @@ static void test_ubsan_add_overflow(void)
{
volatile int val = INT_MAX;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val += 2;
}
@@ -24,7 +24,7 @@ static void test_ubsan_sub_overflow(void)
volatile int val = INT_MIN;
volatile int val2 = 2;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val -= val2;
}
@@ -32,7 +32,7 @@ static void test_ubsan_mul_overflow(void)
{
volatile int val = INT_MAX / 2;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val *= 3;
}
@@ -40,7 +40,7 @@ static void test_ubsan_negate_overflow(void)
{
volatile int val = INT_MIN;
- UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
val = -val;
}
@@ -53,6 +53,15 @@ static void test_ubsan_divrem_overflow(void)
val /= val2;
}
+static void test_ubsan_truncate_signed(void)
+{
+ volatile long val = LONG_MAX;
+ volatile int val2 = 0;
+
+ UBSAN_TEST(CONFIG_UBSAN_INTEGER_WRAP);
+ val2 = val;
+}
+
static void test_ubsan_shift_out_of_bounds(void)
{
volatile int neg = -1, wrap = 4;
@@ -127,6 +136,7 @@ static const test_ubsan_fp test_ubsan_array[] = {
test_ubsan_sub_overflow,
test_ubsan_mul_overflow,
test_ubsan_negate_overflow,
+ test_ubsan_truncate_signed,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
index 8e4f42cb9c54..498915255860 100644
--- a/lib/tests/Makefile
+++ b/lib/tests/Makefile
@@ -1 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for tests of kernel library functions.
+
+# KUnit tests
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
+obj-$(CONFIG_BITS_TEST) += test_bits.o
+obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
+obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
+obj-$(CONFIG_CRC_KUNIT_TEST) += crc_kunit.o
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
+CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
+obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
+obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
+obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
+obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
+obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
+obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
+obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
+obj-$(CONFIG_KFIFO_KUNIT_TEST) += kfifo_kunit.o
+obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
+obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
+obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
+obj-$(CONFIG_PRINTF_KUNIT_TEST) += printf_kunit.o
+obj-$(CONFIG_SCANF_KUNIT_TEST) += scanf_kunit.o
+obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
+obj-$(CONFIG_TEST_SORT) += test_sort.o
+CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
+obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
+obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
+obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
+obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
+
obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/
diff --git a/lib/bitfield_kunit.c b/lib/tests/bitfield_kunit.c
index 5ccd86f61896..5ccd86f61896 100644
--- a/lib/bitfield_kunit.c
+++ b/lib/tests/bitfield_kunit.c
diff --git a/lib/checksum_kunit.c b/lib/tests/checksum_kunit.c
index be04aa42125c..be04aa42125c 100644
--- a/lib/checksum_kunit.c
+++ b/lib/tests/checksum_kunit.c
diff --git a/lib/cmdline_kunit.c b/lib/tests/cmdline_kunit.c
index c1602f797637..c1602f797637 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/tests/cmdline_kunit.c
diff --git a/lib/cpumask_kunit.c b/lib/tests/cpumask_kunit.c
index 6b62a6bdd50e..6b62a6bdd50e 100644
--- a/lib/cpumask_kunit.c
+++ b/lib/tests/cpumask_kunit.c
diff --git a/lib/crc_kunit.c b/lib/tests/crc_kunit.c
index 6a61d4b5fd45..6a61d4b5fd45 100644
--- a/lib/crc_kunit.c
+++ b/lib/tests/crc_kunit.c
diff --git a/lib/fortify_kunit.c b/lib/tests/fortify_kunit.c
index ecb638d4cde1..29ffc62a71e3 100644
--- a/lib/fortify_kunit.c
+++ b/lib/tests/fortify_kunit.c
@@ -60,6 +60,7 @@ static int fortify_write_overflows;
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
+static const char * const unchanging_12 = "this is 12!!";
static char array_unknown[] = "compiler thinks I might change";
void fortify_add_kunit_error(int write)
@@ -83,12 +84,28 @@ void fortify_add_kunit_error(int write)
static void fortify_test_known_sizes(struct kunit *test)
{
+ char stack[80] = "Test!";
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(stack)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(stack), 5);
+
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen("88888888")));
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
+
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(array_of_10)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(ptr_of_11)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
+ KUNIT_EXPECT_TRUE(test, __is_constexpr(__builtin_strlen(unchanging_12)));
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(unchanging_12), 12);
+
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(array_unknown)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
+
/* Externally defined and dynamically sized string pointer: */
+ KUNIT_EXPECT_FALSE(test, __is_constexpr(__builtin_strlen(test->name)));
KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
}
@@ -394,8 +411,6 @@ struct fortify_padding {
char buf[32];
unsigned long bytes_after;
};
-/* Force compiler into not being able to resolve size at compile-time. */
-static volatile int unconst;
static void fortify_test_strlen(struct kunit *test)
{
@@ -520,57 +535,56 @@ static void fortify_test_strncpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
+ size_t sizeof_buf = sizeof(pad.buf);
+
+ OPTIMIZER_HIDE_VAR(sizeof_buf);
/* Destination is %NUL-filled to start with. */
KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* Legitimate strncpy() 1 less than of max size. */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst - 1)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf - 1)
== pad.buf);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* Only last byte should be %NUL */
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* Legitimate (though unterminated) max-size strncpy. */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf)
== pad.buf);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* No trailing %NUL -- thanks strncpy API. */
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
/* But we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* Now verify that FORTIFY is working... */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst + 1)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 1)
== pad.buf);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* And further... */
- KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
- sizeof(pad.buf) + unconst + 2)
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src, sizeof_buf + 2)
== pad.buf);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
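
The sizeof_buf laundering above replaces the old volatile-based trick. A minimal sketch of the pattern, assuming OPTIMIZER_HIDE_VAR() from <linux/compiler.h> (an empty asm that makes the value opaque to the optimizer) and CONFIG_FORTIFY_SOURCE enabled; names are illustrative:

    #include <linux/compiler.h>
    #include <linux/string.h>

    static void demo(const char *src)
    {
    	char buf[32];
    	size_t len = sizeof(buf);	/* a compile-time constant as written */

    	OPTIMIZER_HIDE_VAR(len);	/* now a run-time value to the compiler */

    	/* FORTIFY can no longer reject this at build time; the overflow is
    	 * diagnosed by the run-time check instead, which is what the
    	 * fortify_write_overflows counter observes in these tests. */
    	strncpy(buf, src, len + 1);
    }
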
@@ -579,55 +593,56 @@ static void fortify_test_strscpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
+ size_t sizeof_buf = sizeof(pad.buf);
+ size_t sizeof_src = sizeof(src);
+
+ OPTIMIZER_HIDE_VAR(sizeof_buf);
+ OPTIMIZER_HIDE_VAR(sizeof_src);
/* Destination is %NUL-filled to start with. */
KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 3], '\0');
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
	/* Legitimate strscpy(), one byte less than the max size. */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(pad.buf) + unconst - 1),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf - 1),
-E2BIG);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* Keeping space for %NUL, last two bytes should be %NUL */
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* Legitimate max-size strscpy. */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(pad.buf) + unconst),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf),
-E2BIG);
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
/* A trailing %NUL will exist. */
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* Now verify that FORTIFY is working... */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(pad.buf) + unconst + 1),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_buf + 1),
-E2BIG);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* And much further... */
- KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
- sizeof(src) * 2 + unconst),
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src, sizeof_src * 2),
-E2BIG);
/* Should catch the overflow. */
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
- KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
- KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof_buf - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof_buf - 3], '\0');
/* And we will not have gone beyond. */
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
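
For reference, the strscpy() return contract that the -E2BIG expectations above rely on, as a standalone sketch (names illustrative):

    #include <linux/errno.h>
    #include <linux/string.h>

    static void demo(void)
    {
    	char dst[8];
    	ssize_t n;

    	n = strscpy(dst, "hi", sizeof(dst));
    	/* n == 2: bytes copied, excluding the terminating NUL */

    	n = strscpy(dst, "far too long", sizeof(dst));
    	/* n == -E2BIG: source truncated, but dst is still NUL-terminated */
    }
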
@@ -767,7 +782,9 @@ static void fortify_test_strlcat(struct kunit *test)
struct fortify_padding pad = { };
char src[sizeof(pad.buf)] = { };
int i, partial;
- int len = sizeof(pad.buf) + unconst;
+ int len = sizeof(pad.buf);
+
+ OPTIMIZER_HIDE_VAR(len);
/* Fill 15 bytes with valid characters. */
partial = sizeof(src) / 2 - 1;
@@ -857,28 +874,32 @@ struct fortify_zero_sized {
#define __fortify_test(memfunc) \
static void fortify_test_##memfunc(struct kunit *test) \
{ \
- struct fortify_zero_sized zero = { }; \
+ struct fortify_zero_sized empty = { }; \
struct fortify_padding pad = { }; \
char srcA[sizeof(pad.buf) + 2]; \
char srcB[sizeof(pad.buf) + 2]; \
- size_t len = sizeof(pad.buf) + unconst; \
+ size_t len = sizeof(pad.buf); \
+ size_t zero = 0; \
+ \
+ OPTIMIZER_HIDE_VAR(len); \
+ OPTIMIZER_HIDE_VAR(zero); \
\
memset(srcA, 'A', sizeof(srcA)); \
KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
memset(srcB, 'B', sizeof(srcB)); \
KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
\
- memfunc(pad.buf, srcA, 0 + unconst); \
+ memfunc(pad.buf, srcA, zero); \
KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- memfunc(pad.buf + 1, srcB, 1 + unconst); \
+ memfunc(pad.buf + 1, srcB, zero + 1); \
KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- memfunc(pad.buf, srcA, 1 + unconst); \
+ memfunc(pad.buf, srcA, zero + 1); \
KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
@@ -904,10 +925,10 @@ static void fortify_test_##memfunc(struct kunit *test) \
/* Reset error counter. */ \
fortify_write_overflows = 0; \
/* Copy nothing into nothing: no errors. */ \
- memfunc(zero.buf, srcB, 0 + unconst); \
+ memfunc(empty.buf, srcB, zero); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
- memfunc(zero.buf, srcB, 1 + unconst); \
+ memfunc(empty.buf, srcB, zero + 1); \
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
}
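
The generator macro above is then stamped out once per fortified mem*() helper; a sketch of how such instantiations look (the exact list of functions covered is in the full file):

    __fortify_test(memcpy)
    __fortify_test(memmove)

    /* Each expansion produces a fortify_test_<name>() KUnit case that drives
     * the named function through zero-length, in-bounds, and overflowing
     * copies while watching the fortify_{read,write}_overflows counters. */
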
@@ -919,7 +940,9 @@ static void fortify_test_memscan(struct kunit *test)
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
char needle = 'm';
- size_t len = sizeof(haystack) + unconst;
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
mem);
@@ -938,7 +961,9 @@ static void fortify_test_memchr(struct kunit *test)
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
char needle = 'm';
- size_t len = sizeof(haystack) + unconst;
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
mem);
@@ -957,7 +982,9 @@ static void fortify_test_memchr_inv(struct kunit *test)
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + 1;
char needle = 'W';
- size_t len = sizeof(haystack) + unconst;
+ size_t len = sizeof(haystack);
+
+ OPTIMIZER_HIDE_VAR(len);
/* Normal search is okay. */
KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
@@ -976,8 +1003,11 @@ static void fortify_test_memcmp(struct kunit *test)
{
char one[] = "My mind is going ...";
char two[] = "My mind is going ... I can feel it.";
- size_t one_len = sizeof(one) + unconst - 1;
- size_t two_len = sizeof(two) + unconst - 1;
+ size_t one_len = sizeof(one) - 1;
+ size_t two_len = sizeof(two) - 1;
+
+ OPTIMIZER_HIDE_VAR(one_len);
+ OPTIMIZER_HIDE_VAR(two_len);
/* We match the first string (ignoring the %NUL). */
KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
@@ -998,7 +1028,9 @@ static void fortify_test_kmemdup(struct kunit *test)
{
char src[] = "I got Doom running on it!";
char *copy;
- size_t len = sizeof(src) + unconst;
+ size_t len = sizeof(src);
+
+ OPTIMIZER_HIDE_VAR(len);
/* Copy is within bounds. */
copy = kmemdup(src, len, GFP_KERNEL);
diff --git a/lib/hashtable_test.c b/lib/tests/hashtable_test.c
index 3521de6bad15..3521de6bad15 100644
--- a/lib/hashtable_test.c
+++ b/lib/tests/hashtable_test.c
diff --git a/lib/is_signed_type_kunit.c b/lib/tests/is_signed_type_kunit.c
index 88adbe813f3a..88adbe813f3a 100644
--- a/lib/is_signed_type_kunit.c
+++ b/lib/tests/is_signed_type_kunit.c
diff --git a/lib/tests/kfifo_kunit.c b/lib/tests/kfifo_kunit.c
new file mode 100644
index 000000000000..a85eedc3195a
--- /dev/null
+++ b/lib/tests/kfifo_kunit.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the generic kernel FIFO implementation.
+ *
+ * Copyright (C) 2024 Diego Vieira <diego.daniel.professional@gmail.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/kfifo.h>
+
+#define KFIFO_SIZE 32
+#define N_ELEMENTS 5
+
+static void kfifo_test_reset_should_clear_the_fifo(struct kunit *test)
+{
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ kfifo_put(&my_fifo, 1);
+ kfifo_put(&my_fifo, 2);
+ kfifo_put(&my_fifo, 3);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+ kfifo_reset(&my_fifo);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+}
+
+static void kfifo_test_define_should_define_an_empty_fifo(struct kunit *test)
+{
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+}
+
+static void kfifo_test_len_should_ret_n_of_stored_elements(struct kunit *test)
+{
+ u8 buffer1[N_ELEMENTS];
+
+ for (int i = 0; i < N_ELEMENTS; i++)
+ buffer1[i] = i + 1;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+
+ kfifo_in(&my_fifo, buffer1, N_ELEMENTS);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), N_ELEMENTS);
+
+ kfifo_in(&my_fifo, buffer1, N_ELEMENTS);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), N_ELEMENTS * 2);
+
+ kfifo_reset(&my_fifo);
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 0);
+}
+
+static void kfifo_test_put_should_insert_and_get_should_pop(struct kunit *test)
+{
+ u8 out_data = 0;
+ int processed_elements;
+ u8 elements[] = { 3, 5, 11 };
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ // If the fifo is empty, get returns 0
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 0);
+ KUNIT_EXPECT_EQ(test, out_data, 0);
+
+ for (int i = 0; i < 3; i++)
+ kfifo_put(&my_fifo, elements[i]);
+
+ for (int i = 0; i < 3; i++) {
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, elements[i]);
+ }
+}
+
+static void kfifo_test_in_should_insert_multiple_elements(struct kunit *test)
+{
+ u8 in_buffer[] = { 11, 25, 65 };
+ u8 out_data;
+ int processed_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ kfifo_in(&my_fifo, in_buffer, 3);
+
+ for (int i = 0; i < 3; i++) {
+ processed_elements = kfifo_get(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, in_buffer[i]);
+ }
+}
+
+static void kfifo_test_out_should_pop_multiple_elements(struct kunit *test)
+{
+ u8 in_buffer[] = { 11, 25, 65 };
+ u8 out_buffer[3];
+ int copied_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ for (int i = 0; i < 3; i++)
+ kfifo_put(&my_fifo, in_buffer[i]);
+
+ copied_elements = kfifo_out(&my_fifo, out_buffer, 3);
+ KUNIT_EXPECT_EQ(test, copied_elements, 3);
+
+ for (int i = 0; i < 3; i++)
+ KUNIT_EXPECT_EQ(test, out_buffer[i], in_buffer[i]);
+ KUNIT_EXPECT_TRUE(test, kfifo_is_empty(&my_fifo));
+}
+
+static void kfifo_test_dec_init_should_define_an_empty_fifo(struct kunit *test)
+{
+ DECLARE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ INIT_KFIFO(my_fifo);
+
+	// my_fifo is a struct with an in-place buffer
+ KUNIT_EXPECT_FALSE(test, __is_kfifo_ptr(&my_fifo));
+
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+}
+
+static void kfifo_test_define_should_equal_declare_init(struct kunit *test)
+{
+	// declare my_fifo1, a kfifo holding u8 elements
+	DECLARE_KFIFO(my_fifo1, u8, KFIFO_SIZE);
+	// initialize the my_fifo1 variable
+	INIT_KFIFO(my_fifo1);
+
+	// DEFINE_KFIFO declares and initializes the variable in one step,
+	// essentially the same as calling DECLARE_KFIFO and INIT_KFIFO
+ DEFINE_KFIFO(my_fifo2, u8, KFIFO_SIZE);
+
+ // my_fifo1 and my_fifo2 have the same size
+ KUNIT_EXPECT_EQ(test, sizeof(my_fifo1), sizeof(my_fifo2));
+ KUNIT_EXPECT_EQ(test, kfifo_initialized(&my_fifo1),
+ kfifo_initialized(&my_fifo2));
+ KUNIT_EXPECT_EQ(test, kfifo_is_empty(&my_fifo1),
+ kfifo_is_empty(&my_fifo2));
+}
+
+static void kfifo_test_alloc_should_initialize_a_ptr_fifo(struct kunit *test)
+{
+ int ret;
+ DECLARE_KFIFO_PTR(my_fifo, u8);
+
+ INIT_KFIFO(my_fifo);
+
+	// kfifo_initialized returns false, signaling that the buffer pointer is NULL
+ KUNIT_EXPECT_FALSE(test, kfifo_initialized(&my_fifo));
+
+ // kfifo_alloc allocates the buffer
+ ret = kfifo_alloc(&my_fifo, KFIFO_SIZE, GFP_KERNEL);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "Memory allocation should succeed");
+ KUNIT_EXPECT_TRUE(test, kfifo_initialized(&my_fifo));
+
+ // kfifo_free frees the buffer
+ kfifo_free(&my_fifo);
+}
+
+static void kfifo_test_peek_should_not_remove_elements(struct kunit *test)
+{
+ u8 out_data;
+ int processed_elements;
+
+ DEFINE_KFIFO(my_fifo, u8, KFIFO_SIZE);
+
+ // If the fifo is empty, peek returns 0
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 0);
+
+ kfifo_put(&my_fifo, 3);
+ kfifo_put(&my_fifo, 5);
+ kfifo_put(&my_fifo, 11);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, 3);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+
+	// Using peek doesn't remove the element,
+	// so the read element and the fifo length
+	// remain the same
+ processed_elements = kfifo_peek(&my_fifo, &out_data);
+ KUNIT_EXPECT_EQ(test, processed_elements, 1);
+ KUNIT_EXPECT_EQ(test, out_data, 3);
+
+ KUNIT_EXPECT_EQ(test, kfifo_len(&my_fifo), 3);
+}
+
+static struct kunit_case kfifo_test_cases[] = {
+ KUNIT_CASE(kfifo_test_reset_should_clear_the_fifo),
+ KUNIT_CASE(kfifo_test_define_should_define_an_empty_fifo),
+ KUNIT_CASE(kfifo_test_len_should_ret_n_of_stored_elements),
+ KUNIT_CASE(kfifo_test_put_should_insert_and_get_should_pop),
+ KUNIT_CASE(kfifo_test_in_should_insert_multiple_elements),
+ KUNIT_CASE(kfifo_test_out_should_pop_multiple_elements),
+ KUNIT_CASE(kfifo_test_dec_init_should_define_an_empty_fifo),
+ KUNIT_CASE(kfifo_test_define_should_equal_declare_init),
+	KUNIT_CASE(kfifo_test_alloc_should_initialize_a_ptr_fifo),
+ KUNIT_CASE(kfifo_test_peek_should_not_remove_elements),
+ {},
+};
+
+static struct kunit_suite kfifo_test_module = {
+ .name = "kfifo",
+ .test_cases = kfifo_test_cases,
+};
+
+kunit_test_suites(&kfifo_test_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Diego Vieira <diego.daniel.professional@gmail.com>");
+MODULE_DESCRIPTION("KUnit test for the kernel FIFO");
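
Outside the test, the same API is typically used as a fixed-size producer/consumer ring. A minimal sketch (names illustrative; the FIFO size must be a power of two):

    #include <linux/kfifo.h>
    #include <linux/printk.h>

    static DEFINE_KFIFO(log_fifo, u8, 32);

    static void producer(u8 byte)
    {
    	if (!kfifo_put(&log_fifo, byte))	/* returns 0 when full */
    		pr_warn_ratelimited("log fifo full, dropping byte\n");
    }

    static bool consumer(u8 *out)
    {
    	return kfifo_get(&log_fifo, out);	/* 1 on success, 0 if empty */
    }
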
diff --git a/lib/kunit_iov_iter.c b/lib/tests/kunit_iov_iter.c
index 48342736d016..48342736d016 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/tests/kunit_iov_iter.c
diff --git a/lib/list-test.c b/lib/tests/list-test.c
index 9135cdc1bb39..9135cdc1bb39 100644
--- a/lib/list-test.c
+++ b/lib/tests/list-test.c
diff --git a/lib/memcpy_kunit.c b/lib/tests/memcpy_kunit.c
index d36933554e46..d36933554e46 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/tests/memcpy_kunit.c
diff --git a/lib/overflow_kunit.c b/lib/tests/overflow_kunit.c
index 5222c6393f11..894691b4411a 100644
--- a/lib/overflow_kunit.c
+++ b/lib/tests/overflow_kunit.c
@@ -1185,22 +1185,40 @@ struct bar {
static void DEFINE_FLEX_test(struct kunit *test)
{
- /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
- DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
-#ifdef CONFIG_CC_HAS_COUNTED_BY
- int expected_raw_size = sizeof(struct foo);
-#else
- int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16);
-#endif
- /* Without annotation, it will always be on-stack size. */
DEFINE_RAW_FLEX(struct bar, two, array, 2);
DEFINE_FLEX(struct foo, eight, array, counter, 8);
DEFINE_FLEX(struct foo, empty, array, counter, 0);
+ /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
+ DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
+ int array_size_override = 0;
- KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), expected_raw_size);
+ KUNIT_EXPECT_EQ(test, sizeof(*two), sizeof(struct bar));
KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16));
- KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
+ KUNIT_EXPECT_EQ(test, __member_size(two), sizeof(struct bar) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two->array), 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(two->array), 2 * sizeof(s16));
+
+ KUNIT_EXPECT_EQ(test, sizeof(*eight), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(eight), sizeof(struct foo) + 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(eight), sizeof(struct foo) + 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(eight->array), 8 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(eight->array), 8 * sizeof(s16));
+
+ KUNIT_EXPECT_EQ(test, sizeof(*empty), sizeof(struct foo));
KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __member_size(empty), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(empty->array), 0);
+ KUNIT_EXPECT_EQ(test, __member_size(empty->array), 0);
+
+	/* Without __counted_by, the array size falls back to the on-stack size. */
+ if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY))
+ array_size_override = 2 * sizeof(s16);
+
+ KUNIT_EXPECT_EQ(test, sizeof(*two_but_zero), sizeof(struct foo));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), sizeof(struct foo) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __member_size(two_but_zero), sizeof(struct foo) + 2 * sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero->array), array_size_override);
+ KUNIT_EXPECT_EQ(test, __member_size(two_but_zero->array), array_size_override);
}
static struct kunit_case overflow_test_cases[] = {
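
Before the next file, a sketch of what DEFINE_FLEX() provides, assuming a struct shape like the file's struct foo (a flexible s16 array annotated with __counted_by); names here are illustrative:

    #include <linux/overflow.h>

    struct foo {
    	int a;
    	u32 counter;
    	s16 array[] __counted_by(counter);
    };

    static void demo(void)
    {
    	/* On-stack storage for the struct plus 8 trailing elements;
    	 * "counter" is initialized to 8 for the compiler's benefit. */
    	DEFINE_FLEX(struct foo, p, array, counter, 8);

    	p->array[p->counter - 1] = 0;	/* in bounds: index 7 of 8 */

    	/* With __counted_by support, bounds checks on p->array resolve to
    	 * 8 * sizeof(s16); without it, only the on-stack declaration size
    	 * is known, as the test's array_size_override branch reflects. */
    }
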
diff --git a/lib/test_printf.c b/lib/tests/printf_kunit.c
index 59dbe4f9a4cb..2c9f6170bacd 100644
--- a/lib/test_printf.c
+++ b/lib/tests/printf_kunit.c
@@ -3,9 +3,7 @@
* Test cases for printf facility.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
+#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/printk.h>
@@ -25,8 +23,6 @@
#include <linux/property.h>
-#include "../tools/testing/selftests/kselftest_module.h"
-
#define BUF_SIZE 256
#define PAD_SIZE 16
#define FILL_CHAR '$'
@@ -37,14 +33,14 @@
block \
__diag_pop();
-KSTM_MODULE_GLOBALS();
+static unsigned int total_tests;
-static char *test_buffer __initdata;
-static char *alloced_buffer __initdata;
+static char *test_buffer;
+static char *alloced_buffer;
-static int __printf(4, 0) __init
-do_test(int bufsize, const char *expect, int elen,
- const char *fmt, va_list ap)
+static void __printf(7, 0)
+do_test(struct kunit *kunittest, const char *file, const int line, int bufsize, const char *expect,
+ int elen, const char *fmt, va_list ap)
{
va_list aq;
int ret, written;
@@ -57,62 +53,70 @@ do_test(int bufsize, const char *expect, int elen,
va_end(aq);
if (ret != elen) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
- bufsize, fmt, ret, elen);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
+ file, line, bufsize, fmt, ret, elen);
+ return;
}
if (memchr_inv(alloced_buffer, FILL_CHAR, PAD_SIZE)) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n", bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n",
+ file, line, bufsize, fmt);
+ return;
}
if (!bufsize) {
if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE + PAD_SIZE)) {
- pr_warn("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
- fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
+ file, line, fmt);
}
- return 0;
+ return;
}
written = min(bufsize-1, elen);
if (test_buffer[written]) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
- bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
+ file, line, bufsize, fmt);
+ return;
}
if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
- bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
+ file, line, bufsize, fmt);
+ return;
}
if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n",
+ file, line, bufsize, fmt);
+ return;
}
if (memcmp(test_buffer, expect, written)) {
- pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
- bufsize, fmt, test_buffer, written, expect);
- return 1;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
+ file, line, bufsize, fmt, test_buffer, written, expect);
+ return;
}
- return 0;
}
-static void __printf(3, 4) __init
-__test(const char *expect, int elen, const char *fmt, ...)
+static void __printf(6, 7)
+__test(struct kunit *kunittest, const char *file, const int line, const char *expect, int elen,
+ const char *fmt, ...)
{
va_list ap;
int rand;
char *p;
if (elen >= BUF_SIZE) {
- pr_err("error in test suite: expected output length %d too long. Format was '%s'.\n",
- elen, fmt);
- failed_tests++;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: error in test suite: expected length (%d) >= BUF_SIZE (%d). fmt=\"%s\"\n",
+ file, line, elen, BUF_SIZE, fmt);
return;
}
@@ -124,19 +128,19 @@ __test(const char *expect, int elen, const char *fmt, ...)
* enough and 0), and then we also test that kvasprintf would
* be able to print it as expected.
*/
- failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, BUF_SIZE, expect, elen, fmt, ap);
rand = get_random_u32_inclusive(1, elen + 1);
/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
- failed_tests += do_test(rand, expect, elen, fmt, ap);
- failed_tests += do_test(0, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, rand, expect, elen, fmt, ap);
+ do_test(kunittest, file, line, 0, expect, elen, fmt, ap);
p = kvasprintf(GFP_KERNEL, fmt, ap);
if (p) {
total_tests++;
if (memcmp(p, expect, elen+1)) {
- pr_warn("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
- fmt, p, expect);
- failed_tests++;
+ KUNIT_FAIL(kunittest,
+ "%s:%d: kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
+ file, line, fmt, p, expect);
}
kfree(p);
}
@@ -144,10 +148,10 @@ __test(const char *expect, int elen, const char *fmt, ...)
}
#define test(expect, fmt, ...) \
- __test(expect, strlen(expect), fmt, ##__VA_ARGS__)
+ __test(kunittest, __FILE__, __LINE__, expect, strlen(expect), fmt, ##__VA_ARGS__)
-static void __init
-test_basic(void)
+static void
+test_basic(struct kunit *kunittest)
{
/* Work around annoying "warning: zero-length gnu_printf format string". */
char nul = '\0';
@@ -155,11 +159,11 @@ test_basic(void)
test("", &nul);
test("100%", "100%%");
test("xxx%yyy", "xxx%cyyy", '%');
- __test("xxx\0yyy", 7, "xxx%cyyy", '\0');
+ __test(kunittest, __FILE__, __LINE__, "xxx\0yyy", 7, "xxx%cyyy", '\0');
}
-static void __init
-test_number(void)
+static void
+test_number(struct kunit *kunittest)
{
test("0x1234abcd ", "%#-12x", 0x1234abcd);
test(" 0x1234abcd", "%#12x", 0x1234abcd);
@@ -180,8 +184,8 @@ test_number(void)
test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0);
}
-static void __init
-test_string(void)
+static void
+test_string(struct kunit *kunittest)
{
test("", "%s%.0s", "", "123");
test("ABCD|abc|123", "%s|%.3s|%.*s", "ABCD", "abcdef", 3, "123456");
@@ -218,29 +222,6 @@ test_string(void)
#define ZEROS "00000000" /* hex 32 zero bits */
#define ONES "ffffffff" /* hex 32 one bits */
-static int __init
-plain_format(void)
-{
- char buf[PLAIN_BUF_SIZE];
- int nchars;
-
- nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
-
- if (nchars != PTR_WIDTH)
- return -1;
-
- if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
- pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
- PTR_VAL_NO_CRNG);
- return 0;
- }
-
- if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
- return -1;
-
- return 0;
-}
-
#else
#define PTR_WIDTH 8
@@ -250,92 +231,47 @@ plain_format(void)
#define ZEROS ""
#define ONES ""
-static int __init
-plain_format(void)
-{
- /* Format is implicitly tested for 32 bit machines by plain_hash() */
- return 0;
-}
-
#endif /* BITS_PER_LONG == 64 */
-static int __init
-plain_hash_to_buffer(const void *p, char *buf, size_t len)
+static void
+plain_hash_to_buffer(struct kunit *kunittest, const void *p, char *buf, size_t len)
{
- int nchars;
-
- nchars = snprintf(buf, len, "%p", p);
-
- if (nchars != PTR_WIDTH)
- return -1;
+ KUNIT_ASSERT_EQ(kunittest, snprintf(buf, len, "%p", p), PTR_WIDTH);
if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
- pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
- PTR_VAL_NO_CRNG);
- return 0;
+ kunit_skip(kunittest,
+ "crng possibly not yet initialized. plain 'p' buffer contains \"%s\"\n",
+ PTR_VAL_NO_CRNG);
}
-
- return 0;
}
-static int __init
-plain_hash(void)
+static void
+hash_pointer(struct kunit *kunittest)
{
- char buf[PLAIN_BUF_SIZE];
- int ret;
-
- ret = plain_hash_to_buffer(PTR, buf, PLAIN_BUF_SIZE);
- if (ret)
- return ret;
-
- if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
- return -1;
-
- return 0;
-}
+ if (no_hash_pointers)
+ kunit_skip(kunittest, "hash pointers disabled");
-/*
- * We can't use test() to test %p because we don't know what output to expect
- * after an address is hashed.
- */
-static void __init
-plain(void)
-{
- int err;
+ char buf[PLAIN_BUF_SIZE];
- if (no_hash_pointers) {
- pr_warn("skipping plain 'p' tests");
- skipped_tests += 2;
- return;
- }
+ plain_hash_to_buffer(kunittest, PTR, buf, PLAIN_BUF_SIZE);
- err = plain_hash();
- if (err) {
- pr_warn("plain 'p' does not appear to be hashed\n");
- failed_tests++;
- return;
- }
+ /*
+	 * The hash of %p is unpredictable, so test() cannot be used.
+	 *
+	 * Instead, verify that the first 32 bits are zeros on a 64-bit system
+ * and that the non-hashed value is not printed.
+ */
- err = plain_format();
- if (err) {
- pr_warn("hashing plain 'p' has unexpected format\n");
- failed_tests++;
- }
+ KUNIT_EXPECT_MEMEQ(kunittest, buf, ZEROS, strlen(ZEROS));
+ KUNIT_EXPECT_MEMNEQ(kunittest, buf, PTR_STR, PTR_WIDTH);
}
-static void __init
-test_hashed(const char *fmt, const void *p)
+static void
+test_hashed(struct kunit *kunittest, const char *fmt, const void *p)
{
char buf[PLAIN_BUF_SIZE];
- int ret;
- /*
- * No need to increase failed test counter since this is assumed
- * to be called after plain().
- */
- ret = plain_hash_to_buffer(p, buf, PLAIN_BUF_SIZE);
- if (ret)
- return;
+ plain_hash_to_buffer(kunittest, p, buf, PLAIN_BUF_SIZE);
test(buf, fmt, p);
}
@@ -343,8 +279,8 @@ test_hashed(const char *fmt, const void *p)
/*
* NULL pointers aren't hashed.
*/
-static void __init
-null_pointer(void)
+static void
+null_pointer(struct kunit *kunittest)
{
test(ZEROS "00000000", "%p", NULL);
test(ZEROS "00000000", "%px", NULL);
@@ -354,8 +290,8 @@ null_pointer(void)
/*
* Error pointers aren't hashed.
*/
-static void __init
-error_pointer(void)
+static void
+error_pointer(struct kunit *kunittest)
{
test(ONES "fffffff5", "%p", ERR_PTR(-11));
test(ONES "fffffff5", "%px", ERR_PTR(-11));
@@ -364,27 +300,27 @@ error_pointer(void)
#define PTR_INVALID ((void *)0x000000ab)
-static void __init
-invalid_pointer(void)
+static void
+invalid_pointer(struct kunit *kunittest)
{
- test_hashed("%p", PTR_INVALID);
+ test_hashed(kunittest, "%p", PTR_INVALID);
test(ZEROS "000000ab", "%px", PTR_INVALID);
test("(efault)", "%pE", PTR_INVALID);
}
-static void __init
-symbol_ptr(void)
+static void
+symbol_ptr(struct kunit *kunittest)
{
}
-static void __init
-kernel_ptr(void)
+static void
+kernel_ptr(struct kunit *kunittest)
{
/* We can't test this without access to kptr_restrict. */
}
-static void __init
-struct_resource(void)
+static void
+struct_resource(struct kunit *kunittest)
{
struct resource test_resource = {
.start = 0xc0ffee00,
@@ -432,8 +368,8 @@ struct_resource(void)
"%pR", &test_resource);
}
-static void __init
-struct_range(void)
+static void
+struct_range(struct kunit *kunittest)
{
struct range test_range = DEFINE_RANGE(0xc0ffee00ba5eba11,
0xc0ffee00ba5eba11);
@@ -448,18 +384,18 @@ struct_range(void)
"%pra", &test_range);
}
-static void __init
-addr(void)
+static void
+addr(struct kunit *kunittest)
{
}
-static void __init
-escaped_str(void)
+static void
+escaped_str(struct kunit *kunittest)
{
}
-static void __init
-hex_string(void)
+static void
+hex_string(struct kunit *kunittest)
{
const char buf[3] = {0xc0, 0xff, 0xee};
@@ -469,8 +405,8 @@ hex_string(void)
"%*ph|%*phC|%*phD|%*phN", 3, buf, 3, buf, 3, buf, 3, buf);
}
-static void __init
-mac(void)
+static void
+mac(struct kunit *kunittest)
{
const u8 addr[6] = {0x2d, 0x48, 0xd6, 0xfc, 0x7a, 0x05};
@@ -481,8 +417,8 @@ mac(void)
test("057afcd6482d", "%pmR", addr);
}
-static void __init
-ip4(void)
+static void
+ip4(struct kunit *kunittest)
{
struct sockaddr_in sa;
@@ -496,20 +432,13 @@ ip4(void)
test("001.002.003.004:12345|1.2.3.4:12345", "%piSp|%pISp", &sa, &sa);
}
-static void __init
-ip6(void)
+static void
+ip6(struct kunit *kunittest)
{
}
-static void __init
-ip(void)
-{
- ip4();
- ip6();
-}
-
-static void __init
-uuid(void)
+static void
+uuid(struct kunit *kunittest)
{
const char uuid[16] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
@@ -520,7 +449,7 @@ uuid(void)
test("03020100-0504-0706-0809-0A0B0C0D0E0F", "%pUL", uuid);
}
-static struct dentry test_dentry[4] __initdata = {
+static struct dentry test_dentry[4] = {
{ .d_parent = &test_dentry[0],
.d_name = QSTR_INIT(test_dentry[0].d_iname, 3),
.d_iname = "foo" },
@@ -535,8 +464,8 @@ static struct dentry test_dentry[4] __initdata = {
.d_iname = "romeo" },
};
-static void __init
-dentry(void)
+static void
+dentry(struct kunit *kunittest)
{
test("foo", "%pd", &test_dentry[0]);
test("foo", "%pd2", &test_dentry[0]);
@@ -556,13 +485,13 @@ dentry(void)
test(" bravo/alfa| bravo/alfa", "%12pd2|%*pd2", &test_dentry[2], 12, &test_dentry[2]);
}
-static void __init
-struct_va_format(void)
+static void
+struct_va_format(struct kunit *kunittest)
{
}
-static void __init
-time_and_date(void)
+static void
+time_and_date(struct kunit *kunittest)
{
/* 1543210543 */
const struct rtc_time tm = {
@@ -595,13 +524,13 @@ time_and_date(void)
test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
}
-static void __init
-struct_clk(void)
+static void
+struct_clk(struct kunit *kunittest)
{
}
-static void __init
-large_bitmap(void)
+static void
+large_bitmap(struct kunit *kunittest)
{
const int nbits = 1 << 16;
unsigned long *bits = bitmap_zalloc(nbits, GFP_KERNEL);
@@ -614,8 +543,8 @@ large_bitmap(void)
bitmap_free(bits);
}
-static void __init
-bitmap(void)
+static void
+bitmap(struct kunit *kunittest)
{
DECLARE_BITMAP(bits, 20);
const int primes[] = {2,3,5,7,11,13,17,19};
@@ -634,11 +563,11 @@ bitmap(void)
test("fffff|fffff", "%20pb|%*pb", bits, 20, bits);
test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits);
- large_bitmap();
+ large_bitmap(kunittest);
}
-static void __init
-netdev_features(void)
+static void
+netdev_features(struct kunit *kunittest)
{
}
@@ -663,9 +592,9 @@ static const struct page_flags_test pft[] = {
"%#x", "kasantag"},
};
-static void __init
-page_flags_test(int section, int node, int zone, int last_cpupid,
- int kasan_tag, unsigned long flags, const char *name,
+static void
+page_flags_test(struct kunit *kunittest, int section, int node, int zone,
+ int last_cpupid, int kasan_tag, unsigned long flags, const char *name,
char *cmp_buf)
{
unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
@@ -701,26 +630,25 @@ page_flags_test(int section, int node, int zone, int last_cpupid,
test(cmp_buf, "%pGp", &flags);
}
-static void __init
-flags(void)
+static void
+flags(struct kunit *kunittest)
{
unsigned long flags;
char *cmp_buffer;
gfp_t gfp;
- cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
- if (!cmp_buffer)
- return;
+ cmp_buffer = kunit_kmalloc(kunittest, BUF_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(kunittest, cmp_buffer);
flags = 0;
- page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
+ page_flags_test(kunittest, 0, 0, 0, 0, 0, flags, "", cmp_buffer);
flags = 1UL << NR_PAGEFLAGS;
- page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
+ page_flags_test(kunittest, 0, 0, 0, 0, 0, flags, "", cmp_buffer);
flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru
| 1UL << PG_active | 1UL << PG_swapbacked;
- page_flags_test(1, 1, 1, 0x1fffff, 1, flags,
+ page_flags_test(kunittest, 1, 1, 1, 0x1fffff, 1, flags,
"uptodate|dirty|lru|active|swapbacked",
cmp_buffer);
@@ -745,11 +673,9 @@ flags(void)
(unsigned long) gfp);
gfp |= __GFP_HIGH;
test(cmp_buffer, "%pGg", &gfp);
-
- kfree(cmp_buffer);
}
-static void __init fwnode_pointer(void)
+static void fwnode_pointer(struct kunit *kunittest)
{
const struct software_node first = { .name = "first" };
const struct software_node second = { .name = "second", .parent = &first };
@@ -763,8 +689,7 @@ static void __init fwnode_pointer(void)
rval = software_node_register_node_group(group);
if (rval) {
- pr_warn("cannot register softnodes; rval %d\n", rval);
- return;
+ kunit_skip(kunittest, "cannot register softnodes; rval %d\n", rval);
}
test(full_name_second, "%pfw", software_node_fwnode(&second));
@@ -776,7 +701,7 @@ static void __init fwnode_pointer(void)
software_node_unregister_node_group(group);
}
-static void __init fourcc_pointer(void)
+static void fourcc_pointer(struct kunit *kunittest)
{
struct {
u32 code;
@@ -793,14 +718,14 @@ static void __init fourcc_pointer(void)
test(try[i].str, "%p4cc", &try[i].code);
}
-static void __init
-errptr(void)
+static void
+errptr(struct kunit *kunittest)
{
test("-1234", "%pe", ERR_PTR(-1234));
/* Check that %pe with a non-ERR_PTR gets treated as ordinary %p. */
BUILD_BUG_ON(IS_ERR(PTR));
- test_hashed("%pe", PTR);
+ test_hashed(kunittest, "%pe", PTR);
#ifdef CONFIG_SYMBOLIC_ERRNAME
test("(-ENOTSOCK)", "(%pe)", ERR_PTR(-ENOTSOCK));
@@ -813,51 +738,66 @@ errptr(void)
#endif
}
-static void __init
-test_pointer(void)
-{
- plain();
- null_pointer();
- error_pointer();
- invalid_pointer();
- symbol_ptr();
- kernel_ptr();
- struct_resource();
- struct_range();
- addr();
- escaped_str();
- hex_string();
- mac();
- ip();
- uuid();
- dentry();
- struct_va_format();
- time_and_date();
- struct_clk();
- bitmap();
- netdev_features();
- flags();
- errptr();
- fwnode_pointer();
- fourcc_pointer();
-}
-
-static void __init selftest(void)
+static int printf_suite_init(struct kunit_suite *suite)
{
+ total_tests = 0;
+
alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL);
if (!alloced_buffer)
- return;
+ return -ENOMEM;
test_buffer = alloced_buffer + PAD_SIZE;
- test_basic();
- test_number();
- test_string();
- test_pointer();
+ return 0;
+}
+static void printf_suite_exit(struct kunit_suite *suite)
+{
kfree(alloced_buffer);
-}
-KSTM_MODULE_LOADERS(test_printf);
+ kunit_info(suite, "ran %u tests\n", total_tests);
+}
+
+static struct kunit_case printf_test_cases[] = {
+ KUNIT_CASE(test_basic),
+ KUNIT_CASE(test_number),
+ KUNIT_CASE(test_string),
+ KUNIT_CASE(hash_pointer),
+ KUNIT_CASE(null_pointer),
+ KUNIT_CASE(error_pointer),
+ KUNIT_CASE(invalid_pointer),
+ KUNIT_CASE(symbol_ptr),
+ KUNIT_CASE(kernel_ptr),
+ KUNIT_CASE(struct_resource),
+ KUNIT_CASE(struct_range),
+ KUNIT_CASE(addr),
+ KUNIT_CASE(escaped_str),
+ KUNIT_CASE(hex_string),
+ KUNIT_CASE(mac),
+ KUNIT_CASE(ip4),
+ KUNIT_CASE(ip6),
+ KUNIT_CASE(uuid),
+ KUNIT_CASE(dentry),
+ KUNIT_CASE(struct_va_format),
+ KUNIT_CASE(time_and_date),
+ KUNIT_CASE(struct_clk),
+ KUNIT_CASE(bitmap),
+ KUNIT_CASE(netdev_features),
+ KUNIT_CASE(flags),
+ KUNIT_CASE(errptr),
+ KUNIT_CASE(fwnode_pointer),
+ KUNIT_CASE(fourcc_pointer),
+ {}
+};
+
+static struct kunit_suite printf_test_suite = {
+ .name = "printf",
+ .suite_init = printf_suite_init,
+ .suite_exit = printf_suite_exit,
+ .test_cases = printf_test_cases,
+};
+
+kunit_test_suite(printf_test_suite);
+
MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
MODULE_DESCRIPTION("Test cases for printf facility");
MODULE_LICENSE("GPL");
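
The conversion above lands on the standard KUnit suite shape. For reference, a minimal self-contained skeleton of the same lifecycle (all names illustrative):

    #include <kunit/test.h>

    static int example_suite_init(struct kunit_suite *suite)
    {
    	return 0;	/* set up shared state; non-zero aborts the suite */
    }

    static void example_suite_exit(struct kunit_suite *suite)
    {
    	/* release anything allocated in suite_init */
    }

    static void example_case(struct kunit *test)
    {
    	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
    }

    static struct kunit_case example_cases[] = {
    	KUNIT_CASE(example_case),
    	{}
    };

    static struct kunit_suite example_suite = {
    	.name = "example",
    	.suite_init = example_suite_init,
    	.suite_exit = example_suite_exit,
    	.test_cases = example_cases,
    };
    kunit_test_suite(example_suite);
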
diff --git a/lib/test_scanf.c b/lib/tests/scanf_kunit.c
index 44f8508c9d88..e9a36ed80575 100644
--- a/lib/test_scanf.c
+++ b/lib/tests/scanf_kunit.c
@@ -3,152 +3,138 @@
* Test cases for sscanf facility.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
+#include <kunit/test.h>
#include <linux/bitops.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
-#include <linux/printk.h>
#include <linux/prandom.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include "../tools/testing/selftests/kselftest_module.h"
-
#define BUF_SIZE 1024
-KSTM_MODULE_GLOBALS();
-static char *test_buffer __initdata;
-static char *fmt_buffer __initdata;
-static struct rnd_state rnd_state __initdata;
+static char *test_buffer;
+static char *fmt_buffer;
+static struct rnd_state rnd_state;
-typedef int (*check_fn)(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap);
+typedef void (*check_fn)(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap);
-static void __scanf(4, 6) __init
-_test(check_fn fn, const void *check_data, const char *string, const char *fmt,
- int n_args, ...)
+static void __scanf(7, 9)
+_test(struct kunit *test, const char *file, const int line, check_fn fn, const void *check_data,
+ const char *string, const char *fmt, int n_args, ...)
{
va_list ap, ap_copy;
int ret;
- total_tests++;
-
va_start(ap, n_args);
va_copy(ap_copy, ap);
ret = vsscanf(string, fmt, ap_copy);
va_end(ap_copy);
if (ret != n_args) {
- pr_warn("vsscanf(\"%s\", \"%s\", ...) returned %d expected %d\n",
- string, fmt, ret, n_args);
- goto fail;
+ KUNIT_FAIL(test, "%s:%d: vsscanf(\"%s\", \"%s\", ...) returned %d expected %d",
+ file, line, string, fmt, ret, n_args);
+ } else {
+ (*fn)(test, file, line, check_data, string, fmt, n_args, ap);
}
- ret = (*fn)(check_data, string, fmt, n_args, ap);
- if (ret)
- goto fail;
-
- va_end(ap);
-
- return;
-
-fail:
- failed_tests++;
va_end(ap);
}
#define _check_numbers_template(arg_fmt, expect, str, fmt, n_args, ap) \
do { \
- pr_debug("\"%s\", \"%s\" ->\n", str, fmt); \
for (; n_args > 0; n_args--, expect++) { \
typeof(*expect) got = *va_arg(ap, typeof(expect)); \
- pr_debug("\t" arg_fmt "\n", got); \
if (got != *expect) { \
- pr_warn("vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt "\n", \
- str, fmt, *expect, got); \
- return 1; \
+ KUNIT_FAIL(test, \
+ "%s:%d: vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt, \
+ file, line, str, fmt, *expect, got); \
+ return; \
} \
} \
- return 0; \
} while (0)
-static int __init check_ull(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ull(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const unsigned long long *pval = check_data;
_check_numbers_template("%llu", pval, string, fmt, n_args, ap);
}
-static int __init check_ll(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ll(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const long long *pval = check_data;
_check_numbers_template("%lld", pval, string, fmt, n_args, ap);
}
-static int __init check_ulong(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ulong(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const unsigned long *pval = check_data;
_check_numbers_template("%lu", pval, string, fmt, n_args, ap);
}
-static int __init check_long(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_long(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const long *pval = check_data;
_check_numbers_template("%ld", pval, string, fmt, n_args, ap);
}
-static int __init check_uint(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_uint(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const unsigned int *pval = check_data;
_check_numbers_template("%u", pval, string, fmt, n_args, ap);
}
-static int __init check_int(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_int(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const int *pval = check_data;
_check_numbers_template("%d", pval, string, fmt, n_args, ap);
}
-static int __init check_ushort(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_ushort(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const unsigned short *pval = check_data;
_check_numbers_template("%hu", pval, string, fmt, n_args, ap);
}
-static int __init check_short(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_short(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const short *pval = check_data;
_check_numbers_template("%hd", pval, string, fmt, n_args, ap);
}
-static int __init check_uchar(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_uchar(struct kunit *test, const char *file, const int line,
+ const void *check_data, const char *string, const char *fmt, int n_args,
+ va_list ap)
{
const unsigned char *pval = check_data;
_check_numbers_template("%hhu", pval, string, fmt, n_args, ap);
}
-static int __init check_char(const void *check_data, const char *string,
- const char *fmt, int n_args, va_list ap)
+static void check_char(struct kunit *test, const char *file, const int line, const void *check_data,
+ const char *string, const char *fmt, int n_args, va_list ap)
{
const signed char *pval = check_data;
@@ -156,7 +142,7 @@ static int __init check_char(const void *check_data, const char *string,
}
/* Selection of interesting numbers to test, copied from test-kstrtox.c */
-static const unsigned long long numbers[] __initconst = {
+static const unsigned long long numbers[] = {
0x0ULL,
0x1ULL,
0x7fULL,
@@ -196,7 +182,7 @@ do { \
T result = ~expect_val; /* should be overwritten */ \
\
snprintf(test_buffer, BUF_SIZE, gen_fmt, expect_val); \
- _test(fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result); \
+ _test(test, __FILE__, __LINE__, fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result);\
} while (0)
#define simple_numbers_loop(T, gen_fmt, scan_fmt, fn) \
@@ -214,7 +200,7 @@ do { \
} \
} while (0)
-static void __init numbers_simple(void)
+static void numbers_simple(struct kunit *test)
{
simple_numbers_loop(unsigned long long, "%llu", "llu", check_ull);
simple_numbers_loop(long long, "%lld", "lld", check_ll);
@@ -267,14 +253,14 @@ static void __init numbers_simple(void)
* the raw prandom*() functions (Not mathematically rigorous!!).
 * Variability of length and value is more important than perfect randomness.
*/
-static u32 __init next_test_random(u32 max_bits)
+static u32 next_test_random(u32 max_bits)
{
u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
}
-static unsigned long long __init next_test_random_ull(void)
+static unsigned long long next_test_random_ull(void)
{
u32 rand1 = prandom_u32_state(&rnd_state);
u32 n_bits = (hweight32(rand1) * 3) % 64;
@@ -311,7 +297,7 @@ do { \
* updating buf_pos and returning the number of characters appended.
* On error buf_pos is not changed and return value is 0.
*/
-static int __init __printf(4, 5)
+static int __printf(4, 5)
append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
{
va_list ap;
@@ -333,7 +319,7 @@ append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
* Convenience function to append the field delimiter string
* to both the value string and format string buffers.
*/
-static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
+static void append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
char *fmt_buf, int *fmt_buf_pos, int fmt_buf_len,
const char *delim_str)
{
@@ -344,7 +330,7 @@ static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len
#define test_array_8(fn, check_data, string, fmt, arr) \
do { \
BUILD_BUG_ON(ARRAY_SIZE(arr) != 8); \
- _test(fn, check_data, string, fmt, 8, \
+ _test(test, __FILE__, __LINE__, fn, check_data, string, fmt, 8, \
&(arr)[0], &(arr)[1], &(arr)[2], &(arr)[3], \
&(arr)[4], &(arr)[5], &(arr)[6], &(arr)[7]); \
} while (0)
@@ -398,7 +384,7 @@ do { \
test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
} while (0)
-static void __init numbers_list_ll(const char *delim)
+static void numbers_list_ll(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_8(long long, "%lld", delim, "lld", check_ll);
@@ -408,7 +394,7 @@ static void __init numbers_list_ll(const char *delim)
numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
}
-static void __init numbers_list_l(const char *delim)
+static void numbers_list_l(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_8(long, "%ld", delim, "ld", check_long);
@@ -418,7 +404,7 @@ static void __init numbers_list_l(const char *delim)
numbers_list_8(long, "0x%lx", delim, "li", check_long);
}
-static void __init numbers_list_d(const char *delim)
+static void numbers_list_d(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
numbers_list_8(int, "%d", delim, "d", check_int);
@@ -428,7 +414,7 @@ static void __init numbers_list_d(const char *delim)
numbers_list_8(int, "0x%x", delim, "i", check_int);
}
-static void __init numbers_list_h(const char *delim)
+static void numbers_list_h(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_8(short, "%hd", delim, "hd", check_short);
@@ -438,7 +424,7 @@ static void __init numbers_list_h(const char *delim)
numbers_list_8(short, "0x%hx", delim, "hi", check_short);
}
-static void __init numbers_list_hh(const char *delim)
+static void numbers_list_hh(struct kunit *test, const char *delim)
{
numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
@@ -448,16 +434,19 @@ static void __init numbers_list_hh(const char *delim)
numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
}
-static void __init numbers_list(const char *delim)
+static void numbers_list(struct kunit *test)
{
- numbers_list_ll(delim);
- numbers_list_l(delim);
- numbers_list_d(delim);
- numbers_list_h(delim);
- numbers_list_hh(delim);
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_ll(test, delim);
+ numbers_list_l(test, delim);
+ numbers_list_d(test, delim);
+ numbers_list_h(test, delim);
+ numbers_list_hh(test, delim);
}
-static void __init numbers_list_field_width_ll(const char *delim)
+static void numbers_list_field_width_ll(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
@@ -467,7 +456,7 @@ static void __init numbers_list_field_width_ll(const char *delim)
numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
}
-static void __init numbers_list_field_width_l(const char *delim)
+static void numbers_list_field_width_l(struct kunit *test, const char *delim)
{
#if BITS_PER_LONG == 64
numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
@@ -486,7 +475,7 @@ static void __init numbers_list_field_width_l(const char *delim)
#endif
}
-static void __init numbers_list_field_width_d(const char *delim)
+static void numbers_list_field_width_d(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
@@ -496,7 +485,7 @@ static void __init numbers_list_field_width_d(const char *delim)
numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
}
-static void __init numbers_list_field_width_h(const char *delim)
+static void numbers_list_field_width_h(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
@@ -506,7 +495,7 @@ static void __init numbers_list_field_width_h(const char *delim)
numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
}
-static void __init numbers_list_field_width_hh(const char *delim)
+static void numbers_list_field_width_hh(struct kunit *test, const char *delim)
{
numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
@@ -520,16 +509,19 @@ static void __init numbers_list_field_width_hh(const char *delim)
* List of numbers separated by delim. Each field width specifier is the
* maximum possible digits for the given type and base.
*/
-static void __init numbers_list_field_width_typemax(const char *delim)
+static void numbers_list_field_width_typemax(struct kunit *test)
{
- numbers_list_field_width_ll(delim);
- numbers_list_field_width_l(delim);
- numbers_list_field_width_d(delim);
- numbers_list_field_width_h(delim);
- numbers_list_field_width_hh(delim);
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_field_width_ll(test, delim);
+ numbers_list_field_width_l(test, delim);
+ numbers_list_field_width_d(test, delim);
+ numbers_list_field_width_h(test, delim);
+ numbers_list_field_width_hh(test, delim);
}
-static void __init numbers_list_field_width_val_ll(const char *delim)
+static void numbers_list_field_width_val_ll(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
@@ -539,7 +531,7 @@ static void __init numbers_list_field_width_val_ll(const char *delim)
numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
}
-static void __init numbers_list_field_width_val_l(const char *delim)
+static void numbers_list_field_width_val_l(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_val_width(long, "%ld", delim, "ld", check_long);
@@ -549,7 +541,7 @@ static void __init numbers_list_field_width_val_l(const char *delim)
numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
}
-static void __init numbers_list_field_width_val_d(const char *delim)
+static void numbers_list_field_width_val_d(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
numbers_list_val_width(int, "%d", delim, "d", check_int);
@@ -559,7 +551,7 @@ static void __init numbers_list_field_width_val_d(const char *delim)
numbers_list_val_width(int, "0x%x", delim, "i", check_int);
}
-static void __init numbers_list_field_width_val_h(const char *delim)
+static void numbers_list_field_width_val_h(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_val_width(short, "%hd", delim, "hd", check_short);
@@ -569,7 +561,7 @@ static void __init numbers_list_field_width_val_h(const char *delim)
numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
}
-static void __init numbers_list_field_width_val_hh(const char *delim)
+static void numbers_list_field_width_val_hh(struct kunit *test, const char *delim)
{
numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
@@ -583,13 +575,16 @@ static void __init numbers_list_field_width_val_hh(const char *delim)
* List of numbers separated by delim. Each field width specifier is the
* exact length of the corresponding value digits in the string being scanned.
*/
-static void __init numbers_list_field_width_val_width(const char *delim)
+static void numbers_list_field_width_val_width(struct kunit *test)
{
- numbers_list_field_width_val_ll(delim);
- numbers_list_field_width_val_l(delim);
- numbers_list_field_width_val_d(delim);
- numbers_list_field_width_val_h(delim);
- numbers_list_field_width_val_hh(delim);
+ const char * const *param = test->param_value;
+ const char *delim = *param;
+
+ numbers_list_field_width_val_ll(test, delim);
+ numbers_list_field_width_val_l(test, delim);
+ numbers_list_field_width_val_d(test, delim);
+ numbers_list_field_width_val_h(test, delim);
+ numbers_list_field_width_val_hh(test, delim);
}
/*
@@ -598,9 +593,14 @@ static void __init numbers_list_field_width_val_width(const char *delim)
* of digits. For example the hex values c0,3,bf01,303 would have a
* string representation of "c03bf01303" and extracted with "%2x%1x%4x%3x".
*/
-static void __init numbers_slice(void)
+static void numbers_slice(struct kunit *test)
{
- numbers_list_field_width_val_width("");
+ const char *delim = "";
+
+ KUNIT_ASSERT_PTR_EQ(test, test->param_value, NULL);
+ test->param_value = &delim;
+
+ numbers_list_field_width_val_width(test);
}
#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
@@ -608,14 +608,14 @@ do { \
const T expect[2] = { expect0, expect1 }; \
T result[2] = { (T)~expect[0], (T)~expect[1] }; \
\
- _test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
+ _test(test, __FILE__, __LINE__, fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]);\
} while (0)
/*
* Number prefix is >= field width.
* Expected behaviour is derived from testing userland sscanf.
*/
-static void __init numbers_prefix_overflow(void)
+static void numbers_prefix_overflow(struct kunit *test)
{
/*
* Negative decimal with a field of width 1, should quit scanning
@@ -684,25 +684,17 @@ do { \
T got; \
char *endp; \
int len; \
- bool fail = false; \
\
- total_tests++; \
len = snprintf(test_buffer, BUF_SIZE, gen_fmt, expect); \
got = (fn)(test_buffer, &endp, base); \
- pr_debug(#fn "(\"%s\", %d) -> " gen_fmt "\n", test_buffer, base, got); \
if (got != (expect)) { \
- fail = true; \
- pr_warn(#fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt "\n", \
- test_buffer, base, got, expect); \
+ KUNIT_FAIL(test, #fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt, \
+ test_buffer, base, got, expect); \
} else if (endp != test_buffer + len) { \
- fail = true; \
- pr_warn(#fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px\n", \
- test_buffer, base, test_buffer, \
- test_buffer + len, endp); \
+ KUNIT_FAIL(test, #fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px", \
+ test_buffer, base, test_buffer, \
+ test_buffer + len, endp); \
} \
- \
- if (fail) \
- failed_tests++; \
} while (0)
#define test_simple_strtoxx(T, fn, gen_fmt, base) \
@@ -718,7 +710,7 @@ do { \
} \
} while (0)
-static void __init test_simple_strtoull(void)
+static void test_simple_strtoull(struct kunit *test)
{
test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 10);
test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 0);
@@ -727,7 +719,7 @@ static void __init test_simple_strtoull(void)
test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 0);
}
-static void __init test_simple_strtoll(void)
+static void test_simple_strtoll(struct kunit *test)
{
test_simple_strtoxx(long long, simple_strtoll, "%lld", 10);
test_simple_strtoxx(long long, simple_strtoll, "%lld", 0);
@@ -736,7 +728,7 @@ static void __init test_simple_strtoll(void)
test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 0);
}
-static void __init test_simple_strtoul(void)
+static void test_simple_strtoul(struct kunit *test)
{
test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 10);
test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 0);
@@ -745,7 +737,7 @@ static void __init test_simple_strtoul(void)
test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 0);
}
-static void __init test_simple_strtol(void)
+static void test_simple_strtol(struct kunit *test)
{
test_simple_strtoxx(long, simple_strtol, "%ld", 10);
test_simple_strtoxx(long, simple_strtol, "%ld", 0);
@@ -755,60 +747,69 @@ static void __init test_simple_strtol(void)
}
/* Selection of common delimiters/separators between numbers in a string. */
-static const char * const number_delimiters[] __initconst = {
+static const char * const number_delimiters[] = {
" ", ":", ",", "-", "/",
};
-static void __init test_numbers(void)
+static void number_delimiter_param_desc(const char * const *param,
+ char *desc)
{
- int i;
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "\"%s\"", *param);
+}
- /* String containing only one number. */
- numbers_simple();
+KUNIT_ARRAY_PARAM(number_delimiters, number_delimiters, number_delimiter_param_desc);
+static struct kunit_case scanf_test_cases[] = {
+ KUNIT_CASE(numbers_simple),
/* String with multiple numbers separated by delimiter. */
- for (i = 0; i < ARRAY_SIZE(number_delimiters); i++) {
- numbers_list(number_delimiters[i]);
-
- /* Field width may be longer than actual field digits. */
- numbers_list_field_width_typemax(number_delimiters[i]);
-
- /* Each field width exactly length of actual field digits. */
- numbers_list_field_width_val_width(number_delimiters[i]);
- }
-
+ KUNIT_CASE_PARAM(numbers_list, number_delimiters_gen_params),
+ /* Field width may be longer than actual field digits. */
+ KUNIT_CASE_PARAM(numbers_list_field_width_typemax, number_delimiters_gen_params),
+ /* Each field width exactly length of actual field digits. */
+ KUNIT_CASE_PARAM(numbers_list_field_width_val_width, number_delimiters_gen_params),
/* Slice continuous sequence of digits using field widths. */
- numbers_slice();
-
- numbers_prefix_overflow();
-}
+ KUNIT_CASE(numbers_slice),
+ KUNIT_CASE(numbers_prefix_overflow),
+
+ KUNIT_CASE(test_simple_strtoull),
+ KUNIT_CASE(test_simple_strtoll),
+ KUNIT_CASE(test_simple_strtoul),
+ KUNIT_CASE(test_simple_strtol),
+ {}
+};
-static void __init selftest(void)
+static int scanf_suite_init(struct kunit_suite *suite)
{
test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!test_buffer)
- return;
+ return -ENOMEM;
fmt_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!fmt_buffer) {
kfree(test_buffer);
- return;
+ return -ENOMEM;
}
prandom_seed_state(&rnd_state, 3141592653589793238ULL);
- test_numbers();
-
- test_simple_strtoull();
- test_simple_strtoll();
- test_simple_strtoul();
- test_simple_strtol();
+ return 0;
+}
+static void scanf_suite_exit(struct kunit_suite *suite)
+{
kfree(fmt_buffer);
kfree(test_buffer);
}
-KSTM_MODULE_LOADERS(test_scanf);
+static struct kunit_suite scanf_test_suite = {
+ .name = "scanf",
+ .suite_init = scanf_suite_init,
+ .suite_exit = scanf_suite_exit,
+ .test_cases = scanf_test_cases,
+};
+
+kunit_test_suite(scanf_test_suite);
+
MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
MODULE_DESCRIPTION("Test cases for sscanf facility");
MODULE_LICENSE("GPL v2");
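The conversion above follows the standard KUnit parameterization pattern: KUNIT_ARRAY_PARAM() generates a `<name>_gen_params` function from an array plus an optional description callback, KUNIT_CASE_PARAM() wires it into a case, and the test body reads the current element through test->param_value. A minimal self-contained sketch of that pattern (names are illustrative, not part of this patch):

    #include <kunit/test.h>

    static const int squares[] = { 1, 4, 9 };

    static void square_param_desc(const int *param, char *desc)
    {
            snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%d", *param);
    }
    KUNIT_ARRAY_PARAM(square, squares, square_param_desc);

    /* Runs once per array element; param_value points at the element. */
    static void square_is_positive(struct kunit *test)
    {
            const int *param = test->param_value;

            KUNIT_EXPECT_GT(test, *param, 0);
    }

    static struct kunit_case example_cases[] = {
            KUNIT_CASE_PARAM(square_is_positive, square_gen_params),
            {}
    };

    static struct kunit_suite example_suite = {
            .name = "param-example",
            .test_cases = example_cases,
    };
    kunit_test_suite(example_suite);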
diff --git a/lib/siphash_kunit.c b/lib/tests/siphash_kunit.c
index 26bd4e8dc03e..26bd4e8dc03e 100644
--- a/lib/siphash_kunit.c
+++ b/lib/tests/siphash_kunit.c
diff --git a/lib/slub_kunit.c b/lib/tests/slub_kunit.c
index f11691315c2f..d47c472b0520 100644
--- a/lib/slub_kunit.c
+++ b/lib/tests/slub_kunit.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
+#include <linux/delay.h>
#include "../mm/slab.h"
static struct kunit_resource resource;
@@ -181,6 +182,63 @@ static void test_kfree_rcu(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, slab_errors);
}
+struct cache_destroy_work {
+ struct work_struct work;
+ struct kmem_cache *s;
+};
+
+static void cache_destroy_workfn(struct work_struct *w)
+{
+ struct cache_destroy_work *cdw;
+
+ cdw = container_of(w, struct cache_destroy_work, work);
+ kmem_cache_destroy(cdw->s);
+}
+
+#define KMEM_CACHE_DESTROY_NR 10
+
+static void test_kfree_rcu_wq_destroy(struct kunit *test)
+{
+ struct test_kfree_rcu_struct *p;
+ struct cache_destroy_work cdw;
+ struct workqueue_struct *wq;
+ struct kmem_cache *s;
+ unsigned int delay;
+ int i;
+
+ if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+ kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+ INIT_WORK_ONSTACK(&cdw.work, cache_destroy_workfn);
+ wq = alloc_workqueue("test_kfree_rcu_destroy_wq",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+
+ if (!wq)
+ kunit_skip(test, "failed to alloc wq");
+
+ for (i = 0; i < KMEM_CACHE_DESTROY_NR; i++) {
+ s = test_kmem_cache_create("TestSlub_kfree_rcu_wq_destroy",
+ sizeof(struct test_kfree_rcu_struct),
+ SLAB_NO_MERGE);
+
+ if (!s)
+ kunit_skip(test, "failed to create cache");
+
+ delay = get_random_u8();
+ p = kmem_cache_alloc(s, GFP_KERNEL);
+ kfree_rcu(p, rcu);
+
+ cdw.s = s;
+
+ msleep(delay);
+ queue_work(wq, &cdw.work);
+ flush_work(&cdw.work);
+ }
+
+ destroy_workqueue(wq);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+}
+
static void test_leak_destroy(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
@@ -254,6 +312,7 @@ static struct kunit_case test_cases[] = {
KUNIT_CASE(test_clobber_redzone_free),
KUNIT_CASE(test_kmalloc_redzone_access),
KUNIT_CASE(test_kfree_rcu),
+ KUNIT_CASE(test_kfree_rcu_wq_destroy),
KUNIT_CASE(test_leak_destroy),
KUNIT_CASE(test_krealloc_redzone_zeroing),
{}
diff --git a/lib/stackinit_kunit.c b/lib/tests/stackinit_kunit.c
index 135322592faf..63aa78e6f5c1 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/tests/stackinit_kunit.c
@@ -185,6 +185,15 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
INIT_STRUCT_assigned_copy(var_type)
/*
+ * The "did we actually fill the stack?" check value needs
+ * to be neither 0 nor any of the "pattern" bytes. The
+ * pattern bytes are compiler, architecture, and type based,
+ * so we have to pick a value that never appears for those
+ * combinations. Use 0x99, which is none of 0xFF, 0xFE, or 0xAA.
+ */
+#define FILL_BYTE 0x99
+
+/*
* @name: unique string name for the test
* @var_type: type to be tested for zeroing initialization
* @which: is this a SCALAR, STRING, or STRUCT type?
@@ -206,12 +215,12 @@ static noinline void test_ ## name (struct kunit *test) \
ZERO_CLONE_ ## which(zero); \
/* Clear entire check buffer for 0xFF overlap test. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
- /* Fill stack with 0xFF. */ \
+ /* Fill stack with FILL_BYTE. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 1, \
FETCH_ARG_ ## which(zero)); \
- /* Verify all bytes overwritten with 0xFF. */ \
+ /* Verify all bytes overwritten with FILL_BYTE. */ \
for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] != 0xFF); \
+ sum += (check_buf[i] != FILL_BYTE); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
@@ -222,7 +231,8 @@ static noinline void test_ ## name (struct kunit *test) \
* possible between the two leaf function calls. \
*/ \
KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
- "leaf fill was not 0xFF!?\n"); \
+ "leaf fill was not 0x%02X!?\n", \
+ FILL_BYTE); \
\
/* Validate that compiler lined up fill and target. */ \
KUNIT_ASSERT_TRUE_MSG(test, \
@@ -234,9 +244,9 @@ static noinline void test_ ## name (struct kunit *test) \
(int)((ssize_t)(uintptr_t)fill_start - \
(ssize_t)(uintptr_t)target_start)); \
\
- /* Look for any bytes still 0xFF in check region. */ \
+ /* Validate check region has no FILL_BYTE bytes. */ \
for (sum = 0, i = 0; i < target_size; i++) \
- sum += (check_buf[i] == 0xFF); \
+ sum += (check_buf[i] == FILL_BYTE); \
\
if (sum != 0 && xfail) \
kunit_skip(test, \
@@ -271,12 +281,12 @@ static noinline int leaf_ ## name(unsigned long sp, bool fill, \
* stack frame of SOME kind... \
*/ \
memset(buf, (char)(sp & 0xff), sizeof(buf)); \
- /* Fill variable with 0xFF. */ \
+ /* Fill variable with FILL_BYTE. */ \
if (fill) { \
fill_start = &var; \
fill_size = sizeof(var); \
memset(fill_start, \
- (char)((sp & 0xff) | forced_mask), \
+ FILL_BYTE & forced_mask, \
fill_size); \
} \
\
@@ -469,7 +479,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
fill_start = &var;
fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0x55, fill_size);
+ memset(fill_start, (forced_mask | 0x55) & FILL_BYTE, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
@@ -480,7 +490,7 @@ static int noinline __leaf_switch_none(int path, bool fill)
fill_start = &var;
fill_size = sizeof(var);
- memset(fill_start, forced_mask | 0xaa, fill_size);
+ memset(fill_start, (forced_mask | 0xaa) & FILL_BYTE, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
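The FILL_BYTE choice matters because detection is purely byte-counting: any byte in the check buffer that still equals FILL_BYTE after the leaf call is assumed to have leaked through initialization, so FILL_BYTE must collide neither with 0x00 nor with the 0xFF/0xFE/0xAA auto-init patterns. A stripped-down sketch of that check (hypothetical helper, not in the file):

    /*
     * Count bytes that still carry the fill pattern; any non-zero
     * result means the variable was not fully initialized.
     */
    static unsigned int count_leaked(const unsigned char *buf, size_t len)
    {
            unsigned int sum = 0;
            size_t i;

            for (i = 0; i < len; i++)
                    sum += (buf[i] == FILL_BYTE);
            return sum;
    }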
diff --git a/lib/string_helpers_kunit.c b/lib/tests/string_helpers_kunit.c
index c853046183d2..c853046183d2 100644
--- a/lib/string_helpers_kunit.c
+++ b/lib/tests/string_helpers_kunit.c
diff --git a/lib/string_kunit.c b/lib/tests/string_kunit.c
index c919e3293da6..0ed7448a26d3 100644
--- a/lib/string_kunit.c
+++ b/lib/tests/string_kunit.c
@@ -579,8 +579,8 @@ static void string_test_strtomem(struct kunit *test)
static void string_test_memtostr(struct kunit *test)
{
- char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
- char nonstring_small[3] = { 'a', 'b', 'c' };
+ char nonstring[7] __nonstring = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
+ char nonstring_small[3] __nonstring = { 'a', 'b', 'c' };
char dest[sizeof(nonstring) + 1];
/* Copy in a non-NUL-terminated string into exactly right-sized dest. */
diff --git a/lib/test_bits.c b/lib/tests/test_bits.c
index c7b38d91e1f1..c7b38d91e1f1 100644
--- a/lib/test_bits.c
+++ b/lib/tests/test_bits.c
diff --git a/lib/test_fprobe.c b/lib/tests/test_fprobe.c
index cf92111b5c79..cf92111b5c79 100644
--- a/lib/test_fprobe.c
+++ b/lib/tests/test_fprobe.c
diff --git a/lib/test_hash.c b/lib/tests/test_hash.c
index a7af39662a0a..a7af39662a0a 100644
--- a/lib/test_hash.c
+++ b/lib/tests/test_hash.c
diff --git a/lib/test_kprobes.c b/lib/tests/test_kprobes.c
index b7582010125c..b7582010125c 100644
--- a/lib/test_kprobes.c
+++ b/lib/tests/test_kprobes.c
diff --git a/lib/test_linear_ranges.c b/lib/tests/test_linear_ranges.c
index f482be00f1bc..f482be00f1bc 100644
--- a/lib/test_linear_ranges.c
+++ b/lib/tests/test_linear_ranges.c
diff --git a/lib/test_list_sort.c b/lib/tests/test_list_sort.c
index 30879abc8a42..30879abc8a42 100644
--- a/lib/test_list_sort.c
+++ b/lib/tests/test_list_sort.c
diff --git a/lib/test_sort.c b/lib/tests/test_sort.c
index cd4a338d1153..cd4a338d1153 100644
--- a/lib/test_sort.c
+++ b/lib/tests/test_sort.c
diff --git a/lib/usercopy_kunit.c b/lib/tests/usercopy_kunit.c
index 77fa00a13df7..77fa00a13df7 100644
--- a/lib/usercopy_kunit.c
+++ b/lib/tests/usercopy_kunit.c
diff --git a/lib/util_macros_kunit.c b/lib/tests/util_macros_kunit.c
index 94cc9f0de50a..94cc9f0de50a 100644
--- a/lib/util_macros_kunit.c
+++ b/lib/tests/util_macros_kunit.c
diff --git a/lib/ubsan.c b/lib/ubsan.c
index a1c983d148f1..cdc1d31c3821 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -44,7 +44,7 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_shift_out_of_bounds:
return "UBSAN: shift out of bounds";
#endif
-#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_SIGNED_WRAP)
+#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_INTEGER_WRAP)
/*
* SanitizerKind::IntegerDivideByZero and
* SanitizerKind::SignedIntegerOverflow emit
@@ -79,7 +79,7 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_type_mismatch:
return "UBSAN: type mismatch";
#endif
-#ifdef CONFIG_UBSAN_SIGNED_WRAP
+#ifdef CONFIG_UBSAN_INTEGER_WRAP
/*
* SanitizerKind::SignedIntegerOverflow emits
* SanitizerHandler::AddOverflow, SanitizerHandler::SubOverflow,
@@ -303,6 +303,30 @@ void __ubsan_handle_negate_overflow(void *_data, void *old_val)
}
EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
+void __ubsan_handle_implicit_conversion(void *_data, void *from_val, void *to_val)
+{
+ struct implicit_conversion_data *data = _data;
+ char from_val_str[VALUE_LENGTH];
+ char to_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ val_to_string(from_val_str, sizeof(from_val_str), data->from_type, from_val);
+ val_to_string(to_val_str, sizeof(to_val_str), data->to_type, to_val);
+
+ ubsan_prologue(&data->location, "implicit-conversion");
+
+ pr_err("cannot represent %s value %s during %s %s, truncated to %s\n",
+ data->from_type->type_name,
+ from_val_str,
+ type_check_kinds[data->type_check_kind],
+ data->to_type->type_name,
+ to_val_str);
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_implicit_conversion);
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
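The new handler fires on compiler-instrumented implicit conversions. As a rough illustration, assuming a kernel built with the -fsanitize=implicit-*-truncation options added below in scripts/Makefile.ubsan, a lossy narrowing like this would be reported:

    /*
     * unsigned int -> unsigned char drops the high bits: with the
     * truncation sanitizers enabled, narrow(0x1234) would invoke
     * __ubsan_handle_implicit_conversion() and report the value
     * being truncated to 0x34.
     */
    static unsigned char narrow(unsigned int wide)
    {
            return wide;
    }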
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 07e37d4429b4..b37e22374e77 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -62,6 +62,13 @@ struct overflow_data {
struct type_descriptor *type;
};
+struct implicit_conversion_data {
+ struct source_location location;
+ struct type_descriptor *from_type;
+ struct type_descriptor *to_type;
+ unsigned char type_check_kind;
+};
+
struct type_mismatch_data {
struct source_location location;
struct type_descriptor *type;
@@ -142,6 +149,7 @@ void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs)
void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val);
void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_implicit_conversion(void *_data, void *lhs, void *rhs);
void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 56fe96319292..734bd70c8b9b 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -114,6 +114,13 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtoul);
+unsigned long simple_strntoul(const char *cp, char **endp, unsigned int base,
+ size_t max_chars)
+{
+ return simple_strntoull(cp, endp, base, max_chars);
+}
+EXPORT_SYMBOL(simple_strntoul);
+
/**
* simple_strtol - convert a string to a signed long
* @cp: The start of the string
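simple_strntoul() is the width-bounded counterpart of simple_strtoul(), consuming at most max_chars characters, presumably so vsscanf()-style field widths can be honored for unsigned long conversions. A hypothetical caller parsing a fixed-width field:

    static unsigned long parse_year(const char *s)
    {
            char *end;

            /*
             * "20250131" -> 2025; scanning stops after 4 characters,
             * leaving end == s + 4.
             */
            return simple_strntoul(s, &end, 10, 4);
    }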
diff --git a/mm/Kconfig b/mm/Kconfig
index 1b501db06417..0b7f4bb5cb80 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -242,6 +242,10 @@ menu "Slab allocator options"
config SLUB
def_bool y
+config KVFREE_RCU_BATCHED
+ def_bool y
+ depends on !SLUB_TINY && !TINY_RCU
+
config SLUB_TINY
bool "Configure for minimal memory footprint"
depends on EXPERT
diff --git a/mm/gup.c b/mm/gup.c
index 3883b307780e..855ab860f88b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2254,6 +2254,7 @@ EXPORT_SYMBOL(fault_in_readable);
/**
* get_dump_page() - pin user page in memory while writing it to core dump
* @addr: user address
+ * @locked: a pointer to an int denoting whether the mmap_lock is held
*
* Returns struct page pointer of user page pinned for dump,
* to be freed afterwards by put_page().
@@ -2266,13 +2267,12 @@ EXPORT_SYMBOL(fault_in_readable);
* Called without mmap_lock (takes and releases the mmap_lock by itself).
*/
#ifdef CONFIG_ELF_CORE
-struct page *get_dump_page(unsigned long addr)
+struct page *get_dump_page(unsigned long addr, int *locked)
{
struct page *page;
- int locked = 0;
int ret;
- ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
+ ret = __get_user_pages_locked(current->mm, addr, 1, &page, locked,
FOLL_FORCE | FOLL_DUMP | FOLL_GET);
return (ret == 1) ? page : NULL;
}
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 2be6b9112808..2e9fa431bbf5 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -1855,9 +1855,11 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
if (val > MAX_SWAPPINESS)
return -EINVAL;
- if (!mem_cgroup_is_root(memcg))
+ if (!mem_cgroup_is_root(memcg)) {
+ pr_info_once("Per memcg swappiness does not exist in cgroup v2. "
+ "See memory.reclaim or memory.swap.max there\n ");
WRITE_ONCE(memcg->swappiness, val);
- else
+ } else
WRITE_ONCE(vm_swappiness, val);
return 0;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index bbaadbeeb291..a9eea051b0d6 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -196,6 +196,37 @@ int numa_nearest_node(int node, unsigned int state)
}
EXPORT_SYMBOL_GPL(numa_nearest_node);
+/**
+ * nearest_node_nodemask - Find the node in @mask at the nearest distance
+ * from @node.
+ *
+ * @node: a valid node ID to start the search from.
+ * @mask: a pointer to a nodemask representing the allowed nodes.
+ *
+ * This function iterates over all nodes in @mask and calculates the
+ * distance from the starting @node, then it returns the node ID that is
+ * the closest to @node, or MAX_NUMNODES if no node is found.
+ *
+ * Note that @node must be a valid node ID usable with node_distance();
+ * providing an invalid node ID (e.g., NUMA_NO_NODE) may result in crashes
+ * or unexpected behavior.
+ */
+int nearest_node_nodemask(int node, nodemask_t *mask)
+{
+ int dist, n, min_dist = INT_MAX, min_node = MAX_NUMNODES;
+
+ for_each_node_mask(n, *mask) {
+ dist = node_distance(node, n);
+ if (dist < min_dist) {
+ min_dist = dist;
+ min_node = n;
+ }
+ }
+
+ return min_node;
+}
+EXPORT_SYMBOL_GPL(nearest_node_nodemask);
+
struct mempolicy *get_task_policy(struct task_struct *p)
{
struct mempolicy *pol = p->mempolicy;
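A sketch of the intended use (caller names are illustrative): pick the closest node allowed by a mask, falling back when the mask contains no nodes:

    static int pick_node(int local_node, nodemask_t *allowed)
    {
            int nid = nearest_node_nodemask(local_node, allowed);

            /* MAX_NUMNODES means @allowed contained no nodes. */
            return nid == MAX_NUMNODES ? local_node : nid;
    }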
diff --git a/mm/shmem.c b/mm/shmem.c
index 1ede0800e846..ab61c8bb20e1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3912,16 +3912,16 @@ out_iput:
return error;
}
-static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int error;
error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
if (error)
- return error;
+ return ERR_PTR(error);
inc_nlink(dir);
- return 0;
+ return NULL;
}
static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
diff --git a/mm/slab.h b/mm/slab.h
index e9fd9bf0bfa6..05a21dc796e0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -457,39 +457,17 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}
-/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
SLAB_CACHE_DMA32 | SLAB_PANIC | \
- SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
+ SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS | \
+ SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
+ SLAB_TEMPORARY | SLAB_ACCOUNT | \
+ SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
-#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
-#else
-#define SLAB_DEBUG_FLAGS (0)
-#endif
-#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
- SLAB_TEMPORARY | SLAB_ACCOUNT | \
- SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
-
-/* Common flags available with current configuration */
-#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
-
-/* Common flags permitted for kmem_cache_create */
-#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
- SLAB_RED_ZONE | \
- SLAB_POISON | \
- SLAB_STORE_USER | \
- SLAB_TRACE | \
- SLAB_CONSISTENCY_CHECKS | \
- SLAB_NOLEAKTRACE | \
- SLAB_RECLAIM_ACCOUNT | \
- SLAB_TEMPORARY | \
- SLAB_ACCOUNT | \
- SLAB_KMALLOC | \
- SLAB_NO_MERGE | \
- SLAB_NO_USER_FLAGS)
+#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS)
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
@@ -604,6 +582,8 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void **p, int objects, struct slabobj_ext *obj_exts);
#endif
+void kvfree_rcu_cb(struct rcu_head *head);
+
size_t __ksize(const void *objp);
static inline size_t slab_ksize(const struct kmem_cache *s)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 4c9f0a87f733..fb579e9f60ba 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -298,6 +298,8 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
static_branch_enable(&slub_debug_enabled);
if (flags & SLAB_STORE_USER)
stack_depot_init();
+#else
+ flags &= ~SLAB_DEBUG_FLAGS;
#endif
mutex_lock(&slab_mutex);
@@ -307,20 +309,11 @@ struct kmem_cache *__kmem_cache_create_args(const char *name,
goto out_unlock;
}
- /* Refuse requests with allocator specific flags */
if (flags & ~SLAB_FLAGS_PERMITTED) {
err = -EINVAL;
goto out_unlock;
}
- /*
- * Some allocators will constraint the set of valid flags to a subset
- * of all flags. We expect them to define CACHE_CREATE_MASK in this
- * case, and we'll just provide them with a sanitized version of the
- * passed flags.
- */
- flags &= CACHE_CREATE_MASK;
-
/* Fail closed on bad usersize or useroffset values. */
if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
WARN_ON(!args->usersize && args->useroffset) ||
@@ -1284,6 +1277,29 @@ EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
+#ifndef CONFIG_KVFREE_RCU_BATCHED
+
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
+{
+ if (head) {
+ kasan_record_aux_stack(ptr);
+ call_rcu(head, kvfree_rcu_cb);
+ return;
+ }
+
+ // kvfree_rcu(one_arg) call.
+ might_sleep();
+ synchronize_rcu();
+ kvfree(ptr);
+}
+EXPORT_SYMBOL_GPL(kvfree_call_rcu);
+
+void __init kvfree_rcu_init(void)
+{
+}
+
+#else /* CONFIG_KVFREE_RCU_BATCHED */
+
/*
* This rcu parameter is runtime-read-only. It reflects
* a minimum allowed number of objects which can be cached
@@ -1534,8 +1550,7 @@ kvfree_rcu_list(struct rcu_head *head)
rcu_lock_acquire(&rcu_callback_map);
trace_rcu_invoke_kvfree_callback("slab", head, offset);
- if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
- kvfree(ptr);
+ kvfree(ptr);
rcu_lock_release(&rcu_callback_map);
cond_resched_tasks_rcu_qs();
@@ -1863,8 +1878,6 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
return true;
}
-#if !defined(CONFIG_TINY_RCU)
-
static enum hrtimer_restart
schedule_page_work_fn(struct hrtimer *t)
{
@@ -2073,8 +2086,6 @@ void kvfree_rcu_barrier(void)
}
EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
-#endif /* #if !defined(CONFIG_TINY_RCU) */
-
static unsigned long
kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
@@ -2168,3 +2179,6 @@ void __init kvfree_rcu_init(void)
shrinker_register(kfree_rcu_shrinker);
}
+
+#endif /* CONFIG_KVFREE_RCU_BATCHED */
+
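Both halves of the new #ifdef implement the same caller contract. As a reminder, the two call forms look like this (struct name is illustrative):

    struct foo {
            int data;
            struct rcu_head rcu;
    };

    static void release_foo(struct foo *f)
    {
            /*
             * Two-argument form: queues a callback and never sleeps.
             * In the !KVFREE_RCU_BATCHED variant above this becomes
             * call_rcu(&f->rcu, kvfree_rcu_cb).
             */
            kvfree_rcu(f, rcu);
    }

    static void release_foo_sync(struct foo *f)
    {
            /*
             * Single-pointer form: may sleep; the non-batched variant
             * maps it to synchronize_rcu() followed by kvfree().
             */
            kvfree_rcu_mightsleep(f);
    }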
diff --git a/mm/slub.c b/mm/slub.c
index 1f50129dcfb3..5eac408e818e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -19,6 +19,7 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
+#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
@@ -1017,22 +1018,31 @@ void skip_orig_size_check(struct kmem_cache *s, const void *object)
set_orig_size(s, (void *)object, s->object_size);
}
-static void slab_bug(struct kmem_cache *s, char *fmt, ...)
+static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
{
struct va_format vaf;
va_list args;
- va_start(args, fmt);
+ va_copy(args, argsp);
vaf.fmt = fmt;
vaf.va = &args;
pr_err("=============================================================================\n");
- pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
+ pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
pr_err("-----------------------------------------------------------------------------\n\n");
va_end(args);
}
+static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ __slab_bug(s, fmt, args);
+ va_end(args);
+}
+
__printf(2, 3)
-static void slab_fix(struct kmem_cache *s, char *fmt, ...)
+static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -1085,19 +1095,19 @@ static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
/* Beginning of the filler is the free pointer */
print_section(KERN_ERR, "Padding ", p + off,
size_from_object(s) - off);
-
- dump_stack();
}
static void object_err(struct kmem_cache *s, struct slab *slab,
- u8 *object, char *reason)
+ u8 *object, const char *reason)
{
if (slab_add_kunit_errors())
return;
- slab_bug(s, "%s", reason);
+ slab_bug(s, reason);
print_trailer(s, slab, object);
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+
+ WARN_ON(1);
}
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
@@ -1114,22 +1124,30 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
return false;
}
+static void __slab_err(struct slab *slab)
+{
+ if (slab_in_kunit_test())
+ return;
+
+ print_slab_info(slab);
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+
+ WARN_ON(1);
+}
+
static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
const char *fmt, ...)
{
va_list args;
- char buf[100];
if (slab_add_kunit_errors())
return;
va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
+ __slab_bug(s, fmt, args);
va_end(args);
- slab_bug(s, "%s", buf);
- print_slab_info(slab);
- dump_stack();
- add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+
+ __slab_err(slab);
}
static void init_object(struct kmem_cache *s, void *object, u8 val)
@@ -1166,7 +1184,7 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
s->inuse - poison_size);
}
-static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
+static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
void *from, void *to)
{
slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
@@ -1181,8 +1199,8 @@ static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
static pad_check_attributes int
check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
- u8 *object, char *what,
- u8 *start, unsigned int value, unsigned int bytes)
+ u8 *object, const char *what, u8 *start, unsigned int value,
+ unsigned int bytes, bool slab_obj_print)
{
u8 *fault;
u8 *end;
@@ -1201,10 +1219,11 @@ check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
if (slab_add_kunit_errors())
goto skip_bug_print;
- slab_bug(s, "%s overwritten", what);
- pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
- fault, end - 1, fault - addr,
- fault[0], value);
+ pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
+ what, fault, end - 1, fault - addr, fault[0], value);
+
+ if (slab_obj_print)
+ object_err(s, slab, object, "Object corrupt");
skip_bug_print:
restore_bytes(s, what, value, fault, end);
@@ -1268,7 +1287,7 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
return 1;
return check_bytes_and_report(s, slab, p, "Object padding",
- p + off, POISON_INUSE, size_from_object(s) - off);
+ p + off, POISON_INUSE, size_from_object(s) - off, true);
}
/* Check the pad bytes at the end of a slab page */
@@ -1301,9 +1320,10 @@ slab_pad_check(struct kmem_cache *s, struct slab *slab)
while (end > fault && end[-1] == POISON_INUSE)
end--;
- slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu",
- fault, end - 1, fault - start);
+ slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
+ fault, end - 1, fault - start);
print_section(KERN_ERR, "Padding ", pad, remainder);
+ __slab_err(slab);
restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
}
@@ -1318,11 +1338,11 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, slab, object, "Left Redzone",
- object - s->red_left_pad, val, s->red_left_pad))
+ object - s->red_left_pad, val, s->red_left_pad, ret))
ret = 0;
if (!check_bytes_and_report(s, slab, object, "Right Redzone",
- endobject, val, s->inuse - s->object_size))
+ endobject, val, s->inuse - s->object_size, ret))
ret = 0;
if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
@@ -1331,7 +1351,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
if (s->object_size > orig_size &&
!check_bytes_and_report(s, slab, object,
"kmalloc Redzone", p + orig_size,
- val, s->object_size - orig_size)) {
+ val, s->object_size - orig_size, ret)) {
ret = 0;
}
}
@@ -1339,7 +1359,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
if (!check_bytes_and_report(s, slab, p, "Alignment padding",
endobject, POISON_INUSE,
- s->inuse - s->object_size))
+ s->inuse - s->object_size, ret))
ret = 0;
}
}
@@ -1355,11 +1375,11 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
if (kasan_meta_size < s->object_size - 1 &&
!check_bytes_and_report(s, slab, p, "Poison",
p + kasan_meta_size, POISON_FREE,
- s->object_size - kasan_meta_size - 1))
+ s->object_size - kasan_meta_size - 1, ret))
ret = 0;
if (kasan_meta_size < s->object_size &&
!check_bytes_and_report(s, slab, p, "End Poison",
- p + s->object_size - 1, POISON_END, 1))
+ p + s->object_size - 1, POISON_END, 1, ret))
ret = 0;
}
/*
@@ -1385,11 +1405,6 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
ret = 0;
}
- if (!ret && !slab_in_kunit_test()) {
- print_trailer(s, slab, object);
- add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
- }
-
return ret;
}
@@ -1427,7 +1442,7 @@ static int check_slab(struct kmem_cache *s, struct slab *slab)
* Determine if a certain object in a slab is on the freelist. Must hold the
* slab lock to guarantee that the chains are in a consistent state.
*/
-static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
+static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
{
int nr = 0;
void *fp;
@@ -1437,26 +1452,34 @@ static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
fp = slab->freelist;
while (fp && nr <= slab->objects) {
if (fp == search)
- return 1;
+ return true;
if (!check_valid_pointer(s, slab, fp)) {
if (object) {
object_err(s, slab, object,
"Freechain corrupt");
set_freepointer(s, object, NULL);
+ break;
} else {
slab_err(s, slab, "Freepointer corrupt");
slab->freelist = NULL;
slab->inuse = slab->objects;
slab_fix(s, "Freelist cleared");
- return 0;
+ return false;
}
- break;
}
object = fp;
fp = get_freepointer(s, object);
nr++;
}
+ if (nr > slab->objects) {
+ slab_err(s, slab, "Freelist cycle detected");
+ slab->freelist = NULL;
+ slab->inuse = slab->objects;
+ slab_fix(s, "Freelist cleared");
+ return false;
+ }
+
max_objects = order_objects(slab_order(slab), s->size);
if (max_objects > MAX_OBJS_PER_PAGE)
max_objects = MAX_OBJS_PER_PAGE;
@@ -1624,12 +1647,12 @@ static inline int free_consistency_checks(struct kmem_cache *s,
slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
object);
} else if (!slab->slab_cache) {
- pr_err("SLUB <none>: no slab for object 0x%p.\n",
- object);
- dump_stack();
- } else
+ slab_err(NULL, slab, "No slab cache for object 0x%p",
+ object);
+ } else {
object_err(s, slab, object,
- "page slab pointer corrupt.");
+ "page slab pointer corrupt.");
+ }
return 0;
}
return 1;
@@ -4241,6 +4264,7 @@ static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
ptr = folio_address(folio);
lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
PAGE_SIZE << order);
+ __folio_set_large_kmalloc(folio);
}
ptr = kasan_kmalloc_large(ptr, size, flags);
@@ -4716,6 +4740,11 @@ static void free_large_kmalloc(struct folio *folio, void *object)
{
unsigned int order = folio_order(folio);
+ if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
+ dump_page(&folio->page, "Not a kmalloc allocation");
+ return;
+ }
+
if (WARN_ON_ONCE(order == 0))
pr_warn_once("object pointer: 0x%p\n", object);
@@ -4725,9 +4754,55 @@ static void free_large_kmalloc(struct folio *folio, void *object)
lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
-(PAGE_SIZE << order));
+ __folio_clear_large_kmalloc(folio);
folio_put(folio);
}
+/*
+ * Given an rcu_head embedded within an object obtained from kvmalloc at an
+ * offset < 4k, free the object in question.
+ */
+void kvfree_rcu_cb(struct rcu_head *head)
+{
+ void *obj = head;
+ struct folio *folio;
+ struct slab *slab;
+ struct kmem_cache *s;
+ void *slab_addr;
+
+ if (is_vmalloc_addr(obj)) {
+ obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
+ vfree(obj);
+ return;
+ }
+
+ folio = virt_to_folio(obj);
+ if (!folio_test_slab(folio)) {
+ /*
+ * the rcu_head offset can only be less than the page size, so
+ * there is no need to consider the folio order
+ */
+ obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
+ free_large_kmalloc(folio, obj);
+ return;
+ }
+
+ slab = folio_slab(folio);
+ s = slab->slab_cache;
+ slab_addr = folio_address(folio);
+
+ if (is_kfence_address(obj)) {
+ obj = kfence_object_start(obj);
+ } else {
+ unsigned int idx = __obj_to_index(s, slab_addr, obj);
+
+ obj = slab_addr + s->size * idx;
+ obj = fixup_red_left(s, obj);
+ }
+
+ slab_free(s, slab, obj, _RET_IP_);
+}
+
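The "offset < 4k" precondition above is what makes the address recovery in kvfree_rcu_cb() possible: for vmalloc and large-kmalloc objects the object start is recomputed by rounding the rcu_head address down to a page boundary, which only works if the rcu_head sits within the first page of the object. The kvfree_rcu() macro enforces this at compile time; an illustrative layout:

    struct big {
            char payload[2048];
            struct rcu_head rcu;    /* offset 2048 < PAGE_SIZE: OK */
    };

    /*
     * Roughly what the kvfree_rcu() macro checks via
     * __is_kvfree_rcu_offset():
     */
    static_assert(offsetof(struct big, rcu) < 4096);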
/**
* kfree - free previously allocated memory
* @object: pointer returned by kmalloc() or kmem_cache_alloc()
@@ -4878,6 +4953,168 @@ void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
}
EXPORT_SYMBOL(krealloc_noprof);
+static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
+{
+ /*
+ * We want to attempt a large physically contiguous block first because
+ * it is less likely to fragment multiple larger blocks and therefore
+ * contributes less to long-term fragmentation than the vmalloc fallback.
+ * However, make sure that larger requests are not too disruptive - no
+ * OOM killer and no allocation failure warnings, as we have a fallback.
+ */
+ if (size > PAGE_SIZE) {
+ flags |= __GFP_NOWARN;
+
+ if (!(flags & __GFP_RETRY_MAYFAIL))
+ flags |= __GFP_NORETRY;
+
+ /* nofail semantic is implemented by the vmalloc fallback */
+ flags &= ~__GFP_NOFAIL;
+ }
+
+ return flags;
+}
+
+/**
+ * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
+ * failure, fall back to non-contiguous (vmalloc) allocation.
+ * @size: size of the request.
+ * @b: which set of kmalloc buckets to allocate from.
+ * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
+ * @node: numa node to allocate from
+ *
+ * Uses kmalloc to get the memory but if the allocation fails then falls back
+ * to the vmalloc allocator. Use kvfree for freeing the memory.
+ *
+ * GFP_NOWAIT and GFP_ATOMIC are not supported; neither is the __GFP_NORETRY modifier.
+ * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
+ * preferable to the vmalloc fallback, due to visible performance drawbacks.
+ *
+ * Return: pointer to the allocated memory or %NULL in case of failure
+ */
+void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
+{
+ void *ret;
+
+ /*
+ * It doesn't really make sense to fall back to vmalloc for sub-page
+ * requests
+ */
+ ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),
+ kmalloc_gfp_adjust(flags, size),
+ node, _RET_IP_);
+ if (ret || size <= PAGE_SIZE)
+ return ret;
+
+ /* non-sleeping allocations are not supported by vmalloc */
+ if (!gfpflags_allow_blocking(flags))
+ return NULL;
+
+ /* Don't even allow crazy sizes */
+ if (unlikely(size > INT_MAX)) {
+ WARN_ON_ONCE(!(flags & __GFP_NOWARN));
+ return NULL;
+ }
+
+ /*
+ * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
+ * since the callers already cannot assume anything
+ * about the resulting pointer, and cannot play
+ * protection games.
+ */
+ return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
+ flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
+ node, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__kvmalloc_node_noprof);
+
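The usual entry points to this path are the kvmalloc() family. A minimal round-trip, sketching the contract described above (try kmalloc, transparently fall back to vmalloc, always release with kvfree()):

    static u32 *alloc_table(size_t entries)
    {
            /*
             * May be physically contiguous (kmalloc) or not (vmalloc);
             * callers must not assume either.
             */
            return kvmalloc_array(entries, sizeof(u32), GFP_KERNEL);
    }

    static void free_table(u32 *table)
    {
            kvfree(table);
    }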
+/**
+ * kvfree() - Free memory.
+ * @addr: Pointer to allocated memory.
+ *
+ * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
+ * It is slightly more efficient to use kfree() or vfree() if you are certain
+ * that you know which one to use.
+ *
+ * Context: Either preemptible task context or not-NMI interrupt.
+ */
+void kvfree(const void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+EXPORT_SYMBOL(kvfree);
+
+/**
+ * kvfree_sensitive - Free a data object containing sensitive information.
+ * @addr: address of the data object to be freed.
+ * @len: length of the data object.
+ *
+ * Use the special memzero_explicit() function to clear the content of a
+ * kvmalloc'ed object containing sensitive data to make sure that the
+ * compiler won't optimize out the data clearing.
+ */
+void kvfree_sensitive(const void *addr, size_t len)
+{
+ if (likely(!ZERO_OR_NULL_PTR(addr))) {
+ memzero_explicit((void *)addr, len);
+ kvfree(addr);
+ }
+}
+EXPORT_SYMBOL(kvfree_sensitive);
+
+/**
+ * kvrealloc - reallocate memory; contents remain unchanged
+ * @p: object to reallocate memory for
+ * @size: the size to reallocate
+ * @flags: the flags for the page level allocator
+ *
+ * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
+ * and @p is not a %NULL pointer, the object pointed to is freed.
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
+ *
+ * This function must not be called concurrently with itself or kvfree() for the
+ * same memory allocation.
+ *
+ * Return: pointer to the allocated memory or %NULL in case of error
+ */
+void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
+{
+ void *n;
+
+ if (is_vmalloc_addr(p))
+ return vrealloc_noprof(p, size, flags);
+
+ n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
+ if (!n) {
+ /* We failed to krealloc(), fall back to kvmalloc(). */
+ n = kvmalloc_noprof(size, flags);
+ if (!n)
+ return NULL;
+
+ if (p) {
+ /* We already know that `p` is not a vmalloc address. */
+ kasan_disable_current();
+ memcpy(n, kasan_reset_tag(p), ksize(p));
+ kasan_enable_current();
+
+ kfree(p);
+ }
+ }
+
+ return n;
+}
+EXPORT_SYMBOL(kvrealloc_noprof);
+
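A short usage sketch of the __GFP_ZERO rule spelled out above: zeroing is only reliable if every call for the same allocation passes the flag:

    static void *grow_zeroed(void *buf, size_t new_size)
    {
            /*
             * The first allocation and every resize use __GFP_ZERO, so
             * any newly extended tail is guaranteed to read back as zero.
             */
            return kvrealloc(buf, new_size, GFP_KERNEL | __GFP_ZERO);
    }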
struct detached_freelist {
struct slab *slab;
void *tail;
@@ -5570,14 +5807,14 @@ static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
return !!oo_objects(s->oo);
}
-static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
- const char *text)
+static void list_slab_objects(struct kmem_cache *s, struct slab *slab)
{
#ifdef CONFIG_SLUB_DEBUG
void *addr = slab_address(slab);
void *p;
- slab_err(s, slab, text, s->name);
+ if (!slab_add_kunit_errors())
+ slab_bug(s, "Objects remaining on __kmem_cache_shutdown()");
spin_lock(&object_map_lock);
__fill_map(object_map, s, slab);
@@ -5592,6 +5829,8 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
}
}
spin_unlock(&object_map_lock);
+
+ __slab_err(slab);
#endif
}
@@ -5612,8 +5851,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
remove_partial(n, slab);
list_add(&slab->slab_list, &discard);
} else {
- list_slab_objects(s, slab,
- "Objects remaining in %s on __kmem_cache_shutdown()");
+ list_slab_objects(s, slab);
}
}
spin_unlock_irq(&n->list_lock);
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 83c164aba6e0..dbdcc43964fb 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -17,7 +17,7 @@
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
-#include <linux/thread_info.h>
+#include <linux/ucopysize.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
@@ -201,7 +201,9 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
}
}
-static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
+DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_HARDENED_USERCOPY_DEFAULT_ON,
+ validate_usercopy_range);
+EXPORT_SYMBOL(validate_usercopy_range);
/*
* Validates that the given object is:
@@ -212,9 +214,6 @@ static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
*/
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
- if (static_branch_unlikely(&bypass_usercopy_checks))
- return;
-
/* Skip all tests if size is zero. */
if (!n)
return;
@@ -255,7 +254,8 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
}
EXPORT_SYMBOL(__check_object_size);
-static bool enable_checks __initdata = true;
+static bool enable_checks __initdata =
+ IS_ENABLED(CONFIG_HARDENED_USERCOPY_DEFAULT_ON);
static int __init parse_hardened_usercopy(char *str)
{
@@ -269,8 +269,10 @@ __setup("hardened_usercopy=", parse_hardened_usercopy);
static int __init set_hardened_usercopy(void)
{
- if (enable_checks == false)
- static_branch_enable(&bypass_usercopy_checks);
+ if (enable_checks)
+ static_branch_enable(&validate_usercopy_range);
+ else
+ static_branch_disable(&validate_usercopy_range);
return 1;
}
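The replacement of the "bypass" key with a MAYBE_RO key inverts the polarity: the branch now directly gates the checks, and its compile-time default tracks CONFIG_HARDENED_USERCOPY_DEFAULT_ON. The general pattern, as a hedged sketch with made-up names (config option and helper are hypothetical):

    DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_MY_CHECKS_DEFAULT_ON, my_checks);

    static inline void maybe_check(const void *ptr, unsigned long n)
    {
            /*
             * Folds to a single patched branch; the default arm matches
             * the Kconfig default, so the common case costs nothing.
             */
            if (static_branch_maybe(CONFIG_MY_CHECKS_DEFAULT_ON, &my_checks))
                    do_expensive_check(ptr, n);     /* hypothetical */
    }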
diff --git a/mm/util.c b/mm/util.c
index 8c965474d329..e7d81371032b 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -615,168 +615,6 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
}
EXPORT_SYMBOL(vm_mmap);
-static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
-{
- /*
- * We want to attempt a large physically contiguous block first because
- * it is less likely to fragment multiple larger blocks and therefore
- * contribute to a long term fragmentation less than vmalloc fallback.
- * However make sure that larger requests are not too disruptive - no
- * OOM killer and no allocation failure warnings as we have a fallback.
- */
- if (size > PAGE_SIZE) {
- flags |= __GFP_NOWARN;
-
- if (!(flags & __GFP_RETRY_MAYFAIL))
- flags |= __GFP_NORETRY;
-
- /* nofail semantic is implemented by the vmalloc fallback */
- flags &= ~__GFP_NOFAIL;
- }
-
- return flags;
-}
-
-/**
- * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
- * failure, fall back to non-contiguous (vmalloc) allocation.
- * @size: size of the request.
- * @b: which set of kmalloc buckets to allocate from.
- * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
- * @node: numa node to allocate from
- *
- * Uses kmalloc to get the memory but if the allocation fails then falls back
- * to the vmalloc allocator. Use kvfree for freeing the memory.
- *
- * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
- * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
- * preferable to the vmalloc fallback, due to visible performance drawbacks.
- *
- * Return: pointer to the allocated memory of %NULL in case of failure
- */
-void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
-{
- void *ret;
-
- /*
- * It doesn't really make sense to fallback to vmalloc for sub page
- * requests
- */
- ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
- kmalloc_gfp_adjust(flags, size),
- node);
- if (ret || size <= PAGE_SIZE)
- return ret;
-
- /* non-sleeping allocations are not supported by vmalloc */
- if (!gfpflags_allow_blocking(flags))
- return NULL;
-
- /* Don't even allow crazy sizes */
- if (unlikely(size > INT_MAX)) {
- WARN_ON_ONCE(!(flags & __GFP_NOWARN));
- return NULL;
- }
-
- /*
- * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
- * since the callers already cannot assume anything
- * about the resulting pointer, and cannot play
- * protection games.
- */
- return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
- flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
- node, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(__kvmalloc_node_noprof);
-
-/**
- * kvfree() - Free memory.
- * @addr: Pointer to allocated memory.
- *
- * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
- * It is slightly more efficient to use kfree() or vfree() if you are certain
- * that you know which one to use.
- *
- * Context: Either preemptible task context or not-NMI interrupt.
- */
-void kvfree(const void *addr)
-{
- if (is_vmalloc_addr(addr))
- vfree(addr);
- else
- kfree(addr);
-}
-EXPORT_SYMBOL(kvfree);
-
-/**
- * kvfree_sensitive - Free a data object containing sensitive information.
- * @addr: address of the data object to be freed.
- * @len: length of the data object.
- *
- * Use the special memzero_explicit() function to clear the content of a
- * kvmalloc'ed object containing sensitive data to make sure that the
- * compiler won't optimize out the data clearing.
- */
-void kvfree_sensitive(const void *addr, size_t len)
-{
- if (likely(!ZERO_OR_NULL_PTR(addr))) {
- memzero_explicit((void *)addr, len);
- kvfree(addr);
- }
-}
-EXPORT_SYMBOL(kvfree_sensitive);
-
-/**
- * kvrealloc - reallocate memory; contents remain unchanged
- * @p: object to reallocate memory for
- * @size: the size to reallocate
- * @flags: the flags for the page level allocator
- *
- * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
- * and @p is not a %NULL pointer, the object pointed to is freed.
- *
- * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
- * initial memory allocation, every subsequent call to this API for the same
- * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
- * __GFP_ZERO is not fully honored by this API.
- *
- * In any case, the contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes.
- *
- * This function must not be called concurrently with itself or kvfree() for the
- * same memory allocation.
- *
- * Return: pointer to the allocated memory or %NULL in case of error
- */
-void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
-{
- void *n;
-
- if (is_vmalloc_addr(p))
- return vrealloc_noprof(p, size, flags);
-
- n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
- if (!n) {
- /* We failed to krealloc(), fall back to kvmalloc(). */
- n = kvmalloc_noprof(size, flags);
- if (!n)
- return NULL;
-
- if (p) {
- /* We already know that `p` is not a vmalloc address. */
- kasan_disable_current();
- memcpy(n, kasan_reset_tag(p), ksize(p));
- kasan_enable_current();
-
- kfree(p);
- }
- }
-
- return n;
-}
-EXPORT_SYMBOL(kvrealloc_noprof);
-
/**
* __vmalloc_array - allocate memory for a virtually contiguous array.
* @n: number of elements.
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index a64a0cab1bf7..3cc3af15086f 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -344,6 +344,7 @@ struct rxrpc_peer {
struct hlist_head error_targets; /* targets for net error distribution */
struct rb_root service_conns; /* Service connections */
struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
+ unsigned long app_data; /* Application data (e.g. afs_server) */
time64_t last_tx_at; /* Last time packet sent here */
seqlock_t service_conn_lock;
spinlock_t lock; /* access lock */
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 56e09d161a97..71b6e07bf161 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -461,7 +461,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
continue;
hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
- pr_err("Leaked peer %u {%u} %pISp\n",
+ pr_err("Leaked peer %x {%u} %pISp\n",
peer->debug_id,
refcount_read(&peer->ref),
&peer->srx.transport);
@@ -478,7 +478,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
*/
struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call)
{
- return call->peer;
+ return rxrpc_get_peer(call->peer, rxrpc_peer_get_application);
}
EXPORT_SYMBOL(rxrpc_kernel_get_call_peer);
@@ -520,3 +520,29 @@ const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer)
(peer ? &peer->srx.transport : &rxrpc_null_addr.transport);
}
EXPORT_SYMBOL(rxrpc_kernel_remote_addr);
+
+/**
+ * rxrpc_kernel_set_peer_data - Set app-specific data on a peer.
+ * @peer: The peer to alter
+ * @app_data: The data to set
+ *
+ * Set the app-specific data on a peer. AF_RXRPC makes no effort to retain
+ * anything the data might refer to. The previous app_data is returned.
+ */
+unsigned long rxrpc_kernel_set_peer_data(struct rxrpc_peer *peer, unsigned long app_data)
+{
+ return xchg(&peer->app_data, app_data);
+}
+EXPORT_SYMBOL(rxrpc_kernel_set_peer_data);
+
+/**
+ * rxrpc_kernel_get_peer_data - Get app-specific data from a peer.
+ * @peer: The peer to query
+ *
+ * Retrieve the app-specific data from a peer.
+ */
+unsigned long rxrpc_kernel_get_peer_data(const struct rxrpc_peer *peer)
+{
+ return peer->app_data;
+}
+EXPORT_SYMBOL(rxrpc_kernel_get_peer_data);
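As the ar-internal.h comment suggests, the expected consumer is a kernel-API user such as kafs stashing its server record on the peer. An illustrative pairing (the afs_server usage is an assumption, not shown in this patch):

    static void attach_server(struct rxrpc_peer *peer,
                              struct afs_server *server)
    {
            rxrpc_kernel_set_peer_data(peer, (unsigned long)server);
    }

    static struct afs_server *peer_server(const struct rxrpc_peer *peer)
    {
            return (struct afs_server *)rxrpc_kernel_get_peer_data(peer);
    }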
diff --git a/rust/kernel/fs/file.rs b/rust/kernel/fs/file.rs
index e03dbe14d62a..736209a1b983 100644
--- a/rust/kernel/fs/file.rs
+++ b/rust/kernel/fs/file.rs
@@ -392,6 +392,7 @@ pub struct FileDescriptorReservation {
impl FileDescriptorReservation {
/// Creates a new file descriptor reservation.
+ #[inline]
pub fn get_unused_fd_flags(flags: u32) -> Result<Self> {
// SAFETY: FFI call, there are no safety requirements on `flags`.
let fd: i32 = unsafe { bindings::get_unused_fd_flags(flags) };
@@ -405,6 +406,7 @@ impl FileDescriptorReservation {
}
/// Returns the file descriptor number that was reserved.
+ #[inline]
pub fn reserved_fd(&self) -> u32 {
self.fd
}
@@ -413,6 +415,7 @@ impl FileDescriptorReservation {
///
/// The previously reserved file descriptor is bound to `file`. This method consumes the
/// [`FileDescriptorReservation`], so it will not be usable after this call.
+ #[inline]
pub fn fd_install(self, file: ARef<File>) {
// SAFETY: `self.fd` was previously returned by `get_unused_fd_flags`. We have not yet used
// the fd, so it is still valid, and `current` still refers to the same task, as this type
@@ -433,6 +436,7 @@ impl FileDescriptorReservation {
}
impl Drop for FileDescriptorReservation {
+ #[inline]
fn drop(&mut self) {
// SAFETY: By the type invariants of this type, `self.fd` was previously returned by
// `get_unused_fd_flags`. We have not yet used the fd, so it is still valid, and `current`
diff --git a/rust/kernel/seq_file.rs b/rust/kernel/seq_file.rs
index 04947c672979..efc4dd09850a 100644
--- a/rust/kernel/seq_file.rs
+++ b/rust/kernel/seq_file.rs
@@ -30,6 +30,7 @@ impl SeqFile {
}
/// Used by the [`seq_print`] macro.
+ #[inline]
pub fn call_printf(&self, args: core::fmt::Arguments<'_>) {
// SAFETY: Passing a void pointer to `Arguments` is valid for `%pA`.
unsafe {
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index b7be224cdf4b..4cf9a5a9b6c3 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -703,3 +703,21 @@ pub fn system_freezable_power_efficient() -> &'static Queue {
// SAFETY: `system_freezable_power_efficient_wq` is a C global, always available.
unsafe { Queue::from_raw(bindings::system_freezable_power_efficient_wq) }
}
+
+/// Returns the system bottom halves work queue (`system_bh_wq`).
+///
+/// It is similar to the one returned by [`system`] but for work items which
+/// need to run from a softirq context.
+pub fn system_bh() -> &'static Queue {
+ // SAFETY: `system_bh_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_bh_wq) }
+}
+
+/// Returns the system bottom halves high-priority work queue (`system_bh_highpri_wq`).
+///
+/// It is similar to the one returned by [`system_bh`] but for work items which
+/// require higher scheduling priority.
+pub fn system_bh_highpri() -> &'static Queue {
+ // SAFETY: `system_bh_highpri_wq` is a C global, always available.
+ unsafe { Queue::from_raw(bindings::system_bh_highpri_wq) }
+}
diff --git a/samples/check-exec/run-script-ask.inc b/samples/check-exec/run-script-ask.sh
index 8ef0fdc37266..8ef0fdc37266 100755
--- a/samples/check-exec/run-script-ask.inc
+++ b/samples/check-exec/run-script-ask.sh
diff --git a/samples/vfs/samples-vfs.h b/samples/vfs/samples-vfs.h
index 103e1e7c4cec..498baf581b56 100644
--- a/samples/vfs/samples-vfs.h
+++ b/samples/vfs/samples-vfs.h
@@ -42,7 +42,11 @@ struct statmount {
__u32 opt_array; /* [str] Array of nul terminated fs options */
__u32 opt_sec_num; /* Number of security options */
__u32 opt_sec_array; /* [str] Array of nul terminated security options */
- __u64 __spare2[46];
+ __u32 mnt_uidmap_num; /* Number of uid mappings */
+ __u32 mnt_uidmap; /* [str] Array of uid mappings */
+ __u32 mnt_gidmap_num; /* Number of gid mappings */
+ __u32 mnt_gidmap; /* [str] Array of gid mappings */
+ __u64 __spare2[44];
char str[]; /* Variable size part containing strings */
};
@@ -158,6 +162,14 @@ struct mnt_ns_info {
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#endif
+#ifndef STATMOUNT_MNT_UIDMAP
+#define STATMOUNT_MNT_UIDMAP 0x00002000U /* Want/got uidmap... */
+#endif
+
+#ifndef STATMOUNT_MNT_GIDMAP
+#define STATMOUNT_MNT_GIDMAP 0x00004000U /* Want/got gidmap... */
+#endif
+
#ifndef MOUNT_ATTR_RDONLY
#define MOUNT_ATTR_RDONLY 0x00000001 /* Mount read-only */
#endif
diff --git a/samples/vfs/test-list-all-mounts.c b/samples/vfs/test-list-all-mounts.c
index 1a02ea4593e3..713c174626aa 100644
--- a/samples/vfs/test-list-all-mounts.c
+++ b/samples/vfs/test-list-all-mounts.c
@@ -128,20 +128,43 @@ next:
STATMOUNT_MNT_POINT |
STATMOUNT_MNT_NS_ID |
STATMOUNT_MNT_OPTS |
- STATMOUNT_FS_TYPE, 0);
+ STATMOUNT_FS_TYPE |
+ STATMOUNT_MNT_UIDMAP |
+ STATMOUNT_MNT_GIDMAP, 0);
if (!stmnt) {
printf("Failed to statmount(%" PRIu64 ") in mount namespace(%" PRIu64 ")\n",
(uint64_t)last_mnt_id, (uint64_t)info.mnt_ns_id);
continue;
}
- printf("mnt_id:\t\t%" PRIu64 "\nmnt_parent_id:\t%" PRIu64 "\nfs_type:\t%s\nmnt_root:\t%s\nmnt_point:\t%s\nmnt_opts:\t%s\n\n",
+ printf("mnt_id:\t\t%" PRIu64 "\nmnt_parent_id:\t%" PRIu64 "\nfs_type:\t%s\nmnt_root:\t%s\nmnt_point:\t%s\nmnt_opts:\t%s\n",
(uint64_t)stmnt->mnt_id,
(uint64_t)stmnt->mnt_parent_id,
- stmnt->str + stmnt->fs_type,
- stmnt->str + stmnt->mnt_root,
- stmnt->str + stmnt->mnt_point,
- stmnt->str + stmnt->mnt_opts);
+ (stmnt->mask & STATMOUNT_FS_TYPE) ? stmnt->str + stmnt->fs_type : "",
+ (stmnt->mask & STATMOUNT_MNT_ROOT) ? stmnt->str + stmnt->mnt_root : "",
+ (stmnt->mask & STATMOUNT_MNT_POINT) ? stmnt->str + stmnt->mnt_point : "",
+ (stmnt->mask & STATMOUNT_MNT_OPTS) ? stmnt->str + stmnt->mnt_opts : "");
+
+ if (stmnt->mask & STATMOUNT_MNT_UIDMAP) {
+ const char *idmap = stmnt->str + stmnt->mnt_uidmap;
+
+ for (size_t idx = 0; idx < stmnt->mnt_uidmap_num; idx++) {
+ printf("mnt_uidmap[%zu]:\t%s\n", idx, idmap);
+ idmap += strlen(idmap) + 1;
+ }
+ }
+
+ if (stmnt->mask & STATMOUNT_MNT_GIDMAP) {
+ const char *idmap = stmnt->str + stmnt->mnt_gidmap;
+
+ for (size_t idx = 0; idx < stmnt->mnt_gidmap_num; idx++) {
+ printf("mnt_gidmap[%zu]:\t%s\n", idx, idmap);
+ idmap += strlen(idmap) + 1;
+ }
+ }
+
+ printf("\n");
+
free(stmnt);
}
}
diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang
index 2435efae67f5..b67636b28c35 100644
--- a/scripts/Makefile.clang
+++ b/scripts/Makefile.clang
@@ -12,6 +12,8 @@ CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
CLANG_TARGET_FLAGS_sparc := sparc64-linux-gnu
CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
+# This is only for i386 UM builds, which need the 32-bit target, not -m32
+CLANG_TARGET_FLAGS_i386 := i386-linux-gnu
CLANG_TARGET_FLAGS_um := $(CLANG_TARGET_FLAGS_$(SUBARCH))
CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(SRCARCH))
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index cad20f0e66ee..981d14ef9db2 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -166,8 +166,8 @@ _c_flags += $(if $(patsubst n%,, \
$(UBSAN_SANITIZE_$(target-stem).o)$(UBSAN_SANITIZE)$(is-kernel-object)), \
$(CFLAGS_UBSAN))
_c_flags += $(if $(patsubst n%,, \
- $(UBSAN_SIGNED_WRAP_$(target-stem).o)$(UBSAN_SANITIZE_$(target-stem).o)$(UBSAN_SIGNED_WRAP)$(UBSAN_SANITIZE)$(is-kernel-object)), \
- $(CFLAGS_UBSAN_SIGNED_WRAP))
+ $(UBSAN_INTEGER_WRAP_$(target-stem).o)$(UBSAN_SANITIZE_$(target-stem).o)$(UBSAN_INTEGER_WRAP)$(UBSAN_SANITIZE)$(is-kernel-object)), \
+ $(CFLAGS_UBSAN_INTEGER_WRAP))
endif
ifeq ($(CONFIG_KCOV),y)
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index b2d3b273b802..9e35198edbf0 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -14,5 +14,11 @@ ubsan-cflags-$(CONFIG_UBSAN_TRAP) += $(call cc-option,-fsanitize-trap=undefined
export CFLAGS_UBSAN := $(ubsan-cflags-y)
-ubsan-signed-wrap-cflags-$(CONFIG_UBSAN_SIGNED_WRAP) += -fsanitize=signed-integer-overflow
-export CFLAGS_UBSAN_SIGNED_WRAP := $(ubsan-signed-wrap-cflags-y)
+ubsan-integer-wrap-cflags-$(CONFIG_UBSAN_INTEGER_WRAP) += \
+ -fsanitize-undefined-ignore-overflow-pattern=all \
+ -fsanitize=signed-integer-overflow \
+ -fsanitize=unsigned-integer-overflow \
+ -fsanitize=implicit-signed-integer-truncation \
+ -fsanitize=implicit-unsigned-integer-truncation \
+ -fsanitize-ignorelist=$(srctree)/scripts/integer-wrap-ignore.scl
+export CFLAGS_UBSAN_INTEGER_WRAP := $(ubsan-integer-wrap-cflags-y)
diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check
index 68083f2f1122..408b1dbe7884 100755
--- a/scripts/documentation-file-ref-check
+++ b/scripts/documentation-file-ref-check
@@ -92,7 +92,7 @@ while (<IN>) {
next if ($f =~ m,^Next/,);
# Makefiles and scripts contain nasty expressions to parse docs
- next if ($f =~ m/Makefile/ || $f =~ m/\.sh$/);
+ next if ($f =~ m/Makefile/ || $f =~ m/\.(sh|py|pl|~|rej|org|orig)$/);
# It doesn't make sense to parse hidden files
next if ($f =~ m#/\.#);
diff --git a/scripts/get_abi.pl b/scripts/get_abi.pl
deleted file mode 100755
index de1c0354b50c..000000000000
--- a/scripts/get_abi.pl
+++ /dev/null
@@ -1,1103 +0,0 @@
-#!/usr/bin/env perl
-# SPDX-License-Identifier: GPL-2.0
-
-BEGIN { $Pod::Usage::Formatter = 'Pod::Text::Termcap'; }
-
-use strict;
-use warnings;
-use utf8;
-use Pod::Usage qw(pod2usage);
-use Getopt::Long;
-use File::Find;
-use IO::Handle;
-use Fcntl ':mode';
-use Cwd 'abs_path';
-use Data::Dumper;
-
-my $help = 0;
-my $hint = 0;
-my $man = 0;
-my $debug = 0;
-my $enable_lineno = 0;
-my $show_warnings = 1;
-my $prefix="Documentation/ABI";
-my $sysfs_prefix="/sys";
-my $search_string;
-
-# Debug options
-my $dbg_what_parsing = 1;
-my $dbg_what_open = 2;
-my $dbg_dump_abi_structs = 4;
-my $dbg_undefined = 8;
-
-$Data::Dumper::Indent = 1;
-$Data::Dumper::Terse = 1;
-
-#
-# If true, assumes that the description is formatted with ReST
-#
-my $description_is_rst = 1;
-
-GetOptions(
- "debug=i" => \$debug,
- "enable-lineno" => \$enable_lineno,
- "rst-source!" => \$description_is_rst,
- "dir=s" => \$prefix,
- 'help|?' => \$help,
- "show-hints" => \$hint,
- "search-string=s" => \$search_string,
- man => \$man
-) or pod2usage(2);
-
-pod2usage(1) if $help;
-pod2usage(-exitstatus => 0, -noperldoc, -verbose => 2) if $man;
-
-pod2usage(2) if (scalar @ARGV < 1 || @ARGV > 2);
-
-my ($cmd, $arg) = @ARGV;
-
-pod2usage(2) if ($cmd ne "search" && $cmd ne "rest" && $cmd ne "validate" && $cmd ne "undefined");
-pod2usage(2) if ($cmd eq "search" && !$arg);
-
-require Data::Dumper if ($debug & $dbg_dump_abi_structs);
-
-my %data;
-my %symbols;
-
-#
-# Displays an error message, printing file name and line
-#
-sub parse_error($$$$) {
- my ($file, $ln, $msg, $data) = @_;
-
- return if (!$show_warnings);
-
- $data =~ s/\s+$/\n/;
-
- print STDERR "Warning: file $file#$ln:\n\t$msg";
-
- if ($data ne "") {
- print STDERR ". Line\n\t\t$data";
- } else {
- print STDERR "\n";
- }
-}
-
-#
-# Parse an ABI file, storing its contents at %data
-#
-sub parse_abi {
- my $file = $File::Find::name;
-
- my $mode = (stat($file))[2];
- return if ($mode & S_IFDIR);
- return if ($file =~ m,/README,);
- return if ($file =~ m,/\.,);
- return if ($file =~ m,\.(rej|org|orig|bak)$,);
-
- my $name = $file;
- $name =~ s,.*/,,;
-
- my $fn = $file;
- $fn =~ s,.*Documentation/ABI/,,;
-
- my $nametag = "File $fn";
- $data{$nametag}->{what} = "File $name";
- $data{$nametag}->{type} = "File";
- $data{$nametag}->{file} = $name;
- $data{$nametag}->{filepath} = $file;
- $data{$nametag}->{is_file} = 1;
- $data{$nametag}->{line_no} = 1;
-
- my $type = $file;
- $type =~ s,.*/(.*)/.*,$1,;
-
- my $what;
- my $new_what;
- my $tag = "";
- my $ln;
- my $xrefs;
- my $space;
- my @labels;
- my $label = "";
-
- print STDERR "Opening $file\n" if ($debug & $dbg_what_open);
- open IN, $file;
- while(<IN>) {
- $ln++;
- if (m/^(\S+)(:\s*)(.*)/i) {
- my $new_tag = lc($1);
- my $sep = $2;
- my $content = $3;
-
- if (!($new_tag =~ m/(what|where|date|kernelversion|contact|description|users)/)) {
- if ($tag eq "description") {
- # New "tag" is actually part of
- # description. Don't consider it a tag
- $new_tag = "";
- } elsif ($tag ne "") {
- parse_error($file, $ln, "tag '$tag' is invalid", $_);
- }
- }
-
- # Invalid, but it is a common mistake
- if ($new_tag eq "where") {
- parse_error($file, $ln, "tag 'Where' is invalid. Should be 'What:' instead", "");
- $new_tag = "what";
- }
-
- if ($new_tag =~ m/what/) {
- $space = "";
- $content =~ s/[,.;]$//;
-
- push @{$symbols{$content}->{file}}, " $file:" . ($ln - 1);
-
- if ($tag =~ m/what/) {
- $what .= "\xac" . $content;
- } else {
- if ($what) {
- parse_error($file, $ln, "What '$what' doesn't have a description", "") if (!$data{$what}->{description});
-
- foreach my $w(split /\xac/, $what) {
- $symbols{$w}->{xref} = $what;
- };
- }
-
- $what = $content;
- $label = $content;
- $new_what = 1;
- }
- push @labels, [($content, $label)];
- $tag = $new_tag;
-
- push @{$data{$nametag}->{symbols}}, $content if ($data{$nametag}->{what});
- next;
- }
-
- if ($tag ne "" && $new_tag) {
- $tag = $new_tag;
-
- if ($new_what) {
- @{$data{$what}->{label_list}} = @labels if ($data{$nametag}->{what});
- @labels = ();
- $label = "";
- $new_what = 0;
-
- $data{$what}->{type} = $type;
- if (!defined($data{$what}->{file})) {
- $data{$what}->{file} = $name;
- $data{$what}->{filepath} = $file;
- } else {
- $data{$what}->{description} .= "\n\n" if (defined($data{$what}->{description}));
- if ($name ne $data{$what}->{file}) {
- $data{$what}->{file} .= " " . $name;
- $data{$what}->{filepath} .= " " . $file;
- }
- }
- print STDERR "\twhat: $what\n" if ($debug & $dbg_what_parsing);
- $data{$what}->{line_no} = $ln;
- } else {
- $data{$what}->{line_no} = $ln if (!defined($data{$what}->{line_no}));
- }
-
- if (!$what) {
- parse_error($file, $ln, "'What:' should come first:", $_);
- next;
- }
- if ($new_tag eq "description") {
- $sep =~ s,:, ,;
- $content = ' ' x length($new_tag) . $sep . $content;
- while ($content =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {}
- if ($content =~ m/^(\s*)(\S.*)$/) {
- # Preserve initial spaces for the first line
- $space = $1;
- $content = "$2\n";
- $data{$what}->{$tag} .= $content;
- } else {
- undef($space);
- }
-
- } else {
- $data{$what}->{$tag} = $content;
- }
- next;
- }
- }
-
- # Store any contents before tags at the database
- if (!$tag && $data{$nametag}->{what}) {
- $data{$nametag}->{description} .= $_;
- next;
- }
-
- if ($tag eq "description") {
- my $content = $_;
- while ($content =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {}
- if (m/^\s*\n/) {
- $data{$what}->{$tag} .= "\n";
- next;
- }
-
- if (!defined($space)) {
- # Preserve initial spaces for the first line
- if ($content =~ m/^(\s*)(\S.*)$/) {
- $space = $1;
- $content = "$2\n";
- }
- } else {
- $space = "" if (!($content =~ s/^($space)//));
- }
- $data{$what}->{$tag} .= $content;
-
- next;
- }
- if (m/^\s*(.*)/) {
- $data{$what}->{$tag} .= "\n$1";
- $data{$what}->{$tag} =~ s/\n+$//;
- next;
- }
-
- # Everything else is error
- parse_error($file, $ln, "Unexpected content", $_);
- }
- $data{$nametag}->{description} =~ s/^\n+// if ($data{$nametag}->{description});
- if ($what) {
- parse_error($file, $ln, "What '$what' doesn't have a description", "") if (!$data{$what}->{description});
-
- foreach my $w(split /\xac/,$what) {
- $symbols{$w}->{xref} = $what;
- };
- }
- close IN;
-}
-
-sub create_labels {
- my %labels;
-
- foreach my $what (keys %data) {
- next if ($data{$what}->{file} eq "File");
-
- foreach my $p (@{$data{$what}->{label_list}}) {
- my ($content, $label) = @{$p};
- $label = "abi_" . $label . " ";
- $label =~ tr/A-Z/a-z/;
-
- # Convert special chars to "_"
- $label =~s/([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff])/_/g;
- $label =~ s,_+,_,g;
- $label =~ s,_$,,;
-
- # Avoid duplicated labels
- while (defined($labels{$label})) {
- my @chars = ("A".."Z", "a".."z");
- $label .= $chars[rand @chars];
- }
- $labels{$label} = 1;
-
- $data{$what}->{label} = $label;
-
- # only one label is enough
- last;
- }
- }
-}
-
-#
-# Outputs the book on ReST format
-#
-
-# \b doesn't work well with paths. So, we need to define something else:
-# Boundaries are punct characters, spaces and end-of-line
-my $start = qr {(^|\s|\() }x;
-my $bondary = qr { ([,.:;\)\s]|\z) }x;
-my $xref_match = qr { $start(\/(sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)$bondary }x;
-my $symbols = qr { ([\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff]) }x;
-
-sub output_rest {
- create_labels();
-
- my $part = "";
-
- foreach my $what (sort {
- ($data{$a}->{type} eq "File") cmp ($data{$b}->{type} eq "File") ||
- $a cmp $b
- } keys %data) {
- my $type = $data{$what}->{type};
-
- my @file = split / /, $data{$what}->{file};
- my @filepath = split / /, $data{$what}->{filepath};
-
- if ($enable_lineno) {
- printf ".. LINENO %s%s#%s\n\n",
- $prefix, $file[0],
- $data{$what}->{line_no};
- }
-
- my $w = $what;
-
- if ($type ne "File") {
- my $cur_part = $what;
- if ($what =~ '/') {
- if ($what =~ m#^(\/?(?:[\w\-]+\/?){1,2})#) {
- $cur_part = "Symbols under $1";
- $cur_part =~ s,/$,,;
- }
- }
-
- if ($cur_part ne "" && $part ne $cur_part) {
- $part = $cur_part;
- my $bar = $part;
- $bar =~ s/./-/g;
- print "$part\n$bar\n\n";
- }
-
- printf ".. _%s:\n\n", $data{$what}->{label};
-
- my @names = split /\xac/,$w;
- my $len = 0;
-
- foreach my $name (@names) {
- $name =~ s/$symbols/\\$1/g;
- $name = "**$name**";
- $len = length($name) if (length($name) > $len);
- }
-
- print "+-" . "-" x $len . "-+\n";
- foreach my $name (@names) {
- printf "| %s", $name . " " x ($len - length($name)) . " |\n";
- print "+-" . "-" x $len . "-+\n";
- }
-
- print "\n";
- }
-
- for (my $i = 0; $i < scalar(@filepath); $i++) {
- my $path = $filepath[$i];
- my $f = $file[$i];
-
- $path =~ s,.*/(.*/.*),$1,;;
- $path =~ s,[/\-],_,g;;
- my $fileref = "abi_file_".$path;
-
- if ($type eq "File") {
- print ".. _$fileref:\n\n";
- } else {
- print "Defined on file :ref:`$f <$fileref>`\n\n";
- }
- }
-
- if ($type eq "File") {
- my $bar = $w;
- $bar =~ s/./-/g;
- print "$w\n$bar\n\n";
- }
-
- my $desc = "";
- $desc = $data{$what}->{description} if (defined($data{$what}->{description}));
- $desc =~ s/\s+$/\n/;
-
- if (!($desc =~ /^\s*$/)) {
- if ($description_is_rst) {
- # Remove title markups from the description
- # Having titles inside ABI files will only work if extra
- # care would be taken in order to strictly follow the same
- # level order for each markup.
- $desc =~ s/\n[\-\*\=\^\~]+\n/\n\n/g;
-
- # Enrich text by creating cross-references
-
- my $new_desc = "";
- my $init_indent = -1;
- my $literal_indent = -1;
-
- open(my $fh, "+<", \$desc);
- while (my $d = <$fh>) {
- my $indent = $d =~ m/^(\s+)/;
- my $spaces = length($indent);
- $init_indent = $indent if ($init_indent < 0);
- if ($literal_indent >= 0) {
- if ($spaces > $literal_indent) {
- $new_desc .= $d;
- next;
- } else {
- $literal_indent = -1;
- }
- } else {
- if ($d =~ /()::$/ && !($d =~ /^\s*\.\./)) {
- $literal_indent = $spaces;
- }
- }
-
- $d =~ s,Documentation/(?!devicetree)(\S+)\.rst,:doc:`/$1`,g;
-
- my @matches = $d =~ m,Documentation/ABI/([\w\/\-]+),g;
- foreach my $f (@matches) {
- my $xref = $f;
- my $path = $f;
- $path =~ s,.*/(.*/.*),$1,;;
- $path =~ s,[/\-],_,g;;
- $xref .= " <abi_file_" . $path . ">";
- $d =~ s,\bDocumentation/ABI/$f\b,:ref:`$xref`,g;
- }
-
- # Seek for cross reference symbols like /sys/...
- @matches = $d =~ m/$xref_match/g;
-
- foreach my $s (@matches) {
- next if (!($s =~ m,/,));
- if (defined($data{$s}) && defined($data{$s}->{label})) {
- my $xref = $s;
-
- $xref =~ s/$symbols/\\$1/g;
- $xref = ":ref:`$xref <" . $data{$s}->{label} . ">`";
-
- $d =~ s,$start$s$bondary,$1$xref$2,g;
- }
- }
- $new_desc .= $d;
- }
- close $fh;
-
-
- print "$new_desc\n\n";
- } else {
- $desc =~ s/^\s+//;
-
- # Remove title markups from the description, as they won't work
- $desc =~ s/\n[\-\*\=\^\~]+\n/\n\n/g;
-
- if ($desc =~ m/\:\n/ || $desc =~ m/\n[\t ]+/ || $desc =~ m/[\x00-\x08\x0b-\x1f\x7b-\xff]/) {
- # put everything inside a code block
- $desc =~ s/\n/\n /g;
-
- print "::\n\n";
- print " $desc\n\n";
- } else {
- # Escape any special chars from description
- $desc =~s/([\x00-\x08\x0b-\x1f\x21-\x2a\x2d\x2f\x3c-\x40\x5c\x5e-\x60\x7b-\xff])/\\$1/g;
- print "$desc\n\n";
- }
- }
- } else {
- print "DESCRIPTION MISSING for $what\n\n" if (!$data{$what}->{is_file});
- }
-
- if ($data{$what}->{symbols}) {
- printf "Has the following ABI:\n\n";
-
- foreach my $content(@{$data{$what}->{symbols}}) {
- my $label = $data{$symbols{$content}->{xref}}->{label};
-
- # Escape special chars from content
- $content =~s/([\x00-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])/\\$1/g;
-
- print "- :ref:`$content <$label>`\n\n";
- }
- }
-
- if (defined($data{$what}->{users})) {
- my $users = $data{$what}->{users};
-
- $users =~ s/\n/\n\t/g;
- printf "Users:\n\t%s\n\n", $users if ($users ne "");
- }
-
- }
-}
-
-#
-# Searches for ABI symbols
-#
-sub search_symbols {
- foreach my $what (sort keys %data) {
- next if (!($what =~ m/($arg)/));
-
- my $type = $data{$what}->{type};
- next if ($type eq "File");
-
- my $file = $data{$what}->{filepath};
-
- $what =~ s/\xac/, /g;
- my $bar = $what;
- $bar =~ s/./-/g;
-
- print "\n$what\n$bar\n\n";
-
- my $kernelversion = $data{$what}->{kernelversion} if (defined($data{$what}->{kernelversion}));
- my $contact = $data{$what}->{contact} if (defined($data{$what}->{contact}));
- my $users = $data{$what}->{users} if (defined($data{$what}->{users}));
- my $date = $data{$what}->{date} if (defined($data{$what}->{date}));
- my $desc = $data{$what}->{description} if (defined($data{$what}->{description}));
-
- $kernelversion =~ s/^\s+// if ($kernelversion);
- $contact =~ s/^\s+// if ($contact);
- if ($users) {
- $users =~ s/^\s+//;
- $users =~ s/\n//g;
- }
- $date =~ s/^\s+// if ($date);
- $desc =~ s/^\s+// if ($desc);
-
- printf "Kernel version:\t\t%s\n", $kernelversion if ($kernelversion);
- printf "Date:\t\t\t%s\n", $date if ($date);
- printf "Contact:\t\t%s\n", $contact if ($contact);
- printf "Users:\t\t\t%s\n", $users if ($users);
- print "Defined on file(s):\t$file\n\n";
- print "Description:\n\n$desc";
- }
-}
-
-# Exclude /sys/kernel/debug and /sys/kernel/tracing from the search path
-sub dont_parse_special_attributes {
- if (($File::Find::dir =~ m,^/sys/kernel,)) {
- return grep {!/(debug|tracing)/ } @_;
- }
-
- if (($File::Find::dir =~ m,^/sys/fs,)) {
- return grep {!/(pstore|bpf|fuse)/ } @_;
- }
-
- return @_
-}
-
-my %leaf;
-my %aliases;
-my @files;
-my %root;
-
-sub graph_add_file {
- my $file = shift;
- my $type = shift;
-
- my $dir = $file;
- $dir =~ s,^(.*/).*,$1,;
- $file =~ s,.*/,,;
-
- my $name;
- my $file_ref = \%root;
- foreach my $edge(split "/", $dir) {
- $name .= "$edge/";
- if (!defined ${$file_ref}{$edge}) {
- ${$file_ref}{$edge} = { };
- }
- $file_ref = \%{$$file_ref{$edge}};
- ${$file_ref}{"__name"} = [ $name ];
- }
- $name .= "$file";
- ${$file_ref}{$file} = {
- "__name" => [ $name ]
- };
-
- return \%{$$file_ref{$file}};
-}
-
-sub graph_add_link {
- my $file = shift;
- my $link = shift;
-
- # Traverse graph to find the reference
- my $file_ref = \%root;
- foreach my $edge(split "/", $file) {
- $file_ref = \%{$$file_ref{$edge}} || die "Missing node!";
- }
-
- # do a BFS
-
- my @queue;
- my %seen;
- my $st;
-
- push @queue, $file_ref;
- $seen{$start}++;
-
- while (@queue) {
- my $v = shift @queue;
- my @child = keys(%{$v});
-
- foreach my $c(@child) {
- next if $seen{$$v{$c}};
- next if ($c eq "__name");
-
- if (!defined($$v{$c}{"__name"})) {
- printf STDERR "Error: Couldn't find a non-empty name on a children of $file/.*: ";
- print STDERR Dumper(%{$v});
- exit;
- }
-
- # Add new name
- my $name = @{$$v{$c}{"__name"}}[0];
- if ($name =~ s#^$file/#$link/#) {
- push @{$$v{$c}{"__name"}}, $name;
- }
- # Add child to the queue and mark as seen
- push @queue, $$v{$c};
- $seen{$c}++;
- }
- }
-}
-
-my $escape_symbols = qr { ([\x01-\x08\x0e-\x1f\x21-\x29\x2b-\x2d\x3a-\x40\x7b-\xfe]) }x;
-sub parse_existing_sysfs {
- my $file = $File::Find::name;
-
- my $mode = (lstat($file))[2];
- my $abs_file = abs_path($file);
-
- my @tmp;
- push @tmp, $file;
- push @tmp, $abs_file if ($abs_file ne $file);
-
- foreach my $f(@tmp) {
- # Ignore cgroup, as this is big and has zero docs under ABI
- return if ($f =~ m#^/sys/fs/cgroup/#);
-
- # Ignore firmware as it is documented elsewhere
- # Either ACPI or under Documentation/devicetree/bindings/
- return if ($f =~ m#^/sys/firmware/#);
-
- # Ignore some sysfs nodes that aren't actually part of ABI
- return if ($f =~ m#/sections|notes/#);
-
- # Would need to check at
- # Documentation/admin-guide/kernel-parameters.txt, but this
- # is not easily parseable.
- return if ($f =~ m#/parameters/#);
- }
-
- if (S_ISLNK($mode)) {
- $aliases{$file} = $abs_file;
- return;
- }
-
- return if (S_ISDIR($mode));
-
- # Trivial: file is defined exactly the same way at ABI What:
- return if (defined($data{$file}));
- return if (defined($data{$abs_file}));
-
- push @files, graph_add_file($abs_file, "file");
-}
-
-sub get_leave($)
-{
- my $what = shift;
- my $leave;
-
- my $l = $what;
- my $stop = 1;
-
- $leave = $l;
- $leave =~ s,/$,,;
- $leave =~ s,.*/,,;
- $leave =~ s/[\(\)]//g;
-
- # $leave is used to improve search performance at
- # check_undefined_symbols, as the algorithm there can seek
- # for a small number of "what". It also allows giving a
- # hint about a leave with the same name somewhere else.
- # However, there are a few occurences where the leave is
- # either a wildcard or a number. Just group such cases
- # altogether.
- if ($leave =~ m/\.\*/ || $leave eq "" || $leave =~ /\\d/) {
- $leave = "others";
- }
-
- return $leave;
-}
-
-my @not_found;
-
-sub check_file($$)
-{
- my $file_ref = shift;
- my $names_ref = shift;
- my @names = @{$names_ref};
- my $file = $names[0];
-
- my $found_string;
-
- my $leave = get_leave($file);
- if (!defined($leaf{$leave})) {
- $leave = "others";
- }
- my @expr = @{$leaf{$leave}->{expr}};
- die ("\rmissing rules for $leave") if (!defined($leaf{$leave}));
-
- my $path = $file;
- $path =~ s,(.*/).*,$1,;
-
- if ($search_string) {
- return if (!($file =~ m#$search_string#));
- $found_string = 1;
- }
-
- for (my $i = 0; $i < @names; $i++) {
- if ($found_string && $hint) {
- if (!$i) {
- print STDERR "--> $names[$i]\n";
- } else {
- print STDERR " $names[$i]\n";
- }
- }
- foreach my $re (@expr) {
- print STDERR "$names[$i] =~ /^$re\$/\n" if ($debug && $dbg_undefined);
- if ($names[$i] =~ $re) {
- return;
- }
- }
- }
-
- if ($leave ne "others") {
- my @expr = @{$leaf{"others"}->{expr}};
- for (my $i = 0; $i < @names; $i++) {
- foreach my $re (@expr) {
- print STDERR "$names[$i] =~ /^$re\$/\n" if ($debug && $dbg_undefined);
- if ($names[$i] =~ $re) {
- return;
- }
- }
- }
- }
-
- push @not_found, $file if (!$search_string || $found_string);
-
- if ($hint && (!$search_string || $found_string)) {
- my $what = $leaf{$leave}->{what};
- $what =~ s/\xac/\n\t/g;
- if ($leave ne "others") {
- print STDERR "\r more likely regexes:\n\t$what\n";
- } else {
- print STDERR "\r tested regexes:\n\t$what\n";
- }
- }
-}
-
-sub check_undefined_symbols {
- my $num_files = scalar @files;
- my $next_i = 0;
- my $start_time = times;
-
- @files = sort @files;
-
- my $last_time = $start_time;
-
- # When either debug or hint is enabled, there's no sense showing
- # progress, as the progress will be overriden.
- if ($hint || ($debug && $dbg_undefined)) {
- $next_i = $num_files;
- }
-
- my $is_console;
- $is_console = 1 if (-t STDERR);
-
- for (my $i = 0; $i < $num_files; $i++) {
- my $file_ref = $files[$i];
- my @names = @{$$file_ref{"__name"}};
-
- check_file($file_ref, \@names);
-
- my $cur_time = times;
-
- if ($i == $next_i || $cur_time > $last_time + 1) {
- my $percent = $i * 100 / $num_files;
-
- my $tm = $cur_time - $start_time;
- my $time = sprintf "%d:%02d", int($tm), 60 * ($tm - int($tm));
-
- printf STDERR "\33[2K\r", if ($is_console);
- printf STDERR "%s: processing sysfs files... %i%%: $names[0]", $time, $percent;
- printf STDERR "\n", if (!$is_console);
- STDERR->flush();
-
- $next_i = int (($percent + 1) * $num_files / 100);
- $last_time = $cur_time;
- }
- }
-
- my $cur_time = times;
- my $tm = $cur_time - $start_time;
- my $time = sprintf "%d:%02d", int($tm), 60 * ($tm - int($tm));
-
- printf STDERR "\33[2K\r", if ($is_console);
- printf STDERR "%s: processing sysfs files... done\n", $time;
-
- foreach my $file (@not_found) {
- print "$file not found.\n";
- }
-}
-
-sub undefined_symbols {
- print STDERR "Reading $sysfs_prefix directory contents...";
- find({
- wanted =>\&parse_existing_sysfs,
- preprocess =>\&dont_parse_special_attributes,
- no_chdir => 1
- }, $sysfs_prefix);
- print STDERR "done.\n";
-
- $leaf{"others"}->{what} = "";
-
- print STDERR "Converting ABI What fields into regexes...";
- foreach my $w (sort keys %data) {
- foreach my $what (split /\xac/,$w) {
- next if (!($what =~ m/^$sysfs_prefix/));
-
- # Convert what into regular expressions
-
- # Escape dot characters
- $what =~ s/\./\xf6/g;
-
- # Temporarily change [0-9]+ type of patterns
- $what =~ s/\[0\-9\]\+/\xff/g;
-
- # Temporarily change [\d+-\d+] type of patterns
- $what =~ s/\[0\-\d+\]/\xff/g;
- $what =~ s/\[(\d+)\]/\xf4$1\xf5/g;
-
- # Temporarily change [0-9] type of patterns
- $what =~ s/\[(\d)\-(\d)\]/\xf4$1-$2\xf5/g;
-
- # Handle multiple option patterns
- $what =~ s/[\{\<\[]([\w_]+)(?:[,|]+([\w_]+)){1,}[\}\>\]]/($1|$2)/g;
-
- # Handle wildcards
- $what =~ s,\*,.*,g;
- $what =~ s,/\xf6..,/.*,g;
- $what =~ s/\<[^\>]+\>/.*/g;
- $what =~ s/\{[^\}]+\}/.*/g;
- $what =~ s/\[[^\]]+\]/.*/g;
-
- $what =~ s/[XYZ]/.*/g;
-
- # Recover [0-9] type of patterns
- $what =~ s/\xf4/[/g;
- $what =~ s/\xf5/]/g;
-
- # Remove duplicated spaces
- $what =~ s/\s+/ /g;
-
- # Special case: this ABI has a parenthesis on it
- $what =~ s/sqrt\(x^2\+y^2\+z^2\)/sqrt\(x^2\+y^2\+z^2\)/;
-
- # Special case: drop comparition as in:
- # What: foo = <something>
- # (this happens on a few IIO definitions)
- $what =~ s,\s*\=.*$,,;
-
- # Escape all other symbols
- $what =~ s/$escape_symbols/\\$1/g;
- $what =~ s/\\\\/\\/g;
- $what =~ s/\\([\[\]\(\)\|])/$1/g;
- $what =~ s/(\d+)\\(-\d+)/$1$2/g;
-
- $what =~ s/\xff/\\d+/g;
-
- # Special case: IIO ABI which a parenthesis.
- $what =~ s/sqrt(.*)/sqrt\(.*\)/;
-
- # Simplify regexes with multiple .*
- $what =~ s#(?:\.\*){2,}##g;
-# $what =~ s#\.\*/\.\*#.*#g;
-
- # Recover dot characters
- $what =~ s/\xf6/\./g;
-
- my $leave = get_leave($what);
-
- my $added = 0;
- foreach my $l (split /\|/, $leave) {
- if (defined($leaf{$l})) {
- next if ($leaf{$l}->{what} =~ m/\b$what\b/);
- $leaf{$l}->{what} .= "\xac" . $what;
- $added = 1;
- } else {
- $leaf{$l}->{what} = $what;
- $added = 1;
- }
- }
- if ($search_string && $added) {
- print STDERR "What: $what\n" if ($what =~ m#$search_string#);
- }
-
- }
- }
- # Compile regexes
- foreach my $l (sort keys %leaf) {
- my @expr;
- foreach my $w(sort split /\xac/, $leaf{$l}->{what}) {
- push @expr, qr /^$w$/;
- }
- $leaf{$l}->{expr} = \@expr;
- }
-
- # Take links into account
- foreach my $link (sort keys %aliases) {
- my $abs_file = $aliases{$link};
- graph_add_link($abs_file, $link);
- }
- print STDERR "done.\n";
-
- check_undefined_symbols;
-}
-
-# Ensure that the prefix will always end with a slash
-# While this is not needed for find, it makes the patch nicer
-# with --enable-lineno
-$prefix =~ s,/?$,/,;
-
-if ($cmd eq "undefined" || $cmd eq "search") {
- $show_warnings = 0;
-}
-#
-# Parses all ABI files located at $prefix dir
-#
-find({wanted =>\&parse_abi, no_chdir => 1}, $prefix);
-
-print STDERR Data::Dumper->Dump([\%data], [qw(*data)]) if ($debug & $dbg_dump_abi_structs);
-
-#
-# Handles the command
-#
-if ($cmd eq "undefined") {
- undefined_symbols;
-} elsif ($cmd eq "search") {
- search_symbols;
-} else {
- if ($cmd eq "rest") {
- output_rest;
- }
-
- # Warn about duplicated ABI entries
- foreach my $what(sort keys %symbols) {
- my @files = @{$symbols{$what}->{file}};
-
- next if (scalar(@files) == 1);
-
- printf STDERR "Warning: $what is defined %d times: @files\n",
- scalar(@files);
- }
-}
-
-__END__
-
-=head1 NAME
-
-get_abi.pl - parse the Linux ABI files and produce a ReST book.
-
-=head1 SYNOPSIS
-
-B<get_abi.pl> [--debug <level>] [--enable-lineno] [--man] [--help]
- [--(no-)rst-source] [--dir=<dir>] [--show-hints]
- [--search-string <regex>]
- <COMMAND> [<ARGUMENT>]
-
-Where B<COMMAND> can be:
-
-=over 8
-
-B<search> I<SEARCH_REGEX> - search for I<SEARCH_REGEX> inside ABI
-
-B<rest> - output the ABI in ReST markup language
-
-B<validate> - validate the ABI contents
-
-B<undefined> - existing symbols at the system that aren't
- defined at Documentation/ABI
-
-=back
-
-=head1 OPTIONS
-
-=over 8
-
-=item B<--dir>
-
-Changes the location of the ABI search. By default, it uses
-the Documentation/ABI directory.
-
-=item B<--rst-source> and B<--no-rst-source>
-
-The input file may be using ReST syntax or not. Those two options allow
-selecting between a rst-compliant source ABI (B<--rst-source>), or a
-plain text that may be violating ReST spec, so it requres some escaping
-logic (B<--no-rst-source>).
-
-=item B<--enable-lineno>
-
-Enable output of .. LINENO lines.
-
-=item B<--debug> I<debug level>
-
-Print debug information according with the level, which is given by the
-following bitmask:
-
- - 1: Debug parsing What entries from ABI files;
- - 2: Shows what files are opened from ABI files;
- - 4: Dump the structs used to store the contents of the ABI files.
-
-=item B<--show-hints>
-
-Show hints about possible definitions for the missing ABI symbols.
-Used only when B<undefined>.
-
-=item B<--search-string> I<regex string>
-
-Show only occurences that match a search string.
-Used only when B<undefined>.
-
-=item B<--help>
-
-Prints a brief help message and exits.
-
-=item B<--man>
-
-Prints the manual page and exits.
-
-=back
-
-=head1 DESCRIPTION
-
-Parse the Linux ABI files from ABI DIR (usually located at Documentation/ABI),
-allowing to search for ABI symbols or to produce a ReST book containing
-the Linux ABI documentation.
-
-=head1 EXAMPLES
-
-Search for all stable symbols with the word "usb":
-
-=over 8
-
-$ scripts/get_abi.pl search usb --dir Documentation/ABI/stable
-
-=back
-
-Search for all symbols that match the regex expression "usb.*cap":
-
-=over 8
-
-$ scripts/get_abi.pl search usb.*cap
-
-=back
-
-Output all obsoleted symbols in ReST format
-
-=over 8
-
-$ scripts/get_abi.pl rest --dir Documentation/ABI/obsolete
-
-=back
-
-=head1 BUGS
-
-Report bugs to Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-
-=head1 COPYRIGHT
-
-Copyright (c) 2016-2021 by Mauro Carvalho Chehab <mchehab+huawei@kernel.org>.
-
-License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
-
-This is free software: you are free to change and redistribute it.
-There is NO WARRANTY, to the extent permitted by law.
-
-=cut
diff --git a/scripts/get_abi.py b/scripts/get_abi.py
new file mode 100755
index 000000000000..7ce4748a46d2
--- /dev/null
+++ b/scripts/get_abi.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python3
+# pylint: disable=R0903
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Parse ABI documentation and produce results from it.
+"""
+
+import argparse
+import logging
+import os
+import sys
+
+# Import Python modules
+
+LIB_DIR = "lib/abi"
+SRC_DIR = os.path.dirname(os.path.realpath(__file__))
+
+sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR))
+
+from abi_parser import AbiParser # pylint: disable=C0413
+from abi_regex import AbiRegex # pylint: disable=C0413
+from helpers import ABI_DIR, DEBUG_HELP # pylint: disable=C0413
+from system_symbols import SystemSymbols # pylint: disable=C0413
+
+# Command line classes
+
+
+REST_DESC = """
+Produce output in ReST format.
+
+The output is organized in two sections:
+
+- Symbols: show all parsed symbols in alphabetical order;
+- Files: cross-reference the content of each file with the symbols in it.
+"""
+
+class AbiRest:
+ """Initialize an argparse subparser for rest output"""
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("rest",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description=REST_DESC)
+
+ parser.add_argument("--enable-lineno", action="store_true",
+ help="enable lineno")
+ parser.add_argument("--raw", action="store_true",
+ help="output text as contained in the ABI files. "
+ "It not used, output will contain dynamically"
+ " generated cross references when possible.")
+ parser.add_argument("--no-file", action="store_true",
+ help="Don't the files section")
+ parser.add_argument("--show-hints", help="Show-hints")
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ parser = AbiParser(args.dir, debug=args.debug)
+ parser.parse_abi()
+ parser.check_issues()
+
+ for t in parser.doc(args.raw, show_file=not args.no_file):
+ if args.enable_lineno:
+ print(f".. LINENO {t[1]}#{t[2]}\n\n")
+
+ print(t[0])
+
+class AbiValidate:
+ """Initialize an argparse subparser for ABI validation"""
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("validate",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description="list events")
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ parser = AbiParser(args.dir, debug=args.debug)
+ parser.parse_abi()
+ parser.check_issues()
+
+
+class AbiSearch:
+ """Initialize an argparse subparser for ABI search"""
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("search",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description="Search ABI using a regular expression")
+
+ parser.add_argument("expression",
+ help="Case-insensitive search pattern for the ABI symbol")
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ parser = AbiParser(args.dir, debug=args.debug)
+ parser.parse_abi()
+ parser.search_symbols(args.expression)
+
+UNDEFINED_DESC="""
+Check undefined ABIs on local machine.
+
+Read sysfs devnodes and check if the devnodes there are defined inside
+ABI documentation.
+
+The search logic tries to minimize the number of regular expressions to
+search per each symbol.
+
+By default, it runs on a single CPU, as Python support for CPU threads
+is still experimental, and multi-process runs on Python is very slow.
+
+On experimental tests, if the number of ABI symbols to search per devnode
+is contained on a limit of ~150 regular expressions, using a single CPU
+is a lot faster than using multiple processes. However, if the number of
+regular expressions to check is at the order of ~30000, using multiple
+CPUs speeds up the check.
+"""
+
+class AbiUndefined:
+ """
+ Initialize an argparse subparser for logic to check undefined ABI at
+ the current machine's sysfs
+ """
+
+ def __init__(self, subparsers):
+ """Initialize argparse subparsers"""
+
+ parser = subparsers.add_parser("undefined",
+ formatter_class=argparse.RawTextHelpFormatter,
+ description=UNDEFINED_DESC)
+
+ parser.add_argument("-S", "--sysfs-dir", default="/sys",
+ help="directory where sysfs is mounted")
+ parser.add_argument("-s", "--search-string",
+ help="search string regular expression to limit symbol search")
+ parser.add_argument("-H", "--show-hints", action="store_true",
+ help="Hints about definitions for missing ABI symbols.")
+ parser.add_argument("-j", "--jobs", "--max-workers", type=int, default=1,
+ help="If bigger than one, enables multiprocessing.")
+ parser.add_argument("-c", "--max-chunk-size", type=int, default=50,
+ help="Maximum number of chunk size")
+ parser.add_argument("-f", "--found", action="store_true",
+ help="Also show found items. "
+ "Helpful to debug the parser."),
+ parser.add_argument("-d", "--dry-run", action="store_true",
+ help="Don't actually search for undefined. "
+ "Helpful to debug the parser."),
+
+ parser.set_defaults(func=self.run)
+
+ def run(self, args):
+ """Run subparser"""
+
+ abi = AbiRegex(args.dir, debug=args.debug,
+ search_string=args.search_string)
+
+ abi_symbols = SystemSymbols(abi=abi, hints=args.show_hints,
+ sysfs=args.sysfs_dir)
+
+ abi_symbols.check_undefined_symbols(dry_run=args.dry_run,
+ found=args.found,
+ max_workers=args.jobs,
+ chunk_size=args.max_chunk_size)
+
+
+def main():
+ """Main program"""
+
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
+
+ parser.add_argument("-d", "--debug", type=int, default=0, help="debug level")
+ parser.add_argument("-D", "--dir", default=ABI_DIR, help=DEBUG_HELP)
+
+ subparsers = parser.add_subparsers()
+
+ AbiRest(subparsers)
+ AbiValidate(subparsers)
+ AbiSearch(subparsers)
+ AbiUndefined(subparsers)
+
+ args = parser.parse_args()
+
+ if args.debug:
+ level = logging.DEBUG
+ else:
+ level = logging.INFO
+
+ logging.basicConfig(level=level, format="[%(levelname)s] %(message)s")
+
+ if "func" in args:
+ args.func(args)
+ else:
+ sys.exit(f"Please specify a valid command for {sys.argv[0]}")
+
+
+# Call main method
+if __name__ == "__main__":
+ main()
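
The --jobs and --max-chunk-size options above feed
SystemSymbols.check_undefined_symbols(), whose implementation lives in
scripts/lib/abi/system_symbols.py and is not part of this hunk. A hedged
sketch of the chunked worker pattern those options describe; check_chunk()
is a hypothetical stand-in for the real per-chunk matching logic:

    import re
    from concurrent.futures import ProcessPoolExecutor

    def check_chunk(args):
        """Return the sysfs paths in this chunk that match no regex."""
        paths, patterns = args
        regexes = [re.compile(p) for p in patterns]
        return [p for p in paths if not any(r.match(p) for r in regexes)]

    def undefined(paths, patterns, max_workers=1, chunk_size=50):
        """Split paths into chunks; stay on one process unless asked."""
        chunks = [(paths[i:i + chunk_size], patterns)
                  for i in range(0, len(paths), chunk_size)]
        if max_workers <= 1:
            results = map(check_chunk, chunks)   # default: single process
        else:
            with ProcessPoolExecutor(max_workers=max_workers) as pool:
                results = list(pool.map(check_chunk, chunks))
        return [p for chunk in results for p in chunk]

As the command description above notes, the single-process path usually wins
unless the number of regular expressions grows very large.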
diff --git a/scripts/get_feat.pl b/scripts/get_feat.pl
index 5c5397eeb237..40fb28c8424e 100755
--- a/scripts/get_feat.pl
+++ b/scripts/get_feat.pl
@@ -512,13 +512,13 @@ print STDERR Data::Dumper->Dump([\%data], [qw(*data)]) if ($debug);
# Handles the command
#
if ($cmd eq "current") {
- $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/');
+ $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/');
$arch =~s/\s+$//;
}
if ($cmd eq "ls" or $cmd eq "list") {
if (!$arch) {
- $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/');
+ $arch = qx(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/');
$arch =~s/\s+$//;
}
diff --git a/scripts/integer-wrap-ignore.scl b/scripts/integer-wrap-ignore.scl
new file mode 100644
index 000000000000..431c3053a4a2
--- /dev/null
+++ b/scripts/integer-wrap-ignore.scl
@@ -0,0 +1,3 @@
+[{unsigned-integer-overflow,signed-integer-overflow,implicit-signed-integer-truncation,implicit-unsigned-integer-truncation}]
+type:*
+type:size_t=sanitize
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index e57c5e989a0a..af6cf408b96d 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -26,7 +26,7 @@ kernel-doc - Print formatted kernel documentation to stdout
kernel-doc [-h] [-v] [-Werror] [-Wall] [-Wreturn] [-Wshort-desc[ription]] [-Wcontents-before-sections]
[ -man |
- -rst [-sphinx-version VERSION] [-enable-lineno] |
+ -rst [-enable-lineno] |
-none
]
[
@@ -130,7 +130,6 @@ if ($#ARGV == -1) {
}
my $kernelversion;
-my ($sphinx_major, $sphinx_minor, $sphinx_patch);
my $dohighlight = "";
@@ -138,7 +137,6 @@ my $verbose = 0;
my $Werror = 0;
my $Wreturn = 0;
my $Wshort_desc = 0;
-my $Wcontents_before_sections = 0;
my $output_mode = "rst";
my $output_preformatted = 0;
my $no_doc_sections = 0;
@@ -179,7 +177,7 @@ my ($function, %function_table, %parametertypes, $declaration_purpose);
my %nosymbol_table = ();
my $declaration_start_line;
my ($type, $declaration_name, $return_type);
-my ($newsection, $newcontents, $prototype, $brcount, %source_map);
+my ($newsection, $newcontents, $prototype, $brcount);
if (defined($ENV{'KBUILD_VERBOSE'}) && $ENV{'KBUILD_VERBOSE'} =~ '1') {
$verbose = 1;
@@ -224,7 +222,6 @@ use constant {
STATE_INLINE => 7, # gathering doc outside main block
};
my $state;
-my $in_doc_sect;
my $leading_space;
# Inline documentation state
@@ -333,12 +330,9 @@ while ($ARGV[0] =~ m/^--?(.*)/) {
$Wreturn = 1;
} elsif ($cmd eq "Wshort-desc" or $cmd eq "Wshort-description") {
$Wshort_desc = 1;
- } elsif ($cmd eq "Wcontents-before-sections") {
- $Wcontents_before_sections = 1;
} elsif ($cmd eq "Wall") {
$Wreturn = 1;
$Wshort_desc = 1;
- $Wcontents_before_sections = 1;
} elsif (($cmd eq "h") || ($cmd eq "help")) {
pod2usage(-exitval => 0, -verbose => 2);
} elsif ($cmd eq 'no-doc-sections') {
@@ -347,23 +341,6 @@ while ($ARGV[0] =~ m/^--?(.*)/) {
$enable_lineno = 1;
} elsif ($cmd eq 'show-not-found') {
$show_not_found = 1; # A no-op but don't fail
- } elsif ($cmd eq "sphinx-version") {
- my $ver_string = shift @ARGV;
- if ($ver_string =~ m/^(\d+)(\.\d+)?(\.\d+)?/) {
- $sphinx_major = $1;
- if (defined($2)) {
- $sphinx_minor = substr($2,1);
- } else {
- $sphinx_minor = 0;
- }
- if (defined($3)) {
- $sphinx_patch = substr($3,1)
- } else {
- $sphinx_patch = 0;
- }
- } else {
- die "Sphinx version should either major.minor or major.minor.patch format\n";
- }
} else {
# Unknown argument
pod2usage(
@@ -387,8 +364,6 @@ while ($ARGV[0] =~ m/^--?(.*)/) {
# continue execution near EOF;
-# The C domain dialect changed on Sphinx 3. So, we need to check the
-# version in order to produce the right tags.
sub findprog($)
{
foreach(split(/:/, $ENV{PATH})) {
@@ -396,42 +371,6 @@ sub findprog($)
}
}
-sub get_sphinx_version()
-{
- my $ver;
-
- my $cmd = "sphinx-build";
- if (!findprog($cmd)) {
- my $cmd = "sphinx-build3";
- if (!findprog($cmd)) {
- $sphinx_major = 1;
- $sphinx_minor = 2;
- $sphinx_patch = 0;
- printf STDERR "Warning: Sphinx version not found. Using default (Sphinx version %d.%d.%d)\n",
- $sphinx_major, $sphinx_minor, $sphinx_patch;
- return;
- }
- }
-
- open IN, "$cmd --version 2>&1 |";
- while (<IN>) {
- if (m/^\s*sphinx-build\s+([\d]+)\.([\d\.]+)(\+\/[\da-f]+)?$/) {
- $sphinx_major = $1;
- $sphinx_minor = $2;
- $sphinx_patch = $3;
- last;
- }
- # Sphinx 1.2.x uses a different format
- if (m/^\s*Sphinx.*\s+([\d]+)\.([\d\.]+)$/) {
- $sphinx_major = $1;
- $sphinx_minor = $2;
- $sphinx_patch = $3;
- last;
- }
- }
- close IN;
-}
-
# get kernel version from env
sub get_kernel_version() {
my $version = 'unknown kernel version';
@@ -816,6 +755,10 @@ sub output_highlight_rst {
if ($block) {
$output .= highlight_block($block);
}
+
+ $output =~ s/^\n+//g;
+ $output =~ s/\n+$//g;
+
foreach $line (split "\n", $output) {
print $lineprefix . $line . "\n";
}
@@ -859,9 +802,10 @@ sub output_function_rst(%) {
$signature .= ")";
}
- if ($sphinx_major < 3) {
+ if ($args{'typedef'} || $args{'functiontype'} eq "") {
+ print ".. c:macro:: ". $args{'function'} . "\n\n";
+
if ($args{'typedef'}) {
- print ".. c:type:: ". $args{'function'} . "\n\n";
print_lineno($declaration_start_line);
print " **Typedef**: ";
$lineprefix = "";
@@ -869,25 +813,10 @@ sub output_function_rst(%) {
print "\n\n**Syntax**\n\n";
print " ``$signature``\n\n";
} else {
- print ".. c:function:: $signature\n\n";
+ print "``$signature``\n\n";
}
} else {
- if ($args{'typedef'} || $args{'functiontype'} eq "") {
- print ".. c:macro:: ". $args{'function'} . "\n\n";
-
- if ($args{'typedef'}) {
- print_lineno($declaration_start_line);
- print " **Typedef**: ";
- $lineprefix = "";
- output_highlight_rst($args{'purpose'});
- print "\n\n**Syntax**\n\n";
- print " ``$signature``\n\n";
- } else {
- print "``$signature``\n\n";
- }
- } else {
- print ".. c:function:: $signature\n\n";
- }
+ print ".. c:function:: $signature\n\n";
}
if (!$args{'typedef'}) {
@@ -955,13 +884,9 @@ sub output_enum_rst(%) {
my $count;
my $outer;
- if ($sphinx_major < 3) {
- my $name = "enum " . $args{'enum'};
- print "\n\n.. c:type:: " . $name . "\n\n";
- } else {
- my $name = $args{'enum'};
- print "\n\n.. c:enum:: " . $name . "\n\n";
- }
+ my $name = $args{'enum'};
+ print "\n\n.. c:enum:: " . $name . "\n\n";
+
print_lineno($declaration_start_line);
$lineprefix = " ";
output_highlight_rst($args{'purpose'});
@@ -992,11 +917,8 @@ sub output_typedef_rst(%) {
my $oldprefix = $lineprefix;
my $name;
- if ($sphinx_major < 3) {
- $name = "typedef " . $args{'typedef'};
- } else {
- $name = $args{'typedef'};
- }
+ $name = $args{'typedef'};
+
print "\n\n.. c:type:: " . $name . "\n\n";
print_lineno($declaration_start_line);
$lineprefix = " ";
@@ -1012,17 +934,13 @@ sub output_struct_rst(%) {
my ($parameter);
my $oldprefix = $lineprefix;
- if ($sphinx_major < 3) {
- my $name = $args{'type'} . " " . $args{'struct'};
- print "\n\n.. c:type:: " . $name . "\n\n";
+ my $name = $args{'struct'};
+ if ($args{'type'} eq 'union') {
+ print "\n\n.. c:union:: " . $name . "\n\n";
} else {
- my $name = $args{'struct'};
- if ($args{'type'} eq 'union') {
- print "\n\n.. c:union:: " . $name . "\n\n";
- } else {
- print "\n\n.. c:struct:: " . $name . "\n\n";
- }
+ print "\n\n.. c:struct:: " . $name . "\n\n";
}
+
print_lineno($declaration_start_line);
$lineprefix = " ";
output_highlight_rst($args{'purpose'});
@@ -2005,10 +1923,6 @@ sub map_filename($) {
$file = $orig_file;
}
- if (defined($source_map{$file})) {
- $file = $source_map{$file};
- }
-
return $file;
}
@@ -2044,7 +1958,6 @@ sub process_export_file($) {
sub process_normal() {
if (/$doc_start/o) {
$state = STATE_NAME; # next line is always the function name
- $in_doc_sect = 0;
$declaration_start_line = $. + 1;
}
}
@@ -2149,7 +2062,6 @@ sub process_body($$) {
}
if (/$doc_sect/i) { # case insensitive for supported section names
- $in_doc_sect = 1;
$newsection = $1;
$newcontents = $2;
@@ -2166,14 +2078,10 @@ sub process_body($$) {
}
if (($contents ne "") && ($contents ne "\n")) {
- if (!$in_doc_sect && $Wcontents_before_sections) {
- emit_warning("${file}:$.", "contents before sections\n");
- }
dump_section($file, $section, $contents);
$section = $section_default;
}
- $in_doc_sect = 1;
$state = STATE_BODY;
$contents = $newcontents;
$new_start_line = $.;
@@ -2387,11 +2295,6 @@ sub process_file($) {
close IN_FILE;
}
-
-if ($output_mode eq "rst") {
- get_sphinx_version() if (!$sphinx_major);
-}
-
$kernelversion = get_kernel_version();
# generate a sequence of code that will splice in highlighting information
@@ -2403,19 +2306,6 @@ for (my $k = 0; $k < @highlights; $k++) {
$dohighlight .= "\$contents =~ s:$pattern:$result:gs;\n";
}
-# Read the file that maps relative names to absolute names for
-# separate source and object directories and for shadow trees.
-if (open(SOURCE_MAP, "<.tmp_filelist.txt")) {
- my ($relname, $absname);
- while(<SOURCE_MAP>) {
- chop();
- ($relname, $absname) = (split())[0..1];
- $relname =~ s:^/+::;
- $source_map{$relname} = $absname;
- }
- close(SOURCE_MAP);
-}
-
if ($output_selection == OUTPUT_EXPORTED ||
$output_selection == OUTPUT_INTERNAL) {
@@ -2471,17 +2361,6 @@ Do not output documentation, only warnings.
=head3 reStructuredText only
-=over 8
-
-=item -sphinx-version VERSION
-
-Use the ReST C domain dialect compatible with a specific Sphinx Version.
-
-If not specified, kernel-doc will auto-detect using the sphinx-build version
-found on PATH.
-
-=back
-
=head2 Output selection (mutually exclusive):
=over 8
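
With the Sphinx version probing removed, output_struct_rst(),
output_enum_rst() and output_typedef_rst() above emit the Sphinx 3+ C domain
directives unconditionally. A hedged Python paraphrase of the branch that
output_struct_rst() now takes (the Perl above is authoritative):

    # Structs and unions now always get the dedicated C domain directive;
    # the old ".. c:type::" fallback for Sphinx < 3 is gone.
    def struct_directive(name: str, kind: str) -> str:
        directive = "c:union" if kind == "union" else "c:struct"
        return f"\n\n.. {directive}:: {name}\n\n"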
diff --git a/scripts/lib/abi/abi_parser.py b/scripts/lib/abi/abi_parser.py
new file mode 100644
index 000000000000..66a738013ce1
--- /dev/null
+++ b/scripts/lib/abi/abi_parser.py
@@ -0,0 +1,628 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0903,R0911,R0912,R0913,R0914,R0915,R0917,C0302
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Parse ABI documentation and produce results from it.
+"""
+
+from argparse import Namespace
+import logging
+import os
+import re
+
+from pprint import pformat
+from random import randrange, seed
+
+# Import Python modules
+
+from helpers import AbiDebug, ABI_DIR
+
+
+class AbiParser:
+ """Main class to parse ABI files"""
+
+ TAGS = r"(what|where|date|kernelversion|contact|description|users)"
+ XREF = r"(?:^|\s|\()(\/(?:sys|config|proc|dev|kvd)\/[^,.:;\)\s]+)(?:[,.:;\)\s]|\Z)"
+
+ def __init__(self, directory, logger=None,
+ enable_lineno=False, show_warnings=True, debug=0):
+ """Stores arguments for the class and initialize class vars"""
+
+ self.directory = directory
+ self.enable_lineno = enable_lineno
+ self.show_warnings = show_warnings
+ self.debug = debug
+
+ if not logger:
+ self.log = logging.getLogger("get_abi")
+ else:
+ self.log = logger
+
+ self.data = {}
+ self.what_symbols = {}
+ self.file_refs = {}
+ self.what_refs = {}
+
+ # Ignore files that contain such suffixes
+ self.ignore_suffixes = (".rej", ".org", ".orig", ".bak", "~")
+
+ # Regular expressions used on parser
+ self.re_abi_dir = re.compile(r"(.*)" + ABI_DIR)
+ self.re_tag = re.compile(r"(\S+)(:\s*)(.*)", re.I)
+ self.re_valid = re.compile(self.TAGS)
+ self.re_start_spc = re.compile(r"(\s*)(\S.*)")
+ self.re_whitespace = re.compile(r"^\s+")
+
+ # Regular expressions used on print
+ self.re_what = re.compile(r"(\/?(?:[\w\-]+\/?){1,2})")
+ self.re_escape = re.compile(r"([\.\x01-\x08\x0e-\x1f\x21-\x2f\x3a-\x40\x7b-\xff])")
+ self.re_unprintable = re.compile(r"([\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\xff]+)")
+ self.re_title_mark = re.compile(r"\n[\-\*\=\^\~]+\n")
+ self.re_doc = re.compile(r"Documentation/(?!devicetree)(\S+)\.rst")
+ self.re_abi = re.compile(r"(Documentation/ABI/)([\w\/\-]+)")
+ self.re_xref_node = re.compile(self.XREF)
+
+ def warn(self, fdata, msg, extra=None):
+ """Displays a parse error if warning is enabled"""
+
+ if not self.show_warnings:
+ return
+
+ msg = f"{fdata.fname}:{fdata.ln}: {msg}"
+ if extra:
+ msg += "\n\t\t" + extra
+
+ self.log.warning(msg)
+
+ def add_symbol(self, what, fname, ln=None, xref=None):
+ """Create a reference table describing where each 'what' is located"""
+
+ if what not in self.what_symbols:
+ self.what_symbols[what] = {"file": {}}
+
+ if fname not in self.what_symbols[what]["file"]:
+ self.what_symbols[what]["file"][fname] = []
+
+ if ln and ln not in self.what_symbols[what]["file"][fname]:
+ self.what_symbols[what]["file"][fname].append(ln)
+
+ if xref:
+ self.what_symbols[what]["xref"] = xref
+
+ def _parse_line(self, fdata, line):
+ """Parse a single line of an ABI file"""
+
+ new_what = False
+ new_tag = False
+ content = None
+
+ match = self.re_tag.match(line)
+ if match:
+ new = match.group(1).lower()
+ sep = match.group(2)
+ content = match.group(3)
+
+ match = self.re_valid.search(new)
+ if match:
+ new_tag = match.group(1)
+ else:
+ if fdata.tag == "description":
+ # New "tag" is actually part of description.
+ # Don't consider it a tag
+ new_tag = False
+ elif fdata.tag != "":
+ self.warn(fdata, f"tag '{fdata.tag}' is invalid", line)
+
+ if new_tag:
+ # "where" is Invalid, but was a common mistake. Warn if found
+ if new_tag == "where":
+ self.warn(fdata, "tag 'Where' is invalid. Should be 'What:' instead")
+ new_tag = "what"
+
+ if new_tag == "what":
+ fdata.space = None
+
+ if content not in self.what_symbols:
+ self.add_symbol(what=content, fname=fdata.fname, ln=fdata.ln)
+
+ if fdata.tag == "what":
+ fdata.what.append(content.strip("\n"))
+ else:
+ if fdata.key:
+ if "description" not in self.data.get(fdata.key, {}):
+ self.warn(fdata, f"{fdata.key} doesn't have a description")
+
+ for w in fdata.what:
+ self.add_symbol(what=w, fname=fdata.fname,
+ ln=fdata.what_ln, xref=fdata.key)
+
+ fdata.label = content
+ new_what = True
+
+ key = "abi_" + content.lower()
+ fdata.key = self.re_unprintable.sub("_", key).strip("_")
+
+ # Avoid duplicated keys, but use a fixed seed so that the
+ # generated namespace stays identical as long as the
+ # ABI symbols don't change
+ seed(42)
+
+ while fdata.key in self.data:
+ char = randrange(0, 51) + ord("A")
+ if char > ord("Z"):
+ char += ord("a") - ord("Z") - 1
+
+ fdata.key += chr(char)
+
+ if fdata.key and fdata.key not in self.data:
+ self.data[fdata.key] = {
+ "what": [content],
+ "file": [fdata.file_ref],
+ "path": fdata.ftype,
+ "line_no": fdata.ln,
+ }
+
+ fdata.what = self.data[fdata.key]["what"]
+
+ self.what_refs[content] = fdata.key
+ fdata.tag = new_tag
+ fdata.what_ln = fdata.ln
+
+ if fdata.nametag["what"]:
+ t = (content, fdata.key)
+ if t not in fdata.nametag["symbols"]:
+ fdata.nametag["symbols"].append(t)
+
+ return
+
+ if fdata.tag and new_tag:
+ fdata.tag = new_tag
+
+ if new_what:
+ fdata.label = ""
+
+ if "description" in self.data[fdata.key]:
+ self.data[fdata.key]["description"] += "\n\n"
+
+ if fdata.file_ref not in self.data[fdata.key]["file"]:
+ self.data[fdata.key]["file"].append(fdata.file_ref)
+
+ if self.debug & AbiDebug.WHAT_PARSING:
+ self.log.debug("what: %s", fdata.what)
+
+ if not fdata.what:
+ self.warn(fdata, "'What:' should come first:", line)
+ return
+
+ if new_tag == "description":
+ fdata.space = None
+
+ if content:
+ sep = sep.replace(":", " ")
+
+ c = " " * len(new_tag) + sep + content
+ c = c.expandtabs()
+
+ match = self.re_start_spc.match(c)
+ if match:
+ # Preserve initial spaces for the first line
+ fdata.space = match.group(1)
+ content = match.group(2) + "\n"
+
+ self.data[fdata.key][fdata.tag] = content
+
+ return
+
+ # Store any contents before tags at the database
+ if not fdata.tag and "what" in fdata.nametag:
+ fdata.nametag["description"] += line
+ return
+
+ if fdata.tag == "description":
+ content = line.expandtabs()
+
+ if self.re_whitespace.sub("", content) == "":
+ self.data[fdata.key][fdata.tag] += "\n"
+ return
+
+ if fdata.space is None:
+ match = self.re_start_spc.match(content)
+ if match:
+ # Preserve initial spaces for the first line
+ fdata.space = match.group(1)
+
+ content = match.group(2) + "\n"
+ else:
+ if content.startswith(fdata.space):
+ content = content[len(fdata.space):]
+
+ else:
+ fdata.space = ""
+
+ if fdata.tag == "what":
+ w = content.strip("\n")
+ if w:
+ self.data[fdata.key][fdata.tag].append(w)
+ else:
+ self.data[fdata.key][fdata.tag] += content
+ return
+
+ content = line.strip()
+ if fdata.tag:
+ if fdata.tag == "what":
+ w = content.strip("\n")
+ if w:
+ self.data[fdata.key][fdata.tag].append(w)
+ else:
+ self.data[fdata.key][fdata.tag] += "\n" + content.rstrip("\n")
+ return
+
+ # Everything else is error
+ if content:
+ self.warn(fdata, "Unexpected content", line)
+
+ def parse_readme(self, nametag, fname):
+ """Parse ABI README file"""
+
+ nametag["what"] = ["Introduction"]
+ nametag["path"] = "README"
+ with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp:
+ for line in fp:
+ match = self.re_tag.match(line)
+ if match:
+ new = match.group(1).lower()
+
+ match = self.re_valid.search(new)
+ if match:
+ nametag["description"] += "\n:" + line
+ continue
+
+ nametag["description"] += line
+
+ def parse_file(self, fname, path, basename):
+ """Parse a single file"""
+
+ ref = f"abi_file_{path}_{basename}"
+ ref = self.re_unprintable.sub("_", ref).strip("_")
+
+ # Store per-file state into a namespace variable. This will be used
+ # by the per-line parser state machine and by the warning function.
+ fdata = Namespace()
+
+ fdata.fname = fname
+ fdata.name = basename
+
+ pos = fname.find(ABI_DIR)
+ if pos > 0:
+ f = fname[pos:]
+ else:
+ f = fname
+
+ fdata.file_ref = (f, ref)
+ self.file_refs[f] = ref
+
+ fdata.ln = 0
+ fdata.what_ln = 0
+ fdata.tag = ""
+ fdata.label = ""
+ fdata.what = []
+ fdata.key = None
+ fdata.xrefs = None
+ fdata.space = None
+ fdata.ftype = path.split("/")[0]
+
+ fdata.nametag = {}
+ fdata.nametag["what"] = [f"ABI file {path}/{basename}"]
+ fdata.nametag["type"] = "File"
+ fdata.nametag["path"] = fdata.ftype
+ fdata.nametag["file"] = [fdata.file_ref]
+ fdata.nametag["line_no"] = 1
+ fdata.nametag["description"] = ""
+ fdata.nametag["symbols"] = []
+
+ self.data[ref] = fdata.nametag
+
+ if self.debug & AbiDebug.WHAT_OPEN:
+ self.log.debug("Opening file %s", fname)
+
+ if basename == "README":
+ self.parse_readme(fdata.nametag, fname)
+ return
+
+ with open(fname, "r", encoding="utf8", errors="backslashreplace") as fp:
+ for line in fp:
+ fdata.ln += 1
+
+ self._parse_line(fdata, line)
+
+ if "description" in fdata.nametag:
+ fdata.nametag["description"] = fdata.nametag["description"].lstrip("\n")
+
+ if fdata.key:
+ if "description" not in self.data.get(fdata.key, {}):
+ self.warn(fdata, f"{fdata.key} doesn't have a description")
+
+ for w in fdata.what:
+ self.add_symbol(what=w, fname=fname, xref=fdata.key)
+
+ def _parse_abi(self, root=None):
+ """Internal function to parse documentation ABI recursively"""
+
+ if not root:
+ root = self.directory
+
+ with os.scandir(root) as obj:
+ for entry in obj:
+ name = os.path.join(root, entry.name)
+
+ if entry.is_dir():
+ self._parse_abi(name)
+ continue
+
+ if not entry.is_file():
+ continue
+
+ basename = os.path.basename(name)
+
+ if basename.startswith("."):
+ continue
+
+ if basename.endswith(self.ignore_suffixes):
+ continue
+
+ path = self.re_abi_dir.sub("", os.path.dirname(name))
+
+ self.parse_file(name, path, basename)
+
+ def parse_abi(self, root=None):
+ """Parse documentation ABI"""
+
+ self._parse_abi(root)
+
+ if self.debug & AbiDebug.DUMP_ABI_STRUCTS:
+ self.log.debug(pformat(self.data))
+
+ def desc_txt(self, desc):
+ """Print description as found inside ABI files"""
+
+ desc = desc.strip(" \t\n")
+
+ return desc + "\n\n"
+
+ def xref(self, fname):
+ """
+ Converts a Documentation/ABI + basename into a ReST cross-reference
+ """
+
+ return self.file_refs.get(fname)
+
+ def desc_rst(self, desc):
+ """Enrich ReST output by creating cross-references"""
+
+ # Remove title markups from the description
+ # Having titles inside ABI files will only work if extra
+ # care would be taken in order to strictly follow the same
+ # level order for each markup.
+ desc = self.re_title_mark.sub("\n\n", "\n" + desc)
+ desc = desc.rstrip(" \t\n").lstrip("\n")
+
+ # Python's regex performance for non-compiled expressions is a lot
+ # worse than Perl's, as Perl automatically caches compiled regexes
+ # at their first usage. Here, we need to do the same, as otherwise
+ # the performance penalty would be high
+
+ new_desc = ""
+ for d in desc.split("\n"):
+ if d == "":
+ new_desc += "\n"
+ continue
+
+ # Use cross-references for doc files where needed
+ d = self.re_doc.sub(r":doc:`/\1`", d)
+
+ # Use cross-references for ABI generated docs where needed
+ matches = self.re_abi.findall(d)
+ for m in matches:
+ abi = m[0] + m[1]
+
+ xref = self.file_refs.get(abi)
+ if not xref:
+ # This may happen if the ABI file is in a separate directory,
+ # e.g. when parsing ABI/testing while the symbol is at stable.
+ # The proper solution is to move this part of the code
+ # into sphinx/kernel_abi.py
+ self.log.info("Didn't find ABI reference for '%s'", abi)
+ else:
+ new = self.re_escape.sub(r"\\\1", m[1])
+ d = re.sub(fr"\b{abi}\b", f":ref:`{new} <{xref}>`", d)
+
+ # Seek for cross reference symbols like /sys/...
+ # Need to be careful to avoid doing it on a code block
+ if d[0] not in [" ", "\t"]:
+ matches = self.re_xref_node.findall(d)
+ for m in matches:
+ # Finding ABI here is more complex due to wildcards
+ xref = self.what_refs.get(m)
+ if xref:
+ new = self.re_escape.sub(r"\\\1", m)
+ d = re.sub(fr"\b{m}\b", f":ref:`{new} <{xref}>`", d)
+
+ new_desc += d + "\n"
+
+ return new_desc + "\n\n"
+
+ def doc(self, output_in_txt=False, show_symbols=True, show_file=True,
+ filter_path=None):
+ """Print ABI at stdout"""
+
+ part = None
+ for key, v in sorted(self.data.items(),
+ key=lambda x: (x[1].get("type", ""),
+ x[1].get("what"))):
+
+ wtype = v.get("type", "Symbol")
+ file_ref = v.get("file")
+ names = v.get("what", [""])
+
+ if wtype == "File":
+ if not show_file:
+ continue
+ else:
+ if not show_symbols:
+ continue
+
+ if filter_path:
+ if v.get("path") != filter_path:
+ continue
+
+ msg = ""
+
+ if wtype != "File":
+ cur_part = names[0]
+ if cur_part.find("/") >= 0:
+ match = self.re_what.match(cur_part)
+ if match:
+ symbol = match.group(1).rstrip("/")
+ cur_part = "Symbols under " + symbol
+
+ if cur_part and cur_part != part:
+ part = cur_part
+ msg += part + "\n"+ "-" * len(part) +"\n\n"
+
+ msg += f".. _{key}:\n\n"
+
+ max_len = 0
+ for i in range(0, len(names)): # pylint: disable=C0200
+ names[i] = "**" + self.re_escape.sub(r"\\\1", names[i]) + "**"
+
+ max_len = max(max_len, len(names[i]))
+
+ msg += "+-" + "-" * max_len + "-+\n"
+ for name in names:
+ msg += f"| {name}" + " " * (max_len - len(name)) + " |\n"
+ msg += "+-" + "-" * max_len + "-+\n"
+ msg += "\n"
+
+ for ref in file_ref:
+ if wtype == "File":
+ msg += f".. _{ref[1]}:\n\n"
+ else:
+ base = os.path.basename(ref[0])
+ msg += f"Defined on file :ref:`{base} <{ref[1]}>`\n\n"
+
+ if wtype == "File":
+ msg += names[0] + "\n" + "-" * len(names[0]) + "\n\n"
+
+ desc = v.get("description")
+ if not desc and wtype != "File":
+ msg += f"DESCRIPTION MISSING for {names[0]}\n\n"
+
+ if desc:
+ if output_in_txt:
+ msg += self.desc_txt(desc)
+ else:
+ msg += self.desc_rst(desc)
+
+ symbols = v.get("symbols")
+ if symbols:
+ msg += "Has the following ABI:\n\n"
+
+ for w, label in symbols:
+ # Escape special chars from content
+ content = self.re_escape.sub(r"\\\1", w)
+
+ msg += f"- :ref:`{content} <{label}>`\n\n"
+
+ users = v.get("users")
+ if users and users.strip(" \t\n"):
+ users = users.strip("\n").replace('\n', '\n\t')
+ msg += f"Users:\n\t{users}\n\n"
+
+ ln = v.get("line_no", 1)
+
+ yield (msg, file_ref[0][0], ln)
+
+ def check_issues(self):
+ """Warn about duplicated ABI entries"""
+
+ for what, v in self.what_symbols.items():
+ files = v.get("file")
+ if not files:
+ # Should never happen if the parser works properly
+ self.log.warning("%s doesn't have a file associated", what)
+ continue
+
+ if len(files) == 1:
+ continue
+
+ f = []
+ for fname, lines in sorted(files.items()):
+ if not lines:
+ f.append(f"{fname}")
+ elif len(lines) == 1:
+ f.append(f"{fname}:{lines[0]}")
+ else:
+ m = fname + ": lines "
+ m += ", ".join(str(x) for x in lines)
+ f.append(m)
+
+ self.log.warning("%s is defined %d times: %s", what, len(f), "; ".join(f))
+
+ def search_symbols(self, expr):
+ """ Searches for ABI symbols """
+
+ regex = re.compile(expr, re.I)
+
+ found_keys = 0
+ for t in sorted(self.data.items(), key=lambda x: x[0]):
+ v = t[1]
+
+ wtype = v.get("type", "")
+ if wtype == "File":
+ continue
+
+ for what in v.get("what", [""]):
+ if regex.search(what):
+ found_keys += 1
+
+ kernelversion = v.get("kernelversion", "").strip(" \t\n")
+ date = v.get("date", "").strip(" \t\n")
+ contact = v.get("contact", "").strip(" \t\n")
+ users = v.get("users", "").strip(" \t\n")
+ desc = v.get("description", "").strip(" \t\n")
+
+ files = []
+ for f in v.get("file", ()):
+ files.append(f[0])
+
+ what = str(found_keys) + ". " + what
+ title_tag = "-" * len(what)
+
+ print(f"\n{what}\n{title_tag}\n")
+
+ if kernelversion:
+ print(f"Kernel version:\t\t{kernelversion}")
+
+ if date:
+ print(f"Date:\t\t\t{date}")
+
+ if contact:
+ print(f"Contact:\t\t{contact}")
+
+ if users:
+ print(f"Users:\t\t\t{users}")
+
+ print("Defined on file(s):\t" + ", ".join(files))
+
+ if desc:
+ desc = desc.strip("\n")
+ print(f"\n{desc}\n")
+
+ if not found_keys:
+ print(f"Regular expression /{expr}/ not found.")
diff --git a/scripts/lib/abi/abi_regex.py b/scripts/lib/abi/abi_regex.py
new file mode 100644
index 000000000000..8a57846cbc69
--- /dev/null
+++ b/scripts/lib/abi/abi_regex.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+# xxpylint: disable=R0903
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Convert ABI what into regular expressions
+"""
+
+import re
+import sys
+
+from pprint import pformat
+
+from abi_parser import AbiParser
+from helpers import AbiDebug
+
+class AbiRegex(AbiParser):
+ """Extends AbiParser to search ABI nodes with regular expressions"""
+
+ # Escape only ASCII visible characters
+ escape_symbols = r"([\x21-\x29\x2b-\x2d\x3a-\x40\x5c\x60\x7b-\x7e])"
+ leave_others = "others"
+
+ # Tuples with regular expressions to be compiled and replacement data
+ re_whats = [
+ # Drop escape characters that might exist
+ (re.compile("\\\\"), ""),
+
+ # Temporarily escape dot characters
+ (re.compile(r"\."), "\xf6"),
+
+ # Temporarily change [0-9]+ type of patterns
+ (re.compile(r"\[0\-9\]\+"), "\xff"),
+
+ # Temporarily change [\d+-\d+] type of patterns
+ (re.compile(r"\[0\-\d+\]"), "\xff"),
+ (re.compile(r"\[0:\d+\]"), "\xff"),
+ (re.compile(r"\[(\d+)\]"), "\xf4\\\\d+\xf5"),
+
+ # Temporarily change [0-9] type of patterns
+ (re.compile(r"\[(\d)\-(\d)\]"), "\xf4\1-\2\xf5"),
+
+ # Handle multiple option patterns
+ (re.compile(r"[\{\<\[]([\w_]+)(?:[,|]+([\w_]+)){1,}[\}\>\]]"), r"(\1|\2)"),
+
+ # Handle wildcards
+ (re.compile(r"([^\/])\*"), "\\1\\\\w\xf7"),
+ (re.compile(r"/\*/"), "/.*/"),
+ (re.compile(r"/\xf6\xf6\xf6"), "/.*"),
+ (re.compile(r"\<[^\>]+\>"), "\\\\w\xf7"),
+ (re.compile(r"\{[^\}]+\}"), "\\\\w\xf7"),
+ (re.compile(r"\[[^\]]+\]"), "\\\\w\xf7"),
+
+ (re.compile(r"XX+"), "\\\\w\xf7"),
+ (re.compile(r"([^A-Z])[XYZ]([^A-Z])"), "\\1\\\\w\xf7\\2"),
+ (re.compile(r"([^A-Z])[XYZ]$"), "\\1\\\\w\xf7"),
+ (re.compile(r"_[AB]_"), "_\\\\w\xf7_"),
+
+ # Recover [0-9] type of patterns
+ (re.compile(r"\xf4"), "["),
+ (re.compile(r"\xf5"), "]"),
+
+ # Remove duplicated spaces
+ (re.compile(r"\s+"), r" "),
+
+ # Special case: drop comparison as in:
+ # What: foo = <something>
+ # (this happens on a few IIO definitions)
+ (re.compile(r"\s*\=.*$"), ""),
+
+ # Escape all other symbols
+ (re.compile(escape_symbols), r"\\\1"),
+ (re.compile(r"\\\\"), r"\\"),
+ (re.compile(r"\\([\[\]\(\)\|])"), r"\1"),
+ (re.compile(r"(\d+)\\(-\d+)"), r"\1\2"),
+
+ (re.compile(r"\xff"), r"\\d+"),
+
+ # Special case: IIO ABI entries that use a parenthesis, like sqrt(...)
+ (re.compile(r"sqrt(.*)"), r"sqrt(.*)"),
+
+ # Simplify regexes with multiple .*
+ (re.compile(r"(?:\.\*){2,}"), ""),
+
+ # Recover dot characters
+ (re.compile(r"\xf6"), "\\."),
+ # Recover plus characters
+ (re.compile(r"\xf7"), "+"),
+ ]
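+
+ # Illustrative example: a What of "/sys/block/<disk>/stat" would roughly
+ # become the regex "/sys/block/\w+/stat" after the substitutions above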
+ re_has_num = re.compile(r"\\d")
+
+ # Symbol name after escape_chars that are considered a devnode basename
+ re_symbol_name = re.compile(r"(\w|\\[\.\-\:])+$")
+
+ # List of popular group names to be skipped to minimize regex group size
+ # Use AbiDebug.SUBGROUP_SIZE to detect those
+ skip_names = set(["devices", "hwmon"])
+
+ def regex_append(self, what, new):
+ """
+ Get a search group for a subset of regular expressions.
+
+ As the ABI may have thousands of symbols, using a for loop to search
+ all regular expressions is at least O(n^2). When there are wildcards,
+ the complexity increases substantially and may become exponential.
+
+ To avoid spending too much time on them, split the regular
+ expressions into groups. The smaller the group, the better, as
+ searches will then be confined to a small number of regular
+ expressions.
+
+ The conversion to a regex subset is tricky, as we need something
+ that can be easily obtained from the sysfs symbol and from the
+ regular expression. So, we need to discard nodes that have
+ wildcards.
+
+ If a subgroup can't be obtained, the regular expression is placed
+ inside a special catch-all group (self.leave_others).
+ """
+
+ search_group = None
+
+ for search_group in reversed(new.split("/")):
+ if not search_group or search_group in self.skip_names:
+ continue
+ if self.re_symbol_name.match(search_group):
+ break
+
+ if not search_group:
+ search_group = self.leave_others
+
+ if self.debug & AbiDebug.SUBGROUP_MAP:
+ self.log.debug("%s: mapped as %s", what, search_group)
+
+ try:
+ if search_group not in self.regex_group:
+ self.regex_group[search_group] = []
+
+ self.regex_group[search_group].append(re.compile(new))
+ if self.search_string:
+ if what.find(self.search_string) >= 0:
+ print(f"What: {what}")
+ except re.PatternError:
+ self.log.warning("Ignoring '%s' as it produced an invalid regex:\n"
+ " '%s'", what, new)
+
+ def get_regexes(self, what):
+ """
+ Given an ABI devnode, return a list of all regular expressions that
+ may match it, based on the sub-groups created by regex_append()
+ """
+
+ re_list = []
+
+ paths = what.split("/")
+ paths.reverse()
+ paths.append(self.leave_others)
+
+ for search_group in paths:
+ if search_group in self.regex_group:
+ re_list += self.regex_group[search_group]
+
+ return re_list
+
+ def __init__(self, *args, **kwargs):
+ """
+ Override init method to get verbose argument
+ """
+
+ self.regex_group = None
+ self.search_string = None
+ self.re_string = None
+
+ if "search_string" in kwargs:
+ self.search_string = kwargs.get("search_string")
+ del kwargs["search_string"]
+
+ if self.search_string:
+
+ try:
+ self.re_string = re.compile(self.search_string)
+ except re.PatternError as e:
+ msg = f"{self.search_string} is not a valid regular expression"
+ raise ValueError(msg) from e
+
+ super().__init__(*args, **kwargs)
+
+ def parse_abi(self, *args, **kwargs):
+
+ super().parse_abi(*args, **kwargs)
+
+ self.regex_group = {}
+
+ print("Converting ABI What fields into regexes...", file=sys.stderr)
+
+ for t in sorted(self.data.items(), key=lambda x: x[0]):
+ v = t[1]
+ if v.get("type") == "File":
+ continue
+
+ v["regex"] = []
+
+ for what in v.get("what", []):
+ if not what.startswith("/sys"):
+ continue
+
+ new = what
+ for r, s in self.re_whats:
+ try:
+ new = r.sub(s, new)
+ except re.PatternError as e:
+ # Help debugging troubles with new regexes
+ raise re.PatternError(f"{e}\nwhile re.sub('{r.pattern}', {s}, str)") from e
+
+ v["regex"].append(new)
+
+ if self.debug & AbiDebug.REGEX:
+ self.log.debug("%-90s <== %s", new, what)
+
+ # Store regex into a subgroup to speedup searches
+ self.regex_append(what, new)
+
+ if self.debug & AbiDebug.SUBGROUP_DICT:
+ self.log.debug("%s", pformat(self.regex_group))
+
+ if self.debug & AbiDebug.SUBGROUP_SIZE:
+ biggest_keys = sorted(self.regex_group.keys(),
+ key=lambda k: len(self.regex_group[k]),
+ reverse=True)
+
+ print("Top regex subgroups:", file=sys.stderr)
+ for k in biggest_keys[:10]:
+ print(f"{k} has {len(self.regex_group[k])} elements", file=sys.stderr)
diff --git a/scripts/lib/abi/helpers.py b/scripts/lib/abi/helpers.py
new file mode 100644
index 000000000000..639b23e4ca33
--- /dev/null
+++ b/scripts/lib/abi/helpers.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# pylint: disable=R0903
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Helper classes for ABI parser
+"""
+
+ABI_DIR = "Documentation/ABI/"
+
+
+class AbiDebug:
+ """Debug levels"""
+
+ WHAT_PARSING = 1
+ WHAT_OPEN = 2
+ DUMP_ABI_STRUCTS = 4
+ UNDEFINED = 8
+ REGEX = 16
+ SUBGROUP_MAP = 32
+ SUBGROUP_DICT = 64
+ SUBGROUP_SIZE = 128
+ GRAPH = 256
+
+
+DEBUG_HELP = """
+1 - enable debug parsing logic
+2 - enable debug messages on file open
+4 - enable debug for ABI parse data
+8 - enable extra debug information to identify troubles
+ with ABI symbols found at the local machine that
+ weren't found on ABI documentation (used only for
+ undefined subcommand)
+16 - enable debug for the what-to-regex conversion
+32 - enable debug for the symbol regex subgroup mapping
+64 - enable debug dump of the regex subgroup dictionary
+128 - enable debug for regex subgroup sizes
+256 - enable debug for the sysfs graph tree
+"""
diff --git a/scripts/lib/abi/system_symbols.py b/scripts/lib/abi/system_symbols.py
new file mode 100644
index 000000000000..f15c94a6e33c
--- /dev/null
+++ b/scripts/lib/abi/system_symbols.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python3
+# pylint: disable=R0902,R0912,R0914,R0915,R1702
+# Copyright(c) 2025: Mauro Carvalho Chehab <mchehab@kernel.org>.
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+Parse ABI documentation and produce results from it.
+"""
+
+import os
+import re
+import sys
+
+from concurrent import futures
+from datetime import datetime
+from random import shuffle
+
+from helpers import AbiDebug
+
+class SystemSymbols:
+ """Stores arguments for the class and initialize class vars"""
+
+ def graph_add_file(self, path, link=None):
+ """
+ Add a file path to the sysfs graph stored at self.root
+ """
+
+ if path in self.files:
+ return
+
+ name = ""
+ ref = self.root
+ for edge in path.split("/"):
+ name += edge + "/"
+ if edge not in ref:
+ ref[edge] = {"__name": [name.rstrip("/")]}
+
+ ref = ref[edge]
+
+ if link and link not in ref["__name"]:
+ ref["__name"].append(link.rstrip("/"))
+
+ self.files.add(path)
+
+ def print_graph(self, root_prefix="", root=None, level=0):
+ """Prints a reference tree graph using UTF-8 characters"""
+
+ if not root:
+ root = self.root
+ level = 0
+
+ # Prevent endless traverse
+ if level > 5:
+ return
+
+ if level > 0:
+ prefix = "├──"
+ last_prefix = "└──"
+ else:
+ prefix = ""
+ last_prefix = ""
+
+ items = list(root.items())
+
+ names = root.get("__name", [])
+ for k, edge in items:
+ if k == "__name":
+ continue
+
+ if not k:
+ k = "/"
+
+ if len(names) > 1:
+ k += " links: " + ",".join(names[1:])
+
+ if edge == items[-1][1]:
+ print(root_prefix + last_prefix + k)
+ p = root_prefix
+ if level > 0:
+ p += " "
+ self.print_graph(p, edge, level + 1)
+ else:
+ print(root_prefix + prefix + k)
+ p = root_prefix + "│ "
+ self.print_graph(p, edge, level + 1)
+
+ def _walk(self, root):
+ """
+ Walk through sysfs to get all devnodes that aren't ignored.
+
+ By default, uses /sys as the sysfs mounting point. If another
+ directory is used, it is replaced by /sys in the resulting paths.
+ """
+
+ with os.scandir(root) as obj:
+ for entry in obj:
+ path = os.path.join(root, entry.name)
+ if self.sysfs:
+ # pass count positionally for compatibility with Python < 3.13
+ p = path.replace(self.sysfs, "/sys", 1)
+ else:
+ p = path
+
+ # skip ignored entries, but keep scanning their siblings
+ if self.re_ignore.search(p):
+ continue
+
+ # Handle link first to avoid directory recursion
+ if entry.is_symlink():
+ real = os.path.realpath(path)
+ if not self.sysfs:
+ self.aliases[path] = real
+ else:
+ real = real.replace(self.sysfs, "/sys", 1)
+
+ # Add absfile location to graph if it doesn't exist
+ if not self.re_ignore.search(real):
+ # Add link to the graph
+ self.graph_add_file(real, p)
+
+ elif entry.is_file():
+ self.graph_add_file(p)
+
+ elif entry.is_dir():
+ self._walk(path)
+
+ def __init__(self, abi, sysfs="/sys", hints=False):
+ """
+ Initialize internal variables and get a list of all files inside
+ sysfs that can currently be parsed.
+
+ Please notice that there are several entries on sysfs that aren't
+ documented as ABI. Those are ignored.
+
+ The real paths will be stored under self.files. Aliases will be
+ stored separately, under self.aliases.
+ """
+
+ self.abi = abi
+ self.log = abi.log
+
+ if sysfs != "/sys":
+ self.sysfs = sysfs.rstrip("/")
+ else:
+ self.sysfs = None
+
+ self.hints = hints
+
+ self.root = {}
+ self.aliases = {}
+ self.files = set()
+
+ dont_walk = [
+ # Those require root access and aren't documented at ABI
+ f"^{sysfs}/kernel/debug",
+ f"^{sysfs}/kernel/tracing",
+ f"^{sysfs}/fs/pstore",
+ f"^{sysfs}/fs/bpf",
+ f"^{sysfs}/fs/fuse",
+
+ # This is not documented at ABI
+ f"^{sysfs}/module",
+
+ f"^{sysfs}/fs/cgroup", # this is big and has zero docs under ABI
+ f"^{sysfs}/firmware", # documented elsewhere: ACPI, DT bindings
+ "sections|notes", # aren't actually part of ABI
+
+ # kernel-parameters.txt - not easy to parse
+ "parameters",
+ ]
+
+ self.re_ignore = re.compile("|".join(dont_walk))
+
+ print(f"Reading {sysfs} directory contents...", file=sys.stderr)
+ self._walk(sysfs)
+
+ def check_file(self, refs, found):
+ """Check missing ABI symbols for a given sysfs file"""
+
+ res_list = []
+
+ try:
+ for names in refs:
+ fname = names[0]
+
+ res = {
+ "found": False,
+ "fname": fname,
+ "msg": "",
+ }
+ res_list.append(res)
+
+ re_what = self.abi.get_regexes(fname)
+ if not re_what:
+ self.abi.log.warning(f"missing rules for {fname}")
+ continue
+
+ for name in names:
+ for r in re_what:
+ if self.abi.debug & AbiDebug.UNDEFINED:
+ self.log.debug("check if %s matches '%s'", name, r.pattern)
+ if r.match(name):
+ res["found"] = True
+ if found:
+ res["msg"] += f" {fname}: regex:\n\t"
+ continue
+
+ if self.hints and not res["found"]:
+ res["msg"] += f" {fname} not found. Tested regexes:\n"
+ for r in re_what:
+ res["msg"] += " " + r.pattern + "\n"
+
+ except KeyboardInterrupt:
+ pass
+
+ return res_list
+
+ def _ref_interactor(self, root):
+ """Recursive function to interact over the sysfs tree"""
+
+ for k, v in root.items():
+ if isinstance(v, dict):
+ yield from self._ref_interactor(v)
+
+ if root == self.root or k == "__name":
+ continue
+
+ if self.abi.re_string:
+ fname = v["__name"][0]
+ if self.abi.re_string.search(fname):
+ yield v
+ else:
+ yield v
+
+
+ def get_fileref(self, all_refs, chunk_size):
+ """Interactor to group refs into chunks"""
+
+ n = 0
+ refs = []
+
+ for ref in all_refs:
+ refs.append(ref)
+
+ n += 1
+ if n >= chunk_size:
+ yield refs
+ n = 0
+ refs = []
+
+ yield refs
+
+ def check_undefined_symbols(self, max_workers=None, chunk_size=50,
+ found=None, dry_run=None):
+ """Seach ABI for sysfs symbols missing documentation"""
+
+ self.abi.parse_abi()
+
+ if self.abi.debug & AbiDebug.GRAPH:
+ self.print_graph()
+
+ all_refs = []
+ for ref in self._ref_interactor(self.root):
+ all_refs.append(ref["__name"])
+
+ if dry_run:
+ print("Would check", file=sys.stderr)
+ for ref in all_refs:
+ print(", ".join(ref))
+
+ return
+
+ print("Starting to search symbols (it may take several minutes):",
+ file=sys.stderr)
+ start = datetime.now()
+ old_elapsed = None
+
+ # Python doesn't support multithreading due to limitations on its
+ # global lock (GIL). While Python 3.13 finally made GIL optional,
+ # there are still issues related to it. Also, we want to have
+ # backward compatibility with older versions of Python.
+ #
+ # So, use multiprocessing instead. However, Python is very slow at
+ # passing data from/to multiple processes. Also, it may consume lots
+ # of memory if the data to be shared is not small. So, we need to
+ # group the workload in chunks that are big enough to generate
+ # performance gains while not so big that they would cause
+ # out-of-memory conditions.
+
+ num_refs = len(all_refs)
+ print(f"Number of references to parse: {num_refs}", file=sys.stderr)
+
+ if not max_workers:
+ max_workers = os.cpu_count()
+ elif max_workers > os.cpu_count():
+ max_workers = os.cpu_count()
+
+ max_workers = max(max_workers, 1)
+
+ max_chunk_size = int((num_refs + max_workers - 1) / max_workers)
+ chunk_size = min(chunk_size, max_chunk_size)
+ chunk_size = max(1, chunk_size)
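+ # e.g. with 10,000 refs on 8 CPUs the chunk size is capped at 1250;
+ # the default chunk_size of 50 then queues 200 jobs on the pool.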
+
+ if max_workers > 1:
+ executor = futures.ProcessPoolExecutor
+
+ # Place references in a random order. This may help improve
+ # performance by mixing complex and simple expressions when
+ # creating chunks
+ shuffle(all_refs)
+ else:
+ # Python has a high overhead with processes. When there's just
+ # one worker, it is faster to not create a new process.
+ # Yet, the user still deserves a progress report. So, use
+ # Python threads, which all run within a single process with
+ # an internal scheduler switching between tasks. There are no
+ # performance gains for non-IO tasks, but the work can still be
+ # interrupted from time to time to display progress.
+ executor = futures.ThreadPoolExecutor
+
+ not_found = []
+ f_list = []
+ with executor(max_workers=max_workers) as exe:
+ for refs in self.get_fileref(all_refs, chunk_size):
+ if refs:
+ try:
+ f_list.append(exe.submit(self.check_file, refs, found))
+
+ except KeyboardInterrupt:
+ return
+
+ total = len(f_list)
+
+ if not total:
+ if self.abi.re_string:
+ print(f"No ABI symbol matches {self.abi.search_string}")
+ else:
+ self.abi.log.warning("No ABI symbols found")
+ return
+
+ print(f"{len(f_list):6d} jobs queued on {max_workers} workers",
+ file=sys.stderr)
+
+ while f_list:
+ try:
+ t = futures.wait(f_list, timeout=1,
+ return_when=futures.FIRST_COMPLETED)
+
+ done = t[0]
+
+ for fut in done:
+ res_list = fut.result()
+
+ for res in res_list:
+ if not res["found"]:
+ not_found.append(res["fname"])
+ if res["msg"]:
+ print(res["msg"])
+
+ f_list.remove(fut)
+ except KeyboardInterrupt:
+ return
+
+ except RuntimeError as e:
+ self.abi.log.warning(f"Future: {e}")
+ break
+
+ if sys.stderr.isatty():
+ elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
+ if len(f_list) < total:
+ elapsed += f" ({total - len(f_list)}/{total} jobs completed). "
+ if elapsed != old_elapsed:
+ print(elapsed + "\r", end="", flush=True,
+ file=sys.stderr)
+ old_elapsed = elapsed
+
+ elapsed = str(datetime.now() - start).split(".", maxsplit=1)[0]
+ print(elapsed, file=sys.stderr)
+
+ for f in sorted(not_found):
+ print(f"{f} not found.")
diff --git a/scripts/syscall.tbl b/scripts/syscall.tbl
index ebbdb3c42e9f..580b4e246aec 100644
--- a/scripts/syscall.tbl
+++ b/scripts/syscall.tbl
@@ -407,3 +407,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/security/Kconfig b/security/Kconfig
index f10dbf15c294..536061cf33a9 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -164,27 +164,6 @@ config LSM_MMAP_MIN_ADDR
this low address space will need the permission specific to the
systems running LSM.
-config HARDENED_USERCOPY
- bool "Harden memory copies between kernel and userspace"
- imply STRICT_DEVMEM
- help
- This option checks for obviously wrong memory regions when
- copying memory to/from the kernel (via copy_to_user() and
- copy_from_user() functions) by rejecting memory ranges that
- are larger than the specified heap object, span multiple
- separately allocated pages, are not on the process stack,
- or are part of the kernel text. This prevents entire classes
- of heap overflow exploits and similar kernel memory exposures.
-
-config FORTIFY_SOURCE
- bool "Harden common str/mem functions against buffer overflows"
- depends on ARCH_HAS_FORTIFY_SOURCE
- # https://github.com/llvm/llvm-project/issues/53645
- depends on !CC_IS_CLANG || !X86_32
- help
- Detect overflows of buffers in common string and memory functions
- where the compiler can determine and validate the buffer sizes.
-
config STATIC_USERMODEHELPER
bool "Force all usermode helper calls through a single binary"
help
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index b56e001e0c6a..c17366ce8224 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -280,6 +280,39 @@ config ZERO_CALL_USED_REGS
endmenu
+menu "Bounds checking"
+
+config FORTIFY_SOURCE
+ bool "Harden common str/mem functions against buffer overflows"
+ depends on ARCH_HAS_FORTIFY_SOURCE
+ # https://github.com/llvm/llvm-project/issues/53645
+ depends on !X86_32 || !CC_IS_CLANG || CLANG_VERSION >= 160000
+ help
+ Detect overflows of buffers in common string and memory functions
+ where the compiler can determine and validate the buffer sizes.
+
+config HARDENED_USERCOPY
+ bool "Harden memory copies between kernel and userspace"
+ imply STRICT_DEVMEM
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+ copy_from_user() functions) by rejecting memory ranges that
+ are larger than the specified heap object, span multiple
+ separately allocated pages, are not on the process stack,
+ or are part of the kernel text. This prevents entire classes
+ of heap overflow exploits and similar kernel memory exposures.
+
+config HARDENED_USERCOPY_DEFAULT_ON
+ bool "Harden memory copies by default"
+ depends on HARDENED_USERCOPY
+ default HARDENED_USERCOPY
+ help
+ This has the effect of setting "hardened_usercopy=on" on the kernel
+ command line. This can be disabled with "hardened_usercopy=off".
+
+endmenu
+
menu "Hardening of kernel data structures"
config LIST_HARDENED
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index c07d150685d7..6039afae4bfc 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -1795,8 +1795,8 @@ fail2:
return error;
}
-static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct aa_ns *ns, *parent;
/* TODO: improve permission check */
@@ -1808,7 +1808,7 @@ static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
AA_MAY_LOAD_POLICY);
end_current_label_crit_section(label);
if (error)
- return error;
+ return ERR_PTR(error);
parent = aa_get_ns(dir->i_private);
AA_BUG(d_inode(ns_subns_dir(parent)) != dir);
@@ -1843,7 +1843,7 @@ out:
mutex_unlock(&parent->lock);
aa_put_ns(parent);
- return error;
+ return ERR_PTR(error);
}
static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 7d687b0962b1..f27223ea4578 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -218,8 +218,10 @@ continue_scanning:
key = rb_entry(cursor, struct key, serial_node);
cursor = rb_next(cursor);
- if (refcount_read(&key->usage) == 0)
+ if (test_bit(KEY_FLAG_FINAL_PUT, &key->flags)) {
+ smp_mb(); /* Clobber key->user after FINAL_PUT seen. */
goto found_unreferenced_key;
+ }
if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
if (key->type == key_gc_dead_keytype) {
diff --git a/security/keys/key.c b/security/keys/key.c
index 3d7d185019d3..7198cd2ac3a3 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -658,6 +658,8 @@ void key_put(struct key *key)
key->user->qnbytes -= key->quotalen;
spin_unlock_irqrestore(&key->user->lock, flags);
}
+ smp_mb(); /* key->user before FINAL_PUT set. */
+ set_bit(KEY_FLAG_FINAL_PUT, &key->flags);
schedule_work(&key_gc_work);
}
}
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index 71b9dc331aae..582769ae830e 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -1216,7 +1216,7 @@ static void hook_inode_free_security_rcu(void *inode_security)
/*
* Release the inodes used in a security policy.
*
- * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
+ * Cf. fsnotify_unmount_inodes() and evict_inodes()
*/
static void hook_sb_delete(struct super_block *const sb)
{
diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig
index 848f8b4a6019..aef63d3e30df 100644
--- a/security/loadpin/Kconfig
+++ b/security/loadpin/Kconfig
@@ -16,7 +16,7 @@ config SECURITY_LOADPIN_ENFORCE
depends on SECURITY_LOADPIN
# Module compression breaks LoadPin unless modules are decompressed in
# the kernel.
- depends on !MODULES || (MODULE_COMPRESS_NONE || MODULE_DECOMPRESS)
+ depends on !MODULE_COMPRESS || MODULE_DECOMPRESS
help
If selected, LoadPin will enforce pinning at boot. If not
selected, it can be enabled at boot with the kernel parameter
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 7b867dfec88b..212cdead2b52 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3395,6 +3395,9 @@ static int selinux_path_notify(const struct path *path, u64 mask,
case FSNOTIFY_OBJ_TYPE_INODE:
perm = FILE__WATCH;
break;
+ case FSNOTIFY_OBJ_TYPE_MNTNS:
+ perm = FILE__WATCH_MOUNTNS;
+ break;
default:
return -EINVAL;
}
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 03e82477dce9..f9b5ca92a825 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -8,7 +8,7 @@
COMMON_FILE_SOCK_PERMS, "unlink", "link", "rename", "execute", \
"quotaon", "mounton", "audit_access", "open", "execmod", \
"watch", "watch_mount", "watch_sb", "watch_with_perm", \
- "watch_reads"
+ "watch_reads", "watch_mountns"
#define COMMON_SOCK_PERMS \
COMMON_FILE_SOCK_PERMS, "bind", "connect", "listen", "accept", \
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 1971710620c1..3d064dd4e03f 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -222,7 +222,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
int rc = -ENOSYS;
- struct task_struct *myself = current;
+ struct task_struct *myself;
switch (option) {
case PR_SET_PTRACER:
@@ -232,11 +232,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
* leader checking is handled later when walking the ancestry
* at the time of PTRACE_ATTACH check.
*/
- rcu_read_lock();
- if (!thread_group_leader(myself))
- myself = rcu_dereference(myself->group_leader);
- get_task_struct(myself);
- rcu_read_unlock();
+ myself = current->group_leader;
if (arg2 == 0) {
yama_ptracer_del(NULL, myself);
@@ -255,7 +251,6 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
}
}
- put_task_struct(myself);
break;
}
diff --git a/tools/include/nolibc/Makefile b/tools/include/nolibc/Makefile
index a1f55fb24bb3..dceec0e1a135 100644
--- a/tools/include/nolibc/Makefile
+++ b/tools/include/nolibc/Makefile
@@ -29,6 +29,7 @@ all_files := \
compiler.h \
crt.h \
ctype.h \
+ dirent.h \
errno.h \
nolibc.h \
signal.h \
diff --git a/tools/include/nolibc/arch-mips.h b/tools/include/nolibc/arch-mips.h
index 1791a8ce58da..753a8ed2cf69 100644
--- a/tools/include/nolibc/arch-mips.h
+++ b/tools/include/nolibc/arch-mips.h
@@ -179,6 +179,7 @@
})
/* startup code, note that it's called __start on MIPS */
+void __start(void);
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector __start(void)
{
__asm__ volatile (
diff --git a/tools/include/nolibc/arch-s390.h b/tools/include/nolibc/arch-s390.h
index f9ab83a219b8..df4c3cc713ac 100644
--- a/tools/include/nolibc/arch-s390.h
+++ b/tools/include/nolibc/arch-s390.h
@@ -5,8 +5,8 @@
#ifndef _NOLIBC_ARCH_S390_H
#define _NOLIBC_ARCH_S390_H
-#include <asm/signal.h>
-#include <asm/unistd.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
#include "compiler.h"
#include "crt.h"
@@ -143,8 +143,13 @@
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
__asm__ volatile (
+#ifdef __s390x__
"lgr %r2, %r15\n" /* save stack pointer to %r2, as arg1 of _start_c */
"aghi %r15, -160\n" /* allocate new stackframe */
+#else
+ "lr %r2, %r15\n"
+ "ahi %r15, -96\n"
+#endif
"xc 0(8,%r15), 0(%r15)\n" /* clear backchain */
"brasl %r14, _start_c\n" /* transfer to c runtime */
);
diff --git a/tools/include/nolibc/arch.h b/tools/include/nolibc/arch.h
index c8f4e5d3add9..8a2c143c0fba 100644
--- a/tools/include/nolibc/arch.h
+++ b/tools/include/nolibc/arch.h
@@ -29,7 +29,7 @@
#include "arch-powerpc.h"
#elif defined(__riscv)
#include "arch-riscv.h"
-#elif defined(__s390x__)
+#elif defined(__s390x__) || defined(__s390__)
#include "arch-s390.h"
#elif defined(__loongarch__)
#include "arch-loongarch.h"
diff --git a/tools/include/nolibc/crt.h b/tools/include/nolibc/crt.h
index bbcd5fd09806..c4b10103bbec 100644
--- a/tools/include/nolibc/crt.h
+++ b/tools/include/nolibc/crt.h
@@ -10,6 +10,7 @@
char **environ __attribute__((weak));
const unsigned long *_auxv __attribute__((weak));
+void _start(void);
static void __stack_chk_init(void);
static void exit(int);
@@ -22,6 +23,7 @@ extern void (*const __init_array_end[])(int, char **, char**) __attribute__((wea
extern void (*const __fini_array_start[])(void) __attribute__((weak));
extern void (*const __fini_array_end[])(void) __attribute__((weak));
+void _start_c(long *sp);
__attribute__((weak,used))
void _start_c(long *sp)
{
diff --git a/tools/include/nolibc/dirent.h b/tools/include/nolibc/dirent.h
new file mode 100644
index 000000000000..c5c30d0dd680
--- /dev/null
+++ b/tools/include/nolibc/dirent.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Directory access for NOLIBC
+ * Copyright (C) 2025 Thomas Weißschuh <linux@weissschuh.net>
+ */
+
+#ifndef _NOLIBC_DIRENT_H
+#define _NOLIBC_DIRENT_H
+
+#include "stdint.h"
+#include "types.h"
+
+#include <linux/limits.h>
+
+struct dirent {
+ ino_t d_ino;
+ char d_name[NAME_MAX + 1];
+};
+
+/* See comment of FILE in stdio.h */
+typedef struct {
+ char dummy[1];
+} DIR;
+
+static __attribute__((unused))
+DIR *fdopendir(int fd)
+{
+ if (fd < 0) {
+ SET_ERRNO(EBADF);
+ return NULL;
+ }
+ return (DIR *)(intptr_t)~fd;
+}
+
+static __attribute__((unused))
+DIR *opendir(const char *name)
+{
+ int fd;
+
+ fd = open(name, O_RDONLY);
+ if (fd == -1)
+ return NULL;
+ return fdopendir(fd);
+}
+
+static __attribute__((unused))
+int closedir(DIR *dirp)
+{
+ intptr_t i = (intptr_t)dirp;
+
+ if (i >= 0) {
+ SET_ERRNO(EBADF);
+ return -1;
+ }
+ return close(~i);
+}
+
+static __attribute__((unused))
+int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result)
+{
+ char buf[sizeof(struct linux_dirent64) + NAME_MAX + 1];
+ struct linux_dirent64 *ldir = (void *)buf;
+ intptr_t i = (intptr_t)dirp;
+ int fd, ret;
+
+ if (i >= 0)
+ return EBADF;
+
+ fd = ~i;
+
+ ret = sys_getdents64(fd, ldir, sizeof(buf));
+ if (ret < 0)
+ return -ret;
+ if (ret == 0) {
+ *result = NULL;
+ return 0;
+ }
+
+ /*
+ * getdents64() returns as many entries as fit the buffer.
+ * readdir() can only return one entry at a time.
+ * Make sure the non-returned ones are not skipped.
+ */
+ ret = lseek(fd, ldir->d_off, SEEK_SET);
+ if (ret == -1)
+ return errno;
+
+ entry->d_ino = ldir->d_ino;
+ /* the destination should always be big enough */
+ strlcpy(entry->d_name, ldir->d_name, sizeof(entry->d_name));
+ *result = entry;
+ return 0;
+}
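+
+/*
+ * Illustrative usage (error handling omitted):
+ *
+ * DIR *d = opendir("/proc");
+ * struct dirent entry, *result;
+ *
+ * while (!readdir_r(d, &entry, &result) && result)
+ * printf("%s\n", result->d_name);
+ * closedir(d);
+ */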
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_DIRENT_H */
diff --git a/tools/include/nolibc/errno.h b/tools/include/nolibc/errno.h
index a44486ff0477..1d8d8033e8ff 100644
--- a/tools/include/nolibc/errno.h
+++ b/tools/include/nolibc/errno.h
@@ -7,7 +7,7 @@
#ifndef _NOLIBC_ERRNO_H
#define _NOLIBC_ERRNO_H
-#include <asm/errno.h>
+#include <linux/errno.h>
#ifndef NOLIBC_IGNORE_ERRNO
#define SET_ERRNO(v) do { errno = (v); } while (0)
diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
index 92436b1e4441..70872401aca8 100644
--- a/tools/include/nolibc/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -31,8 +31,7 @@
* - The third level is the libc call definition. It exposes the lower raw
* sys_<name>() calls in a way that looks like what a libc usually does,
* takes care of specific input values, and of setting errno upon error.
- * There can be minor variations compared to standard libc calls. For
- * example the open() call always takes 3 args here.
+ * There can be minor variations compared to standard libc calls.
*
* The errno variable is declared static and unused. This way it can be
* optimized away if not used. However this means that a program made of
@@ -105,6 +104,7 @@
#include "string.h"
#include "time.h"
#include "stackprotector.h"
+#include "dirent.h"
/* Used by programs to avoid std includes */
#define NOLIBC
diff --git a/tools/include/nolibc/signal.h b/tools/include/nolibc/signal.h
index 137552216e46..cdcc5904c51e 100644
--- a/tools/include/nolibc/signal.h
+++ b/tools/include/nolibc/signal.h
@@ -13,6 +13,7 @@
#include "sys.h"
/* This one is not marked static as it's needed by libgcc for divide by zero */
+int raise(int signal);
__attribute__((weak,unused,section(".text.nolibc_raise")))
int raise(int signal)
{
diff --git a/tools/include/nolibc/stackprotector.h b/tools/include/nolibc/stackprotector.h
index 1d0d5259ec41..c71a2c257177 100644
--- a/tools/include/nolibc/stackprotector.h
+++ b/tools/include/nolibc/stackprotector.h
@@ -18,6 +18,7 @@
* triggering stack protector errors themselves
*/
+void __stack_chk_fail(void);
__attribute__((weak,used,noreturn,section(".text.nolibc_stack_chk")))
void __stack_chk_fail(void)
{
@@ -28,6 +29,7 @@ void __stack_chk_fail(void)
for (;;);
}
+void __stack_chk_fail_local(void);
__attribute__((weak,noreturn,section(".text.nolibc_stack_chk")))
void __stack_chk_fail_local(void)
{
diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
index 3892034198dd..a403351dbf60 100644
--- a/tools/include/nolibc/stdio.h
+++ b/tools/include/nolibc/stdio.h
@@ -350,6 +350,104 @@ int printf(const char *fmt, ...)
}
static __attribute__((unused))
+int vsscanf(const char *str, const char *format, va_list args)
+{
+ uintmax_t uval;
+ intmax_t ival;
+ int base;
+ char *endptr;
+ int matches;
+ int lpref;
+
+ matches = 0;
+
+ while (1) {
+ if (*format == '%') {
+ /* start of pattern */
+ lpref = 0;
+ format++;
+
+ if (*format == 'l') {
+ /* same as in printf() */
+ lpref = 1;
+ format++;
+ if (*format == 'l') {
+ lpref = 2;
+ format++;
+ }
+ }
+
+ if (*format == '%') {
+ /* literal % */
+ if ('%' != *str)
+ goto done;
+ str++;
+ format++;
+ continue;
+ } else if (*format == 'd') {
+ ival = strtoll(str, &endptr, 10);
+ if (lpref == 0)
+ *va_arg(args, int *) = ival;
+ else if (lpref == 1)
+ *va_arg(args, long *) = ival;
+ else if (lpref == 2)
+ *va_arg(args, long long *) = ival;
+ } else if (*format == 'u' || *format == 'x' || *format == 'X') {
+ base = *format == 'u' ? 10 : 16;
+ uval = strtoull(str, &endptr, base);
+ if (lpref == 0)
+ *va_arg(args, unsigned int *) = uval;
+ else if (lpref == 1)
+ *va_arg(args, unsigned long *) = uval;
+ else if (lpref == 2)
+ *va_arg(args, unsigned long long *) = uval;
+ } else if (*format == 'p') {
+ *va_arg(args, void **) = (void *)strtoul(str, &endptr, 16);
+ } else {
+ SET_ERRNO(EILSEQ);
+ goto done;
+ }
+
+ format++;
+ str = endptr;
+ matches++;
+
+ } else if (*format == '\0') {
+ goto done;
+ } else if (isspace(*format)) {
+ /* skip spaces in format and str */
+ while (isspace(*format))
+ format++;
+ while (isspace(*str))
+ str++;
+ } else if (*format == *str) {
+ /* literal match */
+ format++;
+ str++;
+ } else {
+ if (!matches)
+ matches = EOF;
+ goto done;
+ }
+ }
+
+done:
+ return matches;
+}
+
+static __attribute__((unused, format(scanf, 2, 3)))
+int sscanf(const char *str, const char *format, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, format);
+ ret = vsscanf(str, format, args);
+ va_end(args);
+ return ret;
+}
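+
+/*
+ * Illustrative usage: parse "6.14" into two integers. Only the %%, %d,
+ * %u, %x, %X and %p conversions (plus l/ll prefixes) are supported;
+ * width specifiers, %s and %c are not.
+ *
+ * int maj, min;
+ *
+ * if (sscanf("6.14", "%d.%d", &maj, &min) == 2)
+ * ...
+ */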
+
+static __attribute__((unused))
void perror(const char *msg)
{
fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
index 75aa273c23a6..86ad378ab1ea 100644
--- a/tools/include/nolibc/stdlib.h
+++ b/tools/include/nolibc/stdlib.h
@@ -30,6 +30,7 @@ static __attribute__((unused)) char itoa_buffer[21];
*/
/* must be exported, as it's used by libgcc for various divide functions */
+void abort(void);
__attribute__((weak,unused,noreturn,section(".text.nolibc_abort")))
void abort(void)
{
diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h
index 9ec9c24f38c0..ba84ab700e30 100644
--- a/tools/include/nolibc/string.h
+++ b/tools/include/nolibc/string.h
@@ -32,6 +32,7 @@ int memcmp(const void *s1, const void *s2, size_t n)
/* might be ignored by the compiler without -ffreestanding, then found as
* missing.
*/
+void *memmove(void *dst, const void *src, size_t len);
__attribute__((weak,unused,section(".text.nolibc_memmove")))
void *memmove(void *dst, const void *src, size_t len)
{
@@ -56,6 +57,7 @@ void *memmove(void *dst, const void *src, size_t len)
#ifndef NOLIBC_ARCH_HAS_MEMCPY
/* must be exported, as it's used by libgcc on ARM */
+void *memcpy(void *dst, const void *src, size_t len);
__attribute__((weak,unused,section(".text.nolibc_memcpy")))
void *memcpy(void *dst, const void *src, size_t len)
{
@@ -73,6 +75,7 @@ void *memcpy(void *dst, const void *src, size_t len)
/* might be ignored by the compiler without -ffreestanding, then found as
* missing.
*/
+void *memset(void *dst, int b, size_t len);
__attribute__((weak,unused,section(".text.nolibc_memset")))
void *memset(void *dst, int b, size_t len)
{
@@ -124,6 +127,7 @@ char *strcpy(char *dst, const char *src)
* thus itself, hence the asm() statement below that's meant to disable this
* confusing practice.
*/
+size_t strlen(const char *str);
__attribute__((weak,unused,section(".text.nolibc_strlen")))
size_t strlen(const char *str)
{
diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
index d4a5c2399a66..08c1c074bec8 100644
--- a/tools/include/nolibc/sys.h
+++ b/tools/include/nolibc/sys.h
@@ -10,10 +10,10 @@
#include "std.h"
/* system includes */
-#include <asm/unistd.h>
-#include <asm/signal.h> /* for SIGCHLD */
-#include <asm/ioctls.h>
-#include <asm/mman.h>
+#include <linux/unistd.h>
+#include <linux/signal.h> /* for SIGCHLD */
+#include <linux/termios.h>
+#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/loop.h>
#include <linux/time.h>
@@ -23,7 +23,6 @@
#include <linux/prctl.h>
#include <linux/resource.h>
#include <linux/utsname.h>
-#include <linux/signal.h>
#include "arch.h"
#include "errno.h"
@@ -532,20 +531,16 @@ uid_t getuid(void)
/*
- * int ioctl(int fd, unsigned long req, void *value);
+ * int ioctl(int fd, unsigned long cmd, ... arg);
*/
static __attribute__((unused))
-int sys_ioctl(int fd, unsigned long req, void *value)
+long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- return my_syscall3(__NR_ioctl, fd, req, value);
+ return my_syscall3(__NR_ioctl, fd, cmd, arg);
}
-static __attribute__((unused))
-int ioctl(int fd, unsigned long req, void *value)
-{
- return __sysret(sys_ioctl(fd, req, value));
-}
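+/*
+ * ioctl() is defined as a macro so that the last argument may be either a
+ * pointer or an integer without requiring a cast at each call site.
+ */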
+#define ioctl(fd, cmd, arg) __sysret(sys_ioctl(fd, cmd, (unsigned long)(arg)))
/*
* int kill(pid_t pid, int signal);
@@ -602,9 +597,36 @@ off_t sys_lseek(int fd, off_t offset, int whence)
}
static __attribute__((unused))
+int sys_llseek(int fd, unsigned long offset_high, unsigned long offset_low,
+ __kernel_loff_t *result, int whence)
+{
+#ifdef __NR_llseek
+ return my_syscall5(__NR_llseek, fd, offset_high, offset_low, result, whence);
+#else
+ return __nolibc_enosys(__func__, fd, offset_high, offset_low, result, whence);
+#endif
+}
+
+static __attribute__((unused))
off_t lseek(int fd, off_t offset, int whence)
{
- return __sysret(sys_lseek(fd, offset, whence));
+ __kernel_loff_t loff = 0;
+ off_t result;
+ int ret;
+
+ result = sys_lseek(fd, offset, whence);
+ if (result == -ENOSYS) {
+ /* Only exists on 32bit where nolibc off_t is also 32bit */
+ ret = sys_llseek(fd, 0, offset, &loff, whence);
+ if (ret < 0)
+ result = ret;
+ else if (loff != (off_t)loff)
+ result = -EOVERFLOW;
+ else
+ result = loff;
+ }
+
+ return __sysret(result);
}
@@ -742,6 +764,31 @@ int mount(const char *src, const char *tgt,
return __sysret(sys_mount(src, tgt, fst, flags, data));
}
+/*
+ * int openat(int dirfd, const char *path, int flags[, mode_t mode]);
+ */
+
+static __attribute__((unused))
+int sys_openat(int dirfd, const char *path, int flags, mode_t mode)
+{
+ return my_syscall4(__NR_openat, dirfd, path, flags, mode);
+}
+
+static __attribute__((unused))
+int openat(int dirfd, const char *path, int flags, ...)
+{
+ mode_t mode = 0;
+
+ if (flags & O_CREAT) {
+ va_list args;
+
+ va_start(args, flags);
+ mode = va_arg(args, mode_t);
+ va_end(args);
+ }
+
+ return __sysret(sys_openat(dirfd, path, flags, mode));
+}
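+
+/*
+ * Illustrative: openat(AT_FDCWD, path, flags) is equivalent to
+ * open(path, flags), as open() itself is implemented via __NR_openat.
+ */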
/*
* int open(const char *path, int flags[, mode_t mode]);
@@ -750,13 +797,7 @@ int mount(const char *src, const char *tgt,
static __attribute__((unused))
int sys_open(const char *path, int flags, mode_t mode)
{
-#ifdef __NR_openat
return my_syscall4(__NR_openat, AT_FDCWD, path, flags, mode);
-#elif defined(__NR_open)
- return my_syscall3(__NR_open, path, flags, mode);
-#else
- return __nolibc_enosys(__func__, path, flags, mode);
-#endif
}
static __attribute__((unused))
@@ -768,7 +809,7 @@ int open(const char *path, int flags, ...)
va_list args;
va_start(args, flags);
- mode = va_arg(args, int);
+ mode = va_arg(args, mode_t);
va_end(args);
}
diff --git a/tools/memory-model/Documentation/glossary.txt b/tools/memory-model/Documentation/glossary.txt
index 6f3d16dbf467..7ead94bffa4e 100644
--- a/tools/memory-model/Documentation/glossary.txt
+++ b/tools/memory-model/Documentation/glossary.txt
@@ -15,14 +15,14 @@ Address Dependency: When the address of a later memory access is computed
3 do_something(p->a);
4 rcu_read_unlock();
- In this case, because the address of "p->a" on line 3 is computed
- from the value returned by the rcu_dereference() on line 2, the
- address dependency extends from that rcu_dereference() to that
- "p->a". In rare cases, optimizing compilers can destroy address
- dependencies. Please see Documentation/RCU/rcu_dereference.rst
- for more information.
+ In this case, because the address of "p->a" on line 3 is computed
+ from the value returned by the rcu_dereference() on line 2, the
+ address dependency extends from that rcu_dereference() to that
+ "p->a". In rare cases, optimizing compilers can destroy address
+ dependencies. Please see Documentation/RCU/rcu_dereference.rst
+ for more information.
- See also "Control Dependency" and "Data Dependency".
+ See also "Control Dependency" and "Data Dependency".
Acquire: With respect to a lock, acquiring that lock, for example,
using spin_lock(). With respect to a non-lock shared variable,
@@ -59,12 +59,12 @@ Control Dependency: When a later store's execution depends on a test
1 if (READ_ONCE(x))
2 WRITE_ONCE(y, 1);
- Here, the control dependency extends from the READ_ONCE() on
- line 1 to the WRITE_ONCE() on line 2. Control dependencies are
- fragile, and can be easily destroyed by optimizing compilers.
- Please see control-dependencies.txt for more information.
+ Here, the control dependency extends from the READ_ONCE() on
+ line 1 to the WRITE_ONCE() on line 2. Control dependencies are
+ fragile, and can be easily destroyed by optimizing compilers.
+ Please see control-dependencies.txt for more information.
- See also "Address Dependency" and "Data Dependency".
+ See also "Address Dependency" and "Data Dependency".
Cycle: Memory-barrier pairing is restricted to a pair of CPUs, as the
name suggests. And in a great many cases, a pair of CPUs is all
@@ -72,10 +72,10 @@ Cycle: Memory-barrier pairing is restricted to a pair of CPUs, as the
extended to additional CPUs, and the result is called a "cycle".
In a cycle, each CPU's ordering interacts with that of the next:
- CPU 0 CPU 1 CPU 2
- WRITE_ONCE(x, 1); WRITE_ONCE(y, 1); WRITE_ONCE(z, 1);
- smp_mb(); smp_mb(); smp_mb();
- r0 = READ_ONCE(y); r1 = READ_ONCE(z); r2 = READ_ONCE(x);
+ CPU 0 CPU 1 CPU 2
+ WRITE_ONCE(x, 1); WRITE_ONCE(y, 1); WRITE_ONCE(z, 1);
+ smp_mb(); smp_mb(); smp_mb();
+ r0 = READ_ONCE(y); r1 = READ_ONCE(z); r2 = READ_ONCE(x);
CPU 0's smp_mb() interacts with that of CPU 1, which interacts
with that of CPU 2, which in turn interacts with that of CPU 0
diff --git a/tools/memory-model/Documentation/herd-representation.txt b/tools/memory-model/Documentation/herd-representation.txt
index ed988906f2b7..4e19b4f2a476 100644
--- a/tools/memory-model/Documentation/herd-representation.txt
+++ b/tools/memory-model/Documentation/herd-representation.txt
@@ -18,6 +18,11 @@
#
# By convention, a blank line in a cell means "same as the preceding line".
#
+# Note that the syntactic representation does not always match the sets and
+# relations in linux-kernel.cat, due to redefinitions in linux-kernel.bell and
+# lock.cat. For example, the po link between LKR and LKW is upgraded to an rmw
+# link, and W[ACQUIRE] are not included in the Acquire set.
+#
# Disclaimer. The table includes representations of "add" and "and" operations;
# corresponding/identical representations of "sub", "inc", "dec" and "or", "xor",
# "andnot" operations are omitted.
@@ -27,16 +32,16 @@
------------------------------------------------------------------------------
| Non-RMW ops | |
------------------------------------------------------------------------------
- | READ_ONCE | R[once] |
+ | READ_ONCE | R[ONCE] |
| atomic_read | |
- | WRITE_ONCE | W[once] |
+ | WRITE_ONCE | W[ONCE] |
| atomic_set | |
- | smp_load_acquire | R[acquire] |
+ | smp_load_acquire | R[ACQUIRE] |
| atomic_read_acquire | |
- | smp_store_release | W[release] |
+ | smp_store_release | W[RELEASE] |
| atomic_set_release | |
- | smp_store_mb | W[once] ->po F[mb] |
- | smp_mb | F[mb] |
+ | smp_store_mb | W[ONCE] ->po F[MB] |
+ | smp_mb | F[MB] |
| smp_rmb | F[rmb] |
| smp_wmb | F[wmb] |
| smp_mb__before_atomic | F[before-atomic] |
@@ -49,8 +54,8 @@
| rcu_read_lock | F[rcu-lock] |
| rcu_read_unlock | F[rcu-unlock] |
| synchronize_rcu | F[sync-rcu] |
- | rcu_dereference | R[once] |
- | rcu_assign_pointer | W[release] |
+ | rcu_dereference | R[ONCE] |
+ | rcu_assign_pointer | W[RELEASE] |
| srcu_read_lock | R[srcu-lock] |
| srcu_down_read | |
| srcu_read_unlock | W[srcu-unlock] |
@@ -60,32 +65,31 @@
------------------------------------------------------------------------------
| RMW ops w/o return value | |
------------------------------------------------------------------------------
- | atomic_add | R*[noreturn] ->rmw W*[once] |
+ | atomic_add | R*[NORETURN] ->rmw W*[NORETURN] |
| atomic_and | |
| spin_lock | LKR ->po LKW |
------------------------------------------------------------------------------
| RMW ops w/ return value | |
------------------------------------------------------------------------------
- | atomic_add_return | F[mb] ->po R*[once] |
- | | ->rmw W*[once] ->po F[mb] |
+ | atomic_add_return | R*[MB] ->rmw W*[MB] |
| atomic_fetch_add | |
| atomic_fetch_and | |
| atomic_xchg | |
| xchg | |
| atomic_add_negative | |
- | atomic_add_return_relaxed | R*[once] ->rmw W*[once] |
+ | atomic_add_return_relaxed | R*[ONCE] ->rmw W*[ONCE] |
| atomic_fetch_add_relaxed | |
| atomic_fetch_and_relaxed | |
| atomic_xchg_relaxed | |
| xchg_relaxed | |
| atomic_add_negative_relaxed | |
- | atomic_add_return_acquire | R*[acquire] ->rmw W*[once] |
+ | atomic_add_return_acquire | R*[ACQUIRE] ->rmw W*[ACQUIRE] |
| atomic_fetch_add_acquire | |
| atomic_fetch_and_acquire | |
| atomic_xchg_acquire | |
| xchg_acquire | |
| atomic_add_negative_acquire | |
- | atomic_add_return_release | R*[once] ->rmw W*[release] |
+ | atomic_add_return_release | R*[RELEASE] ->rmw W*[RELEASE] |
| atomic_fetch_add_release | |
| atomic_fetch_and_release | |
| atomic_xchg_release | |
@@ -94,17 +98,16 @@
------------------------------------------------------------------------------
| Conditional RMW ops | |
------------------------------------------------------------------------------
- | atomic_cmpxchg | On success: F[mb] ->po R*[once] |
- | | ->rmw W*[once] ->po F[mb] |
- | | On failure: R*[once] |
+ | atomic_cmpxchg | On success: R*[MB] ->rmw W*[MB] |
+ | | On failure: R*[MB] |
| cmpxchg | |
| atomic_add_unless | |
- | atomic_cmpxchg_relaxed | On success: R*[once] ->rmw W*[once] |
- | | On failure: R*[once] |
- | atomic_cmpxchg_acquire | On success: R*[acquire] ->rmw W*[once] |
- | | On failure: R*[once] |
- | atomic_cmpxchg_release | On success: R*[once] ->rmw W*[release] |
- | | On failure: R*[once] |
+ | atomic_cmpxchg_relaxed | On success: R*[ONCE] ->rmw W*[ONCE] |
+ | | On failure: R*[ONCE] |
+ | atomic_cmpxchg_acquire | On success: R*[ACQUIRE] ->rmw W*[ACQUIRE] |
+ | | On failure: R*[ACQUIRE] |
+ | atomic_cmpxchg_release | On success: R*[RELEASE] ->rmw W*[RELEASE] |
+ | | On failure: R*[RELEASE] |
| spin_trylock | On success: LKR ->po LKW |
| | On failure: LF |
------------------------------------------------------------------------------
diff --git a/tools/memory-model/README b/tools/memory-model/README
index dab38904206a..64c860863aa9 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -20,7 +20,7 @@ that litmus test to be exercised within the Linux kernel.
REQUIREMENTS
============
-Version 7.52 or higher of the "herd7" and "klitmus7" tools must be
+Version 7.58 or higher of the "herd7" and "klitmus7" tools must be
downloaded separately:
https://github.com/herd/herdtools7
@@ -79,7 +79,7 @@ Several thousand more example litmus tests are available here:
https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/herd
https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/litmus
-Documentation describing litmus tests and now to use them may be found
+Documentation describing litmus tests and how to use them may be found
here:
tools/memory-model/Documentation/litmus-tests.txt
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index ce068700939c..fe65998002b9 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -13,17 +13,18 @@
"Linux-kernel memory consistency model"
-enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
- 'release (*smp_store_release*) ||
- 'acquire (*smp_load_acquire*) ||
- 'noreturn (* R of non-return RMW *)
-instructions R[{'once,'acquire,'noreturn}]
-instructions W[{'once,'release}]
-instructions RMW[{'once,'acquire,'release}]
+enum Accesses = 'ONCE (*READ_ONCE,WRITE_ONCE*) ||
+ 'RELEASE (*smp_store_release*) ||
+ 'ACQUIRE (*smp_load_acquire*) ||
+ 'NORETURN (* R of non-return RMW *) ||
+ 'MB (*xchg(),cmpxchg(),...*)
+instructions R[Accesses]
+instructions W[Accesses]
+instructions RMW[Accesses]
enum Barriers = 'wmb (*smp_wmb*) ||
'rmb (*smp_rmb*) ||
- 'mb (*smp_mb*) ||
+ 'MB (*smp_mb*) ||
'barrier (*barrier*) ||
'rcu-lock (*rcu_read_lock*) ||
'rcu-unlock (*rcu_read_unlock*) ||
@@ -35,6 +36,17 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'after-srcu-read-unlock (*smp_mb__after_srcu_read_unlock*)
instructions F[Barriers]
+
+(*
+ * Filter out syntactic annotations that do not provide the corresponding
+ * semantic ordering, such as Acquire on a store or Mb on a failed RMW.
+ *)
+let FailedRMW = RMW \ (domain(rmw) | range(rmw))
+let Acquire = ACQUIRE \ W \ FailedRMW
+let Release = RELEASE \ R \ FailedRMW
+let Mb = MB \ FailedRMW
+let Noreturn = NORETURN \ W
+
(* SRCU *)
enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
instructions SRCU[SRCU]
@@ -73,7 +85,7 @@ flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
flag ~empty different-values(srcu-rscs) as srcu-bad-value-match
(* Compute marked and plain memory accesses *)
-let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
+let Marked = (~M) | IW | ONCE | RELEASE | ACQUIRE | MB | RMW |
LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock
let Plain = M \ Marked
@@ -82,3 +94,6 @@ let carry-dep = (data ; [~ Srcu-unlock] ; rfi)*
let addr = carry-dep ; addr
let ctrl = carry-dep ; ctrl
let data = carry-dep ; data
+
+flag ~empty (if "lkmmv2" then 0 else _)
+ as this-model-requires-variant-higher-than-lkmmv1
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index adf3c4f41229..d7e7bf13c831 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -34,6 +34,16 @@ let R4rmb = R \ Noreturn (* Reads for which rmb works *)
let rmb = [R4rmb] ; fencerel(Rmb) ; [R4rmb]
let wmb = [W] ; fencerel(Wmb) ; [W]
let mb = ([M] ; fencerel(Mb) ; [M]) |
+ (*
+ * full-barrier RMWs (successful cmpxchg(), xchg(), etc.) act as
+ * though there were enclosed by smp_mb().
+ * The effect of these virtual smp_mb() is formalized by adding
+ * Mb tags to the read and write of the operation, and providing
+ * the same ordering as though there were additional po edges
+ * between the Mb tag and the read resp. write.
+ *)
+ ([M] ; po ; [Mb & R]) |
+ ([Mb & W] ; po ; [M]) |
([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
diff --git a/tools/memory-model/linux-kernel.cfg b/tools/memory-model/linux-kernel.cfg
index 3c8098e99f41..69b04f3aad73 100644
--- a/tools/memory-model/linux-kernel.cfg
+++ b/tools/memory-model/linux-kernel.cfg
@@ -1,6 +1,7 @@
macros linux-kernel.def
bell linux-kernel.bell
model linux-kernel.cat
+variant lkmmv2
graph columns
squished true
showevents noregs
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index 88a39601f525..49e402782e49 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -6,18 +6,18 @@
// which appeared in ASPLOS 2018.
// ONCE
-READ_ONCE(X) __load{once}(X)
-WRITE_ONCE(X,V) { __store{once}(X,V); }
+READ_ONCE(X) __load{ONCE}(X)
+WRITE_ONCE(X,V) { __store{ONCE}(X,V); }
// Release Acquire and friends
-smp_store_release(X,V) { __store{release}(*X,V); }
-smp_load_acquire(X) __load{acquire}(*X)
-rcu_assign_pointer(X,V) { __store{release}(X,V); }
-rcu_dereference(X) __load{once}(X)
-smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }
+smp_store_release(X,V) { __store{RELEASE}(*X,V); }
+smp_load_acquire(X) __load{ACQUIRE}(*X)
+rcu_assign_pointer(X,V) { __store{RELEASE}(X,V); }
+rcu_dereference(X) __load{ONCE}(X)
+smp_store_mb(X,V) { __store{ONCE}(X,V); __fence{MB}; }
// Fences
-smp_mb() { __fence{mb}; }
+smp_mb() { __fence{MB}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
@@ -28,14 +28,14 @@ smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }
// Exchange
-xchg(X,V) __xchg{mb}(X,V)
-xchg_relaxed(X,V) __xchg{once}(X,V)
-xchg_release(X,V) __xchg{release}(X,V)
-xchg_acquire(X,V) __xchg{acquire}(X,V)
-cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
+xchg(X,V) __xchg{MB}(X,V)
+xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+xchg_release(X,V) __xchg{RELEASE}(X,V)
+xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
// Spinlocks
spin_lock(X) { __lock(X); }
@@ -63,57 +63,86 @@ atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }
-atomic_add(V,X) { __atomic_op(X,+,V); }
-atomic_sub(V,X) { __atomic_op(X,-,V); }
-atomic_inc(X) { __atomic_op(X,+,1); }
-atomic_dec(X) { __atomic_op(X,-,1); }
-
-atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
-atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
-atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
-atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
-atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
-atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
-atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
-atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)
-
-atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
-atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
-atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
-atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
-atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
-atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
-atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
-atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)
-
-atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
-atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
-atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
-atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
-atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
-atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
-atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
-atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)
-
-atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
-atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
-atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
-atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
-atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
-atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
-atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
-atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
-
-atomic_xchg(X,V) __xchg{mb}(X,V)
-atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
-atomic_xchg_release(X,V) __xchg{release}(X,V)
-atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
-atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
-
-atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
-atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
-atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
-atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
+atomic_add(V,X) { __atomic_op{NORETURN}(X,+,V); }
+atomic_sub(V,X) { __atomic_op{NORETURN}(X,-,V); }
+atomic_and(V,X) { __atomic_op{NORETURN}(X,&,V); }
+atomic_or(V,X) { __atomic_op{NORETURN}(X,|,V); }
+atomic_xor(V,X) { __atomic_op{NORETURN}(X,^,V); }
+atomic_inc(X) { __atomic_op{NORETURN}(X,+,1); }
+atomic_dec(X) { __atomic_op{NORETURN}(X,-,1); }
+atomic_andnot(V,X) { __atomic_op{NORETURN}(X,&~,V); }
+
+atomic_add_return(V,X) __atomic_op_return{MB}(X,+,V)
+atomic_add_return_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V)
+atomic_add_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V)
+atomic_add_return_release(V,X) __atomic_op_return{RELEASE}(X,+,V)
+atomic_fetch_add(V,X) __atomic_fetch_op{MB}(X,+,V)
+atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{ONCE}(X,+,V)
+atomic_fetch_add_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,+,V)
+atomic_fetch_add_release(V,X) __atomic_fetch_op{RELEASE}(X,+,V)
+
+atomic_fetch_and(V,X) __atomic_fetch_op{MB}(X,&,V)
+atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&,V)
+atomic_fetch_and_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&,V)
+atomic_fetch_and_release(V,X) __atomic_fetch_op{RELEASE}(X,&,V)
+
+atomic_fetch_or(V,X) __atomic_fetch_op{MB}(X,|,V)
+atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{ONCE}(X,|,V)
+atomic_fetch_or_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,|,V)
+atomic_fetch_or_release(V,X) __atomic_fetch_op{RELEASE}(X,|,V)
+
+atomic_fetch_xor(V,X) __atomic_fetch_op{MB}(X,^,V)
+atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{ONCE}(X,^,V)
+atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,^,V)
+atomic_fetch_xor_release(V,X) __atomic_fetch_op{RELEASE}(X,^,V)
+
+atomic_inc_return(X) __atomic_op_return{MB}(X,+,1)
+atomic_inc_return_relaxed(X) __atomic_op_return{ONCE}(X,+,1)
+atomic_inc_return_acquire(X) __atomic_op_return{ACQUIRE}(X,+,1)
+atomic_inc_return_release(X) __atomic_op_return{RELEASE}(X,+,1)
+atomic_fetch_inc(X) __atomic_fetch_op{MB}(X,+,1)
+atomic_fetch_inc_relaxed(X) __atomic_fetch_op{ONCE}(X,+,1)
+atomic_fetch_inc_acquire(X) __atomic_fetch_op{ACQUIRE}(X,+,1)
+atomic_fetch_inc_release(X) __atomic_fetch_op{RELEASE}(X,+,1)
+
+atomic_sub_return(V,X) __atomic_op_return{MB}(X,-,V)
+atomic_sub_return_relaxed(V,X) __atomic_op_return{ONCE}(X,-,V)
+atomic_sub_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,-,V)
+atomic_sub_return_release(V,X) __atomic_op_return{RELEASE}(X,-,V)
+atomic_fetch_sub(V,X) __atomic_fetch_op{MB}(X,-,V)
+atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{ONCE}(X,-,V)
+atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,-,V)
+atomic_fetch_sub_release(V,X) __atomic_fetch_op{RELEASE}(X,-,V)
+
+atomic_dec_return(X) __atomic_op_return{MB}(X,-,1)
+atomic_dec_return_relaxed(X) __atomic_op_return{ONCE}(X,-,1)
+atomic_dec_return_acquire(X) __atomic_op_return{ACQUIRE}(X,-,1)
+atomic_dec_return_release(X) __atomic_op_return{RELEASE}(X,-,1)
+atomic_fetch_dec(X) __atomic_fetch_op{MB}(X,-,1)
+atomic_fetch_dec_relaxed(X) __atomic_fetch_op{ONCE}(X,-,1)
+atomic_fetch_dec_acquire(X) __atomic_fetch_op{ACQUIRE}(X,-,1)
+atomic_fetch_dec_release(X) __atomic_fetch_op{RELEASE}(X,-,1)
+
+atomic_xchg(X,V) __xchg{MB}(X,V)
+atomic_xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+atomic_xchg_release(X,V) __xchg{RELEASE}(X,V)
+atomic_xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+atomic_cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+atomic_cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+atomic_cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
+
+atomic_sub_and_test(V,X) __atomic_op_return{MB}(X,-,V) == 0
+atomic_dec_and_test(X) __atomic_op_return{MB}(X,-,1) == 0
+atomic_inc_and_test(X) __atomic_op_return{MB}(X,+,1) == 0
+atomic_add_negative(V,X) __atomic_op_return{MB}(X,+,V) < 0
+atomic_add_negative_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V) < 0
+atomic_add_negative_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V) < 0
+atomic_add_negative_release(V,X) __atomic_op_return{RELEASE}(X,+,V) < 0
+
+atomic_fetch_andnot(V,X) __atomic_fetch_op{MB}(X,&~,V)
+atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&~,V)
+atomic_fetch_andnot_release(V,X) __atomic_fetch_op{RELEASE}(X,&~,V)
+atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&~,V)
+
+atomic_add_unless(X,V,W) __atomic_add_unless{MB}(X,V,W)
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 7849405614b1..dc4333d23189 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -7,6 +7,13 @@
#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H
+/*
+ * The generated kfunc prototypes in vmlinux.h are missing address space
+ * attributes, which causes build failures. For now, suppress the generated
+ * prototypes. See https://github.com/sched-ext/scx/issues/1111.
+ */
+#define BPF_NO_KFUNC_PROTOTYPES
+
#ifdef LSP
#define __bpf__
#include "../vmlinux.h"
@@ -18,6 +25,7 @@
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
#include "user_exit_info.h"
+#include "enum_defs.autogen.h"
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
@@ -62,21 +70,28 @@ void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
+u32 scx_bpf_nr_node_ids(void) __ksym __weak;
u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
+int scx_bpf_cpu_node(s32 cpu) __ksym __weak;
const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
+const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
+const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
+s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
+s32 scx_bpf_pick_any_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
+void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
/*
* Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
@@ -84,6 +99,9 @@ u64 scx_bpf_now(void) __ksym __weak;
*/
#define BPF_FOR_EACH_ITER (&___it)
+#define scx_read_event(e, name) \
+ (bpf_core_field_exists((e)->name) ? (e)->name : 0)
+
static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}
@@ -584,6 +602,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
__u.__val; \
})
+#define READ_ONCE_ARENA(type, x) \
+({ \
+ union { type __val; char __c[1]; } __u = \
+ { .__c = { 0 } }; \
+ __read_once_size((void *)&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
+#define WRITE_ONCE_ARENA(type, x, val) \
+({ \
+ union { type __val; char __c[1]; } __u = \
+ { .__val = (val) }; \
+ __write_once_size((void *)&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
/*
* log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
* @v: The value for which we're computing the base 2 logarithm.
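
READ_ONCE_ARENA()/WRITE_ONCE_ARENA() above mirror READ_ONCE()/WRITE_ONCE() but take an explicit type, since the type cannot be inferred through the (void *) cast that strips the arena address space. A minimal, hedged usage sketch — struct task_ctx, lookup_task_ctx() and slice_ns are illustrative assumptions, not definitions from this patch:

	/* Hypothetical: tearing-free access to an arena-backed field. */
	struct task_ctx __arena *taskc = lookup_task_ctx(p);
	u64 vtime;

	vtime = READ_ONCE_ARENA(u64, taskc->vtime);
	WRITE_ONCE_ARENA(u64, taskc->vtime, vtime + slice_ns);
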
diff --git a/tools/sched_ext/include/scx/common.h b/tools/sched_ext/include/scx/common.h
index dc18b99e55cd..1dc76bd84296 100644
--- a/tools/sched_ext/include/scx/common.h
+++ b/tools/sched_ext/include/scx/common.h
@@ -16,6 +16,7 @@
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
+#include "enum_defs.autogen.h"
typedef uint8_t u8;
typedef uint16_t u16;
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index 50e1499ae093..9252e1a00556 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -125,12 +125,107 @@ bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter,
false; \
})
+/**
+ * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
+ * in a compatible way. We will preserve this __COMPAT helper until v6.16.
+ *
+ * @enq_flags: enqueue flags from ops.enqueue()
+ *
+ * Return: True if SCX_ENQ_CPU_SELECTED is turned on in @enq_flags
+ */
+static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
+{
+#ifdef HAVE_SCX_ENQ_CPU_SELECTED
+ /*
+ * This is the case that a BPF code compiled against vmlinux.h
+	 * This is the case where the BPF code was compiled against a
+	 * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED exists.
+
+ /*
+	 * Temporarily suspend the macro expansion of
+	 * 'SCX_ENQ_CPU_SELECTED'. This avoids 'SCX_ENQ_CPU_SELECTED' being
+	 * rewritten to '__SCX_ENQ_CPU_SELECTED' when it is defined by
+	 * 'scripts/gen_enums.py'.
+ */
+#pragma push_macro("SCX_ENQ_CPU_SELECTED")
+#undef SCX_ENQ_CPU_SELECTED
+ u64 flag;
+
+ /*
+	 * On kernels without SCX_ENQ_CPU_SELECTED, select_task_rq_scx()
+	 * was never skipped. Thus, this case should be treated as if the
+	 * CPU had already been selected.
+ */
+ if (!bpf_core_enum_value_exists(enum scx_enq_flags,
+ SCX_ENQ_CPU_SELECTED))
+ return true;
+
+ flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);
+ return enq_flags & flag;
+
+ /*
+ * Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
+ */
+#pragma pop_macro("SCX_ENQ_CPU_SELECTED")
+#else
+ /*
+	 * This is the case where the BPF code was compiled against a
+	 * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED does NOT exist.
+ */
+ return true;
+#endif /* HAVE_SCX_ENQ_CPU_SELECTED */
+}
+
+
#define scx_bpf_now() \
(bpf_ksym_exists(scx_bpf_now) ? \
scx_bpf_now() : \
bpf_ktime_get_ns())
/*
+ * v6.15: Introduce event counters.
+ *
+ * Preserve the following macro until v6.17.
+ */
+#define __COMPAT_scx_bpf_events(events, size) \
+ (bpf_ksym_exists(scx_bpf_events) ? \
+ scx_bpf_events(events, size) : ({}))
+
+/*
+ * v6.15: Introduce NUMA-aware kfuncs to operate with per-node idle
+ * cpumasks.
+ *
+ * Preserve the following __COMPAT_scx_*_node macros until v6.17.
+ */
+#define __COMPAT_scx_bpf_nr_node_ids() \
+ (bpf_ksym_exists(scx_bpf_nr_node_ids) ? \
+ scx_bpf_nr_node_ids() : 1U)
+
+#define __COMPAT_scx_bpf_cpu_node(cpu) \
+ (bpf_ksym_exists(scx_bpf_cpu_node) ? \
+ scx_bpf_cpu_node(cpu) : 0)
+
+#define __COMPAT_scx_bpf_get_idle_cpumask_node(node) \
+ (bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ? \
+ scx_bpf_get_idle_cpumask_node(node) : \
+	 scx_bpf_get_idle_cpumask())
+
+#define __COMPAT_scx_bpf_get_idle_smtmask_node(node) \
+ (bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ? \
+ scx_bpf_get_idle_smtmask_node(node) : \
+ scx_bpf_get_idle_smtmask())
+
+#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) \
+ (bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ? \
+ scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) : \
+ scx_bpf_pick_idle_cpu(cpus_allowed, flags))
+
+#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) \
+ (bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ? \
+ scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) : \
+ scx_bpf_pick_any_cpu(cpus_allowed, flags))
+
+/*
* Define sched_ext_ops. This may be expanded to define multiple variants for
* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
*/
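
Together, the __COMPAT_scx_*_node() wrappers above let a scheduler adopt the v6.15 NUMA-aware idle-tracking kfuncs while still loading on older kernels, where they transparently fall back to the flat, system-wide variants. A hedged, ops.select_cpu()-style fragment — p and prev_cpu follow the usual callback signature, and the fragment itself is illustrative rather than part of this patch:

	int node = __COMPAT_scx_bpf_cpu_node(prev_cpu);
	s32 cpu;

	/* Prefer an idle CPU on the task's node; on pre-6.15 kernels the
	 * wrapper silently degrades to the system-wide idle mask.
	 */
	cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node,
						  SCX_PICK_IDLE_IN_NODE);
	if (cpu >= 0)
		return cpu;
	return prev_cpu;
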
diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h
index b50280e2ba2b..35c67c5174ac 100644
--- a/tools/sched_ext/include/scx/compat.h
+++ b/tools/sched_ext/include/scx/compat.h
@@ -106,8 +106,20 @@ static inline bool __COMPAT_struct_has_field(const char *type, const char *field
return false;
}
-#define SCX_OPS_SWITCH_PARTIAL \
- __COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL")
+#define SCX_OPS_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_ops_flags", #name)
+
+#define SCX_OPS_KEEP_BUILTIN_IDLE SCX_OPS_FLAG(SCX_OPS_KEEP_BUILTIN_IDLE)
+#define SCX_OPS_ENQ_LAST SCX_OPS_FLAG(SCX_OPS_ENQ_LAST)
+#define SCX_OPS_ENQ_EXITING SCX_OPS_FLAG(SCX_OPS_ENQ_EXITING)
+#define SCX_OPS_SWITCH_PARTIAL SCX_OPS_FLAG(SCX_OPS_SWITCH_PARTIAL)
+#define SCX_OPS_ENQ_MIGRATION_DISABLED SCX_OPS_FLAG(SCX_OPS_ENQ_MIGRATION_DISABLED)
+#define SCX_OPS_ALLOW_QUEUED_WAKEUP SCX_OPS_FLAG(SCX_OPS_ALLOW_QUEUED_WAKEUP)
+#define SCX_OPS_BUILTIN_IDLE_PER_NODE SCX_OPS_FLAG(SCX_OPS_BUILTIN_IDLE_PER_NODE)
+
+#define SCX_PICK_IDLE_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_pick_idle_cpu_flags", #name)
+
+#define SCX_PICK_IDLE_CORE SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_CORE)
+#define SCX_PICK_IDLE_IN_NODE SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_IN_NODE)
static inline long scx_hotplug_seq(void)
{
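
Because __COMPAT_ENUM_OR_ZERO() resolves to 0 when the running kernel's BTF lacks the enumerator, the SCX_OPS_FLAG()/SCX_PICK_IDLE_FLAG() definitions above can be OR-ed in unconditionally by userspace loaders. A hedged sketch — the skeleton and ops names are illustrative:

	/* Flags quietly degrade to 0 on kernels that predate them. */
	skel->struct_ops.qmap_ops->flags |= SCX_OPS_SWITCH_PARTIAL |
					    SCX_OPS_BUILTIN_IDLE_PER_NODE;
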
diff --git a/tools/sched_ext/include/scx/enum_defs.autogen.h b/tools/sched_ext/include/scx/enum_defs.autogen.h
new file mode 100644
index 000000000000..6e6c45f14fe1
--- /dev/null
+++ b/tools/sched_ext/include/scx/enum_defs.autogen.h
@@ -0,0 +1,120 @@
+/*
+ * WARNING: This file is autogenerated from gen_enum_defs.py [1].
+ *
+ * [1] https://github.com/sched-ext/scx/blob/main/scripts/gen_enum_defs.py
+ */
+
+#ifndef __ENUM_DEFS_AUTOGEN_H__
+#define __ENUM_DEFS_AUTOGEN_H__
+
+#define HAVE_SCX_DSP_DFL_MAX_BATCH
+#define HAVE_SCX_DSP_MAX_LOOPS
+#define HAVE_SCX_WATCHDOG_MAX_TIMEOUT
+#define HAVE_SCX_EXIT_BT_LEN
+#define HAVE_SCX_EXIT_MSG_LEN
+#define HAVE_SCX_EXIT_DUMP_DFL_LEN
+#define HAVE_SCX_CPUPERF_ONE
+#define HAVE_SCX_OPS_TASK_ITER_BATCH
+#define HAVE_SCX_CPU_PREEMPT_RT
+#define HAVE_SCX_CPU_PREEMPT_DL
+#define HAVE_SCX_CPU_PREEMPT_STOP
+#define HAVE_SCX_CPU_PREEMPT_UNKNOWN
+#define HAVE_SCX_DEQ_SLEEP
+#define HAVE_SCX_DEQ_CORE_SCHED_EXEC
+#define HAVE_SCX_DSQ_FLAG_BUILTIN
+#define HAVE_SCX_DSQ_FLAG_LOCAL_ON
+#define HAVE_SCX_DSQ_INVALID
+#define HAVE_SCX_DSQ_GLOBAL
+#define HAVE_SCX_DSQ_LOCAL
+#define HAVE_SCX_DSQ_LOCAL_ON
+#define HAVE_SCX_DSQ_LOCAL_CPU_MASK
+#define HAVE_SCX_DSQ_ITER_REV
+#define HAVE___SCX_DSQ_ITER_HAS_SLICE
+#define HAVE___SCX_DSQ_ITER_HAS_VTIME
+#define HAVE___SCX_DSQ_ITER_USER_FLAGS
+#define HAVE___SCX_DSQ_ITER_ALL_FLAGS
+#define HAVE_SCX_DSQ_LNODE_ITER_CURSOR
+#define HAVE___SCX_DSQ_LNODE_PRIV_SHIFT
+#define HAVE_SCX_ENQ_WAKEUP
+#define HAVE_SCX_ENQ_HEAD
+#define HAVE_SCX_ENQ_CPU_SELECTED
+#define HAVE_SCX_ENQ_PREEMPT
+#define HAVE_SCX_ENQ_REENQ
+#define HAVE_SCX_ENQ_LAST
+#define HAVE___SCX_ENQ_INTERNAL_MASK
+#define HAVE_SCX_ENQ_CLEAR_OPSS
+#define HAVE_SCX_ENQ_DSQ_PRIQ
+#define HAVE_SCX_TASK_DSQ_ON_PRIQ
+#define HAVE_SCX_TASK_QUEUED
+#define HAVE_SCX_TASK_RESET_RUNNABLE_AT
+#define HAVE_SCX_TASK_DEQD_FOR_SLEEP
+#define HAVE_SCX_TASK_STATE_SHIFT
+#define HAVE_SCX_TASK_STATE_BITS
+#define HAVE_SCX_TASK_STATE_MASK
+#define HAVE_SCX_TASK_CURSOR
+#define HAVE_SCX_ECODE_RSN_HOTPLUG
+#define HAVE_SCX_ECODE_ACT_RESTART
+#define HAVE_SCX_EXIT_NONE
+#define HAVE_SCX_EXIT_DONE
+#define HAVE_SCX_EXIT_UNREG
+#define HAVE_SCX_EXIT_UNREG_BPF
+#define HAVE_SCX_EXIT_UNREG_KERN
+#define HAVE_SCX_EXIT_SYSRQ
+#define HAVE_SCX_EXIT_ERROR
+#define HAVE_SCX_EXIT_ERROR_BPF
+#define HAVE_SCX_EXIT_ERROR_STALL
+#define HAVE_SCX_KF_UNLOCKED
+#define HAVE_SCX_KF_CPU_RELEASE
+#define HAVE_SCX_KF_DISPATCH
+#define HAVE_SCX_KF_ENQUEUE
+#define HAVE_SCX_KF_SELECT_CPU
+#define HAVE_SCX_KF_REST
+#define HAVE___SCX_KF_RQ_LOCKED
+#define HAVE___SCX_KF_TERMINAL
+#define HAVE_SCX_KICK_IDLE
+#define HAVE_SCX_KICK_PREEMPT
+#define HAVE_SCX_KICK_WAIT
+#define HAVE_SCX_OPI_BEGIN
+#define HAVE_SCX_OPI_NORMAL_BEGIN
+#define HAVE_SCX_OPI_NORMAL_END
+#define HAVE_SCX_OPI_CPU_HOTPLUG_BEGIN
+#define HAVE_SCX_OPI_CPU_HOTPLUG_END
+#define HAVE_SCX_OPI_END
+#define HAVE_SCX_OPS_ENABLING
+#define HAVE_SCX_OPS_ENABLED
+#define HAVE_SCX_OPS_DISABLING
+#define HAVE_SCX_OPS_DISABLED
+#define HAVE_SCX_OPS_KEEP_BUILTIN_IDLE
+#define HAVE_SCX_OPS_ENQ_LAST
+#define HAVE_SCX_OPS_ENQ_EXITING
+#define HAVE_SCX_OPS_SWITCH_PARTIAL
+#define HAVE_SCX_OPS_HAS_CGROUP_WEIGHT
+#define HAVE_SCX_OPS_ALL_FLAGS
+#define HAVE_SCX_OPSS_NONE
+#define HAVE_SCX_OPSS_QUEUEING
+#define HAVE_SCX_OPSS_QUEUED
+#define HAVE_SCX_OPSS_DISPATCHING
+#define HAVE_SCX_OPSS_QSEQ_SHIFT
+#define HAVE_SCX_PICK_IDLE_CORE
+#define HAVE_SCX_OPS_NAME_LEN
+#define HAVE_SCX_SLICE_DFL
+#define HAVE_SCX_SLICE_INF
+#define HAVE_SCX_RQ_ONLINE
+#define HAVE_SCX_RQ_CAN_STOP_TICK
+#define HAVE_SCX_RQ_BAL_PENDING
+#define HAVE_SCX_RQ_BAL_KEEP
+#define HAVE_SCX_RQ_BYPASSING
+#define HAVE_SCX_RQ_IN_WAKEUP
+#define HAVE_SCX_RQ_IN_BALANCE
+#define HAVE_SCX_TASK_NONE
+#define HAVE_SCX_TASK_INIT
+#define HAVE_SCX_TASK_READY
+#define HAVE_SCX_TASK_ENABLED
+#define HAVE_SCX_TASK_NR_STATES
+#define HAVE_SCX_TG_ONLINE
+#define HAVE_SCX_TG_INITED
+#define HAVE_SCX_WAKE_FORK
+#define HAVE_SCX_WAKE_TTWU
+#define HAVE_SCX_WAKE_SYNC
+
+#endif /* __ENUM_DEFS_AUTOGEN_H__ */
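
Each HAVE_* marker above records that the vmlinux.h used at build time defines the corresponding enumerator, so BPF code can gate on it at preprocessing time (as __COMPAT_is_enq_cpu_selected() does) instead of referencing a possibly missing symbol. A hedged sketch of such a gate:

#ifdef HAVE_SCX_DSQ_LOCAL_ON
	/* The build-time vmlinux.h knows the enum; reference it directly. */
	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, slice_ns, enq_flags);
#else
	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, slice_ns, enq_flags);
#endif
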
diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c
index 1e9f74525d8f..6ba6e610eeaa 100644
--- a/tools/sched_ext/scx_central.c
+++ b/tools/sched_ext/scx_central.c
@@ -10,6 +10,7 @@
#include <unistd.h>
#include <inttypes.h>
#include <signal.h>
+#include <assert.h>
#include <libgen.h>
#include <bpf/bpf.h>
#include <scx/common.h>
@@ -60,14 +61,22 @@ restart:
skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();
skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
+ assert(skel->rodata->nr_cpu_ids <= INT32_MAX);
+
while ((opt = getopt(argc, argv, "s:c:pvh")) != -1) {
switch (opt) {
case 's':
skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000;
break;
- case 'c':
- skel->rodata->central_cpu = strtoul(optarg, NULL, 0);
+ case 'c': {
+ u32 central_cpu = strtoul(optarg, NULL, 0);
+ if (central_cpu >= skel->rodata->nr_cpu_ids) {
+ fprintf(stderr, "invalid central CPU id value, %u given (%u max)\n", central_cpu, skel->rodata->nr_cpu_ids);
+ return -1;
+ }
+ skel->rodata->central_cpu = (s32)central_cpu;
break;
+ }
case 'v':
verbose = true;
break;
@@ -96,7 +105,7 @@ restart:
*/
cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids);
SCX_BUG_ON(!cpuset, "Failed to allocate cpuset");
- CPU_ZERO(cpuset);
+ CPU_ZERO_S(CPU_ALLOC_SIZE(skel->rodata->nr_cpu_ids), cpuset);
CPU_SET(skel->rodata->central_cpu, cpuset);
SCX_BUG_ON(sched_setaffinity(0, sizeof(*cpuset), cpuset),
"Failed to affinitize to central CPU %d (max %d)",
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index 3a20bb0c014a..26c40ca4f36c 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -231,7 +231,7 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags)
}
/* if select_cpu() wasn't called, try direct dispatch */
- if (!(enq_flags & SCX_ENQ_CPU_SELECTED) &&
+ if (!__COMPAT_is_enq_cpu_selected(enq_flags) &&
(cpu = pick_direct_dispatch_cpu(p, scx_bpf_task_cpu(p))) >= 0) {
__sync_fetch_and_add(&nr_ddsp_from_enq, 1);
scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, slice_ns, enq_flags);
@@ -763,6 +763,8 @@ static void dump_shared_dsq(void)
static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
{
+ struct scx_event_stats events;
+
bpf_rcu_read_lock();
dispatch_highpri(true);
bpf_rcu_read_unlock();
@@ -772,6 +774,25 @@ static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
if (print_shared_dsq)
dump_shared_dsq();
+ __COMPAT_scx_bpf_events(&events, sizeof(events));
+
+ bpf_printk("%35s: %lld", "SCX_EV_SELECT_CPU_FALLBACK",
+ scx_read_event(&events, SCX_EV_SELECT_CPU_FALLBACK));
+ bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE",
+ scx_read_event(&events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE));
+ bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_KEEP_LAST",
+ scx_read_event(&events, SCX_EV_DISPATCH_KEEP_LAST));
+ bpf_printk("%35s: %lld", "SCX_EV_ENQ_SKIP_EXITING",
+ scx_read_event(&events, SCX_EV_ENQ_SKIP_EXITING));
+ bpf_printk("%35s: %lld", "SCX_EV_ENQ_SLICE_DFL",
+ scx_read_event(&events, SCX_EV_ENQ_SLICE_DFL));
+ bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DURATION",
+ scx_read_event(&events, SCX_EV_BYPASS_DURATION));
+ bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DISPATCH",
+ scx_read_event(&events, SCX_EV_BYPASS_DISPATCH));
+ bpf_printk("%35s: %lld", "SCX_EV_BYPASS_ACTIVATE",
+ scx_read_event(&events, SCX_EV_BYPASS_ACTIVATE));
+
bpf_timer_start(timer, ONE_SEC_IN_NS, 0);
return 0;
}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 8daac70c2f9d..2ebaf5e6942e 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -35,6 +35,7 @@ TARGETS += filesystems/epoll
TARGETS += filesystems/fat
TARGETS += filesystems/overlayfs
TARGETS += filesystems/statmount
+TARGETS += filesystems/mount-notify
TARGETS += firmware
TARGETS += fpu
TARGETS += ftrace
diff --git a/tools/testing/selftests/filesystems/mount-notify/.gitignore b/tools/testing/selftests/filesystems/mount-notify/.gitignore
new file mode 100644
index 000000000000..82a4846cbc4b
--- /dev/null
+++ b/tools/testing/selftests/filesystems/mount-notify/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/*_test
diff --git a/tools/testing/selftests/filesystems/mount-notify/Makefile b/tools/testing/selftests/filesystems/mount-notify/Makefile
new file mode 100644
index 000000000000..10be0227b5ae
--- /dev/null
+++ b/tools/testing/selftests/filesystems/mount-notify/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES)
+TEST_GEN_PROGS := mount-notify_test
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
new file mode 100644
index 000000000000..4a2d5c454fd1
--- /dev/null
+++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2025 Miklos Szeredi <miklos@szeredi.hu>
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <linux/fanotify.h>
+#include <unistd.h>
+#include <sys/fanotify.h>
+#include <sys/syscall.h>
+
+#include "../../kselftest_harness.h"
+#include "../statmount/statmount.h"
+
+#ifndef FAN_MNT_ATTACH
+struct fanotify_event_info_mnt {
+ struct fanotify_event_info_header hdr;
+ __u64 mnt_id;
+};
+#define FAN_MNT_ATTACH 0x01000000 /* Mount was attached */
+#endif
+
+#ifndef FAN_MNT_DETACH
+#define FAN_MNT_DETACH 0x02000000 /* Mount was detached */
+#endif
+
+#ifndef FAN_REPORT_MNT
+#define FAN_REPORT_MNT 0x00004000 /* Report mount events */
+#endif
+
+#ifndef FAN_MARK_MNTNS
+#define FAN_MARK_MNTNS 0x00000110
+#endif
+
+static uint64_t get_mnt_id(struct __test_metadata *const _metadata,
+ const char *path)
+{
+ struct statx sx;
+
+ ASSERT_EQ(statx(AT_FDCWD, path, 0, STATX_MNT_ID_UNIQUE, &sx), 0);
+ ASSERT_TRUE(!!(sx.stx_mask & STATX_MNT_ID_UNIQUE));
+ return sx.stx_mnt_id;
+}
+
+static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX";
+
+FIXTURE(fanotify) {
+ int fan_fd;
+ char buf[256];
+ unsigned int rem;
+ void *next;
+ char root_mntpoint[sizeof(root_mntpoint_templ)];
+ int orig_root;
+ int ns_fd;
+ uint64_t root_id;
+};
+
+FIXTURE_SETUP(fanotify)
+{
+ int ret;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+
+ self->ns_fd = open("/proc/self/ns/mnt", O_RDONLY);
+ ASSERT_GE(self->ns_fd, 0);
+
+ ASSERT_EQ(mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL), 0);
+
+ strcpy(self->root_mntpoint, root_mntpoint_templ);
+ ASSERT_NE(mkdtemp(self->root_mntpoint), NULL);
+
+ self->orig_root = open("/", O_PATH | O_CLOEXEC);
+ ASSERT_GE(self->orig_root, 0);
+
+ ASSERT_EQ(mount("tmpfs", self->root_mntpoint, "tmpfs", 0, NULL), 0);
+
+ ASSERT_EQ(chroot(self->root_mntpoint), 0);
+
+ ASSERT_EQ(chdir("/"), 0);
+
+ ASSERT_EQ(mkdir("a", 0700), 0);
+
+ ASSERT_EQ(mkdir("b", 0700), 0);
+
+ self->root_id = get_mnt_id(_metadata, "/");
+ ASSERT_NE(self->root_id, 0);
+
+ self->fan_fd = fanotify_init(FAN_REPORT_MNT, 0);
+ ASSERT_GE(self->fan_fd, 0);
+
+ ret = fanotify_mark(self->fan_fd, FAN_MARK_ADD | FAN_MARK_MNTNS,
+ FAN_MNT_ATTACH | FAN_MNT_DETACH, self->ns_fd, NULL);
+ ASSERT_EQ(ret, 0);
+
+ self->rem = 0;
+}
+
+FIXTURE_TEARDOWN(fanotify)
+{
+ ASSERT_EQ(self->rem, 0);
+ close(self->fan_fd);
+
+ ASSERT_EQ(fchdir(self->orig_root), 0);
+
+ ASSERT_EQ(chroot("."), 0);
+
+ EXPECT_EQ(umount2(self->root_mntpoint, MNT_DETACH), 0);
+ EXPECT_EQ(chdir(self->root_mntpoint), 0);
+ EXPECT_EQ(chdir("/"), 0);
+ EXPECT_EQ(rmdir(self->root_mntpoint), 0);
+}
+
+static uint64_t expect_notify(struct __test_metadata *const _metadata,
+ FIXTURE_DATA(fanotify) *self,
+ uint64_t *mask)
+{
+ struct fanotify_event_metadata *meta;
+ struct fanotify_event_info_mnt *mnt;
+ unsigned int thislen;
+
+ if (!self->rem) {
+ ssize_t len = read(self->fan_fd, self->buf, sizeof(self->buf));
+ ASSERT_GT(len, 0);
+
+ self->rem = len;
+ self->next = (void *) self->buf;
+ }
+
+ meta = self->next;
+ ASSERT_TRUE(FAN_EVENT_OK(meta, self->rem));
+
+ thislen = meta->event_len;
+ self->rem -= thislen;
+ self->next += thislen;
+
+ *mask = meta->mask;
+ thislen -= sizeof(*meta);
+
+ mnt = ((void *) meta) + meta->event_len - thislen;
+
+ ASSERT_EQ(thislen, sizeof(*mnt));
+
+ return mnt->mnt_id;
+}
+
+static void expect_notify_n(struct __test_metadata *const _metadata,
+ FIXTURE_DATA(fanotify) *self,
+ unsigned int n, uint64_t mask[], uint64_t mnts[])
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ mnts[i] = expect_notify(_metadata, self, &mask[i]);
+}
+
+static uint64_t expect_notify_mask(struct __test_metadata *const _metadata,
+ FIXTURE_DATA(fanotify) *self,
+ uint64_t expect_mask)
+{
+ uint64_t mntid, mask;
+
+ mntid = expect_notify(_metadata, self, &mask);
+ ASSERT_EQ(expect_mask, mask);
+
+ return mntid;
+}
+
+
+static void expect_notify_mask_n(struct __test_metadata *const _metadata,
+ FIXTURE_DATA(fanotify) *self,
+ uint64_t mask, unsigned int n, uint64_t mnts[])
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ mnts[i] = expect_notify_mask(_metadata, self, mask);
+}
+
+static void verify_mount_ids(struct __test_metadata *const _metadata,
+ const uint64_t list1[], const uint64_t list2[],
+ size_t num)
+{
+ unsigned int i, j;
+
+ // Check that neither list has any duplicates
+ for (i = 0; i < num; i++) {
+ for (j = 0; j < num; j++) {
+ if (i != j) {
+ ASSERT_NE(list1[i], list1[j]);
+ ASSERT_NE(list2[i], list2[j]);
+ }
+ }
+ }
+	// Check that all list1 members can be found in list2. Together with
+	// the above it means that list1 and list2 represent the same sets.
+ for (i = 0; i < num; i++) {
+ for (j = 0; j < num; j++) {
+ if (list1[i] == list2[j])
+ break;
+ }
+ ASSERT_NE(j, num);
+ }
+}
+
+static void check_mounted(struct __test_metadata *const _metadata,
+ const uint64_t mnts[], size_t num)
+{
+ ssize_t ret;
+ uint64_t *list;
+
+ list = malloc((num + 1) * sizeof(list[0]));
+ ASSERT_NE(list, NULL);
+
+ ret = listmount(LSMT_ROOT, 0, 0, list, num + 1, 0);
+ ASSERT_EQ(ret, num);
+
+ verify_mount_ids(_metadata, mnts, list, num);
+
+ free(list);
+}
+
+static void setup_mount_tree(struct __test_metadata *const _metadata,
+ int log2_num)
+{
+ int ret, i;
+
+ ret = mount("", "/", NULL, MS_SHARED, NULL);
+ ASSERT_EQ(ret, 0);
+
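+	/*
+	 * Each MS_BIND of "/" onto "/" propagates to every peer of the
+	 * shared root mount, doubling the number of mounts, so log2_num
+	 * iterations yield 2^log2_num mounts in total.
+	 */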
+ for (i = 0; i < log2_num; i++) {
+ ret = mount("/", "/", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+ }
+}
+
+TEST_F(fanotify, bind)
+{
+ int ret;
+ uint64_t mnts[2] = { self->root_id };
+
+ ret = mount("/", "/", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+ ASSERT_NE(mnts[0], mnts[1]);
+
+ check_mounted(_metadata, mnts, 2);
+
+ // Cleanup
+ uint64_t detach_id;
+ ret = umount("/");
+ ASSERT_EQ(ret, 0);
+
+ detach_id = expect_notify_mask(_metadata, self, FAN_MNT_DETACH);
+ ASSERT_EQ(detach_id, mnts[1]);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, move)
+{
+ int ret;
+ uint64_t mnts[2] = { self->root_id };
+ uint64_t move_id;
+
+ ret = mount("/", "/a", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+ ASSERT_NE(mnts[0], mnts[1]);
+
+ check_mounted(_metadata, mnts, 2);
+
+ ret = move_mount(AT_FDCWD, "/a", AT_FDCWD, "/b", 0);
+ ASSERT_EQ(ret, 0);
+
+ move_id = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH | FAN_MNT_DETACH);
+ ASSERT_EQ(move_id, mnts[1]);
+
+ // Cleanup
+ ret = umount("/b");
+ ASSERT_EQ(ret, 0);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, propagate)
+{
+ const unsigned int log2_num = 4;
+ const unsigned int num = (1 << log2_num);
+ uint64_t mnts[num];
+
+ setup_mount_tree(_metadata, log2_num);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, num - 1, mnts + 1);
+
+ mnts[0] = self->root_id;
+ check_mounted(_metadata, mnts, num);
+
+ // Cleanup
+ int ret;
+ uint64_t mnts2[num];
+ ret = umount2("/", MNT_DETACH);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("", "/", NULL, MS_PRIVATE, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts2[0] = self->root_id;
+ expect_notify_mask_n(_metadata, self, FAN_MNT_DETACH, num - 1, mnts2 + 1);
+ verify_mount_ids(_metadata, mnts, mnts2, num);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, fsmount)
+{
+ int ret, fs, mnt;
+ uint64_t mnts[2] = { self->root_id };
+
+ fs = fsopen("tmpfs", 0);
+ ASSERT_GE(fs, 0);
+
+ ret = fsconfig(fs, FSCONFIG_CMD_CREATE, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+
+ mnt = fsmount(fs, 0, 0);
+ ASSERT_GE(mnt, 0);
+
+ close(fs);
+
+ ret = move_mount(mnt, "", AT_FDCWD, "/a", MOVE_MOUNT_F_EMPTY_PATH);
+ ASSERT_EQ(ret, 0);
+
+ close(mnt);
+
+ mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+ ASSERT_NE(mnts[0], mnts[1]);
+
+ check_mounted(_metadata, mnts, 2);
+
+ // Cleanup
+ uint64_t detach_id;
+ ret = umount("/a");
+ ASSERT_EQ(ret, 0);
+
+ detach_id = expect_notify_mask(_metadata, self, FAN_MNT_DETACH);
+ ASSERT_EQ(detach_id, mnts[1]);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, reparent)
+{
+ uint64_t mnts[6] = { self->root_id };
+ uint64_t dmnts[3];
+ uint64_t masks[3];
+ unsigned int i;
+ int ret;
+
+ // Create setup with a[1] -> b[2] propagation
+ ret = mount("/", "/a", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("", "/a", NULL, MS_SHARED, NULL);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("/a", "/b", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("", "/b", NULL, MS_SLAVE, NULL);
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, 2, mnts + 1);
+
+ check_mounted(_metadata, mnts, 3);
+
+ // Mount on a[3], which is propagated to b[4]
+ ret = mount("/", "/a", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, 2, mnts + 3);
+
+ check_mounted(_metadata, mnts, 5);
+
+ // Mount on b[5], not propagated
+ ret = mount("/", "/b", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[5] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+
+ check_mounted(_metadata, mnts, 6);
+
+ // Umount a[3], which is propagated to b[4], but not b[5]
+ // This will result in b[5] "falling" on b[2]
+ ret = umount("/a");
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_n(_metadata, self, 3, masks, dmnts);
+ verify_mount_ids(_metadata, mnts + 3, dmnts, 3);
+
+ for (i = 0; i < 3; i++) {
+ if (dmnts[i] == mnts[5]) {
+ ASSERT_EQ(masks[i], FAN_MNT_ATTACH | FAN_MNT_DETACH);
+ } else {
+ ASSERT_EQ(masks[i], FAN_MNT_DETACH);
+ }
+ }
+
+ mnts[3] = mnts[5];
+ check_mounted(_metadata, mnts, 4);
+
+ // Cleanup
+ ret = umount("/b");
+ ASSERT_EQ(ret, 0);
+
+ ret = umount("/a");
+ ASSERT_EQ(ret, 0);
+
+ ret = umount("/b");
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_DETACH, 3, dmnts);
+ verify_mount_ids(_metadata, mnts + 1, dmnts, 3);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, rmdir)
+{
+ uint64_t mnts[3] = { self->root_id };
+ int ret;
+
+ ret = mount("/", "/a", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("/", "/a/b", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, 2, mnts + 1);
+
+ check_mounted(_metadata, mnts, 3);
+
+ ret = chdir("/a");
+ ASSERT_EQ(ret, 0);
+
+ ret = fork();
+ ASSERT_GE(ret, 0);
+
+ if (ret == 0) {
+ chdir("/");
+ unshare(CLONE_NEWNS);
+ mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL);
+ umount2("/a", MNT_DETACH);
+ // This triggers a detach in the other namespace
+ rmdir("/a");
+ exit(0);
+ }
+ wait(NULL);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_DETACH, 2, mnts + 1);
+ check_mounted(_metadata, mnts, 1);
+
+ // Cleanup
+ ret = chdir("/");
+ ASSERT_EQ(ret, 0);
+}
+
+TEST_F(fanotify, pivot_root)
+{
+ uint64_t mnts[3] = { self->root_id };
+ uint64_t mnts2[3];
+ int ret;
+
+ ret = mount("tmpfs", "/a", "tmpfs", 0, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[2] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+
+ ret = mkdir("/a/new", 0700);
+ ASSERT_EQ(ret, 0);
+
+ ret = mkdir("/a/old", 0700);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("/a", "/a/new", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+ check_mounted(_metadata, mnts, 3);
+
+ ret = syscall(SYS_pivot_root, "/a/new", "/a/new/old");
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH | FAN_MNT_DETACH, 2, mnts2);
+ verify_mount_ids(_metadata, mnts, mnts2, 2);
+ check_mounted(_metadata, mnts, 3);
+
+ // Cleanup
+ ret = syscall(SYS_pivot_root, "/old", "/old/a/new");
+ ASSERT_EQ(ret, 0);
+
+ ret = umount("/a/new");
+ ASSERT_EQ(ret, 0);
+
+ ret = umount("/a");
+ ASSERT_EQ(ret, 0);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c b/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
index 457cf76f3c5f..a3d8015897e9 100644
--- a/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
+++ b/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
@@ -3,6 +3,8 @@
#define _GNU_SOURCE
#include <fcntl.h>
+#include <linux/auto_dev-ioctl.h>
+#include <linux/errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
@@ -146,4 +148,16 @@ TEST_F(iterate_mount_namespaces, iterate_backward)
}
}
+TEST_F(iterate_mount_namespaces, nfs_valid_ioctl)
+{
+ ASSERT_NE(ioctl(self->fd_mnt_ns[0], AUTOFS_DEV_IOCTL_OPENMOUNT, NULL), 0);
+ ASSERT_EQ(errno, ENOTTY);
+
+ ASSERT_NE(ioctl(self->fd_mnt_ns[0], AUTOFS_DEV_IOCTL_CLOSEMOUNT, NULL), 0);
+ ASSERT_EQ(errno, ENOTTY);
+
+ ASSERT_NE(ioctl(self->fd_mnt_ns[0], AUTOFS_DEV_IOCTL_READY, NULL), 0);
+ ASSERT_EQ(errno, ENOTTY);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/filesystems/overlayfs/Makefile b/tools/testing/selftests/filesystems/overlayfs/Makefile
index e8d1adb021af..6c661232b3b5 100644
--- a/tools/testing/selftests/filesystems/overlayfs/Makefile
+++ b/tools/testing/selftests/filesystems/overlayfs/Makefile
@@ -1,7 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := dev_in_maps set_layers_via_fds
+CFLAGS += -Wall
+CFLAGS += $(KHDR_INCLUDES)
+LDLIBS += -lcap
-CFLAGS := -Wall -Werror
+LOCAL_HDRS += wrappers.h log.h
+
+TEST_GEN_PROGS := dev_in_maps
+TEST_GEN_PROGS += set_layers_via_fds
include ../../lib.mk
+
+$(OUTPUT)/set_layers_via_fds: ../utils.c
diff --git a/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c b/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c
index 1d0ae785a667..5074e64e74a8 100644
--- a/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c
+++ b/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c
@@ -6,26 +6,40 @@
#include <sched.h>
#include <stdio.h>
#include <string.h>
+#include <sys/socket.h>
#include <sys/stat.h>
+#include <sys/sysmacros.h>
#include <sys/mount.h>
#include <unistd.h>
#include "../../kselftest_harness.h"
+#include "../../pidfd/pidfd.h"
#include "log.h"
+#include "../utils.h"
#include "wrappers.h"
FIXTURE(set_layers_via_fds) {
+ int pidfd;
};
FIXTURE_SETUP(set_layers_via_fds)
{
- ASSERT_EQ(mkdir("/set_layers_via_fds", 0755), 0);
+ self->pidfd = -EBADF;
+ EXPECT_EQ(mkdir("/set_layers_via_fds", 0755), 0);
+ EXPECT_EQ(mkdir("/set_layers_via_fds_tmpfs", 0755), 0);
}
FIXTURE_TEARDOWN(set_layers_via_fds)
{
+ if (self->pidfd >= 0) {
+ EXPECT_EQ(sys_pidfd_send_signal(self->pidfd, SIGKILL, NULL, 0), 0);
+ EXPECT_EQ(close(self->pidfd), 0);
+ }
umount2("/set_layers_via_fds", 0);
- ASSERT_EQ(rmdir("/set_layers_via_fds"), 0);
+ EXPECT_EQ(rmdir("/set_layers_via_fds"), 0);
+
+ umount2("/set_layers_via_fds_tmpfs", 0);
+ EXPECT_EQ(rmdir("/set_layers_via_fds_tmpfs"), 0);
}
TEST_F(set_layers_via_fds, set_layers_via_fds)
@@ -214,4 +228,493 @@ TEST_F(set_layers_via_fds, set_500_layers_via_fds)
ASSERT_EQ(close(fd_overlay), 0);
}
+TEST_F(set_layers_via_fds, set_override_creds)
+{
+ int fd_context, fd_tmpfs, fd_overlay;
+ int layer_fds[] = { [0 ... 3] = -EBADF };
+ pid_t pid;
+ int pidfd;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "w", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l2", 0755), 0);
+
+ layer_fds[0] = openat(fd_tmpfs, "w", O_DIRECTORY);
+ ASSERT_GE(layer_fds[0], 0);
+
+ layer_fds[1] = openat(fd_tmpfs, "u", O_DIRECTORY);
+ ASSERT_GE(layer_fds[1], 0);
+
+ layer_fds[2] = openat(fd_tmpfs, "l1", O_DIRECTORY);
+ ASSERT_GE(layer_fds[2], 0);
+
+ layer_fds[3] = openat(fd_tmpfs, "l2", O_DIRECTORY);
+ ASSERT_GE(layer_fds[3], 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/tmp", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir", NULL, layer_fds[2]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, layer_fds[0]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, layer_fds[1]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[3]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_STRING, "metacopy", "on", 0), 0);
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "override_creds", NULL, 0)) {
+ TH_LOG("sys_fsconfig should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(EXIT_SUCCESS);
+ }
+ ASSERT_GE(sys_waitid(P_PID, pid, NULL, WEXITED), 0);
+ ASSERT_GE(close(pidfd), 0);
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "nooverride_creds", NULL, 0)) {
+ TH_LOG("sys_fsconfig should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(EXIT_SUCCESS);
+ }
+ ASSERT_GE(sys_waitid(P_PID, pid, NULL, WEXITED), 0);
+ ASSERT_GE(close(pidfd), 0);
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "override_creds", NULL, 0)) {
+ TH_LOG("sys_fsconfig should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(EXIT_SUCCESS);
+ }
+ ASSERT_GE(sys_waitid(P_PID, pid, NULL, WEXITED), 0);
+ ASSERT_GE(close(pidfd), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_overlay, "", -EBADF, "/set_layers_via_fds", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+}
+
+TEST_F(set_layers_via_fds, set_override_creds_invalid)
+{
+ int fd_context, fd_tmpfs, fd_overlay, ret;
+ int layer_fds[] = { [0 ... 3] = -EBADF };
+ pid_t pid;
+ int fd_userns1, fd_userns2;
+ int ipc_sockets[2];
+ char c;
+ const unsigned int predictable_fd_context_nr = 123;
+
+ fd_userns1 = get_userns_fd(0, 0, 10000);
+ ASSERT_GE(fd_userns1, 0);
+
+ fd_userns2 = get_userns_fd(0, 1234, 10000);
+ ASSERT_GE(fd_userns2, 0);
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_GE(ret, 0);
+
+ pid = create_child(&self->pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (close(ipc_sockets[0])) {
+ TH_LOG("close should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (!switch_userns(fd_userns2, 0, 0, false)) {
+ TH_LOG("switch_userns should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (read_nointr(ipc_sockets[1], &c, 1) != 1) {
+ TH_LOG("read_nointr should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (close(ipc_sockets[1])) {
+ TH_LOG("close should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (!sys_fsconfig(predictable_fd_context_nr, FSCONFIG_SET_FLAG, "override_creds", NULL, 0)) {
+ TH_LOG("sys_fsconfig should have failed");
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(EXIT_SUCCESS);
+ }
+
+ ASSERT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(switch_userns(fd_userns1, 0, 0, false), true);
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "w", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l2", 0755), 0);
+
+ layer_fds[0] = openat(fd_tmpfs, "w", O_DIRECTORY);
+ ASSERT_GE(layer_fds[0], 0);
+
+ layer_fds[1] = openat(fd_tmpfs, "u", O_DIRECTORY);
+ ASSERT_GE(layer_fds[1], 0);
+
+ layer_fds[2] = openat(fd_tmpfs, "l1", O_DIRECTORY);
+ ASSERT_GE(layer_fds[2], 0);
+
+ layer_fds[3] = openat(fd_tmpfs, "l2", O_DIRECTORY);
+ ASSERT_GE(layer_fds[3], 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/tmp", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+ ASSERT_EQ(dup3(fd_context, predictable_fd_context_nr, 0), predictable_fd_context_nr);
+ ASSERT_EQ(close(fd_context), 0);
+ fd_context = predictable_fd_context_nr;
+ ASSERT_EQ(write_nointr(ipc_sockets[0], "1", 1), 1);
+ ASSERT_EQ(close(ipc_sockets[0]), 0);
+
+ ASSERT_EQ(wait_for_pid(pid), 0);
+ ASSERT_EQ(close(self->pidfd), 0);
+ self->pidfd = -EBADF;
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, layer_fds[0]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, layer_fds[1]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[3]), 0);
+
+ for (int i = 0; i < ARRAY_SIZE(layer_fds); i++)
+ ASSERT_EQ(close(layer_fds[i]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "userxattr", NULL, 0), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_overlay, "", -EBADF, "/set_layers_via_fds", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+ ASSERT_EQ(close(fd_userns1), 0);
+ ASSERT_EQ(close(fd_userns2), 0);
+}
+
+TEST_F(set_layers_via_fds, set_override_creds_nomknod)
+{
+ int fd_context, fd_tmpfs, fd_overlay;
+ int layer_fds[] = { [0 ... 3] = -EBADF };
+ pid_t pid;
+ int pidfd;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "w", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l2", 0755), 0);
+
+ layer_fds[0] = openat(fd_tmpfs, "w", O_DIRECTORY);
+ ASSERT_GE(layer_fds[0], 0);
+
+ layer_fds[1] = openat(fd_tmpfs, "u", O_DIRECTORY);
+ ASSERT_GE(layer_fds[1], 0);
+
+ layer_fds[2] = openat(fd_tmpfs, "l1", O_DIRECTORY);
+ ASSERT_GE(layer_fds[2], 0);
+
+ layer_fds[3] = openat(fd_tmpfs, "l2", O_DIRECTORY);
+ ASSERT_GE(layer_fds[3], 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/tmp", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir", NULL, layer_fds[2]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, layer_fds[0]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, layer_fds[1]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[3]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "userxattr", NULL, 0), 0);
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (!cap_down(CAP_MKNOD))
+ _exit(EXIT_FAILURE);
+
+ if (!cap_down(CAP_SYS_ADMIN))
+ _exit(EXIT_FAILURE);
+
+ if (sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "override_creds", NULL, 0))
+ _exit(EXIT_FAILURE);
+
+ _exit(EXIT_SUCCESS);
+ }
+ ASSERT_EQ(sys_waitid(P_PID, pid, NULL, WEXITED), 0);
+ ASSERT_GE(close(pidfd), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_overlay, "", -EBADF, "/set_layers_via_fds", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(mknodat(fd_overlay, "dev-zero", S_IFCHR | 0644, makedev(1, 5)), -1);
+ ASSERT_EQ(errno, EPERM);
+
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+}
+
+TEST_F(set_layers_via_fds, set_500_layers_via_opath_fds)
+{
+ int fd_context, fd_tmpfs, fd_overlay, fd_work, fd_upper, fd_lower;
+ int layer_fds[500] = { [0 ... 499] = -EBADF };
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ for (int i = 0; i < ARRAY_SIZE(layer_fds); i++) {
+ char path[100];
+
+ sprintf(path, "l%d", i);
+ ASSERT_EQ(mkdirat(fd_tmpfs, path, 0755), 0);
+ layer_fds[i] = openat(fd_tmpfs, path, O_DIRECTORY | O_PATH);
+ ASSERT_GE(layer_fds[i], 0);
+ }
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "w", 0755), 0);
+ fd_work = openat(fd_tmpfs, "w", O_DIRECTORY | O_PATH);
+ ASSERT_GE(fd_work, 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ fd_upper = openat(fd_tmpfs, "u", O_DIRECTORY | O_PATH);
+ ASSERT_GE(fd_upper, 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l501", 0755), 0);
+ fd_lower = openat(fd_tmpfs, "l501", O_DIRECTORY | O_PATH);
+ ASSERT_GE(fd_lower, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/tmp", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, fd_work), 0);
+ ASSERT_EQ(close(fd_work), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, fd_upper), 0);
+ ASSERT_EQ(close(fd_upper), 0);
+
+ for (int i = 0; i < ARRAY_SIZE(layer_fds); i++) {
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[i]), 0);
+ ASSERT_EQ(close(layer_fds[i]), 0);
+ }
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, fd_lower), 0);
+ ASSERT_EQ(close(fd_lower), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+}
+
+TEST_F(set_layers_via_fds, set_layers_via_detached_mount_fds)
+{
+ int fd_context, fd_tmpfs, fd_overlay, fd_tmp;
+ int layer_fds[] = { [0 ... 8] = -EBADF };
+ bool layers_found[] = { [0 ... 8] = false };
+ size_t len = 0;
+ char *line = NULL;
+ FILE *f_mountinfo;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u/upper", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u/work", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l2", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l3", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l4", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "d1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "d2", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "d3", 0755), 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/set_layers_via_fds_tmpfs", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ fd_tmp = open_tree(fd_tmpfs, "u", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(fd_tmp, 0);
+
+ layer_fds[0] = openat(fd_tmp, "upper", O_CLOEXEC | O_DIRECTORY | O_PATH);
+ ASSERT_GE(layer_fds[0], 0);
+
+ layer_fds[1] = openat(fd_tmp, "work", O_CLOEXEC | O_DIRECTORY | O_PATH);
+ ASSERT_GE(layer_fds[1], 0);
+
+ layer_fds[2] = open_tree(fd_tmpfs, "l1", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[2], 0);
+
+ layer_fds[3] = open_tree(fd_tmpfs, "l2", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[3], 0);
+
+ layer_fds[4] = open_tree(fd_tmpfs, "l3", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[4], 0);
+
+ layer_fds[5] = open_tree(fd_tmpfs, "l4", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[5], 0);
+
+ layer_fds[6] = open_tree(fd_tmpfs, "d1", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[6], 0);
+
+ layer_fds[7] = open_tree(fd_tmpfs, "d2", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[7], 0);
+
+ layer_fds[8] = open_tree(fd_tmpfs, "d3", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[8], 0);
+
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir", NULL, layer_fds[2]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, layer_fds[0]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, layer_fds[1]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[3]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[4]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[5]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "datadir+", NULL, layer_fds[6]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "datadir+", NULL, layer_fds[7]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "datadir+", NULL, layer_fds[8]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_STRING, "metacopy", "on", 0), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_overlay, "", -EBADF, "/set_layers_via_fds", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ f_mountinfo = fopen("/proc/self/mountinfo", "r");
+ ASSERT_NE(f_mountinfo, NULL);
+
+ while (getline(&line, &len, f_mountinfo) != -1) {
+ char *haystack = line;
+
+ if (strstr(haystack, "workdir=/tmp/w"))
+ layers_found[0] = true;
+ if (strstr(haystack, "upperdir=/tmp/u"))
+ layers_found[1] = true;
+ if (strstr(haystack, "lowerdir+=/tmp/l1"))
+ layers_found[2] = true;
+ if (strstr(haystack, "lowerdir+=/tmp/l2"))
+ layers_found[3] = true;
+ if (strstr(haystack, "lowerdir+=/tmp/l3"))
+ layers_found[4] = true;
+ if (strstr(haystack, "lowerdir+=/tmp/l4"))
+ layers_found[5] = true;
+ if (strstr(haystack, "datadir+=/tmp/d1"))
+ layers_found[6] = true;
+ if (strstr(haystack, "datadir+=/tmp/d2"))
+ layers_found[7] = true;
+ if (strstr(haystack, "datadir+=/tmp/d3"))
+ layers_found[8] = true;
+ }
+ free(line);
+
+ for (int i = 0; i < ARRAY_SIZE(layer_fds); i++) {
+ ASSERT_EQ(layers_found[i], true);
+ ASSERT_EQ(close(layer_fds[i]), 0);
+ }
+
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+ ASSERT_EQ(fclose(f_mountinfo), 0);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/filesystems/overlayfs/wrappers.h b/tools/testing/selftests/filesystems/overlayfs/wrappers.h
index 071b95fd2ac0..c38bc48e0cfa 100644
--- a/tools/testing/selftests/filesystems/overlayfs/wrappers.h
+++ b/tools/testing/selftests/filesystems/overlayfs/wrappers.h
@@ -44,4 +44,21 @@ static inline int sys_move_mount(int from_dfd, const char *from_pathname,
to_pathname, flags);
}
+#ifndef OPEN_TREE_CLONE
+#define OPEN_TREE_CLONE 1
+#endif
+
+#ifndef OPEN_TREE_CLOEXEC
+#define OPEN_TREE_CLOEXEC O_CLOEXEC
+#endif
+
+#ifndef AT_RECURSIVE
+#define AT_RECURSIVE 0x8000
+#endif
+
+static inline int sys_open_tree(int dfd, const char *filename, unsigned int flags)
+{
+ return syscall(__NR_open_tree, dfd, filename, flags);
+}
+
#endif
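
As a usage note (not part of the patch): combined with the sys_move_mount()
wrapper already present in this header, sys_open_tree() with OPEN_TREE_CLONE
gives an fd-based equivalent of a recursive bind mount. A hedged sketch with
hypothetical paths and no error handling:

	int fd = sys_open_tree(-EBADF, "/src",
			       OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);

	/* fd now refers to a detached copy of the mount tree at /src. */
	sys_move_mount(fd, "", -EBADF, "/dst", MOVE_MOUNT_F_EMPTY_PATH);
	close(fd);
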
diff --git a/tools/testing/selftests/filesystems/statmount/statmount.h b/tools/testing/selftests/filesystems/statmount/statmount.h
index f4294bab9d73..a7a5289ddae9 100644
--- a/tools/testing/selftests/filesystems/statmount/statmount.h
+++ b/tools/testing/selftests/filesystems/statmount/statmount.h
@@ -25,7 +25,7 @@ static inline int statmount(uint64_t mnt_id, uint64_t mnt_ns_id, uint64_t mask,
return syscall(__NR_statmount, &req, buf, bufsize, flags);
}
-static ssize_t listmount(uint64_t mnt_id, uint64_t mnt_ns_id,
+static inline ssize_t listmount(uint64_t mnt_id, uint64_t mnt_ns_id,
uint64_t last_mnt_id, uint64_t list[], size_t num,
unsigned int flags)
{
diff --git a/tools/testing/selftests/filesystems/utils.c b/tools/testing/selftests/filesystems/utils.c
new file mode 100644
index 000000000000..e553c89c5b19
--- /dev/null
+++ b/tools/testing/selftests/filesystems/utils.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <fcntl.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <grp.h>
+#include <linux/limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/eventfd.h>
+#include <sys/fsuid.h>
+#include <sys/prctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/xattr.h>
+
+#include "utils.h"
+
+#define MAX_USERNS_LEVEL 32
+
+#define syserror(format, ...) \
+ ({ \
+ fprintf(stderr, "%m - " format "\n", ##__VA_ARGS__); \
+ (-errno); \
+ })
+
+#define syserror_set(__ret__, format, ...) \
+ ({ \
+ typeof(__ret__) __internal_ret__ = (__ret__); \
+		errno = labs(__internal_ret__); \
+ fprintf(stderr, "%m - " format "\n", ##__VA_ARGS__); \
+ __internal_ret__; \
+ })
+
+#define STRLITERALLEN(x) (sizeof(""x"") - 1)
+
+#define INTTYPE_TO_STRLEN(type) \
+ (2 + (sizeof(type) <= 1 \
+ ? 3 \
+ : sizeof(type) <= 2 \
+ ? 5 \
+ : sizeof(type) <= 4 \
+ ? 10 \
+ : sizeof(type) <= 8 ? 20 : sizeof(int[-2 * (sizeof(type) > 8)])))
+
+#define list_for_each(__iterator, __list) \
+ for (__iterator = (__list)->next; __iterator != __list; __iterator = __iterator->next)
+
+typedef enum idmap_type_t {
+ ID_TYPE_UID,
+ ID_TYPE_GID
+} idmap_type_t;
+
+struct id_map {
+ idmap_type_t map_type;
+ __u32 nsid;
+ __u32 hostid;
+ __u32 range;
+};
+
+struct list {
+ void *elem;
+ struct list *next;
+ struct list *prev;
+};
+
+struct userns_hierarchy {
+ int fd_userns;
+ int fd_event;
+ unsigned int level;
+ struct list id_map;
+};
+
+static inline void list_init(struct list *list)
+{
+ list->elem = NULL;
+ list->next = list->prev = list;
+}
+
+static inline int list_empty(const struct list *list)
+{
+ return list == list->next;
+}
+
+static inline void __list_add(struct list *new, struct list *prev, struct list *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+static inline void list_add_tail(struct list *head, struct list *list)
+{
+ __list_add(list, head->prev, head);
+}
+
+static inline void list_del(struct list *list)
+{
+ struct list *next, *prev;
+
+ next = list->next;
+ prev = list->prev;
+ next->prev = prev;
+ prev->next = next;
+}
+
+static ssize_t read_nointr(int fd, void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = read(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = write(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
+#define __STACK_SIZE (8 * 1024 * 1024)
+static pid_t do_clone(int (*fn)(void *), void *arg, int flags)
+{
+ void *stack;
+
+ stack = malloc(__STACK_SIZE);
+ if (!stack)
+ return -ENOMEM;
+
+#ifdef __ia64__
+ return __clone2(fn, stack, __STACK_SIZE, flags | SIGCHLD, arg, NULL);
+#else
+ return clone(fn, stack + __STACK_SIZE, flags | SIGCHLD, arg, NULL);
+#endif
+}
+
+static int get_userns_fd_cb(void *data)
+{
+ for (;;)
+ pause();
+ _exit(0);
+}
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static int write_id_mapping(idmap_type_t map_type, pid_t pid, const char *buf, size_t buf_size)
+{
+ int fd = -EBADF, setgroups_fd = -EBADF;
+ int fret = -1;
+ int ret;
+ char path[STRLITERALLEN("/proc/") + INTTYPE_TO_STRLEN(pid_t) +
+ STRLITERALLEN("/setgroups") + 1];
+
+ if (geteuid() != 0 && map_type == ID_TYPE_GID) {
+ ret = snprintf(path, sizeof(path), "/proc/%d/setgroups", pid);
+ if (ret < 0 || ret >= sizeof(path))
+ goto out;
+
+ setgroups_fd = open(path, O_WRONLY | O_CLOEXEC);
+ if (setgroups_fd < 0 && errno != ENOENT) {
+ syserror("Failed to open \"%s\"", path);
+ goto out;
+ }
+
+ if (setgroups_fd >= 0) {
+ ret = write_nointr(setgroups_fd, "deny\n", STRLITERALLEN("deny\n"));
+ if (ret != STRLITERALLEN("deny\n")) {
+ syserror("Failed to write \"deny\" to \"/proc/%d/setgroups\"", pid);
+ goto out;
+ }
+ }
+ }
+
+ ret = snprintf(path, sizeof(path), "/proc/%d/%cid_map", pid, map_type == ID_TYPE_UID ? 'u' : 'g');
+ if (ret < 0 || ret >= sizeof(path))
+ goto out;
+
+ fd = open(path, O_WRONLY | O_CLOEXEC);
+ if (fd < 0) {
+ syserror("Failed to open \"%s\"", path);
+ goto out;
+ }
+
+ ret = write_nointr(fd, buf, buf_size);
+ if (ret != buf_size) {
+ syserror("Failed to write %cid mapping to \"%s\"",
+ map_type == ID_TYPE_UID ? 'u' : 'g', path);
+ goto out;
+ }
+
+ fret = 0;
+out:
+ close(fd);
+ close(setgroups_fd);
+
+ return fret;
+}
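+
+/*
+ * For reference, the buffer handed to write_id_mapping() uses the kernel's
+ * "<id-inside-ns> <id-outside-ns> <range>" format, one mapping per line,
+ * e.g. with illustrative values:
+ *
+ *	const char buf[] = "0 100000 1000\n";
+ *	write_id_mapping(ID_TYPE_UID, pid, buf, sizeof(buf) - 1);
+ */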
+
+static int map_ids_from_idmap(struct list *idmap, pid_t pid)
+{
+ int fill, left;
+ char mapbuf[4096] = {};
+ bool had_entry = false;
+	idmap_type_t map_type;
+	char u_or_g;
+
+ if (list_empty(idmap))
+ return 0;
+
+ for (map_type = ID_TYPE_UID, u_or_g = 'u';
+ map_type <= ID_TYPE_GID; map_type++, u_or_g = 'g') {
+ char *pos = mapbuf;
+ int ret;
+ struct list *iterator;
+
+ list_for_each(iterator, idmap) {
+ struct id_map *map = iterator->elem;
+ if (map->map_type != map_type)
+ continue;
+
+ had_entry = true;
+
+ left = 4096 - (pos - mapbuf);
+ fill = snprintf(pos, left, "%u %u %u\n", map->nsid, map->hostid, map->range);
+ /*
+ * The kernel only takes <= 4k for writes to
+ * /proc/<pid>/{g,u}id_map
+ */
+ if (fill <= 0 || fill >= left)
+ return syserror_set(-E2BIG, "Too many %cid mappings defined", u_or_g);
+
+ pos += fill;
+ }
+ if (!had_entry)
+ continue;
+
+ ret = write_id_mapping(map_type, pid, mapbuf, pos - mapbuf);
+ if (ret < 0)
+ return syserror("Failed to write mapping: %s", mapbuf);
+
+ memset(mapbuf, 0, sizeof(mapbuf));
+ }
+
+ return 0;
+}
+
+static int get_userns_fd_from_idmap(struct list *idmap)
+{
+ int ret;
+ pid_t pid;
+ char path_ns[STRLITERALLEN("/proc/") + INTTYPE_TO_STRLEN(pid_t) +
+ STRLITERALLEN("/ns/user") + 1];
+
+ pid = do_clone(get_userns_fd_cb, NULL, CLONE_NEWUSER | CLONE_NEWNS);
+ if (pid < 0)
+ return -errno;
+
+ ret = map_ids_from_idmap(idmap, pid);
+ if (ret < 0)
+ return ret;
+
+ ret = snprintf(path_ns, sizeof(path_ns), "/proc/%d/ns/user", pid);
+ if (ret < 0 || (size_t)ret >= sizeof(path_ns))
+ ret = -EIO;
+ else
+ ret = open(path_ns, O_RDONLY | O_CLOEXEC | O_NOCTTY);
+
+ (void)kill(pid, SIGKILL);
+ (void)wait_for_pid(pid);
+ return ret;
+}
+
+int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long range)
+{
+ struct list head, uid_mapl, gid_mapl;
+ struct id_map uid_map = {
+ .map_type = ID_TYPE_UID,
+ .nsid = nsid,
+ .hostid = hostid,
+ .range = range,
+ };
+ struct id_map gid_map = {
+ .map_type = ID_TYPE_GID,
+ .nsid = nsid,
+ .hostid = hostid,
+ .range = range,
+ };
+
+ list_init(&head);
+ uid_mapl.elem = &uid_map;
+ gid_mapl.elem = &gid_map;
+ list_add_tail(&head, &uid_mapl);
+ list_add_tail(&head, &gid_mapl);
+
+ return get_userns_fd_from_idmap(&head);
+}
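+
+/*
+ * Usage sketch with illustrative values only: map ns ids 0..9999 to host
+ * ids 10000..19999, then join the new user namespace:
+ *
+ *	int fd_userns = get_userns_fd(0, 10000, 10000);
+ *
+ *	if (fd_userns >= 0) {
+ *		setns(fd_userns, CLONE_NEWUSER); // 0 on success
+ *		close(fd_userns);
+ *	}
+ */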
+
+bool switch_ids(uid_t uid, gid_t gid)
+{
+	/* syserror() returns -errno (truthy as bool); report failure as false. */
+	if (setgroups(0, NULL)) {
+		syserror("failure: setgroups");
+		return false;
+	}
+
+	if (setresgid(gid, gid, gid)) {
+		syserror("failure: setresgid");
+		return false;
+	}
+
+	if (setresuid(uid, uid, uid)) {
+		syserror("failure: setresuid");
+		return false;
+	}
+
+	/* Ensure we can access proc files from processes we can ptrace. */
+	if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0)) {
+		syserror("failure: make dumpable");
+		return false;
+	}
+
+ return true;
+}
+
+static int create_userns_hierarchy(struct userns_hierarchy *h);
+
+static int userns_fd_cb(void *data)
+{
+ struct userns_hierarchy *h = data;
+ char c;
+ int ret;
+
+ ret = read_nointr(h->fd_event, &c, 1);
+ if (ret < 0)
+ return syserror("failure: read from socketpair");
+
+ /* Only switch ids if someone actually wrote a mapping for us. */
+ if (c == '1') {
+ if (!switch_ids(0, 0))
+ return syserror("failure: switch ids to 0");
+ }
+
+ ret = write_nointr(h->fd_event, "1", 1);
+ if (ret < 0)
+ return syserror("failure: write to socketpair");
+
+ ret = create_userns_hierarchy(++h);
+ if (ret < 0)
+ return syserror("failure: userns level %d", h->level);
+
+ return 0;
+}
+
+static int create_userns_hierarchy(struct userns_hierarchy *h)
+{
+ int fret = -1;
+ char c;
+ int fd_socket[2];
+ int fd_userns = -EBADF, ret = -1;
+ ssize_t bytes;
+ pid_t pid;
+ char path[256];
+
+ if (h->level == MAX_USERNS_LEVEL)
+ return 0;
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, fd_socket);
+ if (ret < 0)
+ return syserror("failure: create socketpair");
+
+ /* Note the CLONE_FILES | CLONE_VM when mucking with fds and memory. */
+ h->fd_event = fd_socket[1];
+ pid = do_clone(userns_fd_cb, h, CLONE_NEWUSER | CLONE_FILES | CLONE_VM);
+ if (pid < 0) {
+ syserror("failure: userns level %d", h->level);
+ goto out_close;
+ }
+
+ ret = map_ids_from_idmap(&h->id_map, pid);
+ if (ret < 0) {
+ kill(pid, SIGKILL);
+ syserror("failure: writing id mapping for userns level %d for %d", h->level, pid);
+ goto out_wait;
+ }
+
+ if (!list_empty(&h->id_map))
+ bytes = write_nointr(fd_socket[0], "1", 1); /* Inform the child we wrote a mapping. */
+ else
+ bytes = write_nointr(fd_socket[0], "0", 1); /* Inform the child we didn't write a mapping. */
+ if (bytes < 0) {
+ kill(pid, SIGKILL);
+ syserror("failure: write to socketpair");
+ goto out_wait;
+ }
+
+ /* Wait for child to set*id() and become dumpable. */
+ bytes = read_nointr(fd_socket[0], &c, 1);
+ if (bytes < 0) {
+ kill(pid, SIGKILL);
+ syserror("failure: read from socketpair");
+ goto out_wait;
+ }
+
+ snprintf(path, sizeof(path), "/proc/%d/ns/user", pid);
+ fd_userns = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd_userns < 0) {
+ kill(pid, SIGKILL);
+ syserror("failure: open userns level %d for %d", h->level, pid);
+ goto out_wait;
+ }
+
+ fret = 0;
+
+out_wait:
+ if (!wait_for_pid(pid) && !fret) {
+ h->fd_userns = fd_userns;
+ fd_userns = -EBADF;
+ }
+
+out_close:
+ if (fd_userns >= 0)
+ close(fd_userns);
+ close(fd_socket[0]);
+ close(fd_socket[1]);
+ return fret;
+}
+
+/* caps_down - lower all effective caps */
+int caps_down(void)
+{
+ bool fret = false;
+ cap_t caps = NULL;
+ int ret = -1;
+
+ caps = cap_get_proc();
+ if (!caps)
+ goto out;
+
+ ret = cap_clear_flag(caps, CAP_EFFECTIVE);
+ if (ret)
+ goto out;
+
+ ret = cap_set_proc(caps);
+ if (ret)
+ goto out;
+
+ fret = true;
+
+out:
+ cap_free(caps);
+ return fret;
+}
+
+/* cap_down - lower an effective cap */
+int cap_down(cap_value_t down)
+{
+ bool fret = false;
+ cap_t caps = NULL;
+ cap_value_t cap = down;
+ int ret = -1;
+
+ caps = cap_get_proc();
+ if (!caps)
+ goto out;
+
+ ret = cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap, 0);
+ if (ret)
+ goto out;
+
+ ret = cap_set_proc(caps);
+ if (ret)
+ goto out;
+
+ fret = true;
+
+out:
+ cap_free(caps);
+ return fret;
+}
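
Both capability helpers above return true on success and rely on libcap, so
test binaries using them link with -lcap. An illustrative call, dropping only
CAP_SYS_ADMIN from the effective set while leaving it permitted:

	#include <stdlib.h>
	#include <sys/capability.h>

	#include "utils.h"

	static void drop_admin_or_die(void)
	{
		/* Lowers only the effective bit; the permitted set is untouched. */
		if (!cap_down(CAP_SYS_ADMIN))
			exit(EXIT_FAILURE);
	}
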
diff --git a/tools/testing/selftests/filesystems/utils.h b/tools/testing/selftests/filesystems/utils.h
new file mode 100644
index 000000000000..7f1df2a3e94c
--- /dev/null
+++ b/tools/testing/selftests/filesystems/utils.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __IDMAP_UTILS_H
+#define __IDMAP_UTILS_H
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <errno.h>
+#include <linux/types.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/capability.h>
+#include <sys/fsuid.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+extern int get_userns_fd(unsigned long nsid, unsigned long hostid,
+ unsigned long range);
+
+extern int caps_down(void);
+extern int cap_down(cap_value_t down);
+
+extern bool switch_ids(uid_t uid, gid_t gid);
+
+static inline bool switch_userns(int fd, uid_t uid, gid_t gid, bool drop_caps)
+{
+ if (setns(fd, CLONE_NEWUSER))
+ return false;
+
+ if (!switch_ids(uid, gid))
+ return false;
+
+ if (drop_caps && !caps_down())
+ return false;
+
+ return true;
+}
+
+#endif /* __IDMAP_UTILS_H */
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index cdf91b0ca40f..c3b6d2604b1e 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -444,10 +444,6 @@ static inline __noreturn __printf(1, 2) void ksft_exit_skip(const char *msg, ...
static inline int ksft_min_kernel_version(unsigned int min_major,
unsigned int min_minor)
{
-#ifdef NOLIBC
- ksft_print_msg("NOLIBC: Can't check kernel version: Function not implemented\n");
- return 0;
-#else
unsigned int major, minor;
struct utsname info;
@@ -455,7 +451,6 @@ static inline int ksft_min_kernel_version(unsigned int min_major,
ksft_exit_fail_msg("Can't parse kernel version\n");
return major > min_major || (major == min_major && minor >= min_minor);
-#endif
}
#endif /* __KSELFTEST_H */
diff --git a/tools/testing/selftests/kselftest/module.sh b/tools/testing/selftests/kselftest/module.sh
index fb4733faff12..51fb65159932 100755
--- a/tools/testing/selftests/kselftest/module.sh
+++ b/tools/testing/selftests/kselftest/module.sh
@@ -11,7 +11,7 @@
# SPDX-License-Identifier: GPL-2.0+
# $(dirname $0)/../kselftest/module.sh "description" module_name
#
-# Example: tools/testing/selftests/lib/printf.sh
+# Example: tools/testing/selftests/lib/bitmap.sh
desc="" # Output prefix.
module="" # Filename (without the .ko).
diff --git a/tools/testing/selftests/lib/Makefile b/tools/testing/selftests/lib/Makefile
index c52fe3ad8e98..f876bf4744e1 100644
--- a/tools/testing/selftests/lib/Makefile
+++ b/tools/testing/selftests/lib/Makefile
@@ -4,5 +4,5 @@
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
all:
-TEST_PROGS := printf.sh bitmap.sh prime_numbers.sh scanf.sh
+TEST_PROGS := bitmap.sh
include ../lib.mk
diff --git a/tools/testing/selftests/lib/config b/tools/testing/selftests/lib/config
index dc15aba8d0a3..81a1f64a22e8 100644
--- a/tools/testing/selftests/lib/config
+++ b/tools/testing/selftests/lib/config
@@ -1,5 +1,2 @@
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
-CONFIG_PRIME_NUMBERS=m
CONFIG_TEST_BITOPS=m
diff --git a/tools/testing/selftests/lib/prime_numbers.sh b/tools/testing/selftests/lib/prime_numbers.sh
deleted file mode 100755
index 370b79a9cb2e..000000000000
--- a/tools/testing/selftests/lib/prime_numbers.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Checks fast/slow prime_number generation for inconsistencies
-$(dirname $0)/../kselftest/module.sh "prime numbers" prime_numbers selftest=65536
diff --git a/tools/testing/selftests/lib/printf.sh b/tools/testing/selftests/lib/printf.sh
deleted file mode 100755
index 05f4544e87f9..000000000000
--- a/tools/testing/selftests/lib/printf.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Tests the printf infrastructure using test_printf kernel module.
-$(dirname $0)/../kselftest/module.sh "printf" test_printf
diff --git a/tools/testing/selftests/lib/scanf.sh b/tools/testing/selftests/lib/scanf.sh
deleted file mode 100755
index b59b8ba561c3..000000000000
--- a/tools/testing/selftests/lib/scanf.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Tests the scanf infrastructure using test_scanf kernel module.
-$(dirname $0)/../kselftest/module.sh "scanf" test_scanf
diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-pages.c
index ece37212a8a2..525c50d3ec23 100644
--- a/tools/testing/selftests/mm/guard-pages.c
+++ b/tools/testing/selftests/mm/guard-pages.c
@@ -19,6 +19,8 @@
#include <sys/uio.h>
#include <unistd.h>
+#include "../pidfd/pidfd.h"
+
/*
* Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
*
@@ -50,11 +52,6 @@ static void handle_fatal(int c)
siglongjmp(signal_jmp_buf, c);
}
-static int pidfd_open(pid_t pid, unsigned int flags)
-{
- return syscall(SYS_pidfd_open, pid, flags);
-}
-
static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
size_t n, int advice, unsigned int flags)
{
@@ -370,14 +367,10 @@ TEST_F(guard_pages, multi_vma)
TEST_F(guard_pages, process_madvise)
{
const unsigned long page_size = self->page_size;
- pid_t pid = getpid();
- int pidfd = pidfd_open(pid, 0);
char *ptr_region, *ptr1, *ptr2, *ptr3;
ssize_t count;
struct iovec vec[6];
- ASSERT_NE(pidfd, -1);
-
/* Reserve region to map over. */
ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
MAP_ANON | MAP_PRIVATE, -1, 0);
@@ -425,7 +418,7 @@ TEST_F(guard_pages, process_madvise)
ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);
/* Now guard in one step. */
- count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_INSTALL, 0);
+ count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_INSTALL, 0);
/* OK we don't have permission to do this, skip. */
if (count == -1 && errno == EPERM)
@@ -446,7 +439,7 @@ TEST_F(guard_pages, process_madvise)
ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));
/* Now do the same with unguard... */
- count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_REMOVE, 0);
+ count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_REMOVE, 0);
/* ...and everything should now succeed. */
@@ -463,7 +456,6 @@ TEST_F(guard_pages, process_madvise)
ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
- close(pidfd);
}
/* Assert that unmapping ranges does not leave guard markers behind. */
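
The PIDFD_SELF switch above relies on the sentinel added to the shared pidfd
header in this series; older kernels reject it. A minimal hedged sketch of
self-targeted process_madvise(), assuming a kernel that supports guard pages
(MADV_GUARD_INSTALL is 102 in uapi/asm-generic/mman-common.h):

	#define _GNU_SOURCE
	#include <sys/syscall.h>
	#include <sys/uio.h>
	#include <unistd.h>

	#include "../pidfd/pidfd.h" /* PIDFD_SELF */

	#ifndef MADV_GUARD_INSTALL
	#define MADV_GUARD_INSTALL 102
	#endif

	/* Install guard markers over [addr, addr + len) in our own mm. */
	static ssize_t guard_self(void *addr, size_t len)
	{
		struct iovec vec = { .iov_base = addr, .iov_len = len };

		/* No pidfd_open()/close() dance needed when targeting ourselves. */
		return syscall(__NR_process_madvise, PIDFD_SELF, &vec, 1,
			       MADV_GUARD_INSTALL, 0);
	}
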
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
index 70f65eb320a7..48a000cabc97 100644
--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
@@ -20,6 +20,7 @@
#include <stdarg.h>
#include <linux/mount.h>
+#include "../filesystems/overlayfs/wrappers.h"
#include "../kselftest_harness.h"
#ifndef CLONE_NEWNS
@@ -126,6 +127,26 @@
#endif
#endif
+#ifndef __NR_move_mount
+ #if defined __alpha__
+ #define __NR_move_mount 539
+ #elif defined _MIPS_SIM
+ #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
+ #define __NR_move_mount 4429
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
+ #define __NR_move_mount 6429
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
+ #define __NR_move_mount 5429
+ #endif
+ #elif defined __ia64__
+ #define __NR_move_mount (428 + 1024)
+ #else
+ #define __NR_move_mount 429
+ #endif
+#endif
+
#ifndef MOUNT_ATTR_IDMAP
#define MOUNT_ATTR_IDMAP 0x00100000
#endif
@@ -397,6 +418,10 @@ FIXTURE_SETUP(mount_setattr)
ASSERT_EQ(mkdir("/tmp/B/BB", 0777), 0);
+ ASSERT_EQ(mkdir("/tmp/target1", 0777), 0);
+
+ ASSERT_EQ(mkdir("/tmp/target2", 0777), 0);
+
ASSERT_EQ(mount("testing", "/tmp/B/BB", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
@@ -1506,4 +1531,631 @@ TEST_F(mount_setattr, mount_attr_nosymfollow)
ASSERT_EQ(close(fd), 0);
}
+TEST_F(mount_setattr, open_tree_detached)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "A/AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_subdir, 0);
+ /*
+ * /AA testing tmpfs
+ * `-/AA/B testing tmpfs
+ * `-/AA/B/BB testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_subdir, "B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_subdir, "B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_EQ(move_mount(fd_tree_subdir, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ /*
+ * /tmp/target1 testing tmpfs
+ * `-/tmp/target1/B testing tmpfs
+ * `-/tmp/target1/B/BB testing tmpfs
+ */
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_EQ(move_mount(fd_tree_base, "", -EBADF, "/tmp/target2", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ /*
+ * /tmp/target2 testing tmpfs
+ * |-/tmp/target2/A testing tmpfs
+ * | `-/tmp/target2/A/AA testing tmpfs
+ * | `-/tmp/target2/A/AA/B testing tmpfs
+ * | `-/tmp/target2/A/AA/B/BB testing tmpfs
+ * `-/tmp/target2/B testing ramfs
+ */
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ EXPECT_EQ(close(fd_tree_base), 0);
+ EXPECT_EQ(close(fd_tree_subdir), 0);
+}
+
+TEST_F(mount_setattr, open_tree_detached_fail)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+
+ /*
+ * The origin mount namespace of the anonymous mount namespace
+ * of @fd_tree_base doesn't match the caller's mount namespace
+	 * anymore, so creating another detached mount must fail.
+ */
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "A/AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_LT(fd_tree_subdir, 0);
+ ASSERT_EQ(errno, EINVAL);
+}
+
+TEST_F(mount_setattr, open_tree_detached_fail2)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ EXPECT_EQ(create_and_enter_userns(), 0);
+
+ /*
+ * The caller entered a new user namespace. They will have
+ * CAP_SYS_ADMIN in this user namespace. However, they're still
+ * located in a mount namespace that is owned by an ancestor
+ * user namespace in which they hold no privilege. Creating a
+ * detached mount must thus fail.
+ */
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "A/AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_LT(fd_tree_subdir, 0);
+ ASSERT_EQ(errno, EPERM);
+}
+
+TEST_F(mount_setattr, open_tree_detached_fail3)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ EXPECT_EQ(prepare_unpriv_mountns(), 0);
+
+ /*
+ * The caller entered a new mount namespace. They will have
+ * CAP_SYS_ADMIN in the owning user namespace of their mount
+ * namespace.
+ *
+ * However, the origin mount namespace of the anonymous mount
+ * namespace of @fd_tree_base doesn't match the caller's mount
+	 * namespace anymore, so creating another detached mount must
+	 * fail.
+ */
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "A/AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_LT(fd_tree_subdir, 0);
+ ASSERT_EQ(errno, EINVAL);
+}
+
+TEST_F(mount_setattr, open_tree_subfolder)
+{
+ int fd_context, fd_tmpfs, fd_tree;
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+
+ EXPECT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "subdir", 0755), 0);
+
+ fd_tree = sys_open_tree(fd_tmpfs, "subdir",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree, 0);
+
+ EXPECT_EQ(close(fd_tmpfs), 0);
+
+ ASSERT_EQ(mkdirat(-EBADF, "/mnt/open_tree_subfolder", 0755), 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tree, "", -EBADF, "/mnt/open_tree_subfolder", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ EXPECT_EQ(close(fd_tree), 0);
+
+ ASSERT_EQ(umount2("/mnt/open_tree_subfolder", 0), 0);
+
+ EXPECT_EQ(rmdir("/mnt/open_tree_subfolder"), 0);
+}
+
+TEST_F(mount_setattr, mount_detached_mount_on_detached_mount_then_close)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_subdir, 0);
+ /*
+ * /mnt testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_subdir, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ /*
+ * /mnt testing tmpfs
+ * `-/mnt testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree_subdir, "", fd_tree_base, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+ ASSERT_EQ(statx(fd_tree_subdir, "", AT_EMPTY_PATH, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_NE(move_mount(fd_tree_subdir, "", fd_tree_base, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+
+ EXPECT_EQ(close(fd_tree_base), 0);
+ EXPECT_EQ(close(fd_tree_subdir), 0);
+}
+
+TEST_F(mount_setattr, mount_detached_mount_on_detached_mount_and_attach)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+ __u64 mnt_id = 0;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_subdir, 0);
+ /*
+ * /mnt testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_subdir, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ /*
+ * /mnt testing tmpfs
+ * `-/mnt testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree_subdir, "", fd_tree_base, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+ ASSERT_EQ(statx(fd_tree_subdir, "", AT_EMPTY_PATH, STATX_MNT_ID_UNIQUE, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_TRUE(stx.stx_mask & STATX_MNT_ID_UNIQUE);
+ mnt_id = stx.stx_mnt_id;
+
+ ASSERT_NE(move_mount(fd_tree_subdir, "", fd_tree_base, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+
+ ASSERT_EQ(move_mount(fd_tree_base, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1", 0, STATX_MNT_ID_UNIQUE, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_TRUE(stx.stx_mask & STATX_MNT_ID_UNIQUE);
+ ASSERT_EQ(stx.stx_mnt_id, mnt_id);
+
+ EXPECT_EQ(close(fd_tree_base), 0);
+ EXPECT_EQ(close(fd_tree_subdir), 0);
+}
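+
+/*
+ * For reference, the unique mount ID compared above can be sampled from
+ * ordinary code too, headers permitting (STATX_MNT_ID_UNIQUE assumed to
+ * be defined):
+ *
+ *	struct statx stx;
+ *
+ *	if (statx(-EBADF, "/tmp/target1", 0, STATX_MNT_ID_UNIQUE, &stx) == 0 &&
+ *	    (stx.stx_mask & STATX_MNT_ID_UNIQUE))
+ *		printf("unique mount id: %llu\n",
+ *		       (unsigned long long)stx.stx_mnt_id);
+ */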
+
+TEST_F(mount_setattr, move_mount_detached_fail)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+
+ /* Attach the mount to the caller's mount namespace. */
+ ASSERT_EQ(move_mount(fd_tree_base, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ fd_tree_subdir = sys_open_tree(-EBADF, "/tmp/B",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_subdir, 0);
+ ASSERT_EQ(statx(fd_tree_subdir, "BB", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ /* Not allowed to move an attached mount to a detached mount. */
+ ASSERT_NE(move_mount(fd_tree_base, "", fd_tree_subdir, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+ ASSERT_EQ(errno, EINVAL);
+
+ EXPECT_EQ(close(fd_tree_base), 0);
+ EXPECT_EQ(close(fd_tree_subdir), 0);
+}
+
+TEST_F(mount_setattr, attach_detached_mount_then_umount_then_close)
+{
+ int fd_tree = -EBADF;
+ struct statx stx;
+
+ fd_tree = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree, 0);
+
+ ASSERT_EQ(statx(fd_tree, "A", 0, 0, &stx), 0);
+ /* We copied with AT_RECURSIVE so /mnt/A must be a mountpoint. */
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ /* Attach the mount to the caller's mount namespace. */
+ ASSERT_EQ(move_mount(fd_tree, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_EQ(umount2("/tmp/target1", MNT_DETACH), 0);
+
+ /*
+ * This tests whether dissolve_on_fput() handles a NULL mount
+ * namespace correctly, i.e., that it doesn't splat.
+ */
+ EXPECT_EQ(close(fd_tree), 0);
+}
+
+TEST_F(mount_setattr, mount_detached1_onto_detached2_then_close_detached1_then_mount_detached2_onto_attached)
+{
+ int fd_tree1 = -EBADF, fd_tree2 = -EBADF;
+
+ /*
+ * |-/mnt/A testing tmpfs
+ * `-/mnt/A/AA testing tmpfs
+ * `-/mnt/A/AA/B testing tmpfs
+ * `-/mnt/A/AA/B/BB testing tmpfs
+ */
+ fd_tree1 = sys_open_tree(-EBADF, "/mnt/A",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree1, 0);
+
+ /*
+ * `-/mnt/B testing ramfs
+ */
+ fd_tree2 = sys_open_tree(-EBADF, "/mnt/B",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree2, 0);
+
+ /*
+ * Move the source detached mount tree to the target detached
+ * mount tree. This will move all the mounts in the source mount
+ * tree from the source anonymous mount namespace to the target
+ * anonymous mount namespace.
+ *
+ * The source detached mount tree and the target detached mount
+ * tree now both refer to the same anonymous mount namespace.
+ *
+ * |-"" testing ramfs
+ * `-"" testing tmpfs
+ * `-""/AA testing tmpfs
+ * `-""/AA/B testing tmpfs
+ * `-""/AA/B/BB testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree1, "", fd_tree2, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+
+ /*
+ * The source detached mount tree @fd_tree1 is now an attached
+ * mount, i.e., it has a parent. Specifically, it now has the
+ * root mount of the mount tree of @fd_tree2 as its parent.
+ *
+ * That means we are no longer allowed to attach it as we only
+ * allow attaching the root of an anonymous mount tree, not
+ * random bits and pieces. Verify that the kernel enforces this.
+ */
+ ASSERT_NE(move_mount(fd_tree1, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ /*
+	 * Closing the source detached mount tree must not unmount and
+	 * free the shared anonymous mount namespace. If it did, the
+	 * kernel would quickly yell at us because the anonymous mount
+	 * namespace wouldn't be empty when it's freed.
+ */
+ EXPECT_EQ(close(fd_tree1), 0);
+
+ /*
+ * Attach the mount tree to a non-anonymous mount namespace.
+ * This can only succeed if closing fd_tree1 had proper
+ * semantics and didn't cause the anonymous mount namespace to
+ * be freed. If it did this will trigger a UAF which will be
+ * visible on any KASAN enabled kernel.
+ *
+ * |-/tmp/target1 testing ramfs
+ * `-/tmp/target1 testing tmpfs
+ * `-/tmp/target1/AA testing tmpfs
+ * `-/tmp/target1/AA/B testing tmpfs
+ * `-/tmp/target1/AA/B/BB testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree2, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ EXPECT_EQ(close(fd_tree2), 0);
+}
+
+TEST_F(mount_setattr, two_detached_mounts_referring_to_same_anonymous_mount_namespace)
+{
+ int fd_tree1 = -EBADF, fd_tree2 = -EBADF;
+
+ /*
+ * Copy the following mount tree:
+ *
+ * |-/mnt/A testing tmpfs
+ * `-/mnt/A/AA testing tmpfs
+ * `-/mnt/A/AA/B testing tmpfs
+ * `-/mnt/A/AA/B/BB testing tmpfs
+ */
+ fd_tree1 = sys_open_tree(-EBADF, "/mnt/A",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree1, 0);
+
+ /*
+	 * Create an O_PATH file descriptor with a separate struct file
+	 * that refers to the same detached mount tree as @fd_tree1.
+ */
+ fd_tree2 = sys_open_tree(fd_tree1, "",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(fd_tree2, 0);
+
+ /*
+	 * Attach the detached mount tree, yielding:
+ *
+ * |-/tmp/target1 testing tmpfs
+ * `-/tmp/target1/AA testing tmpfs
+ * `-/tmp/target1/AA/B testing tmpfs
+ * `-/tmp/target1/AA/B/BB testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree2, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ /*
+	 * This must fail as it would mean adding the same mount tree
+	 * into itself.
+ */
+ ASSERT_NE(move_mount(fd_tree1, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+}
+
+TEST_F(mount_setattr, two_detached_subtrees_of_same_anonymous_mount_namespace)
+{
+ int fd_tree1 = -EBADF, fd_tree2 = -EBADF;
+
+ /*
+ * Copy the following mount tree:
+ *
+ * |-/mnt/A testing tmpfs
+ * `-/mnt/A/AA testing tmpfs
+ * `-/mnt/A/AA/B testing tmpfs
+ * `-/mnt/A/AA/B/BB testing tmpfs
+ */
+ fd_tree1 = sys_open_tree(-EBADF, "/mnt/A",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree1, 0);
+
+ /*
+	 * Create an O_PATH file descriptor with a separate struct file that
+	 * refers to a subtree of the same detached mount tree as @fd_tree1.
+ */
+ fd_tree2 = sys_open_tree(fd_tree1, "AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(fd_tree2, 0);
+
+ /*
+ * This must fail as it is only possible to attach the root of a
+ * detached mount tree.
+ */
+ ASSERT_NE(move_mount(fd_tree2, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(move_mount(fd_tree1, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+}
+
+TEST_F(mount_setattr, detached_tree_propagation)
+{
+ int fd_tree = -EBADF;
+ struct statx stx1, stx2, stx3, stx4;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(mount(NULL, "/mnt", NULL, MS_REC | MS_SHARED, NULL), 0);
+
+ /*
+ * Copy the following mount tree:
+ *
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ fd_tree = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree, 0);
+
+ ASSERT_EQ(statx(-EBADF, "/mnt/A", 0, 0, &stx1), 0);
+ ASSERT_EQ(statx(fd_tree, "A", 0, 0, &stx2), 0);
+
+ /*
+	 * Copying the mount tree as done above doesn't alter the
+	 * mounts in any way, so the filesystem mounted on /mnt must be
+ * identical even though the mounts will differ. Use the device
+ * information to verify that. Note that tmpfs will have a 0
+ * major number so comparing the major number is misleading.
+ */
+ ASSERT_EQ(stx1.stx_dev_minor, stx2.stx_dev_minor);
+
+ /* Mount a tmpfs filesystem over /mnt/A. */
+ ASSERT_EQ(mount(NULL, "/mnt/A", "tmpfs", 0, NULL), 0);
+
+ ASSERT_EQ(statx(-EBADF, "/mnt/A", 0, 0, &stx3), 0);
+ ASSERT_EQ(statx(fd_tree, "A", 0, 0, &stx4), 0);
+
+ /*
+ * A new filesystem has been mounted on top of /mnt/A which
+ * means that the device information will be different for any
+ * statx() that was taken from /mnt/A before the mount compared
+ * to one after the mount.
+ *
+	 * Since we already know that the device information in the
+	 * stx1 and stx2 samples is identical, we also know that the
+	 * stx2 and stx3 device information will necessarily differ.
+ */
+ ASSERT_NE(stx1.stx_dev_minor, stx3.stx_dev_minor);
+
+ /*
+ * If mount propagation worked correctly then the tmpfs mount
+ * that was created after the mount namespace was unshared will
+ * have propagated onto /mnt/A in the detached mount tree.
+ *
+	 * Verify that the device information for stx3 and stx4 is
+	 * identical. It is already established that stx3 is different
+ * from both stx1 and stx2 sampled before the tmpfs mount was
+ * done so if stx3 and stx4 are identical the proof is done.
+ */
+ ASSERT_EQ(stx3.stx_dev_minor, stx4.stx_dev_minor);
+
+ EXPECT_EQ(close(fd_tree), 0);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/nolibc/Makefile b/tools/testing/selftests/nolibc/Makefile
index 7d14a7c0cb62..58bcbbd029bc 100644
--- a/tools/testing/selftests/nolibc/Makefile
+++ b/tools/testing/selftests/nolibc/Makefile
@@ -47,6 +47,7 @@ XARCH_riscv = riscv64
XARCH = $(or $(XARCH_$(ARCH)),$(ARCH))
# map from user input variants to their kernel supported architectures
+ARCH_armthumb = arm
ARCH_ppc = powerpc
ARCH_ppc64 = powerpc
ARCH_ppc64le = powerpc
@@ -54,6 +55,7 @@ ARCH_mips32le = mips
ARCH_mips32be = mips
ARCH_riscv32 = riscv
ARCH_riscv64 = riscv
+ARCH_s390x = s390
ARCH := $(or $(ARCH_$(XARCH)),$(XARCH))
# kernel image names by architecture
@@ -62,6 +64,7 @@ IMAGE_x86_64 = arch/x86/boot/bzImage
IMAGE_x86 = arch/x86/boot/bzImage
IMAGE_arm64 = arch/arm64/boot/Image
IMAGE_arm = arch/arm/boot/zImage
+IMAGE_armthumb = arch/arm/boot/zImage
IMAGE_mips32le = vmlinuz
IMAGE_mips32be = vmlinuz
IMAGE_ppc = vmlinux
@@ -70,6 +73,7 @@ IMAGE_ppc64le = arch/powerpc/boot/zImage
IMAGE_riscv = arch/riscv/boot/Image
IMAGE_riscv32 = arch/riscv/boot/Image
IMAGE_riscv64 = arch/riscv/boot/Image
+IMAGE_s390x = arch/s390/boot/bzImage
IMAGE_s390 = arch/s390/boot/bzImage
IMAGE_loongarch = arch/loongarch/boot/vmlinuz.efi
IMAGE = $(objtree)/$(IMAGE_$(XARCH))
@@ -81,19 +85,20 @@ DEFCONFIG_x86_64 = defconfig
DEFCONFIG_x86 = defconfig
DEFCONFIG_arm64 = defconfig
DEFCONFIG_arm = multi_v7_defconfig
+DEFCONFIG_armthumb = multi_v7_defconfig
DEFCONFIG_mips32le = malta_defconfig
-DEFCONFIG_mips32be = malta_defconfig
+DEFCONFIG_mips32be = malta_defconfig generic/eb.config
DEFCONFIG_ppc = pmac32_defconfig
DEFCONFIG_ppc64 = powernv_be_defconfig
DEFCONFIG_ppc64le = powernv_defconfig
DEFCONFIG_riscv = defconfig
DEFCONFIG_riscv32 = rv32_defconfig
DEFCONFIG_riscv64 = defconfig
-DEFCONFIG_s390 = defconfig
+DEFCONFIG_s390x = defconfig
+DEFCONFIG_s390 = defconfig compat.config
DEFCONFIG_loongarch = defconfig
DEFCONFIG = $(DEFCONFIG_$(XARCH))
-EXTRACONFIG_mips32be = -d CONFIG_CPU_LITTLE_ENDIAN -e CONFIG_CPU_BIG_ENDIAN
EXTRACONFIG = $(EXTRACONFIG_$(XARCH))
# optional tests to run (default = all)
@@ -105,6 +110,7 @@ QEMU_ARCH_x86_64 = x86_64
QEMU_ARCH_x86 = x86_64
QEMU_ARCH_arm64 = aarch64
QEMU_ARCH_arm = arm
+QEMU_ARCH_armthumb = arm
QEMU_ARCH_mips32le = mipsel # works with malta_defconfig
QEMU_ARCH_mips32be = mips
QEMU_ARCH_ppc = ppc
@@ -113,6 +119,7 @@ QEMU_ARCH_ppc64le = ppc64
QEMU_ARCH_riscv = riscv64
QEMU_ARCH_riscv32 = riscv32
QEMU_ARCH_riscv64 = riscv64
+QEMU_ARCH_s390x = s390x
QEMU_ARCH_s390 = s390x
QEMU_ARCH_loongarch = loongarch64
QEMU_ARCH = $(QEMU_ARCH_$(XARCH))
@@ -133,6 +140,7 @@ QEMU_ARGS_x86_64 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(
QEMU_ARGS_x86 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_arm64 = -M virt -cpu cortex-a53 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_arm = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_armthumb = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_mips32le = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_mips32be = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_ppc = -M g3beige -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
@@ -141,6 +149,7 @@ QEMU_ARGS_ppc64le = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC
QEMU_ARGS_riscv = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_riscv32 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_riscv64 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_s390x = -M s390-ccw-virtio -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_s390 = -M s390-ccw-virtio -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_loongarch = -M virt -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS = -m 1G $(QEMU_ARGS_$(XARCH)) $(QEMU_ARGS_BIOS) $(QEMU_ARGS_EXTRA)
@@ -156,15 +165,18 @@ Q=@
endif
CFLAGS_i386 = $(call cc-option,-m32)
+CFLAGS_arm = -marm
+CFLAGS_armthumb = -mthumb -march=armv6t2
CFLAGS_ppc = -m32 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
CFLAGS_ppc64 = -m64 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
CFLAGS_ppc64le = -m64 -mlittle-endian -mno-vsx $(call cc-option,-mabi=elfv2)
-CFLAGS_s390 = -m64
+CFLAGS_s390x = -m64
+CFLAGS_s390 = -m31
CFLAGS_mips32le = -EL -mabi=32 -fPIC
CFLAGS_mips32be = -EB -mabi=32
CFLAGS_STACKPROTECTOR ?= $(call cc-option,-mstack-protector-guard=global $(call cc-option,-fstack-protector-all))
CFLAGS ?= -Os -fno-ident -fno-asynchronous-unwind-tables -std=c89 -W -Wall -Wextra \
- $(call cc-option,-fno-stack-protector) \
+ $(call cc-option,-fno-stack-protector) $(call cc-option,-Wmissing-prototypes) \
$(CFLAGS_$(XARCH)) $(CFLAGS_STACKPROTECTOR) $(CFLAGS_EXTRA)
LDFLAGS :=
@@ -220,7 +232,7 @@ all: run
sysroot: sysroot/$(ARCH)/include
-sysroot/$(ARCH)/include:
+sysroot/$(ARCH)/include: | defconfig
$(Q)rm -rf sysroot/$(ARCH) sysroot/sysroot
$(QUIET_MKDIR)mkdir -p sysroot
$(Q)$(MAKE) -C $(srctree) outputmakefile
@@ -264,16 +276,16 @@ initramfs: nolibc-test
$(Q)cp nolibc-test initramfs/init
defconfig:
- $(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) mrproper $(DEFCONFIG) prepare
+ $(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(DEFCONFIG)
$(Q)if [ -n "$(EXTRACONFIG)" ]; then \
$(srctree)/scripts/config --file $(objtree)/.config $(EXTRACONFIG); \
$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) olddefconfig < /dev/null; \
fi
-kernel:
+kernel: | defconfig
$(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(IMAGE_NAME) < /dev/null
-kernel-standalone: initramfs
+kernel-standalone: initramfs | defconfig
$(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(IMAGE_NAME) CONFIG_INITRAMFS_SOURCE=$(CURDIR)/initramfs < /dev/null
# run the tests after building the kernel
diff --git a/tools/testing/selftests/nolibc/nolibc-test-linkage.c b/tools/testing/selftests/nolibc/nolibc-test-linkage.c
index 5ff4c8a1db2a..a7ca8325863f 100644
--- a/tools/testing/selftests/nolibc/nolibc-test-linkage.c
+++ b/tools/testing/selftests/nolibc/nolibc-test-linkage.c
@@ -11,16 +11,16 @@ void *linkage_test_errno_addr(void)
return &errno;
}
-int linkage_test_constructor_test_value;
+int linkage_test_constructor_test_value = 0;
__attribute__((constructor))
static void constructor1(void)
{
- linkage_test_constructor_test_value = 2;
+ linkage_test_constructor_test_value |= 1 << 0;
}
__attribute__((constructor))
static void constructor2(void)
{
- linkage_test_constructor_test_value *= 3;
+ linkage_test_constructor_test_value |= 1 << 1;
}
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index 0e0e3b48a8c3..5884a891c491 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -43,6 +43,8 @@
#endif
#endif
+#pragma GCC diagnostic ignored "-Wmissing-prototypes"
+
#include "nolibc-test-linkage.h"
/* for the type of int_fast16_t and int_fast32_t, musl differs from glibc and nolibc */
@@ -690,14 +692,14 @@ int expect_strtox(int llen, void *func, const char *input, int base, intmax_t ex
__attribute__((constructor))
static void constructor1(void)
{
- constructor_test_value = 1;
+ constructor_test_value |= 1 << 0;
}
__attribute__((constructor))
static void constructor2(int argc, char **argv, char **envp)
{
if (argc && argv && envp)
- constructor_test_value *= 2;
+ constructor_test_value |= 1 << 1;
}
int run_startup(int min, int max)
@@ -736,9 +738,9 @@ int run_startup(int min, int max)
CASE_TEST(environ_HOME); EXPECT_PTRNZ(1, getenv("HOME")); break;
CASE_TEST(auxv_addr); EXPECT_PTRGT(test_auxv != (void *)-1, test_auxv, brk); break;
CASE_TEST(auxv_AT_UID); EXPECT_EQ(1, getauxval(AT_UID), getuid()); break;
- CASE_TEST(constructor); EXPECT_EQ(1, constructor_test_value, 2); break;
+ CASE_TEST(constructor); EXPECT_EQ(is_nolibc, constructor_test_value, 0x3); break;
CASE_TEST(linkage_errno); EXPECT_PTREQ(1, linkage_test_errno_addr(), &errno); break;
- CASE_TEST(linkage_constr); EXPECT_EQ(1, linkage_test_constructor_test_value, 6); break;
+ CASE_TEST(linkage_constr); EXPECT_EQ(1, linkage_test_constructor_test_value, 0x3); break;
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
@@ -767,6 +769,44 @@ int test_getdents64(const char *dir)
return ret;
}
+static int test_dirent(void)
+{
+ int comm = 0, cmdline = 0;
+ struct dirent dirent, *result;
+ DIR *dir;
+ int ret;
+
+ dir = opendir("/proc/self");
+ if (!dir)
+ return 1;
+
+ while (1) {
+ errno = 0;
+ ret = readdir_r(dir, &dirent, &result);
+ if (ret != 0)
+ return 1;
+ if (!result)
+ break;
+
+ if (strcmp(dirent.d_name, "comm") == 0)
+ comm++;
+ else if (strcmp(dirent.d_name, "cmdline") == 0)
+ cmdline++;
+ }
+
+ if (errno)
+ return 1;
+
+ ret = closedir(dir);
+ if (ret)
+ return 1;
+
+ if (comm != 1 || cmdline != 1)
+ return 1;
+
+ return 0;
+}
+
int test_getpagesize(void)
{
int x = getpagesize();
@@ -988,6 +1028,22 @@ int test_rlimit(void)
return 0;
}
+int test_openat(void)
+{
+ int dev, null;
+
+ dev = openat(AT_FDCWD, "/dev", O_DIRECTORY);
+ if (dev < 0)
+ return -1;
+
+ null = openat(dev, "null", O_RDONLY);
+ close(dev);
+ if (null < 0)
+ return -1;
+
+ close(null);
+ return 0;
+}
/* Run syscall tests between IDs <min> and <max>.
* Return 0 on success, non-zero on failure.
@@ -1059,6 +1115,7 @@ int run_syscall(int min, int max)
CASE_TEST(fork); EXPECT_SYSZR(1, test_fork()); break;
CASE_TEST(getdents64_root); EXPECT_SYSNE(1, test_getdents64("/"), -1); break;
CASE_TEST(getdents64_null); EXPECT_SYSER(1, test_getdents64("/dev/null"), -1, ENOTDIR); break;
+ CASE_TEST(directories); EXPECT_SYSZR(proc, test_dirent()); break;
CASE_TEST(gettimeofday_tv); EXPECT_SYSZR(1, gettimeofday(&tv, NULL)); break;
CASE_TEST(gettimeofday_tv_tz);EXPECT_SYSZR(1, gettimeofday(&tv, &tz)); break;
CASE_TEST(getpagesize); EXPECT_SYSZR(1, test_getpagesize()); break;
@@ -1073,8 +1130,9 @@ int run_syscall(int min, int max)
CASE_TEST(mmap_bad); EXPECT_PTRER(1, mmap(NULL, 0, PROT_READ, MAP_PRIVATE, 0, 0), MAP_FAILED, EINVAL); break;
CASE_TEST(munmap_bad); EXPECT_SYSER(1, munmap(NULL, 0), -1, EINVAL); break;
CASE_TEST(mmap_munmap_good); EXPECT_SYSZR(1, test_mmap_munmap()); break;
- CASE_TEST(open_tty); EXPECT_SYSNE(1, tmp = open("/dev/null", 0), -1); if (tmp != -1) close(tmp); break;
- CASE_TEST(open_blah); EXPECT_SYSER(1, tmp = open("/proc/self/blah", 0), -1, ENOENT); if (tmp != -1) close(tmp); break;
+ CASE_TEST(open_tty); EXPECT_SYSNE(1, tmp = open("/dev/null", O_RDONLY), -1); if (tmp != -1) close(tmp); break;
+ CASE_TEST(open_blah); EXPECT_SYSER(1, tmp = open("/proc/self/blah", O_RDONLY), -1, ENOENT); if (tmp != -1) close(tmp); break;
+ CASE_TEST(openat_dir); EXPECT_SYSZR(1, test_openat()); break;
CASE_TEST(pipe); EXPECT_SYSZR(1, test_pipe()); break;
CASE_TEST(poll_null); EXPECT_SYSZR(1, poll(NULL, 0, 0)); break;
CASE_TEST(poll_stdout); EXPECT_SYSNE(1, ({ struct pollfd fds = { 1, POLLOUT, 0}; poll(&fds, 1, 0); }), -1); break;
@@ -1284,6 +1342,73 @@ static int expect_vfprintf(int llen, int c, const char *expected, const char *fm
return ret;
}
+static int test_scanf(void)
+{
+ unsigned long long ull;
+ unsigned long ul;
+ unsigned int u;
+ long long ll;
+ long l;
+ void *p;
+ int i;
+
+ /* return __LINE__ to point to the specific failure */
+
+ /* test EOF */
+ if (sscanf("", "foo") != EOF)
+ return __LINE__;
+
+ /* test simple literal without placeholder */
+ if (sscanf("foo", "foo") != 0)
+ return __LINE__;
+
+ /* test single placeholder */
+ if (sscanf("123", "%d", &i) != 1)
+ return __LINE__;
+
+ if (i != 123)
+ return __LINE__;
+
+	/* test multiple placeholders and separators */
+ if (sscanf("a123b456c0x90", "a%db%uc%p", &i, &u, &p) != 3)
+ return __LINE__;
+
+ if (i != 123)
+ return __LINE__;
+
+ if (u != 456)
+ return __LINE__;
+
+ if (p != (void *)0x90)
+ return __LINE__;
+
+ /* test space handling */
+ if (sscanf("a b1", "a b%d", &i) != 1)
+ return __LINE__;
+
+ if (i != 1)
+ return __LINE__;
+
+ /* test literal percent */
+ if (sscanf("a%1", "a%%%d", &i) != 1)
+ return __LINE__;
+
+ if (i != 1)
+ return __LINE__;
+
+ /* test stdint.h types */
+ if (sscanf("1|2|3|4|5|6",
+ "%d|%ld|%lld|%u|%lu|%llu",
+ &i, &l, &ll, &u, &ul, &ull) != 6)
+ return __LINE__;
+
+ if (i != 1 || l != 2 || ll != 3 ||
+ u != 4 || ul != 5 || ull != 6)
+ return __LINE__;
+
+ return 0;
+}
+
static int run_vfprintf(int min, int max)
{
int test;
@@ -1305,6 +1430,7 @@ static int run_vfprintf(int min, int max)
CASE_TEST(char); EXPECT_VFPRINTF(1, "c", "%c", 'c'); break;
CASE_TEST(hex); EXPECT_VFPRINTF(1, "f", "%x", 0xf); break;
CASE_TEST(pointer); EXPECT_VFPRINTF(3, "0x1", "%p", (void *) 0x1); break;
+ CASE_TEST(scanf); EXPECT_ZR(1, test_scanf()); break;
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
diff --git a/tools/testing/selftests/nolibc/run-tests.sh b/tools/testing/selftests/nolibc/run-tests.sh
index 9c5160c53881..0299a0912d40 100755
--- a/tools/testing/selftests/nolibc/run-tests.sh
+++ b/tools/testing/selftests/nolibc/run-tests.sh
@@ -17,7 +17,16 @@ perform_download=0
test_mode=system
werror=1
llvm=
-archs="i386 x86_64 arm64 arm mips32le mips32be ppc ppc64 ppc64le riscv32 riscv64 s390 loongarch"
+all_archs=(
+ i386 x86_64
+ arm64 arm armthumb
+ mips32le mips32be
+ ppc ppc64 ppc64le
+ riscv32 riscv64
+ s390x s390
+ loongarch
+)
+archs="${all_archs[@]}"
TEMP=$(getopt -o 'j:d:c:b:a:m:pelh' -n "$0" -- "$@")
@@ -94,19 +103,21 @@ fi
crosstool_arch() {
case "$1" in
arm64) echo aarch64;;
+ armthumb) echo arm;;
ppc) echo powerpc;;
ppc64) echo powerpc64;;
ppc64le) echo powerpc64;;
riscv) echo riscv64;;
loongarch) echo loongarch64;;
mips*) echo mips;;
+ s390*) echo s390;;
*) echo "$1";;
esac
}
crosstool_abi() {
case "$1" in
- arm) echo linux-gnueabi;;
+ arm | armthumb) echo linux-gnueabi;;
*) echo linux;;
esac
}
@@ -157,10 +168,6 @@ test_arch() {
fi
MAKE=(make -j"${nproc}" XARCH="${arch}" CROSS_COMPILE="${cross_compile}" LLVM="${llvm}" O="${build_dir}")
- mkdir -p "$build_dir"
- if [ "$test_mode" = "system" ] && [ ! -f "${build_dir}/.config" ]; then
- swallow_output "${MAKE[@]}" defconfig
- fi
case "$test_mode" in
'system')
test_target=run
@@ -173,6 +180,13 @@ test_arch() {
exit 1
esac
printf '%-15s' "$arch:"
+ if [ "$arch" = "s390" ] && ([ "$llvm" = "1" ] || [ "$test_mode" = "user" ]); then
+ echo "Unsupported configuration"
+ return
+ fi
+
+ mkdir -p "$build_dir"
+ swallow_output "${MAKE[@]}" defconfig
swallow_output "${MAKE[@]}" CFLAGS_EXTRA="$CFLAGS_EXTRA" "$test_target" V=1
cp run.out run.out."${arch}"
"${MAKE[@]}" report | grep passed
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
index bf92481f925c..0406a065deb4 100644
--- a/tools/testing/selftests/pidfd/.gitignore
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -8,3 +8,5 @@ pidfd_getfd_test
pidfd_setns_test
pidfd_file_handle_test
pidfd_bind_mount
+pidfd_info_test
+pidfd_exec_helper
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
index 301343a11b62..fcbefc0d77f6 100644
--- a/tools/testing/selftests/pidfd/Makefile
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -3,7 +3,9 @@ CFLAGS += -g $(KHDR_INCLUDES) -pthread -Wall
TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test \
pidfd_poll_test pidfd_wait pidfd_getfd_test pidfd_setns_test \
- pidfd_file_handle_test pidfd_bind_mount
+ pidfd_file_handle_test pidfd_bind_mount pidfd_info_test
+
+TEST_GEN_PROGS_EXTENDED := pidfd_exec_helper
include ../lib.mk
diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
index 0b96ac4b8ce5..cec22aa11cdf 100644
--- a/tools/testing/selftests/pidfd/pidfd.h
+++ b/tools/testing/selftests/pidfd/pidfd.h
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
+#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -50,6 +51,107 @@
#define PIDFD_NONBLOCK O_NONBLOCK
#endif
+#ifndef PIDFD_SELF_THREAD
+#define PIDFD_SELF_THREAD -10000 /* Current thread. */
+#endif
+
+#ifndef PIDFD_SELF_THREAD_GROUP
+#define PIDFD_SELF_THREAD_GROUP -20000 /* Current thread group leader. */
+#endif
+
+#ifndef PIDFD_SELF
+#define PIDFD_SELF PIDFD_SELF_THREAD
+#endif
+
+#ifndef PIDFD_SELF_PROCESS
+#define PIDFD_SELF_PROCESS PIDFD_SELF_THREAD_GROUP
+#endif
+
+#ifndef PIDFS_IOCTL_MAGIC
+#define PIDFS_IOCTL_MAGIC 0xFF
+#endif
+
+#ifndef PIDFD_GET_CGROUP_NAMESPACE
+#define PIDFD_GET_CGROUP_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 1)
+#endif
+
+#ifndef PIDFD_GET_IPC_NAMESPACE
+#define PIDFD_GET_IPC_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 2)
+#endif
+
+#ifndef PIDFD_GET_MNT_NAMESPACE
+#define PIDFD_GET_MNT_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 3)
+#endif
+
+#ifndef PIDFD_GET_NET_NAMESPACE
+#define PIDFD_GET_NET_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 4)
+#endif
+
+#ifndef PIDFD_GET_PID_NAMESPACE
+#define PIDFD_GET_PID_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 5)
+#endif
+
+#ifndef PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE
+#define PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 6)
+#endif
+
+#ifndef PIDFD_GET_TIME_NAMESPACE
+#define PIDFD_GET_TIME_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 7)
+#endif
+
+#ifndef PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE
+#define PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 8)
+#endif
+
+#ifndef PIDFD_GET_USER_NAMESPACE
+#define PIDFD_GET_USER_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 9)
+#endif
+
+#ifndef PIDFD_GET_UTS_NAMESPACE
+#define PIDFD_GET_UTS_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 10)
+#endif
+
+#ifndef PIDFD_GET_INFO
+#define PIDFD_GET_INFO _IOWR(PIDFS_IOCTL_MAGIC, 11, struct pidfd_info)
+#endif
+
+#ifndef PIDFD_INFO_PID
+#define PIDFD_INFO_PID (1UL << 0) /* Always returned, even if not requested */
+#endif
+
+#ifndef PIDFD_INFO_CREDS
+#define PIDFD_INFO_CREDS (1UL << 1) /* Always returned, even if not requested */
+#endif
+
+#ifndef PIDFD_INFO_CGROUPID
+#define PIDFD_INFO_CGROUPID (1UL << 2) /* Always returned if available, even if not requested */
+#endif
+
+#ifndef PIDFD_INFO_EXIT
+#define PIDFD_INFO_EXIT (1UL << 3) /* Always returned if available, even if not requested */
+#endif
+
+#ifndef PIDFD_THREAD
+#define PIDFD_THREAD O_EXCL
+#endif
+
+struct pidfd_info {
+ __u64 mask;
+ __u64 cgroupid;
+ __u32 pid;
+ __u32 tgid;
+ __u32 ppid;
+ __u32 ruid;
+ __u32 rgid;
+ __u32 euid;
+ __u32 egid;
+ __u32 suid;
+ __u32 sgid;
+ __u32 fsuid;
+ __u32 fsgid;
+ __s32 exit_code;
+};
+
/*
* The kernel reserves 300 pids via RESERVED_PIDS in kernel/pid.c
* That means, when it wraps around any pid < 300 will be skipped.
@@ -152,4 +254,11 @@ static inline ssize_t write_nointr(int fd, const void *buf, size_t count)
return ret;
}
+static inline int sys_execveat(int dirfd, const char *pathname,
+ char *const argv[], char *const envp[],
+ int flags)
+{
+ return syscall(__NR_execveat, dirfd, pathname, argv, envp, flags);
+}
+
#endif /* __PIDFD_H */
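
The fallback definitions above mirror the pidfs UAPI: a caller sets pidfd_info.mask to the PIDFD_INFO_* flags it wants and PIDFD_GET_INFO fills in what is available. A hedged, self-contained sketch of the call pattern, assuming the selftest's pidfd.h is on the include path and the kernel supports the ioctl:

	#define _GNU_SOURCE
	#include <linux/types.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include "pidfd.h"

	int main(void)
	{
		/* Open a pidfd for the current process and ask for the cgroup id. */
		int pidfd = syscall(__NR_pidfd_open, getpid(), 0);
		struct pidfd_info info = {
			.mask = PIDFD_INFO_CGROUPID,
		};

		if (pidfd < 0 || ioctl(pidfd, PIDFD_GET_INFO, &info) < 0)
			return 1;

		/* PIDFD_INFO_PID and PIDFD_INFO_CREDS are returned even if not requested. */
		printf("pid=%u tgid=%u ppid=%u euid=%u\n",
		       info.pid, info.tgid, info.ppid, info.euid);
		close(pidfd);
		return 0;
	}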
diff --git a/tools/testing/selftests/pidfd/pidfd_exec_helper.c b/tools/testing/selftests/pidfd/pidfd_exec_helper.c
new file mode 100644
index 000000000000..5516808c95f2
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_exec_helper.c
@@ -0,0 +1,12 @@
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+int main(int argc, char *argv[])
+{
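+	/* pause() returns only if a signal handler ran; treat any return as failure. */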
+ if (pause())
+ _exit(EXIT_FAILURE);
+
+ _exit(EXIT_SUCCESS);
+}
diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
index f062a986e382..f718aac75068 100644
--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
@@ -13,6 +13,7 @@
#include <syscall.h>
#include <sys/wait.h>
#include <sys/mman.h>
+#include <sys/mount.h>
#include "pidfd.h"
#include "../kselftest.h"
diff --git a/tools/testing/selftests/pidfd/pidfd_info_test.c b/tools/testing/selftests/pidfd/pidfd_info_test.c
new file mode 100644
index 000000000000..1758a1b0457b
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_info_test.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <linux/types.h>
+#include <poll.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <linux/kcmp.h>
+#include <sys/stat.h>
+
+#include "pidfd.h"
+#include "../kselftest_harness.h"
+
+FIXTURE(pidfd_info)
+{
+ pid_t child_pid1;
+ int child_pidfd1;
+
+ pid_t child_pid2;
+ int child_pidfd2;
+
+ pid_t child_pid3;
+ int child_pidfd3;
+
+ pid_t child_pid4;
+ int child_pidfd4;
+};
+
+FIXTURE_SETUP(pidfd_info)
+{
+ int ret;
+ int ipc_sockets[2];
+ char c;
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ self->child_pid1 = create_child(&self->child_pidfd1, 0);
+ EXPECT_GE(self->child_pid1, 0);
+
+ if (self->child_pid1 == 0) {
+ close(ipc_sockets[0]);
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0)
+ _exit(EXIT_FAILURE);
+
+ close(ipc_sockets[1]);
+
+ pause();
+ _exit(EXIT_SUCCESS);
+ }
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ /* SIGKILL but don't reap. */
+ EXPECT_EQ(sys_pidfd_send_signal(self->child_pidfd1, SIGKILL, NULL, 0), 0);
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ self->child_pid2 = create_child(&self->child_pidfd2, 0);
+ EXPECT_GE(self->child_pid2, 0);
+
+ if (self->child_pid2 == 0) {
+ close(ipc_sockets[0]);
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0)
+ _exit(EXIT_FAILURE);
+
+ close(ipc_sockets[1]);
+
+ pause();
+ _exit(EXIT_SUCCESS);
+ }
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ /* SIGKILL and reap. */
+ EXPECT_EQ(sys_pidfd_send_signal(self->child_pidfd2, SIGKILL, NULL, 0), 0);
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid2, NULL, WEXITED), 0);
+
+ self->child_pid3 = create_child(&self->child_pidfd3, CLONE_NEWUSER | CLONE_NEWPID);
+ EXPECT_GE(self->child_pid3, 0);
+
+ if (self->child_pid3 == 0)
+ _exit(EXIT_SUCCESS);
+
+ self->child_pid4 = create_child(&self->child_pidfd4, CLONE_NEWUSER | CLONE_NEWPID);
+ EXPECT_GE(self->child_pid4, 0);
+
+ if (self->child_pid4 == 0)
+ _exit(EXIT_SUCCESS);
+
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid4, NULL, WEXITED), 0);
+}
+
+FIXTURE_TEARDOWN(pidfd_info)
+{
+ sys_pidfd_send_signal(self->child_pidfd1, SIGKILL, NULL, 0);
+ if (self->child_pidfd1 >= 0)
+ EXPECT_EQ(0, close(self->child_pidfd1));
+
+ sys_waitid(P_PID, self->child_pid1, NULL, WEXITED);
+
+ sys_pidfd_send_signal(self->child_pidfd2, SIGKILL, NULL, 0);
+ if (self->child_pidfd2 >= 0)
+ EXPECT_EQ(0, close(self->child_pidfd2));
+
+ sys_waitid(P_PID, self->child_pid2, NULL, WEXITED);
+ sys_waitid(P_PID, self->child_pid3, NULL, WEXITED);
+ sys_waitid(P_PID, self->child_pid4, NULL, WEXITED);
+}
+
+TEST_F(pidfd_info, sigkill_exit)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID,
+ };
+
+ /* Process has exited but not been reaped so this must work. */
+ ASSERT_EQ(ioctl(self->child_pidfd1, PIDFD_GET_INFO, &info), 0);
+
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(self->child_pidfd1, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ /* Process has exited but not been reaped, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+}
+
+TEST_F(pidfd_info, sigkill_reaped)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID,
+ };
+
+ /* Process has already been reaped and PIDFD_INFO_EXIT hasn't been set. */
+ ASSERT_NE(ioctl(self->child_pidfd2, PIDFD_GET_INFO, &info), 0);
+ ASSERT_EQ(errno, ESRCH);
+
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(self->child_pidfd2, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_TRUE(WIFSIGNALED(info.exit_code));
+ ASSERT_EQ(WTERMSIG(info.exit_code), SIGKILL);
+}
+
+TEST_F(pidfd_info, success_exit)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID,
+ };
+
+ /* Process has exited but not been reaped so this must work. */
+ ASSERT_EQ(ioctl(self->child_pidfd3, PIDFD_GET_INFO, &info), 0);
+
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(self->child_pidfd3, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ /* Process has exited but not been reaped, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+}
+
+TEST_F(pidfd_info, success_reaped)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID,
+ };
+
+ /* Process has already been reaped and PIDFD_INFO_EXIT hasn't been set. */
+ ASSERT_NE(ioctl(self->child_pidfd4, PIDFD_GET_INFO, &info), 0);
+ ASSERT_EQ(errno, ESRCH);
+
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(self->child_pidfd4, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_TRUE(WIFEXITED(info.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info.exit_code), 0);
+}
+
+TEST_F(pidfd_info, success_reaped_poll)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT,
+ };
+ struct pollfd fds = {};
+ int nevents;
+
+ fds.events = POLLIN;
+ fds.fd = self->child_pidfd2;
+
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ ASSERT_TRUE(!!(fds.revents & POLLIN));
+ ASSERT_TRUE(!!(fds.revents & POLLHUP));
+
+ ASSERT_EQ(ioctl(self->child_pidfd2, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_TRUE(WIFSIGNALED(info.exit_code));
+ ASSERT_EQ(WTERMSIG(info.exit_code), SIGKILL);
+}
+
+static void *pidfd_info_pause_thread(void *arg)
+{
+ pid_t pid_thread = gettid();
+ int ipc_socket = *(int *)arg;
+
+ /* Inform the grand-parent what the tid of this thread is. */
+ if (write_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ close(ipc_socket);
+
+	/* Sleep until we're killed. */
+ pause();
+ return NULL;
+}
+
+TEST_F(pidfd_info, thread_group)
+{
+ pid_t pid_leader, pid_poller, pid_thread;
+ pthread_t thread;
+ int nevents, pidfd_leader, pidfd_thread, pidfd_leader_thread, ret;
+ int ipc_sockets[2];
+ struct pollfd fds = {};
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT,
+ }, info2;
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ pid_leader = create_child(&pidfd_leader, 0);
+ EXPECT_GE(pid_leader, 0);
+
+ if (pid_leader == 0) {
+ close(ipc_sockets[0]);
+
+ /* The thread will outlive the thread-group leader. */
+ if (pthread_create(&thread, NULL, pidfd_info_pause_thread, &ipc_sockets[1]))
+ syscall(__NR_exit, EXIT_FAILURE);
+
+ /* Make the thread-group leader exit prematurely. */
+ syscall(__NR_exit, EXIT_SUCCESS);
+ }
+
+ /*
+ * Opening a PIDFD_THREAD aka thread-specific pidfd based on a
+ * thread-group leader must succeed.
+ */
+ pidfd_leader_thread = sys_pidfd_open(pid_leader, PIDFD_THREAD);
+ ASSERT_GE(pidfd_leader_thread, 0);
+
+ pid_poller = fork();
+ ASSERT_GE(pid_poller, 0);
+ if (pid_poller == 0) {
+ /*
+ * We can't poll and wait for the old thread-group
+ * leader to exit using a thread-specific pidfd. The
+ * thread-group leader exited prematurely and
+ * notification is delayed until all subthreads have
+ * exited.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+		nevents = poll(&fds, 1, 10000 /* wait 10 seconds */);
+ if (nevents != 0)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLIN)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLHUP)
+ _exit(EXIT_FAILURE);
+ _exit(EXIT_SUCCESS);
+ }
+
+ /* Retrieve the tid of the thread. */
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ /* Opening a thread as a thread-group leader must fail. */
+ pidfd_thread = sys_pidfd_open(pid_thread, 0);
+ ASSERT_LT(pidfd_thread, 0);
+
+ /* Opening a thread as a PIDFD_THREAD must succeed. */
+ pidfd_thread = sys_pidfd_open(pid_thread, PIDFD_THREAD);
+ ASSERT_GE(pidfd_thread, 0);
+
+ ASSERT_EQ(wait_for_pid(pid_poller), 0);
+
+ /*
+ * Note that pidfd_leader is a thread-group pidfd, so polling on it
+	 * would only notify us once all threads in the thread-group have
+ * exited. So we can't poll before we have taken down the whole
+ * thread-group.
+ */
+
+ /* Get PIDFD_GET_INFO using the thread-group leader pidfd. */
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ /* Process has exited but not been reaped, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info.pid, pid_leader);
+
+ /*
+ * Now retrieve the same info using the thread specific pidfd
+ * for the thread-group leader.
+ */
+ info2.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader_thread, PIDFD_GET_INFO, &info2), 0);
+ ASSERT_TRUE(!!(info2.mask & PIDFD_INFO_CREDS));
+ /* Process has exited but not been reaped, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info2.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info2.pid, pid_leader);
+
+ /* Now try the thread-specific pidfd. */
+ ASSERT_EQ(ioctl(pidfd_thread, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ /* The thread hasn't exited, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info.pid, pid_thread);
+
+ /*
+ * Take down the whole thread-group. The thread-group leader
+ * exited successfully but the thread will now be SIGKILLed.
+ * This must be reflected in the recorded exit information.
+ */
+ EXPECT_EQ(sys_pidfd_send_signal(pidfd_leader, SIGKILL, NULL, 0), 0);
+ EXPECT_EQ(sys_waitid(P_PIDFD, pidfd_leader, NULL, WEXITED), 0);
+
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ ASSERT_TRUE(!!(fds.revents & POLLIN));
+ /* The thread-group leader has been reaped. */
+ ASSERT_TRUE(!!(fds.revents & POLLHUP));
+
+ /*
+ * Retrieve exit information for the thread-group leader via the
+ * thread-group leader pidfd.
+ */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ /* The thread-group leader exited successfully. Only the specific thread was SIGKILLed. */
+ ASSERT_TRUE(WIFEXITED(info.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info.exit_code), 0);
+
+ /*
+ * Retrieve exit information for the thread-group leader via the
+ * thread-specific pidfd.
+ */
+ info2.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader_thread, PIDFD_GET_INFO, &info2), 0);
+ ASSERT_FALSE(!!(info2.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info2.mask & PIDFD_INFO_EXIT));
+
+ /* The thread-group leader exited successfully. Only the specific thread was SIGKILLed. */
+ ASSERT_TRUE(WIFEXITED(info2.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info2.exit_code), 0);
+
+ /* Retrieve exit information for the thread. */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_thread, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+
+ /* The thread got SIGKILLed. */
+ ASSERT_TRUE(WIFSIGNALED(info.exit_code));
+ ASSERT_EQ(WTERMSIG(info.exit_code), SIGKILL);
+
+ EXPECT_EQ(close(pidfd_leader), 0);
+ EXPECT_EQ(close(pidfd_thread), 0);
+}
+
+static void *pidfd_info_thread_exec(void *arg)
+{
+ pid_t pid_thread = gettid();
+ int ipc_socket = *(int *)arg;
+
+ /* Inform the grand-parent what the tid of this thread is. */
+ if (write_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ if (read_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ close(ipc_socket);
+
+ sys_execveat(AT_FDCWD, "pidfd_exec_helper", NULL, NULL, 0);
+ return NULL;
+}
+
+TEST_F(pidfd_info, thread_group_exec)
+{
+ pid_t pid_leader, pid_poller, pid_thread;
+ pthread_t thread;
+ int nevents, pidfd_leader, pidfd_leader_thread, pidfd_thread, ret;
+ int ipc_sockets[2];
+ struct pollfd fds = {};
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT,
+ };
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ pid_leader = create_child(&pidfd_leader, 0);
+ EXPECT_GE(pid_leader, 0);
+
+ if (pid_leader == 0) {
+ close(ipc_sockets[0]);
+
+ /* The thread will outlive the thread-group leader. */
+ if (pthread_create(&thread, NULL, pidfd_info_thread_exec, &ipc_sockets[1]))
+ syscall(__NR_exit, EXIT_FAILURE);
+
+ /* Make the thread-group leader exit prematurely. */
+ syscall(__NR_exit, EXIT_SUCCESS);
+ }
+
+ /* Open a thread-specific pidfd for the thread-group leader. */
+ pidfd_leader_thread = sys_pidfd_open(pid_leader, PIDFD_THREAD);
+ ASSERT_GE(pidfd_leader_thread, 0);
+
+ pid_poller = fork();
+ ASSERT_GE(pid_poller, 0);
+ if (pid_poller == 0) {
+ /*
+ * We can't poll and wait for the old thread-group
+ * leader to exit using a thread-specific pidfd. The
+ * thread-group leader exited prematurely and
+ * notification is delayed until all subthreads have
+ * exited.
+ *
+		 * When the thread has execed it will have taken over the
+		 * old thread-group leader's struct pid. Calling poll after
+		 * the thread has execed will thus block again because a
+		 * new thread-group has started.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+		nevents = poll(&fds, 1, 10000 /* wait 10 seconds */);
+ if (nevents != 0)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLIN)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLHUP)
+ _exit(EXIT_FAILURE);
+ _exit(EXIT_SUCCESS);
+ }
+
+ /* Retrieve the tid of the thread. */
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+
+ /* Opening a thread as a PIDFD_THREAD must succeed. */
+ pidfd_thread = sys_pidfd_open(pid_thread, PIDFD_THREAD);
+ ASSERT_GE(pidfd_thread, 0);
+
+ /* Now that we've opened a thread-specific pidfd the thread can exec. */
+ ASSERT_EQ(write_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ ASSERT_EQ(wait_for_pid(pid_poller), 0);
+
+ /* Wait until the kernel has SIGKILLed the thread. */
+ fds.events = POLLHUP;
+ fds.fd = pidfd_thread;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ /* The thread has been reaped. */
+ ASSERT_TRUE(!!(fds.revents & POLLHUP));
+
+ /* Retrieve thread-specific exit info from pidfd. */
+ ASSERT_EQ(ioctl(pidfd_thread, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ /*
+	 * While the kernel will have SIGKILLed the whole thread-group
+	 * during exec, the individual threads still exit cleanly.
+ */
+ ASSERT_TRUE(WIFEXITED(info.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info.exit_code), 0);
+
+ /*
+ * The thread-group leader is still alive, the thread has taken
+ * over its struct pid and thus its pid number.
+ */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info.pid, pid_leader);
+
+ /* Take down the thread-group leader. */
+ EXPECT_EQ(sys_pidfd_send_signal(pidfd_leader, SIGKILL, NULL, 0), 0);
+
+ /*
+	 * After the exec we're dealing with an empty thread-group, so now
+ * we must see an exit notification on the thread-specific pidfd
+ * for the thread-group leader as there's no subthread that can
+ * revive the struct pid.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ ASSERT_TRUE(!!(fds.revents & POLLIN));
+ ASSERT_FALSE(!!(fds.revents & POLLHUP));
+
+ EXPECT_EQ(sys_waitid(P_PIDFD, pidfd_leader, NULL, WEXITED), 0);
+
+ /* Retrieve exit information for the thread-group leader. */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+
+ EXPECT_EQ(close(pidfd_leader), 0);
+ EXPECT_EQ(close(pidfd_thread), 0);
+}
+
+static void *pidfd_info_thread_exec_sane(void *arg)
+{
+ pid_t pid_thread = gettid();
+ int ipc_socket = *(int *)arg;
+
+ /* Inform the grand-parent what the tid of this thread is. */
+ if (write_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ if (read_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ close(ipc_socket);
+
+ sys_execveat(AT_FDCWD, "pidfd_exec_helper", NULL, NULL, 0);
+ return NULL;
+}
+
+TEST_F(pidfd_info, thread_group_exec_thread)
+{
+ pid_t pid_leader, pid_poller, pid_thread;
+ pthread_t thread;
+ int nevents, pidfd_leader, pidfd_leader_thread, pidfd_thread, ret;
+ int ipc_sockets[2];
+ struct pollfd fds = {};
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT,
+ };
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ pid_leader = create_child(&pidfd_leader, 0);
+ EXPECT_GE(pid_leader, 0);
+
+ if (pid_leader == 0) {
+ close(ipc_sockets[0]);
+
+ /* The thread will outlive the thread-group leader. */
+ if (pthread_create(&thread, NULL, pidfd_info_thread_exec_sane, &ipc_sockets[1]))
+ syscall(__NR_exit, EXIT_FAILURE);
+
+ /*
+ * Pause the thread-group leader. It will be killed once
+ * the subthread execs.
+ */
+ pause();
+ syscall(__NR_exit, EXIT_SUCCESS);
+ }
+
+ /* Retrieve the tid of the thread. */
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+
+ /* Opening a thread as a PIDFD_THREAD must succeed. */
+ pidfd_thread = sys_pidfd_open(pid_thread, PIDFD_THREAD);
+ ASSERT_GE(pidfd_thread, 0);
+
+ /* Open a thread-specific pidfd for the thread-group leader. */
+ pidfd_leader_thread = sys_pidfd_open(pid_leader, PIDFD_THREAD);
+ ASSERT_GE(pidfd_leader_thread, 0);
+
+ pid_poller = fork();
+ ASSERT_GE(pid_poller, 0);
+ if (pid_poller == 0) {
+ /*
+ * The subthread will now exec. The struct pid of the old
+ * thread-group leader will be assumed by the subthread which
+		 * becomes the new thread-group leader, so no exit notification
+		 * should be generated. Wait 10 seconds and call it a success
+ * if no notification has been received.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+		nevents = poll(&fds, 1, 10000 /* wait 10 seconds */);
+ if (nevents != 0)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLIN)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLHUP)
+ _exit(EXIT_FAILURE);
+ _exit(EXIT_SUCCESS);
+ }
+
+ /* Now that we've opened a thread-specific pidfd the thread can exec. */
+ ASSERT_EQ(write_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+ ASSERT_EQ(wait_for_pid(pid_poller), 0);
+
+ /* Wait until the kernel has SIGKILLed the thread. */
+ fds.events = POLLHUP;
+ fds.fd = pidfd_thread;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ /* The thread has been reaped. */
+ ASSERT_TRUE(!!(fds.revents & POLLHUP));
+
+ /* Retrieve thread-specific exit info from pidfd. */
+ ASSERT_EQ(ioctl(pidfd_thread, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ /*
+	 * While the kernel will have SIGKILLed the whole thread-group
+	 * during exec, the individual threads still exit cleanly.
+ */
+ ASSERT_TRUE(WIFEXITED(info.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info.exit_code), 0);
+
+ /*
+ * The thread-group leader is still alive, the thread has taken
+ * over its struct pid and thus its pid number.
+ */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info.pid, pid_leader);
+
+ /* Take down the thread-group leader. */
+ EXPECT_EQ(sys_pidfd_send_signal(pidfd_leader, SIGKILL, NULL, 0), 0);
+
+ /*
+	 * After the exec we're dealing with an empty thread-group, so now
+ * we must see an exit notification on the thread-specific pidfd
+ * for the thread-group leader as there's no subthread that can
+ * revive the struct pid.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ ASSERT_TRUE(!!(fds.revents & POLLIN));
+ ASSERT_FALSE(!!(fds.revents & POLLHUP));
+
+ EXPECT_EQ(sys_waitid(P_PIDFD, pidfd_leader, NULL, WEXITED), 0);
+
+ /* Retrieve exit information for the thread-group leader. */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+
+ EXPECT_EQ(close(pidfd_leader), 0);
+ EXPECT_EQ(close(pidfd_thread), 0);
+}
+
+TEST_HARNESS_MAIN
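
The tests above pin down a simple lifecycle: PIDFD_GET_INFO fails with ESRCH once the target has been reaped unless PIDFD_INFO_EXIT is requested, and a reaped target makes the pidfd report POLLIN | POLLHUP. A hedged standalone sketch of that flow (kill and reap a child, then read the exit code back through the pidfd; assumes the selftest's pidfd.h):

	#define _GNU_SOURCE
	#include <signal.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <sys/wait.h>
	#include <unistd.h>
	#include "pidfd.h"

	int main(void)
	{
		struct pidfd_info info = {
			.mask = PIDFD_INFO_EXIT,
		};
		pid_t pid = fork();
		int pidfd;

		if (pid == 0) {
			pause();
			_exit(0);
		}

		pidfd = syscall(__NR_pidfd_open, pid, 0);
		kill(pid, SIGKILL);
		waitpid(pid, NULL, 0);	/* reap, so the exit code is recorded in pidfs */

		if (pidfd >= 0 && ioctl(pidfd, PIDFD_GET_INFO, &info) == 0 &&
		    (info.mask & PIDFD_INFO_EXIT) && WIFSIGNALED(info.exit_code))
			printf("child killed by signal %d\n", WTERMSIG(info.exit_code));

		close(pidfd);
		return 0;
	}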
diff --git a/tools/testing/selftests/pidfd/pidfd_open_test.c b/tools/testing/selftests/pidfd/pidfd_open_test.c
index ce413a221bac..cd3de40e4977 100644
--- a/tools/testing/selftests/pidfd/pidfd_open_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_open_test.c
@@ -22,32 +22,6 @@
#include "pidfd.h"
#include "../kselftest.h"
-#ifndef PIDFS_IOCTL_MAGIC
-#define PIDFS_IOCTL_MAGIC 0xFF
-#endif
-
-#ifndef PIDFD_GET_INFO
-#define PIDFD_GET_INFO _IOWR(PIDFS_IOCTL_MAGIC, 11, struct pidfd_info)
-#define PIDFD_INFO_CGROUPID (1UL << 0)
-
-struct pidfd_info {
- __u64 request_mask;
- __u64 cgroupid;
- __u32 pid;
- __u32 tgid;
- __u32 ppid;
- __u32 ruid;
- __u32 rgid;
- __u32 euid;
- __u32 egid;
- __u32 suid;
- __u32 sgid;
- __u32 fsuid;
- __u32 fsgid;
- __u32 spare0[1];
-};
-#endif
-
static int safe_int(const char *numstr, int *converted)
{
char *err = NULL;
@@ -148,7 +122,7 @@ out:
int main(int argc, char **argv)
{
struct pidfd_info info = {
- .request_mask = PIDFD_INFO_CGROUPID,
+ .mask = PIDFD_INFO_CGROUPID,
};
int pidfd = -1, ret = 1;
pid_t pid;
@@ -227,7 +201,7 @@ int main(int argc, char **argv)
getegid(), info.sgid);
goto on_error;
}
- if ((info.request_mask & PIDFD_INFO_CGROUPID) && info.cgroupid == 0) {
+ if ((info.mask & PIDFD_INFO_CGROUPID) && info.cgroupid == 0) {
ksft_print_msg("cgroupid should not be 0 when PIDFD_INFO_CGROUPID is set\n");
goto on_error;
}
diff --git a/tools/testing/selftests/pidfd/pidfd_setns_test.c b/tools/testing/selftests/pidfd/pidfd_setns_test.c
index 222f8131283b..e6a079b3d5e2 100644
--- a/tools/testing/selftests/pidfd/pidfd_setns_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_setns_test.c
@@ -16,55 +16,10 @@
#include <unistd.h>
#include <sys/socket.h>
#include <sys/stat.h>
-#include <linux/ioctl.h>
#include "pidfd.h"
#include "../kselftest_harness.h"
-#ifndef PIDFS_IOCTL_MAGIC
-#define PIDFS_IOCTL_MAGIC 0xFF
-#endif
-
-#ifndef PIDFD_GET_CGROUP_NAMESPACE
-#define PIDFD_GET_CGROUP_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 1)
-#endif
-
-#ifndef PIDFD_GET_IPC_NAMESPACE
-#define PIDFD_GET_IPC_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 2)
-#endif
-
-#ifndef PIDFD_GET_MNT_NAMESPACE
-#define PIDFD_GET_MNT_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 3)
-#endif
-
-#ifndef PIDFD_GET_NET_NAMESPACE
-#define PIDFD_GET_NET_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 4)
-#endif
-
-#ifndef PIDFD_GET_PID_NAMESPACE
-#define PIDFD_GET_PID_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 5)
-#endif
-
-#ifndef PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE
-#define PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 6)
-#endif
-
-#ifndef PIDFD_GET_TIME_NAMESPACE
-#define PIDFD_GET_TIME_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 7)
-#endif
-
-#ifndef PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE
-#define PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 8)
-#endif
-
-#ifndef PIDFD_GET_USER_NAMESPACE
-#define PIDFD_GET_USER_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 9)
-#endif
-
-#ifndef PIDFD_GET_UTS_NAMESPACE
-#define PIDFD_GET_UTS_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 10)
-#endif
-
enum {
PIDFD_NS_USER,
PIDFD_NS_MNT,
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index e9728e86b4f2..fcd85cad9f18 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -42,12 +42,41 @@ static pid_t pidfd_clone(int flags, int *pidfd, int (*fn)(void *))
#endif
}
-static int signal_received;
+static pthread_t signal_received;
static void set_signal_received_on_sigusr1(int sig)
{
if (sig == SIGUSR1)
- signal_received = 1;
+ signal_received = pthread_self();
+}
+
+static int send_signal(int pidfd)
+{
+ int ret = 0;
+
+ if (sys_pidfd_send_signal(pidfd, SIGUSR1, NULL, 0) < 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (signal_received != pthread_self()) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ signal_received = 0;
+ return ret;
+}
+
+static void *send_signal_worker(void *arg)
+{
+ int pidfd = (int)(intptr_t)arg;
+ int ret;
+
+ /* We forward any errors for the caller to handle. */
+ ret = send_signal(pidfd);
+ return (void *)(intptr_t)ret;
}
/*
@@ -56,8 +85,11 @@ static void set_signal_received_on_sigusr1(int sig)
*/
static int test_pidfd_send_signal_simple_success(void)
{
- int pidfd, ret;
+ int pidfd;
const char *test_name = "pidfd_send_signal send SIGUSR1";
+ pthread_t thread;
+ void *thread_res;
+ int err;
if (!have_pidfd_send_signal) {
ksft_test_result_skip(
@@ -66,25 +98,45 @@ static int test_pidfd_send_signal_simple_success(void)
return 0;
}
+ signal(SIGUSR1, set_signal_received_on_sigusr1);
+
+ /* Try sending a signal to ourselves via /proc/self. */
pidfd = open("/proc/self", O_DIRECTORY | O_CLOEXEC);
if (pidfd < 0)
ksft_exit_fail_msg(
"%s test: Failed to open process file descriptor\n",
test_name);
+ err = send_signal(pidfd);
+ if (err)
+ ksft_exit_fail_msg(
+ "%s test: Error %d on sending pidfd signal\n",
+ test_name, err);
+ close(pidfd);
- signal(SIGUSR1, set_signal_received_on_sigusr1);
+ /* Now try the same thing only using PIDFD_SELF_THREAD_GROUP. */
+ err = send_signal(PIDFD_SELF_THREAD_GROUP);
+ if (err)
+ ksft_exit_fail_msg(
+ "%s test: Error %d on PIDFD_SELF_THREAD_GROUP signal\n",
+ test_name, err);
- ret = sys_pidfd_send_signal(pidfd, SIGUSR1, NULL, 0);
- close(pidfd);
- if (ret < 0)
- ksft_exit_fail_msg("%s test: Failed to send signal\n",
+ /*
+ * Now try the same thing in a thread and assert thread ID is equal to
+ * worker thread ID.
+ */
+ if (pthread_create(&thread, NULL, send_signal_worker,
+ (void *)(intptr_t)PIDFD_SELF_THREAD))
+ ksft_exit_fail_msg("%s test: Failed to create thread\n",
test_name);
-
- if (signal_received != 1)
- ksft_exit_fail_msg("%s test: Failed to receive signal\n",
+ if (pthread_join(thread, &thread_res))
+ ksft_exit_fail_msg("%s test: Failed to join thread\n",
test_name);
+ err = (int)(intptr_t)thread_res;
+ if (err)
+ ksft_exit_fail_msg(
+ "%s test: Error %d on PIDFD_SELF_THREAD signal\n",
+ test_name, err);
- signal_received = 0;
ksft_test_result_pass("%s test: Sent signal\n", test_name);
return 0;
}
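
The reworked test also exercises the new PIDFD_SELF_THREAD / PIDFD_SELF_THREAD_GROUP sentinels, which let a task signal itself without opening a pidfd first. A hedged sketch of that shorthand, reusing the sys_pidfd_send_signal() wrapper from the selftest's pidfd.h and assuming a kernel that accepts the sentinels:

	#define _GNU_SOURCE
	#include <signal.h>
	#include <stdio.h>
	#include "pidfd.h"

	static volatile sig_atomic_t got_usr1;

	static void on_usr1(int sig)
	{
		got_usr1 = 1;
	}

	int main(void)
	{
		signal(SIGUSR1, on_usr1);

		/* No pidfd_open() needed: the sentinel targets the calling thread. */
		if (sys_pidfd_send_signal(PIDFD_SELF_THREAD, SIGUSR1, NULL, 0) < 0)
			return 1;

		/* The signal is delivered before the syscall returns to userspace. */
		printf("received: %d\n", (int)got_usr1);
		return !got_usr1;
	}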
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index 011762224600..f4531327b8e7 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -172,6 +172,7 @@ auto-test-targets := \
maximal \
maybe_null \
minimal \
+ numa \
prog_run \
reload_loop \
select_cpu_dfl \
diff --git a/tools/testing/selftests/sched_ext/numa.bpf.c b/tools/testing/selftests/sched_ext/numa.bpf.c
new file mode 100644
index 000000000000..a79d86ed54a1
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/numa.bpf.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A scheduler that validates the behavior of the NUMA-aware
+ * functionalities.
+ *
+ * The scheduler creates a separate DSQ for each NUMA node, ensuring tasks
+ * are exclusively processed by CPUs within their respective nodes. Idle
+ * CPUs are selected only within the same node, so task migration can only
+ * occur between CPUs belonging to the same node.
+ *
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+const volatile unsigned int __COMPAT_SCX_PICK_IDLE_IN_NODE;
+
+static bool is_cpu_idle(s32 cpu, int node)
+{
+ const struct cpumask *idle_cpumask;
+ bool idle;
+
+ idle_cpumask = __COMPAT_scx_bpf_get_idle_cpumask_node(node);
+ idle = bpf_cpumask_test_cpu(cpu, idle_cpumask);
+ scx_bpf_put_cpumask(idle_cpumask);
+
+ return idle;
+}
+
+s32 BPF_STRUCT_OPS(numa_select_cpu,
+ struct task_struct *p, s32 prev_cpu, u64 wake_flags)
+{
+ int node = __COMPAT_scx_bpf_cpu_node(scx_bpf_task_cpu(p));
+ s32 cpu;
+
+ /*
+ * We could just use __COMPAT_scx_bpf_pick_any_cpu_node() here,
+ * since it already tries to pick an idle CPU within the node
+ * first, but let's use both functions for better testing coverage.
+ */
+ cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node,
+ __COMPAT_SCX_PICK_IDLE_IN_NODE);
+ if (cpu < 0)
+ cpu = __COMPAT_scx_bpf_pick_any_cpu_node(p->cpus_ptr, node,
+ __COMPAT_SCX_PICK_IDLE_IN_NODE);
+
+ if (is_cpu_idle(cpu, node))
+ scx_bpf_error("CPU %d should be marked as busy", cpu);
+
+ if (__COMPAT_scx_bpf_cpu_node(cpu) != node)
+ scx_bpf_error("CPU %d should be in node %d", cpu, node);
+
+ return cpu;
+}
+
+void BPF_STRUCT_OPS(numa_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ int node = __COMPAT_scx_bpf_cpu_node(scx_bpf_task_cpu(p));
+
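+	/* numa_init() created one DSQ per node, using the node id as DSQ id. */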
+ scx_bpf_dsq_insert(p, node, SCX_SLICE_DFL, enq_flags);
+}
+
+void BPF_STRUCT_OPS(numa_dispatch, s32 cpu, struct task_struct *prev)
+{
+ int node = __COMPAT_scx_bpf_cpu_node(cpu);
+
+ scx_bpf_dsq_move_to_local(node);
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(numa_init)
+{
+ int node, err;
+
+ bpf_for(node, 0, __COMPAT_scx_bpf_nr_node_ids()) {
+ err = scx_bpf_create_dsq(node, node);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(numa_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops numa_ops = {
+ .select_cpu = (void *)numa_select_cpu,
+ .enqueue = (void *)numa_enqueue,
+ .dispatch = (void *)numa_dispatch,
+ .init = (void *)numa_init,
+ .exit = (void *)numa_exit,
+ .name = "numa",
+};
diff --git a/tools/testing/selftests/sched_ext/numa.c b/tools/testing/selftests/sched_ext/numa.c
new file mode 100644
index 000000000000..b060c3b65c82
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/numa.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "numa.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct numa *skel;
+
+ skel = numa__open();
+ SCX_FAIL_IF(!skel, "Failed to open");
+ SCX_ENUM_INIT(skel);
+ skel->rodata->__COMPAT_SCX_PICK_IDLE_IN_NODE = SCX_PICK_IDLE_IN_NODE;
+ skel->struct_ops.numa_ops->flags = SCX_OPS_BUILTIN_IDLE_PER_NODE;
+ SCX_FAIL_IF(numa__load(skel), "Failed to load skel");
+
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct numa *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.numa_ops);
+ SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+ /* Just sleeping is fine, plenty of scheduling events happening */
+ sleep(1);
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct numa *skel = ctx;
+
+ numa__destroy(skel);
+}
+
+struct scx_test numa = {
+ .name = "numa",
+ .description = "Verify NUMA-aware functionalities",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&numa)
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 14ba51b52095..b2f76a52215a 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -155,6 +155,12 @@ struct seccomp_data {
# endif
#endif
+#ifndef __NR_uretprobe
+# if defined(__x86_64__)
+# define __NR_uretprobe 335
+# endif
+#endif
+
#ifndef SECCOMP_SET_MODE_STRICT
#define SECCOMP_SET_MODE_STRICT 0
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ba0327e2d0d3..16bee3b78219 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4224,15 +4224,14 @@ static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
if (fd < 0)
return fd;
- file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
+ file = anon_inode_getfile_fmode(name, &kvm_vcpu_stats_fops, vcpu,
+ O_RDONLY, FMODE_PREAD);
if (IS_ERR(file)) {
put_unused_fd(fd);
return PTR_ERR(file);
}
kvm_get_kvm(vcpu->kvm);
-
- file->f_mode |= FMODE_PREAD;
fd_install(fd, file);
return fd;
@@ -5020,16 +5019,14 @@ static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
if (fd < 0)
return fd;
- file = anon_inode_getfile("kvm-vm-stats",
- &kvm_vm_stats_fops, kvm, O_RDONLY);
+ file = anon_inode_getfile_fmode("kvm-vm-stats",
+ &kvm_vm_stats_fops, kvm, O_RDONLY, FMODE_PREAD);
if (IS_ERR(file)) {
put_unused_fd(fd);
return PTR_ERR(file);
}
kvm_get_kvm(kvm);
-
- file->f_mode |= FMODE_PREAD;
fd_install(fd, file);
return fd;