Diffstat (limited to 'include')
-rw-r--r--include/acpi/acpi_bus.h9
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl3.h7
-rw-r--r--include/acpi/actypes.h14
-rw-r--r--include/asm-generic/Kbuild1
-rw-r--r--include/asm-generic/atomic.h2
-rw-r--r--include/asm-generic/barrier.h29
-rw-r--r--include/asm-generic/bug.h1
-rw-r--r--include/asm-generic/checksum.h6
-rw-r--r--include/asm-generic/io.h20
-rw-r--r--include/asm-generic/iomap.h28
-rw-r--r--include/asm-generic/kvm_types.h5
-rw-r--r--include/asm-generic/mshyperv.h1
-rw-r--r--include/asm-generic/pgalloc.h80
-rw-r--r--include/asm-generic/qspinlock.h5
-rw-r--r--include/asm-generic/qspinlock_types.h8
-rw-r--r--include/asm-generic/rwonce.h90
-rw-r--r--include/asm-generic/seccomp.h2
-rw-r--r--include/asm-generic/sections.h4
-rw-r--r--include/asm-generic/tlb.h56
-rw-r--r--include/asm-generic/uaccess.h4
-rw-r--r--include/asm-generic/vmlinux.lds.h35
-rw-r--r--include/clocksource/timer-ti-dm.h2
-rw-r--r--include/crypto/acompress.h18
-rw-r--r--include/crypto/aead.h2
-rw-r--r--include/crypto/akcipher.h2
-rw-r--r--include/crypto/algapi.h25
-rw-r--r--include/crypto/chacha.h4
-rw-r--r--include/crypto/chacha20poly1305.h2
-rw-r--r--include/crypto/gf128mul.h2
-rw-r--r--include/crypto/hash.h4
-rw-r--r--include/crypto/if_alg.h4
-rw-r--r--include/crypto/internal/acompress.h2
-rw-r--r--include/crypto/internal/geniv.h2
-rw-r--r--include/crypto/kpp.h2
-rw-r--r--include/crypto/public_key.h2
-rw-r--r--include/crypto/sha.h1
-rw-r--r--include/crypto/skcipher.h4
-rw-r--r--include/drm/amd_asic_type.h2
-rw-r--r--include/drm/bridge/dw_hdmi.h26
-rw-r--r--include/drm/drm_atomic.h2
-rw-r--r--include/drm/drm_bridge.h5
-rw-r--r--include/drm/drm_client.h2
-rw-r--r--include/drm/drm_connector.h126
-rw-r--r--include/drm/drm_device.h3
-rw-r--r--include/drm/drm_dp_helper.h10
-rw-r--r--include/drm/drm_dp_mst_helper.h17
-rw-r--r--include/drm/drm_drv.h31
-rw-r--r--include/drm/drm_edid.h15
-rw-r--r--include/drm/drm_format_helper.h4
-rw-r--r--include/drm/drm_gem.h41
-rw-r--r--include/drm/drm_gem_cma_helper.h74
-rw-r--r--include/drm/drm_gem_shmem_helper.h4
-rw-r--r--include/drm/drm_gem_vram_helper.h3
-rw-r--r--include/drm/drm_mipi_dbi.h5
-rw-r--r--include/drm/drm_mode_config.h8
-rw-r--r--include/drm/drm_modes.h194
-rw-r--r--include/drm/drm_modeset_helper_vtables.h42
-rw-r--r--include/drm/drm_rect.h2
-rw-r--r--include/drm/drm_vblank.h20
-rw-r--r--include/drm/drm_vblank_work.h71
-rw-r--r--include/drm/gpu_scheduler.h6
-rw-r--r--include/drm/i915_pciids.h17
-rw-r--r--include/drm/ttm/ttm_bo_api.h4
-rw-r--r--include/drm/ttm/ttm_bo_driver.h17
-rw-r--r--include/drm/ttm/ttm_tt.h2
-rw-r--r--include/dt-bindings/clk/versaclock.h13
-rw-r--r--include/dt-bindings/clock/actions,s500-cmu.h7
-rw-r--r--include/dt-bindings/clock/agilex-clock.h4
-rw-r--r--include/dt-bindings/clock/bcm3368-clock.h24
-rw-r--r--include/dt-bindings/clock/bcm6318-clock.h42
-rw-r--r--include/dt-bindings/clock/bcm63268-clock.h30
-rw-r--r--include/dt-bindings/clock/bcm6328-clock.h19
-rw-r--r--include/dt-bindings/clock/bcm6358-clock.h18
-rw-r--r--include/dt-bindings/clock/bcm6362-clock.h26
-rw-r--r--include/dt-bindings/clock/bcm6368-clock.h24
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h2
-rw-r--r--include/dt-bindings/clock/ingenic,sysost.h12
-rw-r--r--include/dt-bindings/clock/jz4780-cgu.h144
-rw-r--r--include/dt-bindings/clock/microchip,sparx5.h23
-rw-r--r--include/dt-bindings/clock/qcom,apss-ipq.h12
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq8074.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7180.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm660.h1
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm8150.h33
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm8250.h34
-rw-r--r--include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h29
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h16
-rw-r--r--include/dt-bindings/clock/r8a774e1-cpg-mssr.h59
-rw-r--r--include/dt-bindings/clock/vf610-clock.h3
-rw-r--r--include/dt-bindings/clock/x1000-cgu.h2
-rw-r--r--include/dt-bindings/clock/x1830-cgu.h2
-rw-r--r--include/dt-bindings/dma/xlnx-zynqmp-dpdma.h16
-rw-r--r--include/dt-bindings/gce/mt6779-gce.h222
-rw-r--r--include/dt-bindings/iio/adc/ingenic,adc.h6
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h67
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h88
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h46
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h28
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h28
-rw-r--r--include/dt-bindings/iio/qcom,spmi-vadc.h78
-rw-r--r--include/dt-bindings/leds/common.h5
-rw-r--r--include/dt-bindings/memory/mt6779-larb-port.h206
-rw-r--r--include/dt-bindings/mux/mux-j721e-wiz.h53
-rw-r--r--include/dt-bindings/mux/mux.h2
-rw-r--r--include/dt-bindings/phy/phy.h1
-rw-r--r--include/dt-bindings/pinctrl/k3.h2
-rw-r--r--include/dt-bindings/pinctrl/mt6779-pinfunc.h1242
-rw-r--r--include/dt-bindings/pinctrl/omap.h2
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h1
-rw-r--r--include/dt-bindings/power/r8a774e1-sysc.h36
-rw-r--r--include/dt-bindings/regulator/dlg,da9211-regulator.h16
-rw-r--r--include/dt-bindings/regulator/mediatek,mt6397-regulator.h15
-rw-r--r--include/dt-bindings/reset/actions,s500-reset.h67
-rw-r--r--include/dt-bindings/reset/ti-syscon.h2
-rw-r--r--include/dt-bindings/sound/qcom,q6asm.h4
-rw-r--r--include/keys/asymmetric-parser.h2
-rw-r--r--include/keys/asymmetric-subtype.h2
-rw-r--r--include/keys/asymmetric-type.h2
-rw-r--r--include/kunit/test.h210
-rw-r--r--include/kvm/arm_arch_timer.h13
-rw-r--r--include/linux/acpi.h30
-rw-r--r--include/linux/acpi_iort.h20
-rw-r--r--include/linux/amba/clcd-regs.h87
-rw-r--r--include/linux/amba/clcd.h290
-rw-r--r--include/linux/arch_topology.h4
-rw-r--r--include/linux/arm-smccc.h49
-rw-r--r--include/linux/async_tx.h2
-rw-r--r--include/linux/atmdev.h9
-rw-r--r--include/linux/audit.h46
-rw-r--r--include/linux/backing-dev-defs.h43
-rw-r--r--include/linux/backing-dev.h22
-rw-r--r--include/linux/backlight.h399
-rw-r--r--include/linux/binfmts.h21
-rw-r--r--include/linux/bio.h12
-rw-r--r--include/linux/bitfield.h2
-rw-r--r--include/linux/blk-cgroup.h107
-rw-r--r--include/linux/blk-mq.h67
-rw-r--r--include/linux/blk_types.h37
-rw-r--r--include/linux/blkdev.h228
-rw-r--r--include/linux/bpf-cgroup.h16
-rw-r--r--include/linux/bpf-netns.h3
-rw-r--r--include/linux/bpf.h162
-rw-r--r--include/linux/bpf_types.h2
-rw-r--r--include/linux/bpf_verifier.h1
-rw-r--r--include/linux/bpfilter.h13
-rw-r--r--include/linux/btf_ids.h130
-rw-r--r--include/linux/btree.h2
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/capability.h6
-rw-r--r--include/linux/cdrom.h2
-rw-r--r--include/linux/ceph/ceph_features.h2
-rw-r--r--include/linux/ceph/ceph_fs.h1
-rw-r--r--include/linux/ceph/libceph.h1
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/clk-provider.h3
-rw-r--r--include/linux/clk/at91_pmc.h4
-rw-r--r--include/linux/clock_cooling.h57
-rw-r--r--include/linux/compaction.h2
-rw-r--r--include/linux/compat.h5
-rw-r--r--include/linux/compiler-clang.h4
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/compiler.h187
-rw-r--r--include/linux/compiler_attributes.h13
-rw-r--r--include/linux/compiler_types.h85
-rw-r--r--include/linux/console.h13
-rw-r--r--include/linux/console_struct.h93
-rw-r--r--include/linux/context_tracking.h2
-rw-r--r--include/linux/coresight.h6
-rw-r--r--include/linux/cpu.h1
-rw-r--r--include/linux/cpufreq.h18
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpuidle.h9
-rw-r--r--include/linux/crash_core.h6
-rw-r--r--include/linux/crush/crush.h2
-rw-r--r--include/linux/crypto.h41
-rw-r--r--include/linux/dasd_mod.h2
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/debugfs.h12
-rw-r--r--include/linux/decompress/unzstd.h11
-rw-r--r--include/linux/delay.h2
-rw-r--r--include/linux/devfreq.h9
-rw-r--r--include/linux/devfreq_cooling.h9
-rw-r--r--include/linux/device-mapper.h11
-rw-r--r--include/linux/device.h305
-rw-r--r--include/linux/dma-debug.h6
-rw-r--r--include/linux/dma-direct.h107
-rw-r--r--include/linux/dma-fence.h13
-rw-r--r--include/linux/dma-mapping.h258
-rw-r--r--include/linux/dma-resv.h4
-rw-r--r--include/linux/dma/k3-psil.h2
-rw-r--r--include/linux/dma/k3-udma-glue.h2
-rw-r--r--include/linux/dma/ti-cppi5.h2
-rw-r--r--include/linux/dmaengine.h37
-rw-r--r--include/linux/dmar.h1
-rw-r--r--include/linux/dsa/loop.h41
-rw-r--r--include/linux/dynamic_debug.h4
-rw-r--r--include/linux/dynamic_queue_limits.h2
-rw-r--r--include/linux/edac.h29
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/elfcore-compat.h4
-rw-r--r--include/linux/elfcore.h66
-rw-r--r--include/linux/energy_model.h149
-rw-r--r--include/linux/entry-common.h372
-rw-r--r--include/linux/entry-kvm.h80
-rw-r--r--include/linux/ethtool.h55
-rw-r--r--include/linux/exportfs.h2
-rw-r--r--include/linux/fanotify.h6
-rw-r--r--include/linux/fb.h7
-rw-r--r--include/linux/fdtable.h6
-rw-r--r--include/linux/file.h19
-rw-r--r--include/linux/filter.h155
-rw-r--r--include/linux/firmware/imx/sci.h2
-rw-r--r--include/linux/firmware/imx/svc/rm.h69
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h43
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h17
-rw-r--r--include/linux/freezer.h14
-rw-r--r--include/linux/frontswap.h2
-rw-r--r--include/linux/fs.h267
-rw-r--r--include/linux/fs_struct.h2
-rw-r--r--include/linux/fscrypt.h111
-rw-r--r--include/linux/fsl/enetc_mdio.h1
-rw-r--r--include/linux/fsl/mc.h32
-rw-r--r--include/linux/fsnotify.h84
-rw-r--r--include/linux/fsnotify_backend.h93
-rw-r--r--include/linux/fsverity.h9
-rw-r--r--include/linux/ftrace.h12
-rw-r--r--include/linux/generic-radix-tree.h2
-rw-r--r--include/linux/genhd.h40
-rw-r--r--include/linux/gpio/driver.h37
-rw-r--r--include/linux/gpio/regmap.h2
-rw-r--r--include/linux/hardirq.h28
-rw-r--r--include/linux/hashtable.h4
-rw-r--r--include/linux/hdmi.h1
-rw-r--r--include/linux/highmem.h2
-rw-r--r--include/linux/hmm.h24
-rw-r--r--include/linux/host1x.h4
-rw-r--r--include/linux/hrtimer.h3
-rw-r--r--include/linux/huge_mm.h67
-rw-r--r--include/linux/hugetlb.h53
-rw-r--r--include/linux/hw_breakpoint.h3
-rw-r--r--include/linux/hyperv.h22
-rw-r--r--include/linux/i2c.h16
-rw-r--r--include/linux/icmp.h5
-rw-r--r--include/linux/icmpv6.h22
-rw-r--r--include/linux/ieee80211.h2
-rw-r--r--include/linux/if_bridge.h1
-rw-r--r--include/linux/iio/common/cros_ec_sensors_core.h11
-rw-r--r--include/linux/iio/iio-opaque.h36
-rw-r--r--include/linux/iio/iio.h61
-rw-r--r--include/linux/iio/trigger_consumer.h7
-rw-r--r--include/linux/ima.h4
-rw-r--r--include/linux/indirect_call_wrapper.h12
-rw-r--r--include/linux/init_syscalls.h19
-rw-r--r--include/linux/initrd.h6
-rw-r--r--include/linux/instrumentation.h57
-rw-r--r--include/linux/intel-iommu.h13
-rw-r--r--include/linux/intel_rapl.h5
-rw-r--r--include/linux/interconnect-provider.h16
-rw-r--r--include/linux/interrupt.h41
-rw-r--r--include/linux/io-64-nonatomic-hi-lo.h4
-rw-r--r--include/linux/io-64-nonatomic-lo-hi.h4
-rw-r--r--include/linux/io-pgtable.h2
-rw-r--r--include/linux/iommu.h38
-rw-r--r--include/linux/ipv6.h3
-rw-r--r--include/linux/irq.h13
-rw-r--r--include/linux/irqchip.h29
-rw-r--r--include/linux/irqchip/arm-gic-v3.h4
-rw-r--r--include/linux/irqchip/arm-vic.h11
-rw-r--r--include/linux/irqchip/irq-bcm2836.h2
-rw-r--r--include/linux/irqchip/irq-omap-intc.h2
-rw-r--r--include/linux/irqdesc.h15
-rw-r--r--include/linux/irqflags.h36
-rw-r--r--include/linux/irqhandler.h1
-rw-r--r--include/linux/jbd2.h3
-rw-r--r--include/linux/jhash.h2
-rw-r--r--include/linux/jump_label.h2
-rw-r--r--include/linux/kasan.h4
-rw-r--r--include/linux/kcsan-checks.h10
-rw-r--r--include/linux/kernel.h9
-rw-r--r--include/linux/kexec.h29
-rw-r--r--include/linux/kobject.h1
-rw-r--r--include/linux/kprobes.h16
-rw-r--r--include/linux/ktime.h1
-rw-r--r--include/linux/kvm_host.h20
-rw-r--r--include/linux/kvm_irqfd.h2
-rw-r--r--include/linux/kvm_types.h19
-rw-r--r--include/linux/led-class-multicolor.h121
-rw-r--r--include/linux/leds-ti-lmu-common.h2
-rw-r--r--include/linux/leds.h10
-rw-r--r--include/linux/libnvdimm.h52
-rw-r--r--include/linux/lightnvm.h3
-rw-r--r--include/linux/linkmode.h6
-rw-r--r--include/linux/list.h20
-rw-r--r--include/linux/lockdep.h231
-rw-r--r--include/linux/lockdep_types.h194
-rw-r--r--include/linux/lsm_hook_defs.h2
-rw-r--r--include/linux/lsm_hooks.h2
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h4
-rw-r--r--include/linux/marvell_phy.h2
-rw-r--r--include/linux/math64.h21
-rw-r--r--include/linux/mdio.h1
-rw-r--r--include/linux/memblock.h28
-rw-r--r--include/linux/memcontrol.h217
-rw-r--r--include/linux/mempolicy.h18
-rw-r--r--include/linux/mfd/core.h42
-rw-r--r--include/linux/mfd/da9055/pdata.h2
-rw-r--r--include/linux/mfd/da9063/core.h1
-rw-r--r--include/linux/mfd/da9063/registers.h15
-rw-r--r--include/linux/mfd/hi6421-pmic.h2
-rw-r--r--include/linux/mfd/khadas-mcu.h91
-rw-r--r--include/linux/mfd/lp873x.h2
-rw-r--r--include/linux/mfd/lp87565.h2
-rw-r--r--include/linux/mfd/madera/pdata.h1
-rw-r--r--include/linux/mfd/max77693-private.h2
-rw-r--r--include/linux/mfd/sky81452.h2
-rw-r--r--include/linux/mfd/smsc.h104
-rw-r--r--include/linux/mfd/stm32-lptimer.h5
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h2
-rw-r--r--include/linux/mfd/tps65086.h2
-rw-r--r--include/linux/mfd/tps65217.h2
-rw-r--r--include/linux/mfd/tps65218.h2
-rw-r--r--include/linux/mfd/tps65912.h2
-rw-r--r--include/linux/mic_bus.h2
-rw-r--r--include/linux/migrate.h50
-rw-r--r--include/linux/mlx5/accel.h6
-rw-r--r--include/linux/mlx5/cq.h1
-rw-r--r--include/linux/mlx5/device.h18
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mlx5/fs.h5
-rw-r--r--include/linux/mlx5/mlx5_ifc.h147
-rw-r--r--include/linux/mlx5/port.h3
-rw-r--r--include/linux/mlx5/qp.h2
-rw-r--r--include/linux/mlx5/rsc_dump.h51
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h121
-rw-r--r--include/linux/mm_inline.h6
-rw-r--r--include/linux/mm_types.h6
-rw-r--r--include/linux/mman.h4
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/sdio_ids.h7
-rw-r--r--include/linux/mmu_notifier.h19
-rw-r--r--include/linux/mmzone.h69
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h26
-rw-r--r--include/linux/moduleparam.h2
-rw-r--r--include/linux/mpi.h3
-rw-r--r--include/linux/mroute.h5
-rw-r--r--include/linux/mroute6.h8
-rw-r--r--include/linux/mtd/hyperbus.h2
-rw-r--r--include/linux/mtd/nand.h12
-rw-r--r--include/linux/mtd/pfow.h2
-rw-r--r--include/linux/mtd/rawnand.h265
-rw-r--r--include/linux/mtd/spinand.h2
-rw-r--r--include/linux/mutex.h11
-rw-r--r--include/linux/net.h10
-rw-r--r--include/linux/net/intel/i40e_client.h194
-rw-r--r--include/linux/netdevice.h48
-rw-r--r--include/linux/netfilter.h20
-rw-r--r--include/linux/netfilter/x_tables.h4
-rw-r--r--include/linux/netfilter_ipv6.h18
-rw-r--r--include/linux/netpoll.h3
-rw-r--r--include/linux/nfs4.h27
-rw-r--r--include/linux/nfs_fs.h12
-rw-r--r--include/linux/nfs_fs_sb.h6
-rw-r--r--include/linux/nfs_xdr.h60
-rw-r--r--include/linux/nospec.h2
-rw-r--r--include/linux/nvme-fc-driver.h2
-rw-r--r--include/linux/nvme.h138
-rw-r--r--include/linux/nvmem-consumer.h1
-rw-r--r--include/linux/nvmem-provider.h3
-rw-r--r--include/linux/of.h9
-rw-r--r--include/linux/of_address.h4
-rw-r--r--include/linux/of_device.h16
-rw-r--r--include/linux/of_graph.h6
-rw-r--r--include/linux/of_iommu.h6
-rw-r--r--include/linux/of_irq.h13
-rw-r--r--include/linux/of_mdio.h40
-rw-r--r--include/linux/oom.h4
-rw-r--r--include/linux/padata.h21
-rw-r--r--include/linux/page-flags-layout.h4
-rw-r--r--include/linux/pageblock-flags.h24
-rw-r--r--include/linux/pagemap.h49
-rw-r--r--include/linux/pci-ats.h4
-rw-r--r--include/linux/pci.h30
-rw-r--r--include/linux/pci_ids.h8
-rw-r--r--include/linux/percpu-refcount.h2
-rw-r--r--include/linux/percpu_counter.h4
-rw-r--r--include/linux/perf_event.h17
-rw-r--r--include/linux/pgtable.h85
-rw-r--r--include/linux/phy.h113
-rw-r--r--include/linux/phylink.h110
-rw-r--r--include/linux/platform_data/clk-fch.h (renamed from include/linux/platform_data/clk-st.h)11
-rw-r--r--include/linux/platform_data/cros_ec_commands.h116
-rw-r--r--include/linux/platform_data/cros_ec_proto.h3
-rw-r--r--include/linux/platform_data/davinci-cpufreq.h2
-rw-r--r--include/linux/platform_data/davinci_asp.h2
-rw-r--r--include/linux/platform_data/dma-dw.h10
-rw-r--r--include/linux/platform_data/elm.h2
-rw-r--r--include/linux/platform_data/gpio-davinci.h2
-rw-r--r--include/linux/platform_data/gpmc-omap.h2
-rw-r--r--include/linux/platform_data/gsc_hwmon.h3
-rw-r--r--include/linux/platform_data/leds-lp55xx.h13
-rw-r--r--include/linux/platform_data/leds-s3c24xx.h6
-rw-r--r--include/linux/platform_data/media/omap1_camera.h32
-rw-r--r--include/linux/platform_data/mlxreg.h9
-rw-r--r--include/linux/platform_data/mmc-omap.h3
-rw-r--r--include/linux/platform_data/mtd-davinci-aemif.h2
-rw-r--r--include/linux/platform_data/omap-twl4030.h2
-rw-r--r--include/linux/platform_data/sky81452-backlight.h35
-rw-r--r--include/linux/platform_data/spi-imx.h33
-rw-r--r--include/linux/platform_data/uio_pruss.h2
-rw-r--r--include/linux/platform_data/usb-omap.h2
-rw-r--r--include/linux/pldmfw.h165
-rw-r--r--include/linux/pm.h10
-rw-r--r--include/linux/pm_domain.h12
-rw-r--r--include/linux/pm_opp.h21
-rw-r--r--include/linux/pm_runtime.h246
-rw-r--r--include/linux/poison.h4
-rw-r--r--include/linux/posix-timers.h17
-rw-r--r--include/linux/power/bq2415x_charger.h4
-rw-r--r--include/linux/power/bq27xxx_battery.h2
-rw-r--r--include/linux/power_supply.h4
-rw-r--r--include/linux/prandom.h78
-rw-r--r--include/linux/printk.h1
-rw-r--r--include/linux/proc_fs.h3
-rw-r--r--include/linux/property.h5
-rw-r--r--include/linux/psi_types.h7
-rw-r--r--include/linux/ptr_ring.h2
-rw-r--r--include/linux/pwm.h12
-rw-r--r--include/linux/qcom-geni-se.h45
-rw-r--r--include/linux/qcom_scm.h19
-rw-r--r--include/linux/qed/common_hsi.h30
-rw-r--r--include/linux/qed/eth_common.h30
-rw-r--r--include/linux/qed/fcoe_common.h3
-rw-r--r--include/linux/qed/iscsi_common.h30
-rw-r--r--include/linux/qed/iwarp_common.h30
-rw-r--r--include/linux/qed/qed_chain.h360
-rw-r--r--include/linux/qed/qed_eth_if.h30
-rw-r--r--include/linux/qed/qed_fcoe_if.h4
-rw-r--r--include/linux/qed/qed_if.h184
-rw-r--r--include/linux/qed/qed_iov_if.h30
-rw-r--r--include/linux/qed/qed_iscsi_if.h30
-rw-r--r--include/linux/qed/qed_ll2_if.h30
-rw-r--r--include/linux/qed/qed_rdma_if.h31
-rw-r--r--include/linux/qed/qede_rdma.h31
-rw-r--r--include/linux/qed/rdma_common.h30
-rw-r--r--include/linux/qed/roce_common.h30
-rw-r--r--include/linux/qed/storage_common.h30
-rw-r--r--include/linux/qed/tcp_common.h30
-rw-r--r--include/linux/raid/detect.h8
-rw-r--r--include/linux/raid/md_u.h13
-rw-r--r--include/linux/random.h63
-rw-r--r--include/linux/ratelimit.h36
-rw-r--r--include/linux/ratelimit_types.h43
-rw-r--r--include/linux/rculist.h4
-rw-r--r--include/linux/rculist_nulls.h2
-rw-r--r--include/linux/rcupdate.h53
-rw-r--r--include/linux/rcupdate_trace.h4
-rw-r--r--include/linux/rcutiny.h20
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/regmap.h245
-rw-r--r--include/linux/regset.h218
-rw-r--r--include/linux/regulator/consumer.h10
-rw-r--r--include/linux/regulator/driver.h7
-rw-r--r--include/linux/regulator/machine.h1
-rw-r--r--include/linux/regulator/pca9450.h219
-rw-r--r--include/linux/remoteproc.h36
-rw-r--r--include/linux/remoteproc/qcom_q6v5_ipa_notify.h82
-rw-r--r--include/linux/remoteproc/qcom_rproc.h36
-rw-r--r--include/linux/reset/reset-simple.h48
-rw-r--r--include/linux/rhashtable.h69
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/rmi.h2
-rw-r--r--include/linux/rtc.h4
-rw-r--r--include/linux/rtsx_pci.h33
-rw-r--r--include/linux/rwsem.h20
-rw-r--r--include/linux/sched.h61
-rw-r--r--include/linux/sched/isolation.h1
-rw-r--r--include/linux/sched/loadavg.h2
-rw-r--r--include/linux/sched/mm.h22
-rw-r--r--include/linux/sched/signal.h2
-rw-r--r--include/linux/sched/sysctl.h4
-rw-r--r--include/linux/sched/task.h25
-rw-r--r--include/linux/sched/topology.h17
-rw-r--r--include/linux/sched/user.h3
-rw-r--r--include/linux/sched_clock.h28
-rw-r--r--include/linux/scmi_protocol.h110
-rw-r--r--include/linux/seccomp.h12
-rw-r--r--include/linux/seqlock.h1014
-rw-r--r--include/linux/serial_8250.h2
-rw-r--r--include/linux/serial_core.h2
-rw-r--r--include/linux/shmem_fs.h3
-rw-r--r--include/linux/skbuff.h10
-rw-r--r--include/linux/slab.h9
-rw-r--r--include/linux/slab_def.h9
-rw-r--r--include/linux/slub_def.h31
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h31
-rw-r--r--include/linux/soc/ti/k3-ringacc.h6
-rw-r--r--include/linux/soc/ti/knav_qmss.h2
-rw-r--r--include/linux/soc/ti/ti-msgmgr.h2
-rw-r--r--include/linux/soc/ti/ti_sci_inta_msi.h2
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h6
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/sockptr.h105
-rw-r--r--include/linux/soundwire/sdw.h33
-rw-r--r--include/linux/soundwire/sdw_intel.h2
-rw-r--r--include/linux/soundwire/sdw_registers.h117
-rw-r--r--include/linux/spi/altera.h29
-rw-r--r--include/linux/spi/spi-mem.h14
-rw-r--r--include/linux/spi/spi.h29
-rw-r--r--include/linux/spinlock.h1
-rw-r--r--include/linux/spinlock_types.h2
-rw-r--r--include/linux/string_helpers.h15
-rw-r--r--include/linux/sunrpc/rpc_rdma.h74
-rw-r--r--include/linux/sunrpc/rpc_rdma_cid.h24
-rw-r--r--include/linux/sunrpc/svc_rdma.h17
-rw-r--r--include/linux/sunrpc/xdr.h26
-rw-r--r--include/linux/sunrpc/xprt.h1
-rw-r--r--include/linux/suspend.h6
-rw-r--r--include/linux/swap.h27
-rw-r--r--include/linux/syscalls.h93
-rw-r--r--include/linux/sysctl.h6
-rw-r--r--include/linux/sysfs.h7
-rw-r--r--include/linux/tboot.h2
-rw-r--r--include/linux/tcp.h4
-rw-r--r--include/linux/thermal.h31
-rw-r--r--include/linux/thunderbolt.h2
-rw-r--r--include/linux/time.h1
-rw-r--r--include/linux/time_namespace.h6
-rw-r--r--include/linux/torture.h5
-rw-r--r--include/linux/tpm.h1
-rw-r--r--include/linux/tpm_eventlog.h11
-rw-r--r--include/linux/trace.h1
-rw-r--r--include/linux/tracepoint.h11
-rw-r--r--include/linux/types.h2
-rw-r--r--include/linux/uaccess.h20
-rw-r--r--include/linux/uio.h1
-rw-r--r--include/linux/umh.h15
-rw-r--r--include/linux/usb.h14
-rw-r--r--include/linux/usb/ch9.h8
-rw-r--r--include/linux/usb/chipidea.h2
-rw-r--r--include/linux/usb/gadget.h9
-rw-r--r--include/linux/usb/pd.h1
-rw-r--r--include/linux/usb/pd_vdo.h2
-rw-r--r--include/linux/usb/phy_companion.h2
-rw-r--r--include/linux/usb/quirks.h4
-rw-r--r--include/linux/usb/serial.h97
-rw-r--r--include/linux/usb/tcpm.h2
-rw-r--r--include/linux/usb/typec.h14
-rw-r--r--include/linux/usb/typec_altmode.h14
-rw-r--r--include/linux/usb/usbnet.h2
-rw-r--r--include/linux/usermode_driver.h18
-rw-r--r--include/linux/uuid.h2
-rw-r--r--include/linux/vbox_utils.h1
-rw-r--r--include/linux/vdpa.h66
-rw-r--r--include/linux/vgaarb.h6
-rw-r--r--include/linux/videodev2.h1
-rw-r--r--include/linux/virtio_caif.h6
-rw-r--r--include/linux/virtio_config.h187
-rw-r--r--include/linux/virtio_ring.h19
-rw-r--r--include/linux/vm_event_item.h3
-rw-r--r--include/linux/vmstat.h14
-rw-r--r--include/linux/vmw_vmci_defs.h2
-rw-r--r--include/linux/vt_kern.h3
-rw-r--r--include/linux/watchdog.h2
-rw-r--r--include/linux/wimax/debug.h4
-rw-r--r--include/linux/wkup_m3_ipc.h2
-rw-r--r--include/linux/ww_mutex.h8
-rw-r--r--include/linux/xattr.h4
-rw-r--r--include/linux/xxhash.h2
-rw-r--r--include/linux/xz.h4
-rw-r--r--include/linux/zlib.h2
-rw-r--r--include/media/cec.h57
-rw-r--r--include/media/davinci/vpbe_display.h2
-rw-r--r--include/media/drv-intf/soc_mediabus.h107
-rw-r--r--include/media/dvbdev.h6
-rw-r--r--include/media/media-device.h2
-rw-r--r--include/media/media-devnode.h2
-rw-r--r--include/media/media-entity.h2
-rw-r--r--include/media/soc_camera.h397
-rw-r--r--include/media/tpg/v4l2-tpg.h3
-rw-r--r--include/media/v4l2-mc.h8
-rw-r--r--include/media/v4l2-rect.h20
-rw-r--r--include/media/v4l2-subdev.h39
-rw-r--r--include/media/videobuf-dma-sg.h2
-rw-r--r--include/media/videobuf2-core.h51
-rw-r--r--include/media/videobuf2-v4l2.h13
-rw-r--r--include/memory/renesas-rpc-if.h87
-rw-r--r--include/misc/ocxl-config.h1
-rw-r--r--include/misc/ocxl.h102
-rw-r--r--include/net/9p/transport.h2
-rw-r--r--include/net/act_api.h11
-rw-r--r--include/net/addrconf.h4
-rw-r--r--include/net/bluetooth/bluetooth.h12
-rw-r--r--include/net/bluetooth/hci.h28
-rw-r--r--include/net/bluetooth/hci_core.h107
-rw-r--r--include/net/bluetooth/hci_sock.h4
-rw-r--r--include/net/bluetooth/mgmt.h95
-rw-r--r--include/net/bluetooth/sco.h2
-rw-r--r--include/net/bonding.h8
-rw-r--r--include/net/busy_poll.h6
-rw-r--r--include/net/caif/caif_layer.h4
-rw-r--r--include/net/cfg80211.h41
-rw-r--r--include/net/cipso_ipv4.h12
-rw-r--r--include/net/compat.h1
-rw-r--r--include/net/devlink.h81
-rw-r--r--include/net/dsa.h54
-rw-r--r--include/net/dst.h10
-rw-r--r--include/net/fib_rules.h18
-rw-r--r--include/net/flow.h18
-rw-r--r--include/net/flow_dissector.h9
-rw-r--r--include/net/flow_offload.h22
-rw-r--r--include/net/fq.h1
-rw-r--r--include/net/fq_impl.h3
-rw-r--r--include/net/ieee80211_radiotap.h1
-rw-r--r--include/net/inet_connection_sock.h18
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/ip.h17
-rw-r--r--include/net/ip6_checksum.h9
-rw-r--r--include/net/ip6_fib.h38
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/ip_vs.h44
-rw-r--r--include/net/ipv6.h10
-rw-r--r--include/net/l3mdev.h39
-rw-r--r--include/net/mac80211.h42
-rw-r--r--include/net/mptcp.h15
-rw-r--r--include/net/netfilter/nf_conntrack.h14
-rw-r--r--include/net/netfilter/nf_tables.h25
-rw-r--r--include/net/pkt_cls.h54
-rw-r--r--include/net/regulatory.h2
-rw-r--r--include/net/request_sock.h2
-rw-r--r--include/net/rpl.h6
-rw-r--r--include/net/sch_generic.h3
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sctp/structs.h18
-rw-r--r--include/net/sock.h35
-rw-r--r--include/net/switchdev.h38
-rw-r--r--include/net/tc_act/tc_police.h42
-rw-r--r--include/net/tcp.h29
-rw-r--r--include/net/tls.h34
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/tso.h23
-rw-r--r--include/net/udp.h10
-rw-r--r--include/net/udp_tunnel.h169
-rw-r--r--include/net/wimax.h2
-rw-r--r--include/net/xdp.h59
-rw-r--r--include/net/xdp_sock.h4
-rw-r--r--include/net/xfrm.h29
-rw-r--r--include/rdma/ib.h31
-rw-r--r--include/rdma/ib_addr.h31
-rw-r--r--include/rdma/ib_cache.h29
-rw-r--r--include/rdma/ib_cm.h1
-rw-r--r--include/rdma/ib_hdrs.h44
-rw-r--r--include/rdma/ib_mad.h31
-rw-r--r--include/rdma/ib_marshall.h31
-rw-r--r--include/rdma/ib_pack.h29
-rw-r--r--include/rdma/ib_pma.h31
-rw-r--r--include/rdma/ib_sa.h29
-rw-r--r--include/rdma/ib_smi.h31
-rw-r--r--include/rdma/ib_umem.h29
-rw-r--r--include/rdma/ib_umem_odp.h29
-rw-r--r--include/rdma/ib_verbs.h100
-rw-r--r--include/rdma/iw_cm.h30
-rw-r--r--include/rdma/iw_portmap.h30
-rw-r--r--include/rdma/opa_addr.h44
-rw-r--r--include/rdma/opa_port_info.h31
-rw-r--r--include/rdma/opa_smi.h31
-rw-r--r--include/rdma/opa_vnic.h49
-rw-r--r--include/rdma/rdma_cm.h31
-rw-r--r--include/rdma/rdma_cm_ib.h31
-rw-r--r--include/rdma/rdma_netlink.h2
-rw-r--r--include/rdma/rdma_vt.h50
-rw-r--r--include/rdma/rdmavt_cq.h53
-rw-r--r--include/rdma/rdmavt_mr.h50
-rw-r--r--include/rdma/rdmavt_qp.h69
-rw-r--r--include/rdma/uverbs_ioctl.h30
-rw-r--r--include/rdma/uverbs_named_ioctl.h29
-rw-r--r--include/rdma/uverbs_std_types.h43
-rw-r--r--include/rdma/uverbs_types.h29
-rw-r--r--include/scsi/fc/fc_ms.h4
-rw-r--r--include/scsi/scsi_tcq.h2
-rw-r--r--include/scsi/scsi_transport_iscsi.h2
-rw-r--r--include/soc/arc/aux.h2
-rw-r--r--include/soc/at91/atmel_tcb.h5
-rw-r--r--include/soc/mscc/ocelot.h91
-rw-r--r--include/soc/mscc/ocelot_dev.h78
-rw-r--r--include/soc/mscc/ocelot_qsys.h13
-rw-r--r--include/soc/mscc/ocelot_sys.h23
-rw-r--r--include/soc/qcom/kryo-l2-accessors.h12
-rw-r--r--include/soc/qcom/rpmh.h7
-rw-r--r--include/soc/tegra/bpmp-abi.h913
-rw-r--r--include/soc/tegra/fuse.h2
-rw-r--r--include/sound/control.h45
-rw-r--r--include/sound/gus.h4
-rw-r--r--include/sound/hda_codec.h4
-rw-r--r--include/sound/hdaudio.h3
-rw-r--r--include/sound/hdmi-codec.h8
-rw-r--r--include/sound/memalloc.h9
-rw-r--r--include/sound/omap-hdmi-audio.h2
-rw-r--r--include/sound/rt5670.h26
-rw-r--r--include/sound/simple_card_utils.h6
-rw-r--r--include/sound/soc-component.h30
-rw-r--r--include/sound/soc-dai.h14
-rw-r--r--include/sound/soc-dapm.h20
-rw-r--r--include/sound/soc-link.h1
-rw-r--r--include/sound/soc.h34
-rw-r--r--include/sound/wm8960.h17
-rw-r--r--include/target/iscsi/iscsi_target_core.h9
-rw-r--r--include/target/iscsi/iscsi_transport.h2
-rw-r--r--include/target/target_core_backend.h2
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/block.h15
-rw-r--r--include/trace/events/btrfs.h137
-rw-r--r--include/trace/events/ext4.h85
-rw-r--r--include/trace/events/f2fs.h63
-rw-r--r--include/trace/events/kvm.h2
-rw-r--r--include/trace/events/migrate.h17
-rw-r--r--include/trace/events/mmflags.h2
-rw-r--r--include/trace/events/random.h17
-rw-r--r--include/trace/events/rcu.h19
-rw-r--r--include/trace/events/rpcgss.h168
-rw-r--r--include/trace/events/rpcrdma.h207
-rw-r--r--include/trace/events/sched.h14
-rw-r--r--include/trace/events/scmi.h6
-rw-r--r--include/trace/events/sunrpc.h35
-rw-r--r--include/trace/events/ufs.h31
-rw-r--r--include/trace/events/xdp.h16
-rw-r--r--include/trace/trace_events.h19
-rw-r--r--include/uapi/asm-generic/unistd.h6
-rw-r--r--include/uapi/drm/amdgpu_drm.h10
-rw-r--r--include/uapi/drm/drm_fourcc.h140
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/drm/i915_drm.h4
-rw-r--r--include/uapi/drm/msm_drm.h4
-rw-r--r--include/uapi/linux/android/binder.h2
-rw-r--r--include/uapi/linux/atmioc.h2
-rw-r--r--include/uapi/linux/audit.h18
-rw-r--r--include/uapi/linux/auto_dev-ioctl.h2
-rw-r--r--include/uapi/linux/batadv_packet.h50
-rw-r--r--include/uapi/linux/batman_adv.h7
-rw-r--r--include/uapi/linux/bcache.h38
-rw-r--r--include/uapi/linux/blkzoned.h15
-rw-r--r--include/uapi/linux/bpf.h404
-rw-r--r--include/uapi/linux/btrfs.h21
-rw-r--r--include/uapi/linux/btrfs_tree.h4
-rw-r--r--include/uapi/linux/caif/caif_socket.h2
-rw-r--r--include/uapi/linux/capability.h9
-rw-r--r--include/uapi/linux/close_range.h9
-rw-r--r--include/uapi/linux/devlink.h15
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/elfcore.h101
-rw-r--r--include/uapi/linux/errqueue.h14
-rw-r--r--include/uapi/linux/ethtool.h87
-rw-r--r--include/uapi/linux/ethtool_netlink.h57
-rw-r--r--include/uapi/linux/fanotify.h16
-rw-r--r--include/uapi/linux/fpga-dfl.h82
-rw-r--r--include/uapi/linux/gpio.h2
-rw-r--r--include/uapi/linux/hsr_netlink.h2
-rw-r--r--include/uapi/linux/hyperv.h2
-rw-r--r--include/uapi/linux/icmp.h22
-rw-r--r--include/uapi/linux/icmpv6.h1
-rw-r--r--include/uapi/linux/idxd.h6
-rw-r--r--include/uapi/linux/if_bridge.h75
-rw-r--r--include/uapi/linux/if_link.h23
-rw-r--r--include/uapi/linux/if_xdp.h5
-rw-r--r--include/uapi/linux/in.h1
-rw-r--r--include/uapi/linux/in6.h1
-rw-r--r--include/uapi/linux/inet_diag.h1
-rw-r--r--include/uapi/linux/io_uring.h4
-rw-r--r--include/uapi/linux/iommu.h6
-rw-r--r--include/uapi/linux/isst_if.h2
-rw-r--r--include/uapi/linux/kfd_ioctl.h22
-rw-r--r--include/uapi/linux/kvm.h4
-rw-r--r--include/uapi/linux/map_to_7segment.h2
-rw-r--r--include/uapi/linux/mdio.h26
-rw-r--r--include/uapi/linux/mptcp.h17
-rw-r--r--include/uapi/linux/mrp_bridge.h38
-rw-r--r--include/uapi/linux/ndctl.h5
-rw-r--r--include/uapi/linux/neighbour.h24
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h14
-rw-r--r--include/uapi/linux/netfilter/xt_connmark.h2
-rw-r--r--include/uapi/linux/nfs4.h3
-rw-r--r--include/uapi/linux/nfs_fs.h1
-rw-r--r--include/uapi/linux/nl80211.h94
-rw-r--r--include/uapi/linux/openvswitch.h3
-rw-r--r--include/uapi/linux/perf_event.h49
-rw-r--r--include/uapi/linux/pkt_cls.h3
-rw-r--r--include/uapi/linux/pkt_sched.h2
-rw-r--r--include/uapi/linux/ptp_clock.h34
-rw-r--r--include/uapi/linux/raid/md_p.h2
-rw-r--r--include/uapi/linux/raw.h2
-rw-r--r--include/uapi/linux/remoteproc_cdev.h37
-rw-r--r--include/uapi/linux/rtnetlink.h46
-rw-r--r--include/uapi/linux/seccomp.h25
-rw-r--r--include/uapi/linux/seg6_iptunnel.h21
-rw-r--r--include/uapi/linux/serial_core.h14
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/target_core_user.h25
-rw-r--r--include/uapi/linux/tcp.h1
-rw-r--r--include/uapi/linux/thermal.h89
-rw-r--r--include/uapi/linux/types.h2
-rw-r--r--include/uapi/linux/usb/ch9.h19
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h3
-rw-r--r--include/uapi/linux/vboxguest.h24
-rw-r--r--include/uapi/linux/vhost.h2
-rw-r--r--include/uapi/linux/vhost_types.h11
-rw-r--r--include/uapi/linux/videodev2.h17
-rw-r--r--include/uapi/linux/virtio_9p.h4
-rw-r--r--include/uapi/linux/virtio_balloon.h10
-rw-r--r--include/uapi/linux/virtio_blk.h26
-rw-r--r--include/uapi/linux/virtio_config.h10
-rw-r--r--include/uapi/linux/virtio_console.h8
-rw-r--r--include/uapi/linux/virtio_crypto.h26
-rw-r--r--include/uapi/linux/virtio_fs.h2
-rw-r--r--include/uapi/linux/virtio_gpu.h8
-rw-r--r--include/uapi/linux/virtio_input.h18
-rw-r--r--include/uapi/linux/virtio_iommu.h12
-rw-r--r--include/uapi/linux/virtio_mem.h14
-rw-r--r--include/uapi/linux/virtio_net.h8
-rw-r--r--include/uapi/linux/virtio_pmem.h4
-rw-r--r--include/uapi/linux/virtio_scsi.h20
-rw-r--r--include/uapi/linux/wireless.h2
-rw-r--r--include/uapi/linux/xdp_diag.h11
-rw-r--r--include/uapi/linux/xfrm.h1
-rw-r--r--include/uapi/misc/habanalabs.h27
-rw-r--r--include/uapi/mtd/mtd-abi.h2
-rw-r--r--include/uapi/rdma/efa-abi.h15
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h15
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h16
-rw-r--r--include/uapi/rdma/qedr-abi.h10
-rw-r--r--include/uapi/rdma/rdma_netlink.h9
-rw-r--r--include/uapi/rdma/rdma_user_ioctl.h2
-rw-r--r--include/uapi/scsi/fc/fc_els.h2
-rw-r--r--include/uapi/xen/gntdev.h2
-rw-r--r--include/vdso/datapage.h11
-rw-r--r--include/vdso/vsyscall.h3
-rw-r--r--include/xen/interface/io/displif.h91
-rw-r--r--include/xen/interface/io/netif.h20
-rw-r--r--include/xen/page.h1
-rw-r--r--include/xen/swiotlb-xen.h8
841 files changed, 18287 insertions(+), 8950 deletions(-)
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 5afb6ceb284f..a3abcc4b7d9f 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -588,8 +588,13 @@ bool acpi_dma_supported(struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
u64 *size);
-int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
-
+int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
+ const u32 *input_id);
+static inline int acpi_dma_configure(struct device *dev,
+ enum dev_dma_attr attr)
+{
+ return acpi_dma_configure_id(dev, attr, NULL);
+}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children);
int acpi_is_root_bridge(acpi_handle);
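The hunk above turns acpi_dma_configure() into a thin wrapper around the new acpi_dma_configure_id(), which additionally accepts an optional input ID. A minimal caller sketch, not taken from the patch: it assumes <linux/acpi.h> is included, uses DEV_DMA_COHERENT purely as an illustrative attribute, and example_configure_dma() is a hypothetical helper.

static int example_configure_dma(struct device *dev, const u32 *input_id)
{
	/* Pass a device-specific input ID through when one is known ... */
	if (input_id)
		return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, input_id);

	/* ... otherwise the legacy wrapper supplies a NULL ID. */
	return acpi_dma_configure(dev, DEV_DMA_COHERENT);
}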
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 459d6981ca96..9dc816641286 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20200528
+#define ACPI_CA_VERSION 0x20200717
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index b0b163b9efc6..bdcac69fa6bd 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -415,6 +415,13 @@ struct acpi_table_tpm2 {
/* Platform-specific data follows */
};
+/* Optional trailer for revision 4 holding platform-specific data */
+struct acpi_tpm2_phy {
+ u8 start_method_specific[12];
+ u32 log_area_minimum_length;
+ u64 log_area_start_address;
+};
+
/* Values for start_method above */
#define ACPI_TPM2_NOT_ALLOWED 0
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index aa236b9e6f24..d50e61384f1f 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -1146,7 +1146,7 @@ struct acpi_pnp_device_id {
struct acpi_pnp_device_id_list {
u32 count; /* Number of IDs in Ids array */
u32 list_size; /* Size of list, including ID strings */
- struct acpi_pnp_device_id ids[1]; /* ID array */
+ struct acpi_pnp_device_id ids[]; /* ID array */
};
/*
@@ -1201,12 +1201,18 @@ struct acpi_pci_id {
u16 function;
};
+struct acpi_mem_mapping {
+ acpi_physical_address physical_address;
+ u8 *logical_address;
+ acpi_size length;
+ struct acpi_mem_mapping *next_mm;
+};
+
struct acpi_mem_space_context {
u32 length;
acpi_physical_address address;
- acpi_physical_address mapped_physical_address;
- u8 *mapped_logical_address;
- acpi_size mapped_length;
+ struct acpi_mem_mapping *cur_mm;
+ struct acpi_mem_mapping *first_mm;
};
/*
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 44ec80e70518..74b0612601dd 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -45,6 +45,7 @@ mandatory-y += pci.h
mandatory-y += percpu.h
mandatory-y += pgalloc.h
mandatory-y += preempt.h
+mandatory-y += rwonce.h
mandatory-y += sections.h
mandatory-y += serial.h
mandatory-y += shmparam.h
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 286867f593d2..11f96f40f4a7 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -159,8 +159,6 @@ ATOMIC_OP(xor, ^)
* resource counting etc..
*/
-#define ATOMIC_INIT(i) { (i) }
-
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 2eacaf7d62f6..798027bb89be 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -13,7 +13,7 @@
#ifndef __ASSEMBLY__
-#include <linux/compiler.h>
+#include <asm/rwonce.h>
#ifndef nop
#define nop() asm volatile ("nop")
@@ -46,10 +46,6 @@
#define dma_wmb() wmb()
#endif
-#ifndef read_barrier_depends
-#define read_barrier_depends() do { } while (0)
-#endif
-
#ifndef __smp_mb
#define __smp_mb() mb()
#endif
@@ -62,10 +58,6 @@
#define __smp_wmb() wmb()
#endif
-#ifndef __smp_read_barrier_depends
-#define __smp_read_barrier_depends() read_barrier_depends()
-#endif
-
#ifdef CONFIG_SMP
#ifndef smp_mb
@@ -80,10 +72,6 @@
#define smp_wmb() __smp_wmb()
#endif
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() __smp_read_barrier_depends()
-#endif
-
#else /* !CONFIG_SMP */
#ifndef smp_mb
@@ -98,10 +86,6 @@
#define smp_wmb() barrier()
#endif
-#ifndef smp_read_barrier_depends
-#define smp_read_barrier_depends() do { } while (0)
-#endif
-
#endif /* CONFIG_SMP */
#ifndef __smp_store_mb
@@ -196,7 +180,6 @@ do { \
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
-#define virt_read_barrier_depends() __smp_read_barrier_depends()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
@@ -257,5 +240,15 @@ do { \
})
#endif
+/*
+ * pmem_wmb() ensures that all stores for which the modification
+ * is written to persistent storage by preceding instructions have
+ * updated persistent storage before any data access or data transfer
+ * caused by subsequent instructions is initiated.
+ */
+#ifndef pmem_wmb
+#define pmem_wmb() wmb()
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
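The new pmem_wmb() barrier above orders stores destined for persistent memory ahead of whatever subsequently advertises them as durable. A usage sketch, not taken from the patch: example_pmem_commit() is a hypothetical helper and arch_wb_cache_pmem() stands in for an architecture-provided flush primitive.

static void example_pmem_commit(void *pmem_dst, const void *src, size_t len,
				int *done_flag)
{
	memcpy(pmem_dst, src, len);		/* stage the data */
	arch_wb_cache_pmem(pmem_dst, len);	/* assumed flush helper */
	pmem_wmb();				/* flushes reach pmem before the flag */
	WRITE_ONCE(*done_flag, 1);		/* publish completion */
}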
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index c94e33ae3e7b..18b0f4eee8cb 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -3,6 +3,7 @@
#define _ASM_GENERIC_BUG_H
#include <linux/compiler.h>
+#include <linux/instrumentation.h>
#define CUT_HERE "------------[ cut here ]------------\n"
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 5a80f8e54300..cd8b75aa770d 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -23,11 +23,9 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
-
#ifndef csum_partial_copy_nocheck
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy((src), (dst), (len), (sum))
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
+ __wsum sum);
#endif
#ifndef ip_fast_csum
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 8b1e020e9a03..dabf8cb7203b 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -163,7 +163,7 @@ static inline u16 readw(const volatile void __iomem *addr)
u16 val;
__io_br();
- val = __le16_to_cpu(__raw_readw(addr));
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
return val;
}
@@ -176,7 +176,7 @@ static inline u32 readl(const volatile void __iomem *addr)
u32 val;
__io_br();
- val = __le32_to_cpu(__raw_readl(addr));
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
return val;
}
@@ -212,7 +212,7 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
static inline void writew(u16 value, volatile void __iomem *addr)
{
__io_bw();
- __raw_writew(cpu_to_le16(value), addr);
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
}
#endif
@@ -222,7 +222,7 @@ static inline void writew(u16 value, volatile void __iomem *addr)
static inline void writel(u32 value, volatile void __iomem *addr)
{
__io_bw();
- __raw_writel(__cpu_to_le32(value), addr);
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
}
#endif
@@ -456,7 +456,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#if !defined(inb) && !defined(_inb)
#define _inb _inb
-static inline u16 _inb(unsigned long addr)
+static inline u8 _inb(unsigned long addr)
{
u8 val;
@@ -474,7 +474,7 @@ static inline u16 _inw(unsigned long addr)
u16 val;
__io_pbr();
- val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
+ val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
__io_par(val);
return val;
}
@@ -482,12 +482,12 @@ static inline u16 _inw(unsigned long addr)
#if !defined(inl) && !defined(_inl)
#define _inl _inl
-static inline u16 _inl(unsigned long addr)
+static inline u32 _inl(unsigned long addr)
{
u32 val;
__io_pbr();
- val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
+ val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
__io_par(val);
return val;
}
@@ -508,7 +508,7 @@ static inline void _outb(u8 value, unsigned long addr)
static inline void _outw(u16 value, unsigned long addr)
{
__io_pbw();
- __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
+ __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
__io_paw();
}
#endif
@@ -518,7 +518,7 @@ static inline void _outw(u16 value, unsigned long addr)
static inline void _outl(u32 value, unsigned long addr)
{
__io_pbw();
- __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
+ __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
__io_paw();
}
#endif
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 9d28a5e82f73..649224664969 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -26,14 +26,14 @@
* in the low address range. Architectures for which this is not
* true can't use this generic implementation.
*/
-extern unsigned int ioread8(void __iomem *);
-extern unsigned int ioread16(void __iomem *);
-extern unsigned int ioread16be(void __iomem *);
-extern unsigned int ioread32(void __iomem *);
-extern unsigned int ioread32be(void __iomem *);
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread16be(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern unsigned int ioread32be(const void __iomem *);
#ifdef CONFIG_64BIT
-extern u64 ioread64(void __iomem *);
-extern u64 ioread64be(void __iomem *);
+extern u64 ioread64(const void __iomem *);
+extern u64 ioread64be(const void __iomem *);
#endif
#ifdef readq
@@ -41,10 +41,10 @@ extern u64 ioread64be(void __iomem *);
#define ioread64_hi_lo ioread64_hi_lo
#define ioread64be_lo_hi ioread64be_lo_hi
#define ioread64be_hi_lo ioread64be_hi_lo
-extern u64 ioread64_lo_hi(void __iomem *addr);
-extern u64 ioread64_hi_lo(void __iomem *addr);
-extern u64 ioread64be_lo_hi(void __iomem *addr);
-extern u64 ioread64be_hi_lo(void __iomem *addr);
+extern u64 ioread64_lo_hi(const void __iomem *addr);
+extern u64 ioread64_hi_lo(const void __iomem *addr);
+extern u64 ioread64be_lo_hi(const void __iomem *addr);
+extern u64 ioread64be_hi_lo(const void __iomem *addr);
#endif
extern void iowrite8(u8, void __iomem *);
@@ -79,9 +79,9 @@ extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
* memory across multiple ports, use "memcpy_toio()"
* and friends.
*/
-extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
-extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
-extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
diff --git a/include/asm-generic/kvm_types.h b/include/asm-generic/kvm_types.h
new file mode 100644
index 000000000000..2a82daf110f1
--- /dev/null
+++ b/include/asm-generic/kvm_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_KVM_TYPES_H
+#define _ASM_GENERIC_KVM_TYPES_H
+
+#endif
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 1c4fd950f091..c5edc5e08b94 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -168,7 +168,6 @@ void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
void hyperv_cleanup(void);
-void hv_setup_sched_clock(void *sched_clock);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 73f7421413cb..02932efad3ab 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -102,6 +102,86 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
__free_page(pte_page);
}
+
+#if CONFIG_PGTABLE_LEVELS > 2
+
+#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
+/**
+ * pmd_alloc_one - allocate a page for PMD-level page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page and runs the pgtable_pmd_page_ctor().
+ * Allocations use %GFP_PGTABLE_USER in user context and
+ * %GFP_PGTABLE_KERNEL in kernel context.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ struct page *page;
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ page = alloc_pages(gfp, 0);
+ if (!page)
+ return NULL;
+ if (!pgtable_pmd_page_ctor(page)) {
+ __free_pages(page, 0);
+ return NULL;
+ }
+ return (pmd_t *)page_address(page);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMD_FREE
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
+ free_page((unsigned long)pmd);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
+
+#if CONFIG_PGTABLE_LEVELS > 3
+
+#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
+/**
+ * pud_alloc_one - allocate a page for PUD-level page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page using %GFP_PGTABLE_USER for user context and
+ * %GFP_PGTABLE_KERNEL for kernel context.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ return (pud_t *)get_zeroed_page(gfp);
+}
+#endif
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ free_page((unsigned long)pud);
+}
+
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+
+#ifndef __HAVE_ARCH_PGD_FREE
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+#endif
+
#endif /* CONFIG_MMU */
#endif /* __ASM_GENERIC_PGALLOC_H */
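The generic PMD/PUD/PGD helpers added above pair an allocation that runs the page-table constructor with a free that runs the destructor. A minimal sketch of that pairing, assuming the usual mm headers; example_get_pmd()/example_put_pmd() are hypothetical names, and the real callers are core mm code such as __pmd_alloc().

static pmd_t *example_get_pmd(struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd = pmd_alloc_one(mm, addr);	/* ctor runs on success */

	if (!pmd)
		return NULL;
	/* a real caller would now wire the pmd into the pud entry */
	return pmd;
}

static void example_put_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_free(mm, pmd);			/* dtor + free_page() */
}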
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index fde943d180e0..4fe7fd0fe834 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -11,7 +11,9 @@
#define __ASM_GENERIC_QSPINLOCK_H
#include <asm-generic/qspinlock_types.h>
+#include <linux/atomic.h>
+#ifndef queued_spin_is_locked
/**
* queued_spin_is_locked - is the spinlock locked?
* @lock: Pointer to queued spinlock structure
@@ -25,6 +27,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
*/
return atomic_read(&lock->val);
}
+#endif
/**
* queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -67,6 +70,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#ifndef queued_spin_lock
/**
* queued_spin_lock - acquire a queued spinlock
* @lock: Pointer to queued spinlock structure
@@ -80,6 +84,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
queued_spin_lock_slowpath(lock, val);
}
+#endif
#ifndef queued_spin_unlock
/**
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 56d1309d32f8..2fd1fb89ec36 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -9,15 +9,7 @@
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H
-/*
- * Including atomic.h with PARAVIRT on will cause compilation errors because
- * of recursive header file incluson via paravirt_types.h. So don't include
- * it if PARAVIRT is on.
- */
-#ifndef CONFIG_PARAVIRT
#include <linux/types.h>
-#include <linux/atomic.h>
-#endif
typedef struct qspinlock {
union {
diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h
new file mode 100644
index 000000000000..8d0a6280e982
--- /dev/null
+++ b/include/asm-generic/rwonce.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
+ *
+ * These two macros will also work on aggregate data types like structs or
+ * unions.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+#ifndef __ASM_GENERIC_RWONCE_H
+#define __ASM_GENERIC_RWONCE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler_types.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t) \
+ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
+ "Unsupported access size for {READ,WRITE}_ONCE().")
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity. Note that this may result in tears!
+ */
+#ifndef __READ_ONCE
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
+#endif
+
+#define READ_ONCE(x) \
+({ \
+ compiletime_assert_rwonce_type(x); \
+ __READ_ONCE(x); \
+})
+
+#define __WRITE_ONCE(x, val) \
+do { \
+ *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define WRITE_ONCE(x, val) \
+do { \
+ compiletime_assert_rwonce_type(x); \
+ __WRITE_ONCE(x, val); \
+} while (0)
+
+static __no_sanitize_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+ return __READ_ONCE(*(unsigned long *)addr);
+}
+
+/*
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
+ */
+#define READ_ONCE_NOCHECK(x) \
+({ \
+ compiletime_assert(sizeof(x) == sizeof(unsigned long), \
+ "Unsupported access size for READ_ONCE_NOCHECK()."); \
+ (typeof(x))__read_once_word_nocheck(&(x)); \
+})
+
+static __no_kasan_or_inline
+unsigned long read_word_at_a_time(const void *addr)
+{
+ kasan_check_read(addr, 1);
+ return *(unsigned long *)addr;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_RWONCE_H */
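The rwonce.h header comment above describes READ_ONCE()/WRITE_ONCE() as the way to stop the compiler from caching, tearing or refetching a shared access. A small usage sketch, assuming <asm/rwonce.h> (or linux/compiler.h) and cpu_relax() are available; the flag and both functions are hypothetical.

static int example_done;

static void example_irq_handler(void)
{
	WRITE_ONCE(example_done, 1);		/* single, untorn store */
}

static void example_wait(void)
{
	while (!READ_ONCE(example_done))	/* refetched on every iteration */
		cpu_relax();
}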
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index 1321ac7821d7..6b6f42bc58f9 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -33,7 +33,7 @@ static inline const int *get_compat_mode1_syscalls(void)
static const int mode1_syscalls_32[] = {
__NR_seccomp_read_32, __NR_seccomp_write_32,
__NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
- 0, /* null terminated */
+ -1, /* negative terminated */
};
return mode1_syscalls_32;
}
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 66397ed10acb..d16302d3eb59 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -60,8 +60,8 @@ extern __visible const void __nosave_begin, __nosave_end;
/* Function descriptor handling (if any). Override in asm/sections.h */
#ifndef dereference_function_descriptor
-#define dereference_function_descriptor(p) (p)
-#define dereference_kernel_function_descriptor(p) (p)
+#define dereference_function_descriptor(p) ((void *)(p))
+#define dereference_kernel_function_descriptor(p) ((void *)(p))
#endif
/* random extra sections (if any). Override
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 3f1649a8cf55..6661ee1cff47 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -14,7 +14,6 @@
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -512,6 +511,38 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
}
#endif
+/*
+ * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
+ * and set corresponding cleared_*.
+ */
+static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_ptes = 1;
+}
+
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_pmds = 1;
+}
+
+static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_puds = 1;
+}
+
+static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_p4ds = 1;
+}
+
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif
@@ -525,19 +556,17 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
*/
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
- tlb->cleared_ptes = 1; \
+ tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
- __tlb_adjust_range(tlb, address, _sz); \
if (_sz == PMD_SIZE) \
- tlb->cleared_pmds = 1; \
+ tlb_flush_pmd_range(tlb, address, _sz); \
else if (_sz == PUD_SIZE) \
- tlb->cleared_puds = 1; \
+ tlb_flush_pud_range(tlb, address, _sz); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
@@ -551,8 +580,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
- tlb->cleared_pmds = 1; \
+ tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
@@ -566,8 +594,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
- tlb->cleared_puds = 1; \
+ tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
} while (0)
@@ -592,9 +619,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_pmds = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#endif
@@ -602,9 +628,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_puds = 1; \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#endif
@@ -612,9 +637,8 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
tlb->freed_tables = 1; \
- tlb->cleared_p4ds = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
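The new tlb_flush_{pte,pmd,pud,p4d}_range() helpers bundle __tlb_adjust_range() with the matching cleared_* flag, which the rewritten macros above now rely on. A hedged sketch of an out-of-line user (hypothetical function, assumes THP so HPAGE_PMD_SIZE is defined):

static inline void example_zap_huge_pmd(struct mmu_gather *tlb,
					pmd_t *pmdp, unsigned long addr)
{
	pmd_clear(pmdp);
	/* Record both the flush range and the PMD-level granularity. */
	tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
}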
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index e935318804f8..ba68ee4dabfa 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -86,8 +86,8 @@ static inline void set_fs(mm_segment_t fs)
}
#endif
-#ifndef segment_eq
-#define segment_eq(a, b) ((a).seg == (b).seg)
+#ifndef uaccess_kernel
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#endif
#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
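uaccess_kernel() replaces open-coded segment_eq(get_fs(), KERNEL_DS) checks. Illustrative call site only:

	if (uaccess_kernel())
		pr_debug("%s: running with the kernel address limit\n", __func__);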
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 052e0f05a984..5430febd34be 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -109,12 +109,31 @@
#endif
/*
- * Align to a 32 byte boundary equal to the
- * alignment gcc 4.5 uses for a struct
+ * GCC 4.5 and later use a 32-byte section alignment for structures,
+ * except GCC 4.9, which feels the need to align on 64 bytes.
*/
+#if __GNUC__ == 4 && __GNUC_MINOR__ == 9
+#define STRUCT_ALIGNMENT 64
+#else
#define STRUCT_ALIGNMENT 32
+#endif
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
+/*
+ * The order of the sched class addresses is important, as they are
+ * used to determine the order of the priority of each sched class in
+ * relation to each other.
+ */
+#define SCHED_DATA \
+ STRUCT_ALIGN(); \
+ __begin_sched_classes = .; \
+ *(__idle_sched_class) \
+ *(__fair_sched_class) \
+ *(__rt_sched_class) \
+ *(__dl_sched_class) \
+ *(__stop_sched_class) \
+ __end_sched_classes = .;
+
/* The actual configuration determines if the init/exit sections
* are handled as text/data or they can be discarded (which
* often happens at runtime)
@@ -320,9 +339,9 @@
*(__tracepoints) \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
- __start___verbose = .; \
- KEEP(*(__verbose)) \
- __stop___verbose = .; \
+ __start___dyndbg = .; \
+ KEEP(*(__dyndbg)) \
+ __stop___dyndbg = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
@@ -375,6 +394,7 @@
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
+ . = ALIGN(8); \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
JUMP_TABLE_DATA \
@@ -389,6 +409,7 @@
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
*(.rodata) *(.rodata.*) \
+ SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
__start___tracepoints_ptrs = .; \
@@ -642,6 +663,10 @@
__start_BTF = .; \
*(.BTF) \
__stop_BTF = .; \
+ } \
+ . = ALIGN(4); \
+ .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
+ *(.BTF_ids) \
}
#else
#define BTF
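SCHED_DATA lays the sched class data out in priority order between __begin_sched_classes and __end_sched_classes, so the scheduler core can compare classes by address. A heavily hedged sketch of how that ordering might be consumed (names and macros here are assumptions, not part of this patch, and require struct sched_class to be complete):

extern struct sched_class __begin_sched_classes[];
extern struct sched_class __end_sched_classes[];

/* Highest-priority class sits just below __end_sched_classes. */
#define example_sched_class_highest	(__end_sched_classes - 1)

#define example_for_each_class(class) \
	for ((class) = example_sched_class_highest; \
	     (class) >= __begin_sched_classes; (class)--)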
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
index 531ca87fcd08..4c61dade8835 100644
--- a/include/clocksource/timer-ti-dm.h
+++ b/include/clocksource/timer-ti-dm.h
@@ -1,7 +1,7 @@
/*
* OMAP Dual-Mode Timers
*
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
* Tarun Kanti DebBarma <tarun.kanti@ti.com>
* Thara Gopinath <thara@ti.com>
*
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 2b4d2b06ccbd..fcde59c65a81 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -106,6 +106,24 @@ struct acomp_alg {
*/
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
u32 mask);
+/**
+ * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * compression algorithm e.g. "deflate"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ * @node: specifies the NUMA node the ZIP hardware belongs to
+ *
+ * Allocate a handle for a compression algorithm. Drivers should try to use
+ * (de)compressors on the specified NUMA node.
+ * The returned struct crypto_acomp is the handle that is required for any
+ * subsequent API invocation for the compression operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
+ u32 mask, int node);
static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
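A hedged usage sketch of the NUMA-aware allocator added above ("dev" stands in for whatever device the caller is driving):

	struct crypto_acomp *acomp;

	acomp = crypto_alloc_acomp_node("deflate", 0, 0, dev_to_node(dev));
	if (IS_ERR(acomp))
		return PTR_ERR(acomp);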
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 62c68550aab6..c32a6f5664e9 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -425,7 +425,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
*/
static inline void aead_request_free(struct aead_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
/**
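This and the following hunks rename kzfree() to kfree_sensitive(); the behaviour (zero the buffer, then free it) is unchanged. A minimal illustrative pattern:

	u8 *tmp_key = kmemdup(key, keylen, GFP_KERNEL);

	if (!tmp_key)
		return -ENOMEM;
	/* ... use tmp_key ... */
	kfree_sensitive(tmp_key);	/* wipe the key material before freeing */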
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 6924b091adec..1d3aa252caba 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -207,7 +207,7 @@ static inline struct akcipher_request *akcipher_request_alloc(
*/
static inline void akcipher_request_free(struct akcipher_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
/**
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 00a9cf98debe..143d884d65c7 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -116,7 +116,7 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
-int crypto_check_attr_type(struct rtattr **tb, u32 type);
+int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
@@ -235,18 +235,29 @@ static inline struct crypto_async_request *crypto_get_backlog(
container_of(queue->backlog, struct crypto_async_request, list);
}
-static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
+static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
- return (type ^ off) & mask & off;
+ return (algt->type ^ off) & algt->mask & off;
}
/*
- * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
- * Otherwise returns zero.
+ * When an algorithm uses another algorithm (e.g., if it's an instance of a
+ * template), these are the flags that should always be set on the "outer"
+ * algorithm if any "inner" algorithm has them set.
*/
-static inline int crypto_requires_sync(u32 type, u32 mask)
+#define CRYPTO_ALG_INHERITED_FLAGS \
+ (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | \
+ CRYPTO_ALG_ALLOCATES_MEMORY)
+
+/*
+ * Given the type and mask that specify the flags restrictions on a template
+ * instance being created, return the mask that should be passed to
+ * crypto_grab_*() (along with type=0) to honor any request the user made to
+ * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
+ */
+static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
- return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
+ return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
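A hedged sketch of a template ->create() implementation using the reworked attribute handling: crypto_check_attr_type() now also returns the mask to pass to crypto_grab_*() (with type 0) so that user requests to clear CRYPTO_ALG_INHERITED_FLAGS are honoured. Spawn/instance setup is elided and the identifiers follow common template code but are assumptions here:

	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);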
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 2676f4fbd4c1..3a1c72fdb7cf 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -25,11 +25,7 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
-#ifdef CONFIG_X86_64
-#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32))
-#else
#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
-#endif
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h
index 234ee28078ef..d2ac3ff7dc1e 100644
--- a/include/crypto/chacha20poly1305.h
+++ b/include/crypto/chacha20poly1305.h
@@ -45,4 +45,6 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+bool chacha20poly1305_selftest(void);
+
#endif /* __CHACHA20POLY1305_H */
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index fa0a63d298dc..81330c6446f6 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -230,7 +230,7 @@ void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
void gf128mul_x8_ble(le128 *r, const le128 *x);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
- kzfree(t);
+ kfree_sensitive(t);
}
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 4829d2367eda..0d1b403888c9 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -606,7 +606,7 @@ static inline struct ahash_request *ahash_request_alloc(
*/
static inline void ahash_request_free(struct ahash_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
static inline void ahash_request_zero(struct ahash_request *req)
@@ -687,7 +687,7 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
* The message digest API is able to maintain state information for the
* caller.
*
- * The synchronous message digest API can store user-related context in in its
+ * The synchronous message digest API can store user-related context in its
* shash_desc request data structure.
*/
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 088c1ded2714..ee6412314f8f 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -135,6 +135,7 @@ struct af_alg_async_req {
* SG?
* @enc: Cryptographic operation to be performed when
* recvmsg is invoked.
+ * @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
*/
struct af_alg_ctx {
@@ -151,6 +152,7 @@ struct af_alg_ctx {
bool more;
bool merge;
bool enc;
+ bool init;
unsigned int len;
};
@@ -226,7 +228,7 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset);
void af_alg_wmem_wakeup(struct sock *sk);
-int af_alg_wait_for_data(struct sock *sk, unsigned flags);
+int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index cf478681b53e..cfc47e18820f 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -46,7 +46,7 @@ static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
static inline void __acomp_request_free(struct acomp_req *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
/**
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 229d37681a9d..7fd7126f593a 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -20,7 +20,7 @@ struct aead_geniv_ctx {
};
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type, u32 mask);
+ struct rtattr **tb);
int aead_init_geniv(struct crypto_aead *tfm);
void aead_exit_geniv(struct crypto_aead *tfm);
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index cd9a9b500624..88b591215d5c 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -187,7 +187,7 @@ static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm,
*/
static inline void kpp_request_free(struct kpp_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
/**
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 0588ef3bc6ff..11f535cfb810 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Asymmetric public-key algorithm definitions
*
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 10753ff71d46..4ff3da816630 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -147,6 +147,7 @@ static inline void sha256_init(struct sha256_state *sctx)
}
void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
void sha256_final(struct sha256_state *sctx, u8 *out);
+void sha256(const u8 *data, unsigned int len, u8 *out);
static inline void sha224_init(struct sha256_state *sctx)
{
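The new sha256() library helper computes a digest in one call, with no explicit init/update/final sequence:

	u8 digest[SHA256_DIGEST_SIZE];

	sha256(data, len, digest);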
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 141e7690f9c3..6a733b171a5d 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -18,7 +18,7 @@
* @iv: Initialisation Vector
* @src: Source SG list
* @dst: Destination SG list
- * @base: Underlying async request request
+ * @base: Underlying async request
* @__ctx: Start of private context data
*/
struct skcipher_request {
@@ -508,7 +508,7 @@ static inline struct skcipher_request *skcipher_request_alloc(
*/
static inline void skcipher_request_free(struct skcipher_request *req)
{
- kzfree(req);
+ kfree_sensitive(req);
}
static inline void skcipher_request_zero(struct skcipher_request *req)
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index b1230e33d506..8712e14991ed 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -54,6 +54,8 @@ enum amd_asic_type {
CHIP_NAVI10, /* 25 */
CHIP_NAVI14, /* 26 */
CHIP_NAVI12, /* 27 */
+ CHIP_SIENNA_CICHLID, /* 28 */
+ CHIP_NAVY_FLOUNDER, /* 29 */
CHIP_LAST,
};
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 0b34a12c4a1c..ea34ca146b82 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -8,7 +8,7 @@
#include <sound/hdmi-codec.h>
-struct drm_connector;
+struct drm_display_info;
struct drm_display_mode;
struct drm_encoder;
struct dw_hdmi;
@@ -114,7 +114,8 @@ struct dw_hdmi_phy_config {
struct dw_hdmi_phy_ops {
int (*init)(struct dw_hdmi *hdmi, void *data,
- struct drm_display_mode *mode);
+ const struct drm_display_info *display,
+ const struct drm_display_mode *mode);
void (*disable)(struct dw_hdmi *hdmi, void *data);
enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data);
void (*update_hpd)(struct dw_hdmi *hdmi, void *data,
@@ -124,13 +125,22 @@ struct dw_hdmi_phy_ops {
struct dw_hdmi_plat_data {
struct regmap *regm;
- enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
- const struct drm_display_mode *mode);
- unsigned long input_bus_format;
+
unsigned long input_bus_encoding;
bool use_drm_infoframe;
bool ycbcr_420_allowed;
+ /*
+ * Private data passed to all the .mode_valid() and .configure_phy()
+ * callback functions.
+ */
+ void *priv_data;
+
+ /* Platform-specific mode validation (optional). */
+ enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode);
+
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
const char *phy_name;
@@ -141,8 +151,7 @@ struct dw_hdmi_plat_data {
const struct dw_hdmi_mpll_config *mpll_cfg;
const struct dw_hdmi_curr_ctrl *cur_ctr;
const struct dw_hdmi_phy_config *phy_config;
- int (*configure_phy)(struct dw_hdmi *hdmi,
- const struct dw_hdmi_plat_data *pdata,
+ int (*configure_phy)(struct dw_hdmi *hdmi, void *data,
unsigned long mpixelclock);
};
@@ -166,7 +175,8 @@ void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status);
void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca);
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
-void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi);
+void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi,
+ const struct drm_display_info *display);
/* PHY configuration */
void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address);
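A hedged sketch of platform glue adapting to the reworked dw_hdmi_plat_data: mode_valid() now receives the dw_hdmi handle, the priv_data pointer and the display info instead of a connector (example names are hypothetical; the 340 MHz limit is an assumption):

static enum drm_mode_status
example_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
			const struct drm_display_info *info,
			const struct drm_display_mode *mode)
{
	/* "data" is whatever the glue driver stored in plat_data->priv_data. */
	if (mode->clock > 340000)
		return MODE_CLOCK_HIGH;
	return MODE_OK;
}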
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 7b6cb4774e7d..d07c851d255b 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -103,7 +103,7 @@ struct drm_crtc_commit {
*
* Will be signalled when all hw register changes for this commit have
* been written out. Especially when disabling a pipe this can be much
- * later than than @flip_done, since that can signal already when the
+ * later than @flip_done, since that can signal already when the
* screen goes black, whereas to fully shut down a pipe more register
* I/O is required.
*
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index ea2aa5ebae34..2195daa289d2 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -35,6 +35,7 @@
struct drm_bridge;
struct drm_bridge_timings;
struct drm_connector;
+struct drm_display_info;
struct drm_panel;
struct edid;
struct i2c_adapter;
@@ -112,6 +113,7 @@ struct drm_bridge_funcs {
* drm_mode_status Enum
*/
enum drm_mode_status (*mode_valid)(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
const struct drm_display_mode *mode);
/**
@@ -473,7 +475,7 @@ struct drm_bridge_funcs {
* one of them should be provided.
*
* If drivers need to tweak &drm_bridge_state.input_bus_cfg.flags or
- * &drm_bridge_state.output_bus_cfg.flags it should should happen in
+ * &drm_bridge_state.output_bus_cfg.flags it should happen in
* this function. By default the &drm_bridge_state.output_bus_cfg.flags
* field is set to the next bridge
* &drm_bridge_state.input_bus_cfg.flags value or
@@ -836,6 +838,7 @@ bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
struct drm_display_mode *adjusted_mode);
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
const struct drm_display_mode *mode);
void drm_bridge_chain_disable(struct drm_bridge *bridge);
void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index eb259c2547af..7aaea665bfc2 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -154,6 +154,7 @@ struct drm_client_buffer {
struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
@@ -161,6 +162,7 @@ int drm_client_modeset_create(struct drm_client_dev *client);
void drm_client_modeset_free(struct drm_client_dev *client);
int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height);
bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation);
+int drm_client_modeset_check(struct drm_client_dev *client);
int drm_client_modeset_commit_locked(struct drm_client_dev *client);
int drm_client_modeset_commit(struct drm_client_dev *client);
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index fd543d1db9b2..af145608b5ed 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -320,51 +320,97 @@ struct drm_monitor_range_info {
* opposite edge of the driving edge. Transmitters and receivers may however
* need to take other signal timings into account to convert between driving
* and sample edges.
- *
- * @DRM_BUS_FLAG_DE_LOW: The Data Enable signal is active low
- * @DRM_BUS_FLAG_DE_HIGH: The Data Enable signal is active high
- * @DRM_BUS_FLAG_PIXDATA_POSEDGE: Legacy value, do not use
- * @DRM_BUS_FLAG_PIXDATA_NEGEDGE: Legacy value, do not use
- * @DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE: Data is driven on the rising edge of
- * the pixel clock
- * @DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE: Data is driven on the falling edge of
- * the pixel clock
- * @DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE: Data is sampled on the rising edge of
- * the pixel clock
- * @DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE: Data is sampled on the falling edge of
- * the pixel clock
- * @DRM_BUS_FLAG_DATA_MSB_TO_LSB: Data is transmitted MSB to LSB on the bus
- * @DRM_BUS_FLAG_DATA_LSB_TO_MSB: Data is transmitted LSB to MSB on the bus
- * @DRM_BUS_FLAG_SYNC_POSEDGE: Legacy value, do not use
- * @DRM_BUS_FLAG_SYNC_NEGEDGE: Legacy value, do not use
- * @DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE: Sync signals are driven on the rising
- * edge of the pixel clock
- * @DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE: Sync signals are driven on the falling
- * edge of the pixel clock
- * @DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE: Sync signals are sampled on the rising
- * edge of the pixel clock
- * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE: Sync signals are sampled on the falling
- * edge of the pixel clock
- * @DRM_BUS_FLAG_SHARP_SIGNALS: Set if the Sharp-specific signals
- * (SPL, CLS, PS, REV) must be used
*/
enum drm_bus_flags {
+ /**
+ * @DRM_BUS_FLAG_DE_LOW:
+ *
+ * The Data Enable signal is active low
+ */
DRM_BUS_FLAG_DE_LOW = BIT(0),
+
+ /**
+ * @DRM_BUS_FLAG_DE_HIGH:
+ *
+ * The Data Enable signal is active high
+ */
DRM_BUS_FLAG_DE_HIGH = BIT(1),
- DRM_BUS_FLAG_PIXDATA_POSEDGE = BIT(2),
- DRM_BUS_FLAG_PIXDATA_NEGEDGE = BIT(3),
- DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE = DRM_BUS_FLAG_PIXDATA_POSEDGE,
- DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
- DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
- DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE:
+ *
+ * Data is driven on the rising edge of the pixel clock
+ */
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE = BIT(2),
+
+ /**
+ * @DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE:
+ *
+ * Data is driven on the falling edge of the pixel clock
+ */
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE = BIT(3),
+
+ /**
+ * @DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE:
+ *
+ * Data is sampled on the rising edge of the pixel clock
+ */
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE:
+ *
+ * Data is sampled on the falling edge of the pixel clock
+ */
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_DATA_MSB_TO_LSB:
+ *
+ * Data is transmitted MSB to LSB on the bus
+ */
DRM_BUS_FLAG_DATA_MSB_TO_LSB = BIT(4),
+
+ /**
+ * @DRM_BUS_FLAG_DATA_LSB_TO_MSB:
+ *
+ * Data is transmitted LSB to MSB on the bus
+ */
DRM_BUS_FLAG_DATA_LSB_TO_MSB = BIT(5),
- DRM_BUS_FLAG_SYNC_POSEDGE = BIT(6),
- DRM_BUS_FLAG_SYNC_NEGEDGE = BIT(7),
- DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE = DRM_BUS_FLAG_SYNC_POSEDGE,
- DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE,
- DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE,
- DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_POSEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE:
+ *
+ * Sync signals are driven on the rising edge of the pixel clock
+ */
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE = BIT(6),
+
+ /**
+ * @DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE:
+ *
+ * Sync signals are driven on the falling edge of the pixel clock
+ */
+ DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = BIT(7),
+
+ /**
+ * @DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE:
+ *
+ * Sync signals are sampled on the rising edge of the pixel clock
+ */
+ DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE:
+ *
+ * Sync signals are sampled on the falling edge of the pixel clock
+ */
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+
+ /**
+ * @DRM_BUS_FLAG_SHARP_SIGNALS:
+ *
+ * Set if the Sharp-specific signals (SPL, CLS, PS, REV) must be used
+ */
DRM_BUS_FLAG_SHARP_SIGNALS = BIT(8),
};
@@ -1329,6 +1375,8 @@ struct drm_connector {
enum drm_connector_force force;
/** @override_edid: has the EDID been overwritten through debugfs for testing? */
bool override_edid;
+ /** @epoch_counter: used to detect any other changes in the connector, besides its status */
+ u64 epoch_counter;
/**
* @possible_encoders: Bit mask of encoders that can drive this
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index a55874db9dd4..0988351d743c 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -146,6 +146,9 @@ struct drm_device {
* @struct_mutex:
*
* Lock for others (not &drm_minor.master and &drm_file.is_master)
+ *
+ * WARNING:
+ * Only drivers annotated with DRIVER_LEGACY should be using this.
*/
struct mutex struct_mutex;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2035ac44afde..e47dc22ebf50 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1457,6 +1457,14 @@ drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
DP_ALTERNATE_SCRAMBLER_RESET_CAP;
}
+/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */
+static inline bool
+drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWN_STREAM_PORT_COUNT] &
+ DP_MSA_TIMING_PAR_IGNORED;
+}
+
/*
* DisplayPort AUX channel
*/
@@ -1701,7 +1709,7 @@ enum drm_dp_quirk {
/**
* drm_dp_has_quirk() - does the DP device have a specific quirk
- * @desc: Device decriptor filled by drm_dp_read_desc()
+ * @desc: Device descriptor filled by drm_dp_read_desc()
* @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks()
* @quirk: Quirk to query for
*
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 9e1ffcd7cb68..8b9eb4db3381 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -475,6 +475,15 @@ struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
+ /*
+ * Checks for any pending MST interrupts, passing them to MST core for
+ * processing, the same way an HPD IRQ pulse handler would do this.
+ * If provided MST core calls this callback from a poll-waiting loop
+ * when waiting for MST down message replies. The driver is expected
+ * to guard against a race between this callback and the driver's HPD
+ * IRQ pulse handler.
+ */
+ void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
@@ -672,6 +681,14 @@ struct drm_dp_mst_topology_mgr {
* @destroy_branch_device_list.
*/
struct mutex delayed_destroy_lock;
+
+ /**
+ * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
+ * A dedicated WQ makes it possible to drain any requeued work items
+ * on it.
+ */
+ struct workqueue_struct *delayed_destroy_wq;
+
/**
* @delayed_destroy_work: Work item to destroy MST port and branch
* devices, needed to avoid locking inversion.
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 6d457652f199..7116abc1a04e 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -311,8 +311,8 @@ struct drm_driver {
*
* Called whenever the minor master is set. Only used by vmwgfx.
*/
- int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
- bool from_open);
+ void (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
/**
* @master_drop:
*
@@ -328,20 +328,10 @@ struct drm_driver {
void (*debugfs_init)(struct drm_minor *minor);
/**
- * @gem_free_object: deconstructor for drm_gem_objects
- *
- * This is deprecated and should not be used by new drivers. Use
- * &drm_gem_object_funcs.free instead.
- */
- void (*gem_free_object) (struct drm_gem_object *obj);
-
- /**
* @gem_free_object_unlocked: deconstructor for drm_gem_objects
*
* This is deprecated and should not be used by new drivers. Use
* &drm_gem_object_funcs.free instead.
- * Compared to @gem_free_object this is not encumbered with
- * &drm_device.struct_mutex legacy locking schemes.
*/
void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
@@ -364,23 +354,6 @@ struct drm_driver {
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
/**
- * @gem_print_info:
- *
- * This callback is deprecated in favour of
- * &drm_gem_object_funcs.print_info.
- *
- * If driver subclasses struct &drm_gem_object, it can implement this
- * optional hook for printing additional driver specific info.
- *
- * drm_printf_indent() should be used in the callback passing it the
- * indent argument.
- *
- * This callback is called from drm_gem_print_info().
- */
- void (*gem_print_info)(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj);
-
- /**
* @gem_create_object: constructor for gem objects
*
* Hook for allocating the GEM object struct, for use by the CMA and
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 34b15e3d070c..cfa4f5af49af 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -359,13 +359,22 @@ drm_load_edid_firmware(struct drm_connector *connector)
}
#endif
+/**
+ * drm_edid_are_equal - compare two edid blobs.
+ * @edid1: pointer to first blob
+ * @edid2: pointer to second blob
+ * This helper can be used during probing to determine whether
+ * the EDID has changed.
+ */
+bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2);
+
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
const struct drm_display_mode *mode);
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
const struct drm_display_mode *mode);
void
@@ -378,7 +387,7 @@ drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
- struct drm_connector *connector,
+ const struct drm_connector *connector,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range);
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index ac220aa1a245..5f9e37032468 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -14,8 +14,8 @@ void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
struct drm_framebuffer *fb,
struct drm_rect *clip);
-void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
+void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool cached);
void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
struct drm_framebuffer *fb,
struct drm_rect *clip, bool swab);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 0b375069cd48..337a48321705 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -143,7 +143,7 @@ struct drm_gem_object_funcs {
/**
* @vunmap:
*
- * Releases the the address previously returned by @vmap. Used by the
+ * Releases the address previously returned by @vmap. Used by the
* drm_gem_dmabuf_vunmap() helper.
*
* This callback is optional.
@@ -157,7 +157,7 @@ struct drm_gem_object_funcs {
*
* This callback is optional.
*
- * The callback is used by by both drm_gem_mmap_obj() and
+ * The callback is used by both drm_gem_mmap_obj() and
* drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
* used, the @mmap callback must set vma->vm_ops instead.
*/
@@ -187,8 +187,8 @@ struct drm_gem_object {
*
* Reference count of this object
*
- * Please use drm_gem_object_get() to acquire and drm_gem_object_put()
- * or drm_gem_object_put_unlocked() to release a reference to a GEM
+ * Please use drm_gem_object_get() to acquire and drm_gem_object_put_locked()
+ * or drm_gem_object_put() to release a reference to a GEM
* buffer object.
*/
struct kref refcount;
@@ -272,8 +272,9 @@ struct drm_gem_object {
* attachment point for the device. This is invariant over the lifetime
* of a gem object.
*
- * The &drm_driver.gem_free_object callback is responsible for cleaning
- * up the dma_buf attachment and references acquired at import time.
+ * The &drm_driver.gem_free_object_unlocked callback is responsible for
+ * cleaning up the dma_buf attachment and references acquired at import
+ * time.
*
* Note that the drm gem/prime core does not depend upon drivers setting
* this field any more. So for drivers where this doesn't make sense
@@ -362,29 +363,27 @@ static inline void drm_gem_object_get(struct drm_gem_object *obj)
kref_get(&obj->refcount);
}
+__attribute__((nonnull))
+static inline void
+__drm_gem_object_put(struct drm_gem_object *obj)
+{
+ kref_put(&obj->refcount, drm_gem_object_free);
+}
+
/**
- * __drm_gem_object_put - raw function to release a GEM buffer object reference
+ * drm_gem_object_put - drop a GEM buffer object reference
* @obj: GEM buffer object
*
- * This function is meant to be used by drivers which are not encumbered with
- * &drm_device.struct_mutex legacy locking and which are using the
- * gem_free_object_unlocked callback. It avoids all the locking checks and
- * locking overhead of drm_gem_object_put() and drm_gem_object_put_unlocked().
- *
- * Drivers should never call this directly in their code. Instead they should
- * wrap it up into a ``driver_gem_object_put(struct driver_gem_object *obj)``
- * wrapper function, and use that. Shared code should never call this, to
- * avoid breaking drivers by accident which still depend upon
- * &drm_device.struct_mutex locking.
+ * This releases a reference to @obj.
*/
static inline void
-__drm_gem_object_put(struct drm_gem_object *obj)
+drm_gem_object_put(struct drm_gem_object *obj)
{
- kref_put(&obj->refcount, drm_gem_object_free);
+ if (obj)
+ __drm_gem_object_put(obj);
}
-void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
-void drm_gem_object_put(struct drm_gem_object *obj);
+void drm_gem_object_put_locked(struct drm_gem_object *obj);
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
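After this change drm_gem_object_put() is the plain reference drop (now NULL-tolerant), and the variant that needs &drm_device.struct_mutex is renamed to drm_gem_object_put_locked(). Typical call site after the rename:

	/* Drop the reference taken by drm_gem_object_lookup(); no locking needed. */
	drm_gem_object_put(obj);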
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 947ac95eb24a..2bfa2502607a 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -107,24 +107,86 @@ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
struct drm_gem_object *
-drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size);
+drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size);
/**
- * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual
- * address on the buffer
+ * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
+ * @dumb_create_func: callback function for .dumb_create
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use
+ * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
+ */
+#define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
+ .gem_create_object = drm_gem_cma_create_object_default_funcs, \
+ .dumb_create = (dumb_create_func), \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \
+ .gem_prime_mmap = drm_gem_cma_prime_mmap
+
+/**
+ * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use DRM_GEM_CMA_DRIVER_OPS_VMAP instead.
+ */
+#define DRM_GEM_CMA_DRIVER_OPS \
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
+
+/**
+ * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations
+ * ensuring a virtual address
+ * on the buffer
+ * @dumb_create_func: callback function for .dumb_create
*
* This macro provides a shortcut for setting the default GEM operations in the
* &drm_driver structure for drivers that need the virtual address also on
* imported buffers.
+ *
+ * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS_VMAP for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use
+ * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
*/
-#define DRM_GEM_CMA_VMAP_DRIVER_OPS \
- .gem_create_object = drm_cma_gem_create_object_default_funcs, \
- .dumb_create = drm_gem_cma_dumb_create, \
+#define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
+ .gem_create_object = drm_gem_cma_create_object_default_funcs, \
+ .dumb_create = dumb_create_func, \
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
.gem_prime_mmap = drm_gem_prime_mmap
+/**
+ * DRM_GEM_CMA_DRIVER_OPS_VMAP - CMA GEM driver operations ensuring a virtual
+ * address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use DRM_GEM_CMA_DRIVER_OPS
+ * instead.
+ */
+#define DRM_GEM_CMA_DRIVER_OPS_VMAP \
+ DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
+
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
struct dma_buf_attachment *attach,
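A hedged sketch of a driver switching from the removed DRM_GEM_CMA_VMAP_DRIVER_OPS to the renamed macros (fields trimmed, all example identifiers hypothetical):

static struct drm_driver example_cma_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &example_fops,
	DRM_GEM_CMA_DRIVER_OPS_VMAP,	/* was DRM_GEM_CMA_VMAP_DRIVER_OPS */
	.name			= "example",
	.desc			= "example CMA-backed driver",
	.date			= "20200101",
	.major			= 1,
};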
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 294b2931c4cc..5381f0c8cf6f 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -132,6 +132,10 @@ struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
struct drm_device *dev, size_t size,
uint32_t *handle);
+
+struct drm_gem_object *
+drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size);
+
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index b63bcd1b996d..035332f3723f 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -206,6 +206,9 @@ struct drm_vram_mm *drm_vram_helper_alloc_mm(
struct drm_device *dev, uint64_t vram_base, size_t vram_size);
void drm_vram_helper_release_mm(struct drm_device *dev);
+int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
+ size_t vram_size);
+
/*
* Mode-config helpers
*/
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 4d0e49c0ed2c..c2827ceaba0d 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -95,11 +95,6 @@ struct mipi_dbi_dev {
struct drm_display_mode mode;
/**
- * @enabled: Pipeline is enabled
- */
- bool enabled;
-
- /**
* @tx_buf: Buffer used for transfer (copy clip rect area)
*/
u16 *tx_buf;
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index e73dea5c7333..c2d3d71d133c 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -603,22 +603,22 @@ struct drm_mode_config {
struct drm_property *prop_src_h;
/**
* @prop_crtc_x: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
+ * position in the &drm_crtc it is being shown on.
*/
struct drm_property *prop_crtc_x;
/**
* @prop_crtc_y: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
+ * position in the &drm_crtc it is being shown on.
*/
struct drm_property *prop_crtc_y;
/**
* @prop_crtc_w: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
+ * position in the &drm_crtc it is being shown on.
*/
struct drm_property *prop_crtc_w;
/**
* @prop_crtc_h: Default atomic plane property for the plane destination
- * position in the &drm_crtc is is being shown on.
+ * position in the &drm_crtc it is being shown on.
*/
struct drm_property *prop_crtc_h;
/**
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 303ee5fbbdd8..eee3c9de6c4f 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -223,71 +223,21 @@ enum drm_mode_status {
*/
struct drm_display_mode {
/**
- * @head:
- *
- * struct list_head for mode lists.
- */
- struct list_head head;
-
- /**
- * @name:
- *
- * Human-readable name of the mode, filled out with drm_mode_set_name().
- */
- char name[DRM_DISPLAY_MODE_LEN];
-
- /**
- * @status:
- *
- * Status of the mode, used to filter out modes not supported by the
- * hardware. See enum &drm_mode_status.
- */
- enum drm_mode_status status;
-
- /**
- * @type:
- *
- * A bitmask of flags, mostly about the source of a mode. Possible flags
- * are:
- *
- * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native
- * resolution of an LCD panel. There should only be one preferred
- * mode per connector at any given time.
- * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of
- * them really. Drivers must set this bit for all modes they create
- * and expose to userspace.
- * - DRM_MODE_TYPE_USERDEF: Mode defined via kernel command line
- *
- * Plus a big list of flags which shouldn't be used at all, but are
- * still around since these flags are also used in the userspace ABI.
- * We no longer accept modes with these types though:
- *
- * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused.
- * Use DRM_MODE_TYPE_DRIVER instead.
- * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use
- * DRM_MODE_TYPE_PREFERRED instead.
- * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers
- * which are stuck around for hysterical raisins only. No one has an
- * idea what they were meant for. Don't use.
- */
- unsigned int type;
-
- /**
* @clock:
*
* Pixel clock in kHz.
*/
int clock; /* in kHz */
- int hdisplay;
- int hsync_start;
- int hsync_end;
- int htotal;
- int hskew;
- int vdisplay;
- int vsync_start;
- int vsync_end;
- int vtotal;
- int vscan;
+ u16 hdisplay;
+ u16 hsync_start;
+ u16 hsync_end;
+ u16 htotal;
+ u16 hskew;
+ u16 vdisplay;
+ u16 vsync_start;
+ u16 vsync_end;
+ u16 vtotal;
+ u16 vscan;
/**
* @flags:
*
@@ -322,7 +272,37 @@ struct drm_display_mode {
* - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and
* right parts.
*/
- unsigned int flags;
+ u32 flags;
+
+ /**
+ * @crtc_clock:
+ *
+ * Actual pixel or dot clock in the hardware. This differs from the
+ * logical @clock when e.g. using interlacing, double-clocking, stereo
+ * modes or other fancy stuff that changes the timings and signals
+ * actually sent over the wire.
+ *
+ * This is again in kHz.
+ *
+ * Note that with digital outputs like HDMI or DP there's usually a
+ * massive confusion between the dot clock and the signal clock at the
+ * bit encoding level. Especially when a 8b/10b encoding is used and the
+ * difference is exactly a factor of 10.
+ */
+ int crtc_clock;
+ u16 crtc_hdisplay;
+ u16 crtc_hblank_start;
+ u16 crtc_hblank_end;
+ u16 crtc_hsync_start;
+ u16 crtc_hsync_end;
+ u16 crtc_htotal;
+ u16 crtc_hskew;
+ u16 crtc_vdisplay;
+ u16 crtc_vblank_start;
+ u16 crtc_vblank_end;
+ u16 crtc_vsync_start;
+ u16 crtc_vsync_end;
+ u16 crtc_vtotal;
/**
* @width_mm:
@@ -330,7 +310,7 @@ struct drm_display_mode {
* Addressable size of the output in mm, projectors should set this to
* 0.
*/
- int width_mm;
+ u16 width_mm;
/**
* @height_mm:
@@ -338,37 +318,36 @@ struct drm_display_mode {
* Addressable size of the output in mm, projectors should set this to
* 0.
*/
- int height_mm;
+ u16 height_mm;
/**
- * @crtc_clock:
+ * @type:
*
- * Actual pixel or dot clock in the hardware. This differs from the
- * logical @clock when e.g. using interlacing, double-clocking, stereo
- * modes or other fancy stuff that changes the timings and signals
- * actually sent over the wire.
+ * A bitmask of flags, mostly about the source of a mode. Possible flags
+ * are:
*
- * This is again in kHz.
+ * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native
+ * resolution of an LCD panel. There should only be one preferred
+ * mode per connector at any given time.
+ * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of
+ * them really. Drivers must set this bit for all modes they create
+ * and expose to userspace.
+ * - DRM_MODE_TYPE_USERDEF: Mode defined or selected via the kernel
+ * command line.
*
- * Note that with digital outputs like HDMI or DP there's usually a
- * massive confusion between the dot clock and the signal clock at the
- * bit encoding level. Especially when a 8b/10b encoding is used and the
- * difference is exactly a factor of 10.
+ * Plus a big list of flags which shouldn't be used at all, but are
+ * still around since these flags are also used in the userspace ABI.
+ * We no longer accept modes with these types though:
+ *
+ * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, unused.
+ * Use DRM_MODE_TYPE_DRIVER instead.
+ * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use
+ * DRM_MODE_TYPE_PREFERRED instead.
+ * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers
+ * which are stuck around for hysterical raisins only. No one has an
+ * idea what they were meant for. Don't use.
*/
- int crtc_clock;
- int crtc_hdisplay;
- int crtc_hblank_start;
- int crtc_hblank_end;
- int crtc_hsync_start;
- int crtc_hsync_end;
- int crtc_htotal;
- int crtc_hskew;
- int crtc_vdisplay;
- int crtc_vblank_start;
- int crtc_vblank_end;
- int crtc_vsync_start;
- int crtc_vsync_end;
- int crtc_vtotal;
+ u8 type;
/**
* @private_flags:
@@ -381,21 +360,11 @@ struct drm_display_mode {
int private_flags;
/**
- * @vrefresh:
- *
- * Vertical refresh rate, for debug output in human readable form. Not
- * used in a functional way.
- *
- * This value is in Hz.
- */
- int vrefresh;
-
- /**
- * @picture_aspect_ratio:
+ * @head:
*
- * Field for setting the HDMI picture aspect ratio of a mode.
+ * struct list_head for mode lists.
*/
- enum hdmi_picture_aspect picture_aspect_ratio;
+ struct list_head head;
/**
* @export_head:
@@ -409,6 +378,29 @@ struct drm_display_mode {
* avoid overhead of protecting it by mode_config.mutex.
*/
struct list_head export_head;
+
+ /**
+ * @name:
+ *
+ * Human-readable name of the mode, filled out with drm_mode_set_name().
+ */
+ char name[DRM_DISPLAY_MODE_LEN];
+
+ /**
+ * @status:
+ *
+ * Status of the mode, used to filter out modes not supported by the
+ * hardware. See enum &drm_mode_status.
+ */
+ enum drm_mode_status status;
+
+ /**
+ * @picture_aspect_ratio:
+ *
+ * Field for setting the HDMI picture aspect ratio of a mode.
+ */
+ enum hdmi_picture_aspect picture_aspect_ratio;
+
};
/**
@@ -421,7 +413,7 @@ struct drm_display_mode {
* @m: display mode
*/
#define DRM_MODE_ARG(m) \
- (m)->name, (m)->vrefresh, (m)->clock, \
+ (m)->name, drm_mode_vrefresh(m), (m)->clock, \
(m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \
(m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \
(m)->type, (m)->flags
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 421a30f08463..4efec30f8bad 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -968,6 +968,48 @@ struct drm_connector_helper_funcs {
*/
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
+
+ /**
+ * @mode_valid_ctx:
+ *
+ * Callback to validate a mode for a connector, irrespective of the
+ * specific display configuration.
+ *
+ * This callback is used by the probe helpers to filter the mode list
+ * (which is usually derived from the EDID data block from the sink).
+ * See e.g. drm_helper_probe_single_connector_modes().
+ *
+ * This function is optional, and is the atomic version of
+ * &drm_connector_helper_funcs.mode_valid.
+ *
+ * To allow for accessing the atomic state of modesetting objects, the
+ * helper libraries always call this with ctx set to a valid context,
+ * and &drm_mode_config.connection_mutex will always be locked with
+ * the ctx parameter set to @ctx. This allows for taking additional
+ * locks as required.
+ *
+ * Even though additional locks may be acquired, this callback is
+ * still expected not to take any constraints into account which would
+ * be influenced by the currently set display state - such constraints
+ * should be handled in the driver's atomic check. For example, if a
+ * connector shares display bandwidth with other connectors then it
+ * would be ok to validate the minimum bandwidth requirement of a mode
+ * against the maximum possible bandwidth of the connector. But it
+ * wouldn't be ok to take the current bandwidth usage of other
+ * connectors into account, as this would change depending on the
+ * display state.
+ *
+ * Returns:
+ * 0 if &drm_connector_helper_funcs.mode_valid_ctx succeeded and wrote
+ * the &enum drm_mode_status value to @status, or a negative error
+ * code otherwise.
+ *
+ */
+ int (*mode_valid_ctx)(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_mode_status *status);
+
/**
* @best_encoder:
*
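A minimal, hypothetical .mode_valid_ctx implementation following the contract documented above: the mode status is reported through *status and the return value is 0 or a negative error (for instance -EDEADLK propagated from a lock acquired via @ctx); the 300 MHz limit is an assumption:

static int example_mode_valid_ctx(struct drm_connector *connector,
				  struct drm_display_mode *mode,
				  struct drm_modeset_acquire_ctx *ctx,
				  enum drm_mode_status *status)
{
	*status = (mode->clock > 300000) ? MODE_CLOCK_HIGH : MODE_OK;
	return 0;
}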
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 57a3be9e53e4..e7f4d24cdd00 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -180,7 +180,7 @@ static inline int drm_rect_height(const struct drm_rect *r)
}
/**
- * drm_rect_visible - determine if the the rectangle is visible
+ * drm_rect_visible - determine if the rectangle is visible
* @r: rectangle whose visibility is returned
*
* RETURNS:
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index dd9f5b9e56e4..dd125f8c766c 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -27,12 +27,14 @@
#include <linux/seqlock.h>
#include <linux/idr.h>
#include <linux/poll.h>
+#include <linux/kthread.h>
#include <drm/drm_file.h>
#include <drm/drm_modes.h>
struct drm_device;
struct drm_crtc;
+struct drm_vblank_work;
/**
* struct drm_pending_vblank_event - pending vblank event tracking
@@ -203,6 +205,24 @@ struct drm_vblank_crtc {
* disabling functions multiple times.
*/
bool enabled;
+
+ /**
+ * @worker: The &kthread_worker used for executing vblank works.
+ */
+ struct kthread_worker *worker;
+
+ /**
+ * @pending_work: A list of scheduled &drm_vblank_work items that are
+ * waiting for a future vblank.
+ */
+ struct list_head pending_work;
+
+ /**
+ * @work_wait_queue: The wait queue used for signaling that a
+ * &drm_vblank_work item has either finished executing, or was
+ * cancelled.
+ */
+ wait_queue_head_t work_wait_queue;
};
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
diff --git a/include/drm/drm_vblank_work.h b/include/drm/drm_vblank_work.h
new file mode 100644
index 000000000000..eb41d0810c4f
--- /dev/null
+++ b/include/drm/drm_vblank_work.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _DRM_VBLANK_WORK_H_
+#define _DRM_VBLANK_WORK_H_
+
+#include <linux/kthread.h>
+
+struct drm_crtc;
+
+/**
+ * struct drm_vblank_work - A delayed work item which delays until a target
+ * vblank passes, and then executes at realtime priority outside of IRQ
+ * context.
+ *
+ * See also:
+ * drm_vblank_work_schedule()
+ * drm_vblank_work_init()
+ * drm_vblank_work_cancel_sync()
+ * drm_vblank_work_flush()
+ */
+struct drm_vblank_work {
+ /**
+ * @base: The base &kthread_work item which will be executed by
+ * &drm_vblank_crtc.worker. Drivers should not interact with this
+ * directly, and instead rely on drm_vblank_work_init() to initialize
+ * this.
+ */
+ struct kthread_work base;
+
+ /**
+ * @vblank: A pointer to &drm_vblank_crtc this work item belongs to.
+ */
+ struct drm_vblank_crtc *vblank;
+
+ /**
+ * @count: The target vblank this work will execute on. Drivers should
+ * not modify this value directly, and instead use
+ * drm_vblank_work_schedule()
+ */
+ u64 count;
+
+ /**
+ * @cancelling: The number of drm_vblank_work_cancel_sync() calls that
+ * are currently running. A work item cannot be rescheduled until all
+ * calls have finished.
+ */
+ int cancelling;
+
+ /**
+ * @node: The position of this work item in
+ * &drm_vblank_crtc.pending_work.
+ */
+ struct list_head node;
+};
+
+/**
+ * to_drm_vblank_work - Retrieve the respective &drm_vblank_work item from a
+ * &kthread_work
+ * @_work: The &kthread_work embedded inside a &drm_vblank_work
+ */
+#define to_drm_vblank_work(_work) \
+ container_of((_work), struct drm_vblank_work, base)
+
+int drm_vblank_work_schedule(struct drm_vblank_work *work,
+ u64 count, bool nextonmiss);
+void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
+ void (*func)(struct kthread_work *work));
+bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work);
+void drm_vblank_work_flush(struct drm_vblank_work *work);
+
+#endif /* !_DRM_VBLANK_WORK_H_ */
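A hedged usage sketch of the new vblank work API (the driver state and function names are hypothetical):

static void example_vblank_fn(struct kthread_work *base)
{
	struct drm_vblank_work *work __maybe_unused = to_drm_vblank_work(base);

	/* Runs at realtime priority, outside of IRQ context. */
}

	/* One-time setup, then arm for the next vblank on this CRTC: */
	drm_vblank_work_init(&state->vblank_work, crtc, example_vblank_fn);
	drm_vblank_work_schedule(&state->vblank_work,
				 drm_crtc_vblank_count(crtc) + 1, true);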
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index a21b3b92135a..b9780ae9dd26 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -263,7 +263,7 @@ struct drm_sched_backend_ops {
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
- * @num_jobs: the number of jobs in queue in the scheduler
+ * @score: score to help the load balancer pick an idle scheduler
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hint to the timeout handler to free the guilty job.
*
@@ -284,8 +284,8 @@ struct drm_gpu_scheduler {
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t num_jobs;
- bool ready;
+ atomic_t score;
+ bool ready;
bool free_guilty;
};
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 662d8351c87a..96e408b4bdc9 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -588,7 +588,11 @@
INTEL_VGA_DEVICE(0x4551, info), \
INTEL_VGA_DEVICE(0x4541, info), \
INTEL_VGA_DEVICE(0x4E71, info), \
+ INTEL_VGA_DEVICE(0x4557, info), \
+ INTEL_VGA_DEVICE(0x4555, info), \
INTEL_VGA_DEVICE(0x4E61, info), \
+ INTEL_VGA_DEVICE(0x4E57, info), \
+ INTEL_VGA_DEVICE(0x4E55, info), \
INTEL_VGA_DEVICE(0x4E51, info)
/* TGL */
@@ -605,4 +609,17 @@
INTEL_VGA_DEVICE(0x9AD9, info), \
INTEL_VGA_DEVICE(0x9AF8, info)
+/* RKL */
+#define INTEL_RKL_IDS(info) \
+ INTEL_VGA_DEVICE(0x4C80, info), \
+ INTEL_VGA_DEVICE(0x4C8A, info), \
+ INTEL_VGA_DEVICE(0x4C8B, info), \
+ INTEL_VGA_DEVICE(0x4C8C, info), \
+ INTEL_VGA_DEVICE(0x4C90, info), \
+ INTEL_VGA_DEVICE(0x4C9A, info)
+
+/* DG1 */
+#define INTEL_DG1_IDS(info) \
+ INTEL_VGA_DEVICE(0x4905, info)
+
#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index de1ccdcd5703..a9e13b252820 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -213,8 +213,6 @@ struct ttm_buffer_object {
* either of these locks held.
*/
- uint64_t offset; /* GPU address space is independent of CPU word size */
-
struct sg_table *sg;
};
@@ -694,7 +692,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
int ttm_bo_swapout(struct ttm_bo_global *glob,
struct ttm_operation_ctx *ctx);
-void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+void ttm_bo_swapout_all(void);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 54a527aa79cc..5a37f1cc057e 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -47,7 +47,6 @@
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
-#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
struct ttm_mem_type_manager;
@@ -155,7 +154,6 @@ struct ttm_mem_type_manager_func {
* @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
* reserved by the TTM vm system.
* @io_reserve_lru: Optional lru list for unreserving io mem regions.
- * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
* @move_lock: lock for move fence
* static information. bdev::driver::io_mem_free is never used.
* @lru: The lru list for this memory type.
@@ -177,7 +175,6 @@ struct ttm_mem_type_manager {
bool has_type;
bool use_type;
uint32_t flags;
- uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
@@ -185,7 +182,6 @@ struct ttm_mem_type_manager {
void *priv;
struct mutex io_reserve_mutex;
bool use_io_reserve_lru;
- bool io_reserve_fastpath;
spinlock_t move_lock;
/*
@@ -530,17 +526,6 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
*/
/**
- * ttm_mem_reg_is_pci
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @mem: A valid struct ttm_mem_reg.
- *
- * Returns true if the memory described by @mem is PCI memory,
- * false otherwise.
- */
-bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
-
-/**
* ttm_bo_mem_space
*
* @bo: Pointer to a struct ttm_buffer_object. the data of which
@@ -565,8 +550,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx);
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
-void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
int ttm_bo_device_release(struct ttm_bo_device *bdev);
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index c0e928abf592..5e2393fe42c6 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -70,7 +70,7 @@ struct ttm_backend_func {
* Unbind previously bound backend pages. This function should be
* able to handle differences between aperture and system page sizes.
*/
- int (*unbind) (struct ttm_tt *ttm);
+ void (*unbind) (struct ttm_tt *ttm);
/**
* struct ttm_backend_func member destroy
diff --git a/include/dt-bindings/clk/versaclock.h b/include/dt-bindings/clk/versaclock.h
new file mode 100644
index 000000000000..c6a6a0946564
--- /dev/null
+++ b/include/dt-bindings/clk/versaclock.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* This file defines field values used by the versaclock 6 family
+ * for defining output type
+ */
+
+#define VC5_LVPECL 0
+#define VC5_CMOS 1
+#define VC5_HCSL33 2
+#define VC5_LVDS 3
+#define VC5_CMOS2 4
+#define VC5_CMOSD 5
+#define VC5_HCSL25 6
diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h
index 030981cd2d56..a250a52a6192 100644
--- a/include/dt-bindings/clock/actions,s500-cmu.h
+++ b/include/dt-bindings/clock/actions,s500-cmu.h
@@ -72,7 +72,12 @@
#define CLK_NAND 52
#define CLK_ECC 53
#define CLK_RMII_REF 54
+#define CLK_GPIO 55
-#define CLK_NR_CLKS (CLK_RMII_REF + 1)
+/* system clock (part 2) */
+#define CLK_APB 56
+#define CLK_DMAC 57
+
+#define CLK_NR_CLKS (CLK_DMAC + 1)
#endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */
diff --git a/include/dt-bindings/clock/agilex-clock.h b/include/dt-bindings/clock/agilex-clock.h
index f19cf8ccbdd2..06feca07e08e 100644
--- a/include/dt-bindings/clock/agilex-clock.h
+++ b/include/dt-bindings/clock/agilex-clock.h
@@ -65,6 +65,8 @@
#define AGILEX_SDMMC_CLK 50
#define AGILEX_SPI_M_CLK 51
#define AGILEX_USB_CLK 52
-#define AGILEX_NUM_CLKS 53
+#define AGILEX_NAND_X_CLK 53
+#define AGILEX_NAND_ECC_CLK 54
+#define AGILEX_NUM_CLKS 55
#endif /* __AGILEX_CLOCK_H */
diff --git a/include/dt-bindings/clock/bcm3368-clock.h b/include/dt-bindings/clock/bcm3368-clock.h
new file mode 100644
index 000000000000..74a7382f77b8
--- /dev/null
+++ b/include/dt-bindings/clock/bcm3368-clock.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM3368_H
+#define __DT_BINDINGS_CLOCK_BCM3368_H
+
+#define BCM3368_CLK_MAC 3
+#define BCM3368_CLK_TC 5
+#define BCM3368_CLK_US_TOP 6
+#define BCM3368_CLK_DS_TOP 7
+#define BCM3368_CLK_ACM 8
+#define BCM3368_CLK_SPI 9
+#define BCM3368_CLK_USBS 10
+#define BCM3368_CLK_BMU 11
+#define BCM3368_CLK_PCM 12
+#define BCM3368_CLK_NTP 13
+#define BCM3368_CLK_ACP_B 14
+#define BCM3368_CLK_ACP_A 15
+#define BCM3368_CLK_EMUSB 17
+#define BCM3368_CLK_ENET0 18
+#define BCM3368_CLK_ENET1 19
+#define BCM3368_CLK_USBSU 20
+#define BCM3368_CLK_EPHY 21
+
+#endif /* __DT_BINDINGS_CLOCK_BCM3368_H */
diff --git a/include/dt-bindings/clock/bcm6318-clock.h b/include/dt-bindings/clock/bcm6318-clock.h
new file mode 100644
index 000000000000..c4417f8983ab
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6318-clock.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6318_H
+#define __DT_BINDINGS_CLOCK_BCM6318_H
+
+#define BCM6318_CLK_ADSL_ASB 0
+#define BCM6318_CLK_USB_ASB 1
+#define BCM6318_CLK_MIPS_ASB 2
+#define BCM6318_CLK_PCIE_ASB 3
+#define BCM6318_CLK_PHYMIPS_ASB 4
+#define BCM6318_CLK_ROBOSW_ASB 5
+#define BCM6318_CLK_SAR_ASB 6
+#define BCM6318_CLK_SDR_ASB 7
+#define BCM6318_CLK_SWREG_ASB 8
+#define BCM6318_CLK_PERIPH_ASB 9
+#define BCM6318_CLK_CPUBUS160 10
+#define BCM6318_CLK_ADSL 11
+#define BCM6318_CLK_SAR125 12
+#define BCM6318_CLK_MIPS 13
+#define BCM6318_CLK_PCIE 14
+#define BCM6318_CLK_ROBOSW250 16
+#define BCM6318_CLK_ROBOSW025 17
+#define BCM6318_CLK_SDR 19
+#define BCM6318_CLK_USBD 20
+#define BCM6318_CLK_HSSPI 25
+#define BCM6318_CLK_PCIE25 27
+#define BCM6318_CLK_PHYMIPS 28
+#define BCM6318_CLK_AFE 29
+#define BCM6318_CLK_QPROC 30
+
+#define BCM6318_UCLK_ADSL 0
+#define BCM6318_UCLK_ARB 1
+#define BCM6318_UCLK_MIPS 2
+#define BCM6318_UCLK_PCIE 3
+#define BCM6318_UCLK_PERIPH 4
+#define BCM6318_UCLK_PHYMIPS 5
+#define BCM6318_UCLK_ROBOSW 6
+#define BCM6318_UCLK_SAR 7
+#define BCM6318_UCLK_SDR 8
+#define BCM6318_UCLK_USB 9
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6318_H */
diff --git a/include/dt-bindings/clock/bcm63268-clock.h b/include/dt-bindings/clock/bcm63268-clock.h
new file mode 100644
index 000000000000..da23e691d359
--- /dev/null
+++ b/include/dt-bindings/clock/bcm63268-clock.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM63268_H
+#define __DT_BINDINGS_CLOCK_BCM63268_H
+
+#define BCM63268_CLK_DIS_GLESS 0
+#define BCM63268_CLK_VDSL_QPROC 1
+#define BCM63268_CLK_VDSL_AFE 2
+#define BCM63268_CLK_VDSL 3
+#define BCM63268_CLK_MIPS 4
+#define BCM63268_CLK_WLAN_OCP 5
+#define BCM63268_CLK_DECT 6
+#define BCM63268_CLK_FAP0 7
+#define BCM63268_CLK_FAP1 8
+#define BCM63268_CLK_SAR 9
+#define BCM63268_CLK_ROBOSW 10
+#define BCM63268_CLK_PCM 11
+#define BCM63268_CLK_USBD 12
+#define BCM63268_CLK_USBH 13
+#define BCM63268_CLK_IPSEC 14
+#define BCM63268_CLK_SPI 15
+#define BCM63268_CLK_HSSPI 16
+#define BCM63268_CLK_PCIE 17
+#define BCM63268_CLK_PHYMIPS 18
+#define BCM63268_CLK_GMAC 19
+#define BCM63268_CLK_NAND 20
+#define BCM63268_CLK_TBUS 27
+#define BCM63268_CLK_ROBOSW250 31
+
+#endif /* __DT_BINDINGS_CLOCK_BCM63268_H */
diff --git a/include/dt-bindings/clock/bcm6328-clock.h b/include/dt-bindings/clock/bcm6328-clock.h
new file mode 100644
index 000000000000..1f6a3103f3dc
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6328-clock.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6328_H
+#define __DT_BINDINGS_CLOCK_BCM6328_H
+
+#define BCM6328_CLK_PHYMIPS 0
+#define BCM6328_CLK_ADSL_QPROC 1
+#define BCM6328_CLK_ADSL_AFE 2
+#define BCM6328_CLK_ADSL 3
+#define BCM6328_CLK_MIPS 4
+#define BCM6328_CLK_SAR 5
+#define BCM6328_CLK_PCM 6
+#define BCM6328_CLK_USBD 7
+#define BCM6328_CLK_USBH 8
+#define BCM6328_CLK_HSSPI 9
+#define BCM6328_CLK_PCIE 10
+#define BCM6328_CLK_ROBOSW 11
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6328_H */
diff --git a/include/dt-bindings/clock/bcm6358-clock.h b/include/dt-bindings/clock/bcm6358-clock.h
new file mode 100644
index 000000000000..980c9cac4765
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6358-clock.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6358_H
+#define __DT_BINDINGS_CLOCK_BCM6358_H
+
+#define BCM6358_CLK_ENET 4
+#define BCM6358_CLK_ADSLPHY 5
+#define BCM6358_CLK_PCM 8
+#define BCM6358_CLK_SPI 9
+#define BCM6358_CLK_USBS 10
+#define BCM6358_CLK_SAR 11
+#define BCM6358_CLK_EMUSB 17
+#define BCM6358_CLK_ENET0 18
+#define BCM6358_CLK_ENET1 19
+#define BCM6358_CLK_USBSU 20
+#define BCM6358_CLK_EPHY 21
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6358_H */
diff --git a/include/dt-bindings/clock/bcm6362-clock.h b/include/dt-bindings/clock/bcm6362-clock.h
new file mode 100644
index 000000000000..17655cd5bf25
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6362-clock.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6362_H
+#define __DT_BINDINGS_CLOCK_BCM6362_H
+
+#define BCM6362_CLK_ADSL_QPROC 1
+#define BCM6362_CLK_ADSL_AFE 2
+#define BCM6362_CLK_ADSL 3
+#define BCM6362_CLK_MIPS 4
+#define BCM6362_CLK_WLAN_OCP 5
+#define BCM6362_CLK_SWPKT_USB 7
+#define BCM6362_CLK_SWPKT_SAR 8
+#define BCM6362_CLK_SAR 9
+#define BCM6362_CLK_ROBOSW 10
+#define BCM6362_CLK_PCM 11
+#define BCM6362_CLK_USBD 12
+#define BCM6362_CLK_USBH 13
+#define BCM6362_CLK_IPSEC 14
+#define BCM6362_CLK_SPI 15
+#define BCM6362_CLK_HSSPI 16
+#define BCM6362_CLK_PCIE 17
+#define BCM6362_CLK_FAP 18
+#define BCM6362_CLK_PHYMIPS 19
+#define BCM6362_CLK_NAND 20
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6362_H */
diff --git a/include/dt-bindings/clock/bcm6368-clock.h b/include/dt-bindings/clock/bcm6368-clock.h
new file mode 100644
index 000000000000..f161d5333883
--- /dev/null
+++ b/include/dt-bindings/clock/bcm6368-clock.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BCM6368_H
+#define __DT_BINDINGS_CLOCK_BCM6368_H
+
+#define BCM6368_CLK_VDSL_QPROC 2
+#define BCM6368_CLK_VDSL_AFE 3
+#define BCM6368_CLK_VDSL_BONDING 4
+#define BCM6368_CLK_VDSL 5
+#define BCM6368_CLK_PHYMIPS 6
+#define BCM6368_CLK_SWPKT_USB 7
+#define BCM6368_CLK_SWPKT_SAR 8
+#define BCM6368_CLK_SPI 9
+#define BCM6368_CLK_USBD 10
+#define BCM6368_CLK_SAR 11
+#define BCM6368_CLK_ROBOSW 12
+#define BCM6368_CLK_UTOPIA 13
+#define BCM6368_CLK_PCM 14
+#define BCM6368_CLK_USBH 15
+#define BCM6368_CLK_DIS_GLESS 16
+#define BCM6368_CLK_NAND 17
+#define BCM6368_CLK_IPSEC 18
+
+#endif /* __DT_BINDINGS_CLOCK_BCM6368_H */
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index b0d65d73db96..40d49940d8a8 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -145,5 +145,7 @@
#define CLKID_CPU3_CLK 255
#define CLKID_SPICC0_SCLK 258
#define CLKID_SPICC1_SCLK 261
+#define CLKID_NNA_AXI_CLK 264
+#define CLKID_NNA_CORE_CLK 267
#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/ingenic,sysost.h b/include/dt-bindings/clock/ingenic,sysost.h
new file mode 100644
index 000000000000..9ac88e90babf
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,sysost.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,sysost DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_INGENIC_OST_H__
+#define __DT_BINDINGS_CLOCK_INGENIC_OST_H__
+
+#define OST_CLK_PERCPU_TIMER 0
+#define OST_CLK_GLOBAL_TIMER 1
+
+#endif /* __DT_BINDINGS_CLOCK_INGENIC_OST_H__ */
diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/jz4780-cgu.h
index 1859ce53ee38..85cf8eb5081b 100644
--- a/include/dt-bindings/clock/jz4780-cgu.h
+++ b/include/dt-bindings/clock/jz4780-cgu.h
@@ -12,78 +12,80 @@
#ifndef __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
#define __DT_BINDINGS_CLOCK_JZ4780_CGU_H__
-#define JZ4780_CLK_EXCLK 0
-#define JZ4780_CLK_RTCLK 1
-#define JZ4780_CLK_APLL 2
-#define JZ4780_CLK_MPLL 3
-#define JZ4780_CLK_EPLL 4
-#define JZ4780_CLK_VPLL 5
-#define JZ4780_CLK_OTGPHY 6
-#define JZ4780_CLK_SCLKA 7
-#define JZ4780_CLK_CPUMUX 8
-#define JZ4780_CLK_CPU 9
-#define JZ4780_CLK_L2CACHE 10
-#define JZ4780_CLK_AHB0 11
-#define JZ4780_CLK_AHB2PMUX 12
-#define JZ4780_CLK_AHB2 13
-#define JZ4780_CLK_PCLK 14
-#define JZ4780_CLK_DDR 15
-#define JZ4780_CLK_VPU 16
-#define JZ4780_CLK_I2SPLL 17
-#define JZ4780_CLK_I2S 18
+#define JZ4780_CLK_EXCLK 0
+#define JZ4780_CLK_RTCLK 1
+#define JZ4780_CLK_APLL 2
+#define JZ4780_CLK_MPLL 3
+#define JZ4780_CLK_EPLL 4
+#define JZ4780_CLK_VPLL 5
+#define JZ4780_CLK_OTGPHY 6
+#define JZ4780_CLK_SCLKA 7
+#define JZ4780_CLK_CPUMUX 8
+#define JZ4780_CLK_CPU 9
+#define JZ4780_CLK_L2CACHE 10
+#define JZ4780_CLK_AHB0 11
+#define JZ4780_CLK_AHB2PMUX 12
+#define JZ4780_CLK_AHB2 13
+#define JZ4780_CLK_PCLK 14
+#define JZ4780_CLK_DDR 15
+#define JZ4780_CLK_VPU 16
+#define JZ4780_CLK_I2SPLL 17
+#define JZ4780_CLK_I2S 18
#define JZ4780_CLK_LCD0PIXCLK 19
#define JZ4780_CLK_LCD1PIXCLK 20
-#define JZ4780_CLK_MSCMUX 21
-#define JZ4780_CLK_MSC0 22
-#define JZ4780_CLK_MSC1 23
-#define JZ4780_CLK_MSC2 24
-#define JZ4780_CLK_UHC 25
-#define JZ4780_CLK_SSIPLL 26
-#define JZ4780_CLK_SSI 27
-#define JZ4780_CLK_CIMMCLK 28
-#define JZ4780_CLK_PCMPLL 29
-#define JZ4780_CLK_PCM 30
-#define JZ4780_CLK_GPU 31
-#define JZ4780_CLK_HDMI 32
-#define JZ4780_CLK_BCH 33
-#define JZ4780_CLK_NEMC 34
-#define JZ4780_CLK_OTG0 35
-#define JZ4780_CLK_SSI0 36
-#define JZ4780_CLK_SMB0 37
-#define JZ4780_CLK_SMB1 38
-#define JZ4780_CLK_SCC 39
-#define JZ4780_CLK_AIC 40
-#define JZ4780_CLK_TSSI0 41
-#define JZ4780_CLK_OWI 42
-#define JZ4780_CLK_KBC 43
-#define JZ4780_CLK_SADC 44
-#define JZ4780_CLK_UART0 45
-#define JZ4780_CLK_UART1 46
-#define JZ4780_CLK_UART2 47
-#define JZ4780_CLK_UART3 48
-#define JZ4780_CLK_SSI1 49
-#define JZ4780_CLK_SSI2 50
-#define JZ4780_CLK_PDMA 51
-#define JZ4780_CLK_GPS 52
-#define JZ4780_CLK_MAC 53
-#define JZ4780_CLK_SMB2 54
-#define JZ4780_CLK_CIM 55
-#define JZ4780_CLK_LCD 56
-#define JZ4780_CLK_TVE 57
-#define JZ4780_CLK_IPU 58
-#define JZ4780_CLK_DDR0 59
-#define JZ4780_CLK_DDR1 60
-#define JZ4780_CLK_SMB3 61
-#define JZ4780_CLK_TSSI1 62
-#define JZ4780_CLK_COMPRESS 63
-#define JZ4780_CLK_AIC1 64
-#define JZ4780_CLK_GPVLC 65
-#define JZ4780_CLK_OTG1 66
-#define JZ4780_CLK_UART4 67
-#define JZ4780_CLK_AHBMON 68
-#define JZ4780_CLK_SMB4 69
-#define JZ4780_CLK_DES 70
-#define JZ4780_CLK_X2D 71
-#define JZ4780_CLK_CORE1 72
+#define JZ4780_CLK_MSCMUX 21
+#define JZ4780_CLK_MSC0 22
+#define JZ4780_CLK_MSC1 23
+#define JZ4780_CLK_MSC2 24
+#define JZ4780_CLK_UHC 25
+#define JZ4780_CLK_SSIPLL 26
+#define JZ4780_CLK_SSI 27
+#define JZ4780_CLK_CIMMCLK 28
+#define JZ4780_CLK_PCMPLL 29
+#define JZ4780_CLK_PCM 30
+#define JZ4780_CLK_GPU 31
+#define JZ4780_CLK_HDMI 32
+#define JZ4780_CLK_BCH 33
+#define JZ4780_CLK_NEMC 34
+#define JZ4780_CLK_OTG0 35
+#define JZ4780_CLK_SSI0 36
+#define JZ4780_CLK_SMB0 37
+#define JZ4780_CLK_SMB1 38
+#define JZ4780_CLK_SCC 39
+#define JZ4780_CLK_AIC 40
+#define JZ4780_CLK_TSSI0 41
+#define JZ4780_CLK_OWI 42
+#define JZ4780_CLK_KBC 43
+#define JZ4780_CLK_SADC 44
+#define JZ4780_CLK_UART0 45
+#define JZ4780_CLK_UART1 46
+#define JZ4780_CLK_UART2 47
+#define JZ4780_CLK_UART3 48
+#define JZ4780_CLK_SSI1 49
+#define JZ4780_CLK_SSI2 50
+#define JZ4780_CLK_PDMA 51
+#define JZ4780_CLK_GPS 52
+#define JZ4780_CLK_MAC 53
+#define JZ4780_CLK_SMB2 54
+#define JZ4780_CLK_CIM 55
+#define JZ4780_CLK_LCD 56
+#define JZ4780_CLK_TVE 57
+#define JZ4780_CLK_IPU 58
+#define JZ4780_CLK_DDR0 59
+#define JZ4780_CLK_DDR1 60
+#define JZ4780_CLK_SMB3 61
+#define JZ4780_CLK_TSSI1 62
+#define JZ4780_CLK_COMPRESS 63
+#define JZ4780_CLK_AIC1 64
+#define JZ4780_CLK_GPVLC 65
+#define JZ4780_CLK_OTG1 66
+#define JZ4780_CLK_UART4 67
+#define JZ4780_CLK_AHBMON 68
+#define JZ4780_CLK_SMB4 69
+#define JZ4780_CLK_DES 70
+#define JZ4780_CLK_X2D 71
+#define JZ4780_CLK_CORE1 72
+#define JZ4780_CLK_EXCLK_DIV512 73
+#define JZ4780_CLK_RTC 74
#endif /* __DT_BINDINGS_CLOCK_JZ4780_CGU_H__ */
diff --git a/include/dt-bindings/clock/microchip,sparx5.h b/include/dt-bindings/clock/microchip,sparx5.h
new file mode 100644
index 000000000000..4b04dabacec2
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,sparx5.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 Microchip Inc.
+ *
+ * Author: Lars Povlsen <lars.povlsen@microchip.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SPARX5_H
+#define _DT_BINDINGS_CLK_SPARX5_H
+
+#define CLK_ID_CORE 0
+#define CLK_ID_DDR 1
+#define CLK_ID_CPU2 2
+#define CLK_ID_ARM2 3
+#define CLK_ID_AUX1 4
+#define CLK_ID_AUX2 5
+#define CLK_ID_AUX3 6
+#define CLK_ID_AUX4 7
+#define CLK_ID_SYNCE 8
+
+#define N_CLOCKS 9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,apss-ipq.h b/include/dt-bindings/clock/qcom,apss-ipq.h
new file mode 100644
index 000000000000..77b6e05492e2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,apss-ipq.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_QCA_APSS_IPQ6018_H
+#define _DT_BINDINGS_CLOCK_QCA_APSS_IPQ6018_H
+
+#define APCS_ALIAS0_CLK_SRC 0
+#define APCS_ALIAS0_CORE_CLK 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
index 4de4811a3540..8e2bec1c91bf 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -230,6 +230,9 @@
#define GCC_GP1_CLK 221
#define GCC_GP2_CLK 222
#define GCC_GP3_CLK 223
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 224
+#define GCC_PCIE0_RCHNG_CLK_SRC 225
+#define GCC_PCIE0_RCHNG_CLK 226
#define GCC_BLSP1_BCR 0
#define GCC_BLSP1_QUP1_BCR 1
@@ -362,5 +365,6 @@
#define GCC_PCIE1_AXI_SLAVE_ARES 128
#define GCC_PCIE1_AHB_ARES 129
#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130
+#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 131
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h
index 992b67b7e5e4..bdf43adc7897 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc7180.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h
@@ -138,6 +138,7 @@
#define GCC_MSS_Q6_MEMNOC_AXI_CLK 128
#define GCC_MSS_SNOC_AXI_CLK 129
#define GCC_SEC_CTRL_CLK_SRC 130
+#define GCC_LPASS_CFG_NOC_SWAY_CLK 131
/* GCC resets */
#define GCC_QUSB2PHY_PRIM_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h b/include/dt-bindings/clock/qcom,gcc-sdm660.h
index 468302282913..df8a6f3d367e 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm660.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h
@@ -152,5 +152,6 @@
#define GCC_USB_20_BCR 6
#define GCC_USB_30_BCR 7
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8
+#define GCC_MSS_RESTART 9
#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8150.h b/include/dt-bindings/clock/qcom,gpucc-sm8150.h
new file mode 100644
index 000000000000..c5b70aad7770
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm8150.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8150_H
+
+/* GPU_CC clock registers */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_APB_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CX_SNOC_DVM_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_GMU_CLK_SRC 7
+#define GPU_CC_GX_GMU_CLK 8
+#define GPU_CC_PLL1 9
+
+/* GPU_CC Resets */
+#define GPUCC_GPU_CC_CX_BCR 0
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 1
+#define GPUCC_GPU_CC_GMU_BCR 2
+#define GPUCC_GPU_CC_GX_BCR 3
+#define GPUCC_GPU_CC_SPDM_BCR 4
+#define GPUCC_GPU_CC_XO_BCR 5
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8250.h b/include/dt-bindings/clock/qcom,gpucc-sm8250.h
new file mode 100644
index 000000000000..dc8e387c48ad
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm8250.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8250_H
+
+/* GPU_CC clock registers */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_APB_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CX_SNOC_DVM_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_GMU_CLK_SRC 7
+#define GPU_CC_GX_GMU_CLK 8
+#define GPU_CC_PLL1 9
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 10
+
+/* GPU_CC Resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 2
+#define GPUCC_GPU_CC_GMU_BCR 3
+#define GPUCC_GPU_CC_GX_BCR 4
+#define GPUCC_GPU_CC_XO_BCR 5
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h b/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h
new file mode 100644
index 000000000000..a55d01db2b20
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpasscorecc-sc7180.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7180_H
+
+/* LPASS_CORE_CC clocks */
+#define LPASS_LPAAUDIO_DIG_PLL 0
+#define LPASS_LPAAUDIO_DIG_PLL_OUT_ODD 1
+#define CORE_CLK_SRC 2
+#define EXT_MCLK0_CLK_SRC 3
+#define LPAIF_PRI_CLK_SRC 4
+#define LPAIF_SEC_CLK_SRC 5
+#define LPASS_AUDIO_CORE_CORE_CLK 6
+#define LPASS_AUDIO_CORE_EXT_MCLK0_CLK 7
+#define LPASS_AUDIO_CORE_LPAIF_PRI_IBIT_CLK 8
+#define LPASS_AUDIO_CORE_LPAIF_SEC_IBIT_CLK 9
+#define LPASS_AUDIO_CORE_SYSNOC_MPORT_CORE_CLK 10
+
+/* LPASS Core power domains */
+#define LPASS_CORE_HM_GDSCR 0
+
+/* LPASS Audio power domains */
+#define LPASS_AUDIO_HM_GDSCR 0
+#define LPASS_PDC_HM_GDSCR 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index ae74c43c485d..8aaba7cd9589 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -133,5 +133,21 @@
#define RPM_SMD_RF_CLK3_A 87
#define RPM_SMD_RF_CLK3_PIN 88
#define RPM_SMD_RF_CLK3_A_PIN 89
+#define RPM_SMD_MMSSNOC_AXI_CLK 90
+#define RPM_SMD_MMSSNOC_AXI_CLK_A 91
+#define RPM_SMD_CNOC_PERIPH_CLK 92
+#define RPM_SMD_CNOC_PERIPH_A_CLK 93
+#define RPM_SMD_LN_BB_CLK3 94
+#define RPM_SMD_LN_BB_CLK3_A 95
+#define RPM_SMD_LN_BB_CLK1_PIN 96
+#define RPM_SMD_LN_BB_CLK1_A_PIN 97
+#define RPM_SMD_LN_BB_CLK2_PIN 98
+#define RPM_SMD_LN_BB_CLK2_A_PIN 99
+#define RPM_SMD_SYSMMNOC_CLK 100
+#define RPM_SMD_SYSMMNOC_A_CLK 101
+#define RPM_SMD_CE2_CLK 102
+#define RPM_SMD_CE2_A_CLK 103
+#define RPM_SMD_CE3_CLK 104
+#define RPM_SMD_CE3_A_CLK 105
#endif
diff --git a/include/dt-bindings/clock/r8a774e1-cpg-mssr.h b/include/dt-bindings/clock/r8a774e1-cpg-mssr.h
new file mode 100644
index 000000000000..b2fc1d1c3c47
--- /dev/null
+++ b/include/dt-bindings/clock/r8a774e1-cpg-mssr.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A774E1_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A774E1_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R8A774E1 CPG Core Clocks */
+#define R8A774E1_CLK_Z 0
+#define R8A774E1_CLK_Z2 1
+#define R8A774E1_CLK_ZG 2
+#define R8A774E1_CLK_ZTR 3
+#define R8A774E1_CLK_ZTRD2 4
+#define R8A774E1_CLK_ZT 5
+#define R8A774E1_CLK_ZX 6
+#define R8A774E1_CLK_S0D1 7
+#define R8A774E1_CLK_S0D2 8
+#define R8A774E1_CLK_S0D3 9
+#define R8A774E1_CLK_S0D4 10
+#define R8A774E1_CLK_S0D6 11
+#define R8A774E1_CLK_S0D8 12
+#define R8A774E1_CLK_S0D12 13
+#define R8A774E1_CLK_S1D2 14
+#define R8A774E1_CLK_S1D4 15
+#define R8A774E1_CLK_S2D1 16
+#define R8A774E1_CLK_S2D2 17
+#define R8A774E1_CLK_S2D4 18
+#define R8A774E1_CLK_S3D1 19
+#define R8A774E1_CLK_S3D2 20
+#define R8A774E1_CLK_S3D4 21
+#define R8A774E1_CLK_LB 22
+#define R8A774E1_CLK_CL 23
+#define R8A774E1_CLK_ZB3 24
+#define R8A774E1_CLK_ZB3D2 25
+#define R8A774E1_CLK_ZB3D4 26
+#define R8A774E1_CLK_CR 27
+#define R8A774E1_CLK_CRD2 28
+#define R8A774E1_CLK_SD0H 29
+#define R8A774E1_CLK_SD0 30
+#define R8A774E1_CLK_SD1H 31
+#define R8A774E1_CLK_SD1 32
+#define R8A774E1_CLK_SD2H 33
+#define R8A774E1_CLK_SD2 34
+#define R8A774E1_CLK_SD3H 35
+#define R8A774E1_CLK_SD3 36
+#define R8A774E1_CLK_RPC 37
+#define R8A774E1_CLK_RPCD2 38
+#define R8A774E1_CLK_MSO 39
+#define R8A774E1_CLK_HDMI 40
+#define R8A774E1_CLK_CSI0 41
+#define R8A774E1_CLK_CP 42
+#define R8A774E1_CLK_CPEX 43
+#define R8A774E1_CLK_R 44
+#define R8A774E1_CLK_OSC 45
+#define R8A774E1_CLK_CANFD 46
+
+#endif /* __DT_BINDINGS_CLOCK_R8A774E1_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 95394f35a74a..0f2d60e884dc 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -195,6 +195,7 @@
#define VF610_CLK_WKPU 186
#define VF610_CLK_TCON0 187
#define VF610_CLK_TCON1 188
-#define VF610_CLK_END 189
+#define VF610_CLK_CAAM 189
+#define VF610_CLK_END 190
#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/x1000-cgu.h
index 0367c8c02e16..f187e0719fd3 100644
--- a/include/dt-bindings/clock/x1000-cgu.h
+++ b/include/dt-bindings/clock/x1000-cgu.h
@@ -48,5 +48,7 @@
#define X1000_CLK_SSI 33
#define X1000_CLK_OST 34
#define X1000_CLK_PDMA 35
+#define X1000_CLK_EXCLK_DIV512 36
+#define X1000_CLK_RTC 37
#endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
diff --git a/include/dt-bindings/clock/x1830-cgu.h b/include/dt-bindings/clock/x1830-cgu.h
index 801e1d09c881..88455376a950 100644
--- a/include/dt-bindings/clock/x1830-cgu.h
+++ b/include/dt-bindings/clock/x1830-cgu.h
@@ -51,5 +51,7 @@
#define X1830_CLK_TCU 36
#define X1830_CLK_DTRNG 37
#define X1830_CLK_OST 38
+#define X1830_CLK_EXCLK_DIV512 39
+#define X1830_CLK_RTC 40
#endif /* __DT_BINDINGS_CLOCK_X1830_CGU_H__ */
diff --git a/include/dt-bindings/dma/xlnx-zynqmp-dpdma.h b/include/dt-bindings/dma/xlnx-zynqmp-dpdma.h
new file mode 100644
index 000000000000..3719cda5679d
--- /dev/null
+++ b/include/dt-bindings/dma/xlnx-zynqmp-dpdma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_XLNX_ZYNQMP_DPDMA_H__
+#define __DT_BINDINGS_DMA_XLNX_ZYNQMP_DPDMA_H__
+
+#define ZYNQMP_DPDMA_VIDEO0 0
+#define ZYNQMP_DPDMA_VIDEO1 1
+#define ZYNQMP_DPDMA_VIDEO2 2
+#define ZYNQMP_DPDMA_GRAPHICS 3
+#define ZYNQMP_DPDMA_AUDIO0 4
+#define ZYNQMP_DPDMA_AUDIO1 5
+
+#endif /* __DT_BINDINGS_DMA_XLNX_ZYNQMP_DPDMA_H__ */
diff --git a/include/dt-bindings/gce/mt6779-gce.h b/include/dt-bindings/gce/mt6779-gce.h
new file mode 100644
index 000000000000..06101316ace4
--- /dev/null
+++ b/include/dt-bindings/gce/mt6779-gce.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Dennis-YC Hsieh <dennis-yc.hsieh@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT6779_H
+#define _DT_BINDINGS_GCE_MT6779_H
+
+#define CMDQ_NO_TIMEOUT 0xffffffff
+
+/* GCE HW thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* GCE subsys table */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1502XXXX 4
+#define SUBSYS_1880XXXX 5
+#define SUBSYS_1881XXXX 6
+#define SUBSYS_1882XXXX 7
+#define SUBSYS_1883XXXX 8
+#define SUBSYS_1884XXXX 9
+#define SUBSYS_1000XXXX 10
+#define SUBSYS_1001XXXX 11
+#define SUBSYS_1002XXXX 12
+#define SUBSYS_1003XXXX 13
+#define SUBSYS_1004XXXX 14
+#define SUBSYS_1005XXXX 15
+#define SUBSYS_1020XXXX 16
+#define SUBSYS_1028XXXX 17
+#define SUBSYS_1700XXXX 18
+#define SUBSYS_1701XXXX 19
+#define SUBSYS_1702XXXX 20
+#define SUBSYS_1703XXXX 21
+#define SUBSYS_1800XXXX 22
+#define SUBSYS_1801XXXX 23
+#define SUBSYS_1802XXXX 24
+#define SUBSYS_1804XXXX 25
+#define SUBSYS_1805XXXX 26
+#define SUBSYS_1808XXXX 27
+#define SUBSYS_180aXXXX 28
+#define SUBSYS_180bXXXX 29
+#define CMDQ_SUBSYS_OFF 32
+
+/* GCE hardware events */
+#define CMDQ_EVENT_DISP_RDMA0_SOF 0
+#define CMDQ_EVENT_DISP_RDMA1_SOF 1
+#define CMDQ_EVENT_MDP_RDMA0_SOF 2
+#define CMDQ_EVENT_MDP_RDMA1_SOF 3
+#define CMDQ_EVENT_MDP_RSZ0_SOF 4
+#define CMDQ_EVENT_MDP_RSZ1_SOF 5
+#define CMDQ_EVENT_MDP_TDSHP_SOF 6
+#define CMDQ_EVENT_MDP_WROT0_SOF 7
+#define CMDQ_EVENT_MDP_WROT1_SOF 8
+#define CMDQ_EVENT_DISP_OVL0_SOF 9
+#define CMDQ_EVENT_DISP_2L_OVL0_SOF 10
+#define CMDQ_EVENT_DISP_2L_OVL1_SOF 11
+#define CMDQ_EVENT_DISP_WDMA0_SOF 12
+#define CMDQ_EVENT_DISP_COLOR0_SOF 13
+#define CMDQ_EVENT_DISP_CCORR0_SOF 14
+#define CMDQ_EVENT_DISP_AAL0_SOF 15
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 16
+#define CMDQ_EVENT_DISP_DITHER0_SOF 17
+#define CMDQ_EVENT_DISP_PWM0_SOF 18
+#define CMDQ_EVENT_DISP_DSI0_SOF 19
+#define CMDQ_EVENT_DISP_DPI0_SOF 20
+#define CMDQ_EVENT_DISP_POSTMASK0_SOF 21
+#define CMDQ_EVENT_DISP_RSZ0_SOF 22
+#define CMDQ_EVENT_MDP_AAL_SOF 23
+#define CMDQ_EVENT_MDP_CCORR_SOF 24
+#define CMDQ_EVENT_DISP_DBI0_SOF 25
+#define CMDQ_EVENT_ISP_RELAY_SOF 26
+#define CMDQ_EVENT_IPU_RELAY_SOF 27
+#define CMDQ_EVENT_DISP_RDMA0_EOF 28
+#define CMDQ_EVENT_DISP_RDMA1_EOF 29
+#define CMDQ_EVENT_MDP_RDMA0_EOF 30
+#define CMDQ_EVENT_MDP_RDMA1_EOF 31
+#define CMDQ_EVENT_MDP_RSZ0_EOF 32
+#define CMDQ_EVENT_MDP_RSZ1_EOF 33
+#define CMDQ_EVENT_MDP_TDSHP_EOF 34
+#define CMDQ_EVENT_MDP_WROT0_W_EOF 35
+#define CMDQ_EVENT_MDP_WROT1_W_EOF 36
+#define CMDQ_EVENT_DISP_OVL0_EOF 37
+#define CMDQ_EVENT_DISP_2L_OVL0_EOF 38
+#define CMDQ_EVENT_DISP_2L_OVL1_EOF 39
+#define CMDQ_EVENT_DISP_WDMA0_EOF 40
+#define CMDQ_EVENT_DISP_COLOR0_EOF 41
+#define CMDQ_EVENT_DISP_CCORR0_EOF 42
+#define CMDQ_EVENT_DISP_AAL0_EOF 43
+#define CMDQ_EVENT_DISP_GAMMA0_EOF 44
+#define CMDQ_EVENT_DISP_DITHER0_EOF 45
+#define CMDQ_EVENT_DISP_DSI0_EOF 46
+#define CMDQ_EVENT_DISP_DPI0_EOF 47
+#define CMDQ_EVENT_DISP_RSZ0_EOF 49
+#define CMDQ_EVENT_MDP_AAL_FRAME_DONE 50
+#define CMDQ_EVENT_MDP_CCORR_FRAME_DONE 51
+#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 52
+#define CMDQ_EVENT_MUTEX0_STREAM_EOF 130
+#define CMDQ_EVENT_MUTEX1_STREAM_EOF 131
+#define CMDQ_EVENT_MUTEX2_STREAM_EOF 132
+#define CMDQ_EVENT_MUTEX3_STREAM_EOF 133
+#define CMDQ_EVENT_MUTEX4_STREAM_EOF 134
+#define CMDQ_EVENT_MUTEX5_STREAM_EOF 135
+#define CMDQ_EVENT_MUTEX6_STREAM_EOF 136
+#define CMDQ_EVENT_MUTEX7_STREAM_EOF 137
+#define CMDQ_EVENT_MUTEX8_STREAM_EOF 138
+#define CMDQ_EVENT_MUTEX9_STREAM_EOF 139
+#define CMDQ_EVENT_MUTEX10_STREAM_EOF 140
+#define CMDQ_EVENT_MUTEX11_STREAM_EOF 141
+#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 142
+#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 143
+#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 144
+#define CMDQ_EVENT_DISP_RDMA3_UNDERRUN 145
+#define CMDQ_EVENT_DSI0_TE 146
+#define CMDQ_EVENT_DSI0_IRQ_EVENT 147
+#define CMDQ_EVENT_DSI0_DONE_EVENT 148
+#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE 150
+#define CMDQ_EVENT_DISP_WDMA0_RST_DONE 151
+#define CMDQ_EVENT_MDP_WROT0_RST_DONE 153
+#define CMDQ_EVENT_MDP_RDMA0_RST_DONE 154
+#define CMDQ_EVENT_DISP_OVL0_RST_DONE 155
+#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE 156
+#define CMDQ_EVENT_DISP_OVL1_2L_RST_DONE 157
+#define CMDQ_EVENT_DIP_CQ_THREAD0_EOF 257
+#define CMDQ_EVENT_DIP_CQ_THREAD1_EOF 258
+#define CMDQ_EVENT_DIP_CQ_THREAD2_EOF 259
+#define CMDQ_EVENT_DIP_CQ_THREAD3_EOF 260
+#define CMDQ_EVENT_DIP_CQ_THREAD4_EOF 261
+#define CMDQ_EVENT_DIP_CQ_THREAD5_EOF 262
+#define CMDQ_EVENT_DIP_CQ_THREAD6_EOF 263
+#define CMDQ_EVENT_DIP_CQ_THREAD7_EOF 264
+#define CMDQ_EVENT_DIP_CQ_THREAD8_EOF 265
+#define CMDQ_EVENT_DIP_CQ_THREAD9_EOF 266
+#define CMDQ_EVENT_DIP_CQ_THREAD10_EOF 267
+#define CMDQ_EVENT_DIP_CQ_THREAD11_EOF 268
+#define CMDQ_EVENT_DIP_CQ_THREAD12_EOF 269
+#define CMDQ_EVENT_DIP_CQ_THREAD13_EOF 270
+#define CMDQ_EVENT_DIP_CQ_THREAD14_EOF 271
+#define CMDQ_EVENT_DIP_CQ_THREAD15_EOF 272
+#define CMDQ_EVENT_DIP_CQ_THREAD16_EOF 273
+#define CMDQ_EVENT_DIP_CQ_THREAD17_EOF 274
+#define CMDQ_EVENT_DIP_CQ_THREAD18_EOF 275
+#define CMDQ_EVENT_DIP_DMA_ERR_EVENT 276
+#define CMDQ_EVENT_AMD_FRAME_DONE 277
+#define CMDQ_EVENT_MFB_DONE 278
+#define CMDQ_EVENT_WPE_A_EOF 279
+#define CMDQ_EVENT_VENC_EOF 289
+#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 290
+#define CMDQ_EVENT_JPEG_ENC_EOF 291
+#define CMDQ_EVENT_VENC_MB_DONE 292
+#define CMDQ_EVENT_VENC_128BYTE_CNT_DONE 293
+#define CMDQ_EVENT_ISP_FRAME_DONE_A 321
+#define CMDQ_EVENT_ISP_FRAME_DONE_B 322
+#define CMDQ_EVENT_ISP_FRAME_DONE_C 323
+#define CMDQ_EVENT_ISP_CAMSV_0_PASS1_DONE 324
+#define CMDQ_EVENT_ISP_CAMSV_0_2_PASS1_DONE 325
+#define CMDQ_EVENT_ISP_CAMSV_1_PASS1_DONE 326
+#define CMDQ_EVENT_ISP_CAMSV_2_PASS1_DONE 327
+#define CMDQ_EVENT_ISP_CAMSV_3_PASS1_DONE 328
+#define CMDQ_EVENT_ISP_TSF_DONE 329
+#define CMDQ_EVENT_SENINF_0_FIFO_FULL 330
+#define CMDQ_EVENT_SENINF_1_FIFO_FULL 331
+#define CMDQ_EVENT_SENINF_2_FIFO_FULL 332
+#define CMDQ_EVENT_SENINF_3_FIFO_FULL 333
+#define CMDQ_EVENT_SENINF_4_FIFO_FULL 334
+#define CMDQ_EVENT_SENINF_5_FIFO_FULL 335
+#define CMDQ_EVENT_SENINF_6_FIFO_FULL 336
+#define CMDQ_EVENT_SENINF_7_FIFO_FULL 337
+#define CMDQ_EVENT_TG_OVRUN_A_INT_DLY 338
+#define CMDQ_EVENT_TG_OVRUN_B_INT_DLY 339
+#define CMDQ_EVENT_TG_OVRUN_C_INT 340
+#define CMDQ_EVENT_TG_GRABERR_A_INT_DLY 341
+#define CMDQ_EVENT_TG_GRABERR_B_INT_DLY 342
+#define CMDQ_EVENT_TG_GRABERR_C_INT 343
+#define CMDQ_EVENT_CQ_VR_SNAP_A_INT_DLY 344
+#define CMDQ_EVENT_CQ_VR_SNAP_B_INT_DLY 345
+#define CMDQ_EVENT_CQ_VR_SNAP_C_INT 346
+#define CMDQ_EVENT_DMA_R1_ERROR_A_INT_DLY 347
+#define CMDQ_EVENT_DMA_R1_ERROR_B_INT_DLY 348
+#define CMDQ_EVENT_DMA_R1_ERROR_C_INT 349
+#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_0 353
+#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_1 354
+#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_2 355
+#define CMDQ_EVENT_APU_GCE_CORE0_EVENT_3 356
+#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_0 385
+#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_1 386
+#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_2 387
+#define CMDQ_EVENT_APU_GCE_CORE1_EVENT_3 388
+#define CMDQ_EVENT_VDEC_EVENT_0 416
+#define CMDQ_EVENT_VDEC_EVENT_1 417
+#define CMDQ_EVENT_VDEC_EVENT_2 418
+#define CMDQ_EVENT_VDEC_EVENT_3 419
+#define CMDQ_EVENT_VDEC_EVENT_4 420
+#define CMDQ_EVENT_VDEC_EVENT_5 421
+#define CMDQ_EVENT_VDEC_EVENT_6 422
+#define CMDQ_EVENT_VDEC_EVENT_7 423
+#define CMDQ_EVENT_VDEC_EVENT_8 424
+#define CMDQ_EVENT_VDEC_EVENT_9 425
+#define CMDQ_EVENT_VDEC_EVENT_10 426
+#define CMDQ_EVENT_VDEC_EVENT_11 427
+#define CMDQ_EVENT_VDEC_EVENT_12 428
+#define CMDQ_EVENT_VDEC_EVENT_13 429
+#define CMDQ_EVENT_VDEC_EVENT_14 430
+#define CMDQ_EVENT_VDEC_EVENT_15 431
+#define CMDQ_EVENT_FDVT_DONE 449
+#define CMDQ_EVENT_FE_DONE 450
+#define CMDQ_EVENT_RSC_EOF 451
+#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 452
+#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 453
+#define CMDQ_EVENT_DSI0_TE_INFRA 898
+
+#endif
diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h
index 42f871ab3272..4627a00e369e 100644
--- a/include/dt-bindings/iio/adc/ingenic,adc.h
+++ b/include/dt-bindings/iio/adc/ingenic,adc.h
@@ -7,5 +7,11 @@
#define INGENIC_ADC_AUX 0
#define INGENIC_ADC_BATTERY 1
#define INGENIC_ADC_AUX2 2
+#define INGENIC_ADC_TOUCH_XP 3
+#define INGENIC_ADC_TOUCH_YP 4
+#define INGENIC_ADC_TOUCH_XN 5
+#define INGENIC_ADC_TOUCH_YN 6
+#define INGENIC_ADC_TOUCH_XD 7
+#define INGENIC_ADC_TOUCH_YD 8
#endif
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
new file mode 100644
index 000000000000..9426f27a1946
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
+
+#ifndef PM8350_SID
+#define PM8350_SID 1
+#endif
+
+/* ADC channels for PM8350_ADC for PMIC7 */
+#define PM8350_ADC7_REF_GND (PM8350_SID << 8 | 0x0)
+#define PM8350_ADC7_1P25VREF (PM8350_SID << 8 | 0x01)
+#define PM8350_ADC7_VREF_VADC (PM8350_SID << 8 | 0x02)
+#define PM8350_ADC7_DIE_TEMP (PM8350_SID << 8 | 0x03)
+
+#define PM8350_ADC7_AMUX_THM1 (PM8350_SID << 8 | 0x04)
+#define PM8350_ADC7_AMUX_THM2 (PM8350_SID << 8 | 0x05)
+#define PM8350_ADC7_AMUX_THM3 (PM8350_SID << 8 | 0x06)
+#define PM8350_ADC7_AMUX_THM4 (PM8350_SID << 8 | 0x07)
+#define PM8350_ADC7_AMUX_THM5 (PM8350_SID << 8 | 0x08)
+#define PM8350_ADC7_GPIO1 (PM8350_SID << 8 | 0x0a)
+#define PM8350_ADC7_GPIO2 (PM8350_SID << 8 | 0x0b)
+#define PM8350_ADC7_GPIO3 (PM8350_SID << 8 | 0x0c)
+#define PM8350_ADC7_GPIO4 (PM8350_SID << 8 | 0x0d)
+
+/* 30k pull-up1 */
+#define PM8350_ADC7_AMUX_THM1_30K_PU (PM8350_SID << 8 | 0x24)
+#define PM8350_ADC7_AMUX_THM2_30K_PU (PM8350_SID << 8 | 0x25)
+#define PM8350_ADC7_AMUX_THM3_30K_PU (PM8350_SID << 8 | 0x26)
+#define PM8350_ADC7_AMUX_THM4_30K_PU (PM8350_SID << 8 | 0x27)
+#define PM8350_ADC7_AMUX_THM5_30K_PU (PM8350_SID << 8 | 0x28)
+#define PM8350_ADC7_GPIO1_30K_PU (PM8350_SID << 8 | 0x2a)
+#define PM8350_ADC7_GPIO2_30K_PU (PM8350_SID << 8 | 0x2b)
+#define PM8350_ADC7_GPIO3_30K_PU (PM8350_SID << 8 | 0x2c)
+#define PM8350_ADC7_GPIO4_30K_PU (PM8350_SID << 8 | 0x2d)
+
+/* 100k pull-up2 */
+#define PM8350_ADC7_AMUX_THM1_100K_PU (PM8350_SID << 8 | 0x44)
+#define PM8350_ADC7_AMUX_THM2_100K_PU (PM8350_SID << 8 | 0x45)
+#define PM8350_ADC7_AMUX_THM3_100K_PU (PM8350_SID << 8 | 0x46)
+#define PM8350_ADC7_AMUX_THM4_100K_PU (PM8350_SID << 8 | 0x47)
+#define PM8350_ADC7_AMUX_THM5_100K_PU (PM8350_SID << 8 | 0x48)
+#define PM8350_ADC7_GPIO1_100K_PU (PM8350_SID << 8 | 0x4a)
+#define PM8350_ADC7_GPIO2_100K_PU (PM8350_SID << 8 | 0x4b)
+#define PM8350_ADC7_GPIO3_100K_PU (PM8350_SID << 8 | 0x4c)
+#define PM8350_ADC7_GPIO4_100K_PU (PM8350_SID << 8 | 0x4d)
+
+/* 400k pull-up3 */
+#define PM8350_ADC7_AMUX_THM1_400K_PU (PM8350_SID << 8 | 0x64)
+#define PM8350_ADC7_AMUX_THM2_400K_PU (PM8350_SID << 8 | 0x65)
+#define PM8350_ADC7_AMUX_THM3_400K_PU (PM8350_SID << 8 | 0x66)
+#define PM8350_ADC7_AMUX_THM4_400K_PU (PM8350_SID << 8 | 0x67)
+#define PM8350_ADC7_AMUX_THM5_400K_PU (PM8350_SID << 8 | 0x68)
+#define PM8350_ADC7_GPIO1_400K_PU (PM8350_SID << 8 | 0x6a)
+#define PM8350_ADC7_GPIO2_400K_PU (PM8350_SID << 8 | 0x6b)
+#define PM8350_ADC7_GPIO3_400K_PU (PM8350_SID << 8 | 0x6c)
+#define PM8350_ADC7_GPIO4_400K_PU (PM8350_SID << 8 | 0x6d)
+
+/* 1/3 Divider */
+#define PM8350_ADC7_GPIO4_DIV3 (PM8350_SID << 8 | 0x8d)
+
+#define PM8350_ADC7_VPH_PWR (PM8350_SID << 8 | 0x8e)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */
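
A quick worked example of the channel encoding above (illustrative only, not taken from the patch): each define packs the PMIC's SPMI slave ID into the upper byte and the raw ADC7 channel number into the lower byte, so a single value identifies both the PMIC and the channel. The build-time check below is purely illustrative.

#include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>

/* With the default PM8350_SID of 1, DIE_TEMP (raw channel 0x03) encodes as 0x0103. */
_Static_assert(PM8350_ADC7_DIE_TEMP == ((1 << 8) | 0x03),
	       "SID in the upper byte, ADC7 channel in the lower byte");
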
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
new file mode 100644
index 000000000000..dc2497c27e16
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H
+
+#ifndef PM8350B_SID
+#define PM8350B_SID 3
+#endif
+
+/* ADC channels for PM8350B_ADC for PMIC7 */
+#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | 0x0)
+#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | 0x01)
+#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | 0x02)
+#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | 0x03)
+
+#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | 0x04)
+#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | 0x05)
+#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | 0x06)
+#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | 0x07)
+#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | 0x08)
+#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | 0x09)
+#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | 0x0a)
+#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | 0x0b)
+#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | 0x0c)
+#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | 0x0d)
+
+#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | 0x10)
+#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | 0x11)
+#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | 0x12)
+#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | 0x13)
+#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | 0x15)
+#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | 0x17)
+
+/* 30k pull-up1 */
+#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | 0x24)
+#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | 0x25)
+#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | 0x26)
+#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | 0x27)
+#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | 0x28)
+#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | 0x29)
+#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | 0x2a)
+#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | 0x2b)
+#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | 0x2c)
+#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | 0x2d)
+#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | 0x33)
+
+/* 100k pull-up2 */
+#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | 0x44)
+#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | 0x45)
+#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | 0x46)
+#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | 0x47)
+#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | 0x48)
+#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | 0x49)
+#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | 0x4a)
+#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | 0x4b)
+#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | 0x4c)
+#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | 0x4d)
+#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | 0x53)
+
+/* 400k pull-up3 */
+#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | 0x64)
+#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | 0x65)
+#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | 0x66)
+#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | 0x67)
+#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | 0x68)
+#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | 0x69)
+#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | 0x6a)
+#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | 0x6b)
+#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | 0x6c)
+#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | 0x6d)
+#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | 0x73)
+
+/* 1/3 Divider */
+#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | 0x8a)
+#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | 0x8b)
+#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | 0x8c)
+#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | 0x8d)
+
+#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | 0x8e)
+#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | 0x8f)
+
+#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | 0x94)
+#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | 0x96)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
new file mode 100644
index 000000000000..6c296870e95b
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H
+
+#ifndef PMK8350_SID
+#define PMK8350_SID 0
+#endif
+
+/* ADC channels for PMK8350_ADC for PMIC7 */
+#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | 0x0)
+#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | 0x01)
+#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | 0x02)
+#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | 0x03)
+
+#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | 0x04)
+#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | 0x05)
+#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | 0x06)
+#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | 0x07)
+#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | 0x08)
+
+/* 30k pull-up1 */
+#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | 0x24)
+#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | 0x25)
+#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | 0x26)
+#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | 0x27)
+#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | 0x28)
+
+/* 100k pull-up2 */
+#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | 0x44)
+#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | 0x45)
+#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | 0x46)
+#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | 0x47)
+#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | 0x48)
+
+/* 400k pull-up3 */
+#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | 0x64)
+#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | 0x65)
+#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | 0x66)
+#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | 0x67)
+#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | 0x68)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
new file mode 100644
index 000000000000..d6df1b19e5ff
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H
+
+#ifndef PMR735A_SID
+#define PMR735A_SID 4
+#endif
+
+/* ADC channels for PMR735A_ADC for PMIC7 */
+#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | 0x0)
+#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | 0x01)
+#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | 0x02)
+#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | 0x03)
+
+#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | 0x0a)
+#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | 0x0b)
+#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | 0x0c)
+
+/* 100k pull-up2 */
+#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | 0x4a)
+#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | 0x4b)
+#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | 0x4c)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
new file mode 100644
index 000000000000..8da0e7dab315
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H
+
+#ifndef PMR735B_SID
+#define PMR735B_SID 5
+#endif
+
+/* ADC channels for PMR735B_ADC for PMIC7 */
+#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | 0x0)
+#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | 0x01)
+#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | 0x02)
+#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | 0x03)
+
+#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | 0x0a)
+#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | 0x0b)
+#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | 0x0c)
+
+/* 100k pull-up2 */
+#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | 0x4a)
+#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | 0x4b)
+#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | 0x4c)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index 61d556db1542..08adfe25964c 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2014,2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014,2018,2020 The Linux Foundation. All rights reserved.
*/
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_H
@@ -221,4 +221,80 @@
#define ADC5_MAX_CHANNEL 0xc0
+/* ADC channels for ADC for PMIC7 */
+
+#define ADC7_REF_GND 0x00
+#define ADC7_1P25VREF 0x01
+#define ADC7_VREF_VADC 0x02
+#define ADC7_DIE_TEMP 0x03
+
+#define ADC7_AMUX_THM1 0x04
+#define ADC7_AMUX_THM2 0x05
+#define ADC7_AMUX_THM3 0x06
+#define ADC7_AMUX_THM4 0x07
+#define ADC7_AMUX_THM5 0x08
+#define ADC7_AMUX_THM6 0x09
+#define ADC7_GPIO1 0x0a
+#define ADC7_GPIO2 0x0b
+#define ADC7_GPIO3 0x0c
+#define ADC7_GPIO4 0x0d
+
+#define ADC7_CHG_TEMP 0x10
+#define ADC7_USB_IN_V_16 0x11
+#define ADC7_VDC_16 0x12
+#define ADC7_CC1_ID 0x13
+#define ADC7_VREF_BAT_THERM 0x15
+#define ADC7_IIN_FB 0x17
+
+/* 30k pull-up1 */
+#define ADC7_AMUX_THM1_30K_PU 0x24
+#define ADC7_AMUX_THM2_30K_PU 0x25
+#define ADC7_AMUX_THM3_30K_PU 0x26
+#define ADC7_AMUX_THM4_30K_PU 0x27
+#define ADC7_AMUX_THM5_30K_PU 0x28
+#define ADC7_AMUX_THM6_30K_PU 0x29
+#define ADC7_GPIO1_30K_PU 0x2a
+#define ADC7_GPIO2_30K_PU 0x2b
+#define ADC7_GPIO3_30K_PU 0x2c
+#define ADC7_GPIO4_30K_PU 0x2d
+#define ADC7_CC1_ID_30K_PU 0x33
+
+/* 100k pull-up2 */
+#define ADC7_AMUX_THM1_100K_PU 0x44
+#define ADC7_AMUX_THM2_100K_PU 0x45
+#define ADC7_AMUX_THM3_100K_PU 0x46
+#define ADC7_AMUX_THM4_100K_PU 0x47
+#define ADC7_AMUX_THM5_100K_PU 0x48
+#define ADC7_AMUX_THM6_100K_PU 0x49
+#define ADC7_GPIO1_100K_PU 0x4a
+#define ADC7_GPIO2_100K_PU 0x4b
+#define ADC7_GPIO3_100K_PU 0x4c
+#define ADC7_GPIO4_100K_PU 0x4d
+#define ADC7_CC1_ID_100K_PU 0x53
+
+/* 400k pull-up3 */
+#define ADC7_AMUX_THM1_400K_PU 0x64
+#define ADC7_AMUX_THM2_400K_PU 0x65
+#define ADC7_AMUX_THM3_400K_PU 0x66
+#define ADC7_AMUX_THM4_400K_PU 0x67
+#define ADC7_AMUX_THM5_400K_PU 0x68
+#define ADC7_AMUX_THM6_400K_PU 0x69
+#define ADC7_GPIO1_400K_PU 0x6a
+#define ADC7_GPIO2_400K_PU 0x6b
+#define ADC7_GPIO3_400K_PU 0x6c
+#define ADC7_GPIO4_400K_PU 0x6d
+#define ADC7_CC1_ID_400K_PU 0x73
+
+/* 1/3 Divider */
+#define ADC7_GPIO1_DIV3 0x8a
+#define ADC7_GPIO2_DIV3 0x8b
+#define ADC7_GPIO3_DIV3 0x8c
+#define ADC7_GPIO4_DIV3 0x8d
+
+#define ADC7_VPH_PWR 0x8e
+#define ADC7_VBAT_SNS 0x8f
+
+#define ADC7_SBUx 0x94
+#define ADC7_VBAT_2S_MID 0x96
+
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_H */
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
index 0ce7dfc00dcb..52b619d44ba2 100644
--- a/include/dt-bindings/leds/common.h
+++ b/include/dt-bindings/leds/common.h
@@ -30,7 +30,10 @@
#define LED_COLOR_ID_VIOLET 5
#define LED_COLOR_ID_YELLOW 6
#define LED_COLOR_ID_IR 7
-#define LED_COLOR_ID_MAX 8
+#define LED_COLOR_ID_MULTI 8 /* For multicolor LEDs */
+#define LED_COLOR_ID_RGB 9 /* For multicolor LEDs that can do arbitrary color,
+ so this would include RGBW and similar */
+#define LED_COLOR_ID_MAX 10
/* Standard LED functions */
/* Keyboard LEDs, usually it would be input4::capslock etc. */
diff --git a/include/dt-bindings/memory/mt6779-larb-port.h b/include/dt-bindings/memory/mt6779-larb-port.h
new file mode 100644
index 000000000000..2ad0899fbf2f
--- /dev/null
+++ b/include/dt-bindings/memory/mt6779-larb-port.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Chao Hao <chao.hao@mediatek.com>
+ */
+
+#ifndef _DTS_IOMMU_PORT_MT6779_H_
+#define _DTS_IOMMU_PORT_MT6779_H_
+
+#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+#define M4U_LARB5_ID 5
+#define M4U_LARB6_ID 6
+#define M4U_LARB7_ID 7
+#define M4U_LARB8_ID 8
+#define M4U_LARB9_ID 9
+#define M4U_LARB10_ID 10
+#define M4U_LARB11_ID 11
+
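/*
 * Illustrative note (not part of the patch): MTK_M4U_ID() above packs the
 * local arbiter (larb) index into the bits above bit 5 and the port index
 * into the low five bits, so every port define below resolves to a plain
 * integer. For example, M4U_PORT_MDP_RDMA0 further down is
 * MTK_M4U_ID(M4U_LARB1_ID, 7), i.e. (1 << 5) | 7 == 0x27.
 */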
+/* larb0 */
+#define M4U_PORT_DISP_POSTMASK0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_OVL0_HDR MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_OVL1_HDR MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_DISP_PVRIC0 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 8)
+
+/* larb1 */
+#define M4U_PORT_DISP_OVL0_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_DISP_OVL1_2L_HDR MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_DISP_OVL0_2L MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_DISP_OVL1_2L MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_MDP_PVRIC0 MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_MDP_PVRIC1 MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_MDP_WROT0_R MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_MDP_WROT0_W MTK_M4U_ID(M4U_LARB1_ID, 10)
+#define M4U_PORT_MDP_WROT1_R MTK_M4U_ID(M4U_LARB1_ID, 11)
+#define M4U_PORT_MDP_WROT1_W MTK_M4U_ID(M4U_LARB1_ID, 12)
+#define M4U_PORT_DISP_FAKE1 MTK_M4U_ID(M4U_LARB1_ID, 13)
+
+/* larb2-VDEC */
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_HW_VDEC_TILE_EXT MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(M4U_LARB2_ID, 11)
+
+/* larb3-VENC */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_VENC_NBM_RDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_JPGENC_Y_RDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_JPGENC_C_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_JPGENC_Q_TABLE MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 10)
+#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 11)
+#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 12)
+#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 13)
+#define M4U_PORT_VENC_NBM_WDMA_LITE MTK_M4U_ID(M4U_LARB3_ID, 14)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 15)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 16)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 17)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 18)
+
+/* larb4-dummy */
+
+/* larb5-IMG */
+#define M4U_PORT_IMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 0)
+#define M4U_PORT_IMGBI_D1 MTK_M4U_ID(M4U_LARB5_ID, 1)
+#define M4U_PORT_DMGI_D1 MTK_M4U_ID(M4U_LARB5_ID, 2)
+#define M4U_PORT_DEPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 3)
+#define M4U_PORT_LCEI_D1 MTK_M4U_ID(M4U_LARB5_ID, 4)
+#define M4U_PORT_SMTI_D1 MTK_M4U_ID(M4U_LARB5_ID, 5)
+#define M4U_PORT_SMTO_D2 MTK_M4U_ID(M4U_LARB5_ID, 6)
+#define M4U_PORT_SMTO_D1 MTK_M4U_ID(M4U_LARB5_ID, 7)
+#define M4U_PORT_CRZO_D1 MTK_M4U_ID(M4U_LARB5_ID, 8)
+#define M4U_PORT_IMG3O_D1 MTK_M4U_ID(M4U_LARB5_ID, 9)
+#define M4U_PORT_VIPI_D1 MTK_M4U_ID(M4U_LARB5_ID, 10)
+#define M4U_PORT_WPE_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 11)
+#define M4U_PORT_WPE_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 12)
+#define M4U_PORT_WPE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 13)
+#define M4U_PORT_TIMGO_D1 MTK_M4U_ID(M4U_LARB5_ID, 14)
+#define M4U_PORT_MFB_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 15)
+#define M4U_PORT_MFB_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 16)
+#define M4U_PORT_MFB_RDMA2 MTK_M4U_ID(M4U_LARB5_ID, 17)
+#define M4U_PORT_MFB_RDMA3 MTK_M4U_ID(M4U_LARB5_ID, 18)
+#define M4U_PORT_MFB_WDMA MTK_M4U_ID(M4U_LARB5_ID, 19)
+#define M4U_PORT_RESERVE1 MTK_M4U_ID(M4U_LARB5_ID, 20)
+#define M4U_PORT_RESERVE2 MTK_M4U_ID(M4U_LARB5_ID, 21)
+#define M4U_PORT_RESERVE3 MTK_M4U_ID(M4U_LARB5_ID, 22)
+#define M4U_PORT_RESERVE4 MTK_M4U_ID(M4U_LARB5_ID, 23)
+#define M4U_PORT_RESERVE5 MTK_M4U_ID(M4U_LARB5_ID, 24)
+#define M4U_PORT_RESERVE6 MTK_M4U_ID(M4U_LARB5_ID, 25)
+
+/* larb6-IMG-VPU */
+#define M4U_PORT_IMG_IPUO MTK_M4U_ID(M4U_LARB6_ID, 0)
+#define M4U_PORT_IMG_IPU3O MTK_M4U_ID(M4U_LARB6_ID, 1)
+#define M4U_PORT_IMG_IPUI MTK_M4U_ID(M4U_LARB6_ID, 2)
+
+/* larb7-DVS */
+#define M4U_PORT_DVS_RDMA MTK_M4U_ID(M4U_LARB7_ID, 0)
+#define M4U_PORT_DVS_WDMA MTK_M4U_ID(M4U_LARB7_ID, 1)
+#define M4U_PORT_DVP_RDMA MTK_M4U_ID(M4U_LARB7_ID, 2)
+#define M4U_PORT_DVP_WDMA MTK_M4U_ID(M4U_LARB7_ID, 3)
+
+/* larb8-IPESYS */
+#define M4U_PORT_FDVT_RDA MTK_M4U_ID(M4U_LARB8_ID, 0)
+#define M4U_PORT_FDVT_RDB MTK_M4U_ID(M4U_LARB8_ID, 1)
+#define M4U_PORT_FDVT_WRA MTK_M4U_ID(M4U_LARB8_ID, 2)
+#define M4U_PORT_FDVT_WRB MTK_M4U_ID(M4U_LARB8_ID, 3)
+#define M4U_PORT_FE_RD0 MTK_M4U_ID(M4U_LARB8_ID, 4)
+#define M4U_PORT_FE_RD1 MTK_M4U_ID(M4U_LARB8_ID, 5)
+#define M4U_PORT_FE_WR0 MTK_M4U_ID(M4U_LARB8_ID, 6)
+#define M4U_PORT_FE_WR1 MTK_M4U_ID(M4U_LARB8_ID, 7)
+#define M4U_PORT_RSC_RDMA0 MTK_M4U_ID(M4U_LARB8_ID, 8)
+#define M4U_PORT_RSC_WDMA MTK_M4U_ID(M4U_LARB8_ID, 9)
+
+/* larb9-CAM */
+#define M4U_PORT_CAM_IMGO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 0)
+#define M4U_PORT_CAM_RRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 1)
+#define M4U_PORT_CAM_LSCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 2)
+#define M4U_PORT_CAM_BPCI_R1_C MTK_M4U_ID(M4U_LARB9_ID, 3)
+#define M4U_PORT_CAM_YUVO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 4)
+#define M4U_PORT_CAM_UFDI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 5)
+#define M4U_PORT_CAM_RAWI_R2_C MTK_M4U_ID(M4U_LARB9_ID, 6)
+#define M4U_PORT_CAM_RAWI_R5_C MTK_M4U_ID(M4U_LARB9_ID, 7)
+#define M4U_PORT_CAM_CAMSV_1 MTK_M4U_ID(M4U_LARB9_ID, 8)
+#define M4U_PORT_CAM_CAMSV_2 MTK_M4U_ID(M4U_LARB9_ID, 9)
+#define M4U_PORT_CAM_CAMSV_3 MTK_M4U_ID(M4U_LARB9_ID, 10)
+#define M4U_PORT_CAM_CAMSV_4 MTK_M4U_ID(M4U_LARB9_ID, 11)
+#define M4U_PORT_CAM_CAMSV_5 MTK_M4U_ID(M4U_LARB9_ID, 12)
+#define M4U_PORT_CAM_CAMSV_6 MTK_M4U_ID(M4U_LARB9_ID, 13)
+#define M4U_PORT_CAM_AAO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 14)
+#define M4U_PORT_CAM_AFO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 15)
+#define M4U_PORT_CAM_FLKO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 16)
+#define M4U_PORT_CAM_LCESO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 17)
+#define M4U_PORT_CAM_CRZO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 18)
+#define M4U_PORT_CAM_LTMSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 19)
+#define M4U_PORT_CAM_RSSO_R1_C MTK_M4U_ID(M4U_LARB9_ID, 20)
+#define M4U_PORT_CAM_CCUI MTK_M4U_ID(M4U_LARB9_ID, 21)
+#define M4U_PORT_CAM_CCUO MTK_M4U_ID(M4U_LARB9_ID, 22)
+#define M4U_PORT_CAM_FAKE MTK_M4U_ID(M4U_LARB9_ID, 23)
+
+/* larb10-CAM_A */
+#define M4U_PORT_CAM_IMGO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 0)
+#define M4U_PORT_CAM_RRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 1)
+#define M4U_PORT_CAM_LSCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 2)
+#define M4U_PORT_CAM_BPCI_R1_A MTK_M4U_ID(M4U_LARB10_ID, 3)
+#define M4U_PORT_CAM_YUVO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 4)
+#define M4U_PORT_CAM_UFDI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 5)
+#define M4U_PORT_CAM_RAWI_R2_A MTK_M4U_ID(M4U_LARB10_ID, 6)
+#define M4U_PORT_CAM_RAWI_R5_A MTK_M4U_ID(M4U_LARB10_ID, 7)
+#define M4U_PORT_CAM_IMGO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 8)
+#define M4U_PORT_CAM_RRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 9)
+#define M4U_PORT_CAM_LSCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 10)
+#define M4U_PORT_CAM_BPCI_R1_B MTK_M4U_ID(M4U_LARB10_ID, 11)
+#define M4U_PORT_CAM_YUVO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 12)
+#define M4U_PORT_CAM_UFDI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 13)
+#define M4U_PORT_CAM_RAWI_R2_B MTK_M4U_ID(M4U_LARB10_ID, 14)
+#define M4U_PORT_CAM_RAWI_R5_B MTK_M4U_ID(M4U_LARB10_ID, 15)
+#define M4U_PORT_CAM_CAMSV_0 MTK_M4U_ID(M4U_LARB10_ID, 16)
+#define M4U_PORT_CAM_AAO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 17)
+#define M4U_PORT_CAM_AFO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 18)
+#define M4U_PORT_CAM_FLKO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 19)
+#define M4U_PORT_CAM_LCESO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 20)
+#define M4U_PORT_CAM_CRZO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 21)
+#define M4U_PORT_CAM_AAO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 22)
+#define M4U_PORT_CAM_AFO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 23)
+#define M4U_PORT_CAM_FLKO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 24)
+#define M4U_PORT_CAM_LCESO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 25)
+#define M4U_PORT_CAM_CRZO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 26)
+#define M4U_PORT_CAM_LTMSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 27)
+#define M4U_PORT_CAM_RSSO_R1_A MTK_M4U_ID(M4U_LARB10_ID, 28)
+#define M4U_PORT_CAM_LTMSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 29)
+#define M4U_PORT_CAM_RSSO_R1_B MTK_M4U_ID(M4U_LARB10_ID, 30)
+
+/* larb11-CAM-VPU */
+#define M4U_PORT_CAM_IPUO MTK_M4U_ID(M4U_LARB11_ID, 0)
+#define M4U_PORT_CAM_IPU2O MTK_M4U_ID(M4U_LARB11_ID, 1)
+#define M4U_PORT_CAM_IPU3O MTK_M4U_ID(M4U_LARB11_ID, 2)
+#define M4U_PORT_CAM_IPUI MTK_M4U_ID(M4U_LARB11_ID, 3)
+#define M4U_PORT_CAM_IPU2I MTK_M4U_ID(M4U_LARB11_ID, 4)
+
+#endif
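
Aside (not part of the patch): the MTK_M4U_ID() macro in the header above packs a local arbiter (larb) index and a port index into a single ID, with the port in bits [4:0] and the larb in the bits above. The following is a minimal standalone C sketch of that encoding; the m4u_id_larb()/m4u_id_port() decode helpers are hypothetical illustrations and do not exist in the header.

#include <assert.h>
#include <stdio.h>

/* Same packing as the header above: larb in bits [31:5], port in bits [4:0]. */
#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))

/* Hypothetical decode helpers, for illustration only. */
static unsigned int m4u_id_larb(unsigned int id) { return id >> 5; }
static unsigned int m4u_id_port(unsigned int id) { return id & 0x1f; }

int main(void)
{
	/* e.g. larb9, port 23 corresponds to M4U_PORT_CAM_FAKE above. */
	unsigned int id = MTK_M4U_ID(9, 23);

	assert(m4u_id_larb(id) == 9);
	assert(m4u_id_port(id) == 23);
	printf("id=0x%x larb=%u port=%u\n", id, m4u_id_larb(id), m4u_id_port(id));
	return 0;
}
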
diff --git a/include/dt-bindings/mux/mux-j721e-wiz.h b/include/dt-bindings/mux/mux-j721e-wiz.h
new file mode 100644
index 000000000000..fd1c4ea9fc7f
--- /dev/null
+++ b/include/dt-bindings/mux/mux-j721e-wiz.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for J721E WIZ.
+ */
+
+#ifndef _DT_BINDINGS_J721E_WIZ
+#define _DT_BINDINGS_J721E_WIZ
+
+#define SERDES0_LANE0_QSGMII_LANE1 0x0
+#define SERDES0_LANE0_PCIE0_LANE0 0x1
+#define SERDES0_LANE0_USB3_0_SWAP 0x2
+
+#define SERDES0_LANE1_QSGMII_LANE2 0x0
+#define SERDES0_LANE1_PCIE0_LANE1 0x1
+#define SERDES0_LANE1_USB3_0 0x2
+
+#define SERDES1_LANE0_QSGMII_LANE3 0x0
+#define SERDES1_LANE0_PCIE1_LANE0 0x1
+#define SERDES1_LANE0_USB3_1_SWAP 0x2
+#define SERDES1_LANE0_SGMII_LANE0 0x3
+
+#define SERDES1_LANE1_QSGMII_LANE4 0x0
+#define SERDES1_LANE1_PCIE1_LANE1 0x1
+#define SERDES1_LANE1_USB3_1 0x2
+#define SERDES1_LANE1_SGMII_LANE1 0x3
+
+#define SERDES2_LANE0_PCIE2_LANE0 0x1
+#define SERDES2_LANE0_SGMII_LANE0 0x3
+#define SERDES2_LANE0_USB3_1_SWAP 0x2
+
+#define SERDES2_LANE1_PCIE2_LANE1 0x1
+#define SERDES2_LANE1_USB3_1 0x2
+#define SERDES2_LANE1_SGMII_LANE1 0x3
+
+#define SERDES3_LANE0_PCIE3_LANE0 0x1
+#define SERDES3_LANE0_USB3_0_SWAP 0x2
+
+#define SERDES3_LANE1_PCIE3_LANE1 0x1
+#define SERDES3_LANE1_USB3_0 0x2
+
+#define SERDES4_LANE0_EDP_LANE0 0x0
+#define SERDES4_LANE0_QSGMII_LANE5 0x2
+
+#define SERDES4_LANE1_EDP_LANE1 0x0
+#define SERDES4_LANE1_QSGMII_LANE6 0x2
+
+#define SERDES4_LANE2_EDP_LANE2 0x0
+#define SERDES4_LANE2_QSGMII_LANE7 0x2
+
+#define SERDES4_LANE3_EDP_LANE3 0x0
+#define SERDES4_LANE3_QSGMII_LANE8 0x2
+
+#endif /* _DT_BINDINGS_J721E_WIZ */
diff --git a/include/dt-bindings/mux/mux.h b/include/dt-bindings/mux/mux.h
index 042719218dbf..0b9d654506ef 100644
--- a/include/dt-bindings/mux/mux.h
+++ b/include/dt-bindings/mux/mux.h
@@ -3,7 +3,7 @@
* This header provides constants for most Multiplexer bindings.
*
* Most Multiplexer bindings specify an idle state. In most cases, the
- * the multiplexer can be left as is when idle, and in some cases it can
+ * multiplexer can be left as is when idle, and in some cases it can
* disconnect the input/output and leave the multiplexer in a high
* impedance state.
*/
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index 3727ef72138b..36e8c241cf48 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -18,5 +18,6 @@
#define PHY_TYPE_UFS 5
#define PHY_TYPE_DP 6
#define PHY_TYPE_XPCS 7
+#define PHY_TYPE_SGMII 8
#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h
index 499de6216581..b0eea7cc6e23 100644
--- a/include/dt-bindings/pinctrl/k3.h
+++ b/include/dt-bindings/pinctrl/k3.h
@@ -3,7 +3,7 @@
* This header provides constants for pinctrl bindings for TI's K3 SoC
* family.
*
- * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_PINCTRL_TI_K3_H
#define _DT_BINDINGS_PINCTRL_TI_K3_H
diff --git a/include/dt-bindings/pinctrl/mt6779-pinfunc.h b/include/dt-bindings/pinctrl/mt6779-pinfunc.h
new file mode 100644
index 000000000000..87fdc4310936
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6779-pinfunc.h
@@ -0,0 +1,1242 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ * Author: Andy Teng <andy.teng@mediatek.com>
+ *
+ */
+
+#ifndef __MT6779_PINFUNC_H
+#define __MT6779_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_SPI6_MI (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S5_LRCK (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDM_LRCK_2ND (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_PCM1_SYNC (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCL_6306 (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_PTA_RXD (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_SPI6_CSB (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S5_DO (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_TDM_DATA0_2ND (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_PCM1_DO0 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SDA_6306 (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_PTA_TXD (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_SPI6_MO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S5_BCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_TDM_BCK_2ND (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_PCM1_CLK (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 6)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_SPI6_CLK (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S5_MCK (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_TDM_MCK_2ND (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 6)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_SPI7_MI (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S0_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_TDM_DATA1_2ND (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_PCM1_DO1 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_DMIC1_CLK (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_SCL8 (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_SPI7_CSB (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S0_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_TDM_DATA2_2ND (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_PCM1_DO2 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_DMIC1_DAT (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_SDA8 (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_SPI7_MO (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S0_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_TDM_DATA3_2ND (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_PCM1_DI (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_DMIC_CLK (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_SCL9 (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI7_CLK (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S0_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_SRCLKENAI1 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_DMIC_DAT (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_SDA9 (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_PWM_0 (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SRCLKENAI0 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_URXD1 (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_I2S0_MCK (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_IDDIG (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_PWM_3 (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_MD_INT0 (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_SRCLKENAI1 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_UTXD1 (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_I2S0_BCK (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_USB_DRVVBUS (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_MSDC1_CLK_A (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_TP_URXD1_AO (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I2S1_LRCK (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_UCTS0 (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_DMIC1_CLK (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_KPCOL2 (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_SCL8 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_MSDC1_CMD_A (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_TP_UTXD1_AO (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_I2S1_DO (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_URTS0 (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_DMIC1_DAT (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_KPROW2 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_SDA8 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_MSDC1_DAT3_A (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_TP_URXD2_AO (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2S1_MCK (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_UCTS1 (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_DMIC_CLK (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_ANT_SEL9 (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_SCL9 (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_MSDC1_DAT0_A (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_TP_UTXD2_AO (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2S1_BCK (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_URTS1 (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_DMIC_DAT (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_ANT_SEL10 (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_SDA9 (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_MSDC1_DAT2_A (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_PWM_3 (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_IDDIG (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_MD_INT0 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_PTA_RXD (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_ANT_SEL11 (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_MSDC1_DAT1_A (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_PTA_TXD (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_ANT_SEL12 (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI0 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_MFG_EJTAG_TRSTN (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_PWM_2 (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SPI0_A_MI (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SCP_SPI0_MI (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_MFG_EJTAG_TDO (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_DPI_HSYNC (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_MFG_DFD_JTAG_TDO (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_DFD_TDO (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_JTDO_SEL1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_SPI0_A_MO (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SCP_SPI0_MO (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_MFG_EJTAG_TDI (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_DPI_VSYNC (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_MFG_DFD_JTAG_TDI (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_DFD_TDI (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_JTDI_SEL1 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SPI0_A_CSB (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SCP_SPI0_CS (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_MFG_EJTAG_TMS (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_DPI_DE (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_MFG_DFD_JTAG_TMS (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_DFD_TMS (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_JTMS_SEL1 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SPI0_A_CLK (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SCP_SPI0_CK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_MFG_EJTAG_TCK (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_DPI_CK (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_MFG_DFD_JTAG_TCK (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_DFD_TCK_XI (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_JTCK_SEL1 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_PWM_0 (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_CMFLASH0 (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_CMVREF2 (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_CLKM0 (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_ANT_SEL9 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_A27 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_PWM_1 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_CMFLASH1 (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_CMVREF3 (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_CLKM1 (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_ANT_SEL10 (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_DBG_MON_A28 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_PWM_2 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_CMFLASH2 (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_CMVREF0 (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_CLKM2 (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_ANT_SEL11 (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_DBG_MON_A29 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_PWM_0 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_CMFLASH3 (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_CMVREF1 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_CLKM3 (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_ANT_SEL12 (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_DBG_MON_A30 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_SRCLKENAI0 (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_UCTS0 (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SCL8 (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_CMVREF4 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_I2S0_LRCK (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(25) | 6)
+#define PINMUX_GPIO25__FUNC_DBG_MON_A31 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_PWM_0 (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_URTS0 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SDA8 (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_CLKM0 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_I2S0_DI (MTK_PIN_NO(26) | 5)
+#define PINMUX_GPIO26__FUNC_AGPS_SYNC (MTK_PIN_NO(26) | 6)
+#define PINMUX_GPIO26__FUNC_DBG_MON_A32 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_AP_GOOD (MTK_PIN_NO(27) | 1)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_SCL5 (MTK_PIN_NO(28) | 1)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_SDA5 (MTK_PIN_NO(29) | 1)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_I2S1_MCK (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_I2S3_MCK (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_I2S2_MCK (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_DPI_D0 (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_SPI4_MI (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(30) | 6)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_I2S1_BCK (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_I2S3_BCK (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_I2S2_BCK (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_DPI_D1 (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_SPI4_CSB (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_CONN_MCU_TDO (MTK_PIN_NO(31) | 6)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I2S1_LRCK (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I2S3_LRCK (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I2S2_LRCK (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_DPI_D2 (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_SPI4_MO (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_CONN_MCU_TDI (MTK_PIN_NO(32) | 6)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_I2S2_DI (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_I2S0_DI (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_I2S5_DO (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_DPI_D3 (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_SPI4_CLK (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_CONN_MCU_TMS (MTK_PIN_NO(33) | 6)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I2S1_DO (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I2S3_DO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I2S2_DI2 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_DPI_D4 (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_AGPS_SYNC (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_CONN_MCU_TCK (MTK_PIN_NO(34) | 6)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_TDM_LRCK (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_I2S1_LRCK (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_I2S5_LRCK (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_DPI_D5 (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_SPI5_A_MO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_IO_JTAG_TDI (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_PWM_2 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_TDM_BCK (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_I2S1_BCK (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_I2S5_BCK (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_DPI_D6 (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SPI5_A_CSB (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_SRCLKENAI1 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_TDM_MCK (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_I2S1_MCK (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_I2S5_MCK (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_DPI_D7 (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_SPI5_A_MI (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_IO_JTAG_TCK (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_SRCLKENAI0 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_TDM_DATA0 (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_I2S2_DI (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_I2S5_DO (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_DPI_D8 (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_SPI5_A_CLK (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_IO_JTAG_TDO (MTK_PIN_NO(38) | 6)
+#define PINMUX_GPIO38__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_TDM_DATA1 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_I2S1_DO (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_I2S2_DI2 (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_DPI_D9 (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_IO_JTAG_TMS (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_IDDIG (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_TDM_DATA2 (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_SCL9 (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_PWM_3 (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_DPI_D10 (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_SRCLKENAI0 (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_DAP_MD32_SWD (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_USB_DRVVBUS (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_TDM_DATA3 (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_SDA9 (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_PWM_1 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_DPI_D11 (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_CLKM1 (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(41) | 6)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_DISP_PWM (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_DSI_TE (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_LCM_RST (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SCL6 (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_SCP_SCL0 (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_SCP_SCL1 (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_SCL_6306 (MTK_PIN_NO(45) | 4)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SDA6 (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_SCP_SDA0 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_SCP_SDA1 (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_SDA_6306 (MTK_PIN_NO(46) | 4)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_SPI1_A_MI (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_SCP_SPI1_A_MI (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_KPCOL2 (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_MD_URXD0 (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_CONN_UART0_RXD (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_SSPM_URXD_AO (MTK_PIN_NO(47) | 6)
+#define PINMUX_GPIO47__FUNC_DBG_MON_B32 (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SPI1_A_CSB (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_SCP_SPI1_A_CS (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_KPROW2 (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_MD_UTXD0 (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_CONN_UART0_TXD (MTK_PIN_NO(48) | 5)
+#define PINMUX_GPIO48__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(48) | 6)
+#define PINMUX_GPIO48__FUNC_DBG_MON_B31 (MTK_PIN_NO(48) | 7)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SPI1_A_MO (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_SCP_SPI1_A_MO (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_UCTS0 (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_MD_URXD1 (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_PWM_1 (MTK_PIN_NO(49) | 5)
+#define PINMUX_GPIO49__FUNC_TP_URXD2_AO (MTK_PIN_NO(49) | 6)
+#define PINMUX_GPIO49__FUNC_DBG_MON_B30 (MTK_PIN_NO(49) | 7)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SPI1_A_CLK (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_SCP_SPI1_A_CK (MTK_PIN_NO(50) | 2)
+#define PINMUX_GPIO50__FUNC_URTS0 (MTK_PIN_NO(50) | 3)
+#define PINMUX_GPIO50__FUNC_MD_UTXD1 (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_WIFI_TXD (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_TP_UTXD2_AO (MTK_PIN_NO(50) | 6)
+#define PINMUX_GPIO50__FUNC_DBG_MON_B29 (MTK_PIN_NO(50) | 7)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SCL0 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_SDA0 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_URXD0 (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_UTXD0 (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_MD_URXD0 (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_MD_URXD1 (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_SSPM_URXD_AO (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_CONN_UART0_RXD (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_UTXD0 (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_URXD0 (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_MD_UTXD0 (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_MD_UTXD1 (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_WIFI_TXD (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_CONN_UART0_TXD (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_SCL3 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_SCP_SCL0 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_SCP_SCL1 (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_SCL_6306 (MTK_PIN_NO(55) | 4)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_SDA3 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_SCP_SDA0 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_SCP_SDA1 (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_SDA_6306 (MTK_PIN_NO(56) | 4)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_KPROW1 (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_PWM_1 (MTK_PIN_NO(57) | 2)
+#define PINMUX_GPIO57__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(57) | 3)
+#define PINMUX_GPIO57__FUNC_CLKM1 (MTK_PIN_NO(57) | 4)
+#define PINMUX_GPIO57__FUNC_IDDIG (MTK_PIN_NO(57) | 5)
+#define PINMUX_GPIO57__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(57) | 6)
+#define PINMUX_GPIO57__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(57) | 7)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_KPROW0 (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_DBG_MON_B28 (MTK_PIN_NO(58) | 7)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_KPCOL0 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_DBG_MON_B27 (MTK_PIN_NO(59) | 7)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_KPCOL1 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_PWM_2 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_UCTS1 (MTK_PIN_NO(60) | 3)
+#define PINMUX_GPIO60__FUNC_CLKM2 (MTK_PIN_NO(60) | 4)
+#define PINMUX_GPIO60__FUNC_USB_DRVVBUS (MTK_PIN_NO(60) | 5)
+#define PINMUX_GPIO60__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(60) | 7)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_SCL1 (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_SCP_SCL0 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_SCP_SCL1 (MTK_PIN_NO(61) | 3)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_SDA1 (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_SCP_SDA0 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_SCP_SDA1 (MTK_PIN_NO(62) | 3)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_SPI2_MI (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_SCP_SPI2_MI (MTK_PIN_NO(63) | 2)
+#define PINMUX_GPIO63__FUNC_KPCOL2 (MTK_PIN_NO(63) | 3)
+#define PINMUX_GPIO63__FUNC_MRG_DI (MTK_PIN_NO(63) | 4)
+#define PINMUX_GPIO63__FUNC_MD_URXD0 (MTK_PIN_NO(63) | 5)
+#define PINMUX_GPIO63__FUNC_CONN_UART0_RXD (MTK_PIN_NO(63) | 6)
+#define PINMUX_GPIO63__FUNC_DBG_MON_B26 (MTK_PIN_NO(63) | 7)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_SPI2_CSB (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_SCP_SPI2_CS (MTK_PIN_NO(64) | 2)
+#define PINMUX_GPIO64__FUNC_KPROW2 (MTK_PIN_NO(64) | 3)
+#define PINMUX_GPIO64__FUNC_MRG_SYNC (MTK_PIN_NO(64) | 4)
+#define PINMUX_GPIO64__FUNC_MD_UTXD0 (MTK_PIN_NO(64) | 5)
+#define PINMUX_GPIO64__FUNC_CONN_UART0_TXD (MTK_PIN_NO(64) | 6)
+#define PINMUX_GPIO64__FUNC_DBG_MON_B25 (MTK_PIN_NO(64) | 7)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_SPI2_MO (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_SCP_SPI2_MO (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_SCP_SDA1 (MTK_PIN_NO(65) | 3)
+#define PINMUX_GPIO65__FUNC_MRG_DO (MTK_PIN_NO(65) | 4)
+#define PINMUX_GPIO65__FUNC_MD_URXD1 (MTK_PIN_NO(65) | 5)
+#define PINMUX_GPIO65__FUNC_PWM_3 (MTK_PIN_NO(65) | 6)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_SPI2_CLK (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_SCP_SPI2_CK (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_SCP_SCL1 (MTK_PIN_NO(66) | 3)
+#define PINMUX_GPIO66__FUNC_MRG_CLK (MTK_PIN_NO(66) | 4)
+#define PINMUX_GPIO66__FUNC_MD_UTXD1 (MTK_PIN_NO(66) | 5)
+#define PINMUX_GPIO66__FUNC_WIFI_TXD (MTK_PIN_NO(66) | 6)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_I2S3_LRCK (MTK_PIN_NO(67) | 1)
+#define PINMUX_GPIO67__FUNC_I2S1_LRCK (MTK_PIN_NO(67) | 2)
+#define PINMUX_GPIO67__FUNC_URXD1 (MTK_PIN_NO(67) | 3)
+#define PINMUX_GPIO67__FUNC_PCM0_SYNC (MTK_PIN_NO(67) | 4)
+#define PINMUX_GPIO67__FUNC_I2S5_LRCK (MTK_PIN_NO(67) | 5)
+#define PINMUX_GPIO67__FUNC_ANT_SEL9 (MTK_PIN_NO(67) | 6)
+#define PINMUX_GPIO67__FUNC_DBG_MON_B10 (MTK_PIN_NO(67) | 7)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_I2S3_DO (MTK_PIN_NO(68) | 1)
+#define PINMUX_GPIO68__FUNC_I2S1_DO (MTK_PIN_NO(68) | 2)
+#define PINMUX_GPIO68__FUNC_UTXD1 (MTK_PIN_NO(68) | 3)
+#define PINMUX_GPIO68__FUNC_PCM0_DO (MTK_PIN_NO(68) | 4)
+#define PINMUX_GPIO68__FUNC_I2S5_DO (MTK_PIN_NO(68) | 5)
+#define PINMUX_GPIO68__FUNC_ANT_SEL10 (MTK_PIN_NO(68) | 6)
+#define PINMUX_GPIO68__FUNC_DBG_MON_B9 (MTK_PIN_NO(68) | 7)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_I2S3_MCK (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_I2S1_MCK (MTK_PIN_NO(69) | 2)
+#define PINMUX_GPIO69__FUNC_URTS1 (MTK_PIN_NO(69) | 3)
+#define PINMUX_GPIO69__FUNC_AGPS_SYNC (MTK_PIN_NO(69) | 4)
+#define PINMUX_GPIO69__FUNC_I2S5_MCK (MTK_PIN_NO(69) | 5)
+#define PINMUX_GPIO69__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(69) | 6)
+#define PINMUX_GPIO69__FUNC_DBG_MON_B8 (MTK_PIN_NO(69) | 7)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_I2S0_DI (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_I2S2_DI (MTK_PIN_NO(70) | 2)
+#define PINMUX_GPIO70__FUNC_KPCOL2 (MTK_PIN_NO(70) | 3)
+#define PINMUX_GPIO70__FUNC_PCM0_DI (MTK_PIN_NO(70) | 4)
+#define PINMUX_GPIO70__FUNC_I2S2_DI2 (MTK_PIN_NO(70) | 5)
+#define PINMUX_GPIO70__FUNC_ANT_SEL11 (MTK_PIN_NO(70) | 6)
+#define PINMUX_GPIO70__FUNC_DBG_MON_B7 (MTK_PIN_NO(70) | 7)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_I2S3_BCK (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_I2S1_BCK (MTK_PIN_NO(71) | 2)
+#define PINMUX_GPIO71__FUNC_KPROW2 (MTK_PIN_NO(71) | 3)
+#define PINMUX_GPIO71__FUNC_PCM0_CLK (MTK_PIN_NO(71) | 4)
+#define PINMUX_GPIO71__FUNC_I2S5_BCK (MTK_PIN_NO(71) | 5)
+#define PINMUX_GPIO71__FUNC_ANT_SEL12 (MTK_PIN_NO(71) | 6)
+#define PINMUX_GPIO71__FUNC_DBG_MON_B6 (MTK_PIN_NO(71) | 7)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS19_OLAT0 (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_CONN_BPI_BUS19_OLAT0 (MTK_PIN_NO(72) | 2)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS18_PA_VM1 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_CONN_MIPI5_SCLK (MTK_PIN_NO(73) | 2)
+#define PINMUX_GPIO73__FUNC_MIPI5_SCLK (MTK_PIN_NO(73) | 3)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS17_PA_VM0 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_CONN_MIPI5_SDATA (MTK_PIN_NO(74) | 2)
+#define PINMUX_GPIO74__FUNC_MIPI5_SDATA (MTK_PIN_NO(74) | 3)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS20_OLAT1 (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_CONN_BPI_BUS20_OLAT1 (MTK_PIN_NO(75) | 2)
+#define PINMUX_GPIO75__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(75) | 3)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_BUS7 (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_DBG_MON_B24 (MTK_PIN_NO(78) | 7)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS6 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_DBG_MON_B23 (MTK_PIN_NO(79) | 7)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS8 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_DBG_MON_B22 (MTK_PIN_NO(80) | 7)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS9 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_DBG_MON_B21 (MTK_PIN_NO(81) | 7)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS10 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_DBG_MON_B20 (MTK_PIN_NO(82) | 7)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS11 (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_DBG_MON_B19 (MTK_PIN_NO(83) | 7)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS12 (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_CONN_BPI_BUS12 (MTK_PIN_NO(84) | 2)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_BPI_BUS13 (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_CONN_BPI_BUS13 (MTK_PIN_NO(85) | 2)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_BPI_BUS14 (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_CONN_BPI_BUS14 (MTK_PIN_NO(86) | 2)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_BPI_BUS15 (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_CONN_BPI_BUS15 (MTK_PIN_NO(87) | 2)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_BPI_BUS16 (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_CONN_BPI_BUS16 (MTK_PIN_NO(88) | 2)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_BPI_BUS5 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_DBG_MON_B18 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_BPI_BUS4 (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_DBG_MON_B17 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_BPI_BUS3 (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_BPI_BUS2 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_DBG_MON_B16 (MTK_PIN_NO(92) | 7)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_BPI_BUS1 (MTK_PIN_NO(93) | 1)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_BPI_BUS0 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_DBG_MON_B15 (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_MIPI0_SDATA (MTK_PIN_NO(95) | 1)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_MIPI0_SCLK (MTK_PIN_NO(96) | 1)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_MIPI1_SDATA (MTK_PIN_NO(97) | 1)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_MIPI1_SCLK (MTK_PIN_NO(98) | 1)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_MIPI2_SCLK (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_DBG_MON_B14 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_MIPI2_SDATA (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_DBG_MON_B13 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_MIPI3_SCLK (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_DBG_MON_B12 (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_MIPI3_SDATA (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_DBG_MON_B11 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_MIPI4_SCLK (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_CONN_MIPI4_SCLK (MTK_PIN_NO(103) | 2)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_MIPI4_SDATA (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_CONN_MIPI4_SDATA (MTK_PIN_NO(104) | 2)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_BPI_BUS22_OLAT3 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_CONN_BPI_BUS22_OLAT3 (MTK_PIN_NO(105) | 2)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_BPI_BUS21_OLAT2 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_CONN_BPI_BUS21_OLAT2 (MTK_PIN_NO(106) | 2)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_BPI_BUS24_ANT1 (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_CONN_BPI_BUS24_ANT1 (MTK_PIN_NO(107) | 2)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_BPI_BUS25_ANT2 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_CONN_BPI_BUS25_ANT2 (MTK_PIN_NO(108) | 2)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_BPI_BUS23_ANT0 (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_CONN_BPI_BUS23_ANT0 (MTK_PIN_NO(109) | 2)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_SCL4 (MTK_PIN_NO(110) | 1)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_SDA4 (MTK_PIN_NO(111) | 1)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_SCL2 (MTK_PIN_NO(112) | 1)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_SDA2 (MTK_PIN_NO(113) | 1)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_CLKM0 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SPI3_MI (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_DBG_MON_B5 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_CLKM1 (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_SPI3_CSB (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_DBG_MON_B4 (MTK_PIN_NO(115) | 7)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_CMMCLK0 (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_DBG_MON_B3 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_CMMCLK1 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_DBG_MON_B2 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_CLKM2 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_SPI3_MO (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_DBG_MON_B1 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_CLKM3 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_SPI3_CLK (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_DBG_MON_B0 (MTK_PIN_NO(119) | 7)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_CMMCLK2 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_CLKM2 (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_ANT_SEL12 (MTK_PIN_NO(120) | 6)
+#define PINMUX_GPIO120__FUNC_TP_UCTS2_AO (MTK_PIN_NO(120) | 7)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_CMMCLK3 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_CLKM3 (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_ANT_SEL11 (MTK_PIN_NO(121) | 6)
+#define PINMUX_GPIO121__FUNC_TP_URTS2_AO (MTK_PIN_NO(121) | 7)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_CMVREF1 (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_PCM0_SYNC (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_SRCLKENAI1 (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_AGPS_SYNC (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_PWM_1 (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_ANT_SEL9 (MTK_PIN_NO(122) | 6)
+#define PINMUX_GPIO122__FUNC_TP_UCTS1_AO (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_PCM0_DI (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_VPU_UDI_NTRST (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(123) | 6)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_CMVREF2 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_PCM0_CLK (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_MD_INT0 (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_PWM_2 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_ANT_SEL10 (MTK_PIN_NO(124) | 6)
+#define PINMUX_GPIO124__FUNC_TP_URTS1_AO (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_CMVREF3 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_PCM0_DO (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_VPU_UDI_TMS (MTK_PIN_NO(125) | 4)
+#define PINMUX_GPIO125__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(125) | 5)
+#define PINMUX_GPIO125__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(125) | 6)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_CMVREF4 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_CMFLASH0 (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(126) | 6)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_CMVREF0 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_CMFLASH1 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(127) | 6)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(128) | 5)
+#define PINMUX_GPIO128__FUNC_LVTS_FOUT (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_DBG_MON_A3 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_CONN_DSP_JCK (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_LVTS_SDO (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_DBG_MON_A4 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_LVTS_26M (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A5 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_CONN_DSP_JDI (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_LVTS_SCK (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A0 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_CONN_DSP_JMS (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_LVTS_SDI (MTK_PIN_NO(132) | 6)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A1 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_CONN_DSP_JDO (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_LVTS_SCF (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A2 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_MSDC1_CLK (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_PCM1_CLK (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_SPI5_B_MI (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_UDI_TCK (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_CONN_DSP_JCK (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(134) | 6)
+#define PINMUX_GPIO134__FUNC_JTCK_SEL3 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_MSDC1_CMD (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_PCM1_SYNC (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_SPI5_B_CSB (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_UDI_TMS (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_CONN_DSP_JMS (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(135) | 6)
+#define PINMUX_GPIO135__FUNC_JTMS_SEL3 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_MSDC1_DAT3 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_PCM1_DI (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_SPI5_B_MO (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(136) | 6)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_MSDC1_DAT0 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_PCM1_DO0 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_SPI5_B_CLK (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_UDI_TDI (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_CONN_DSP_JDI (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_JTDI_SEL3 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_MSDC1_DAT2 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_PCM1_DO2 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_ANT_SEL11 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_UDI_NTRST (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(138) | 6)
+#define PINMUX_GPIO138__FUNC_JTRSTN_SEL3 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_MSDC1_DAT1 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_PCM1_DO1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_ANT_SEL12 (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_UDI_TDO (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_CONN_DSP_JDO (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_JTDO_SEL3 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_ADSP_URXD0 (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_SCL_6306 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_PTA_RXD (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_SSPM_URXD_AO (MTK_PIN_NO(140) | 6)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_ADSP_UTXD0 (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_SDA_6306 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_PTA_TXD (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(141) | 6)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(142) | 2)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MOSI2 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_DBG_MON_A9 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_AUD_NLE_MOSI1 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_AUD_CLK_MISO (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_I2S2_MCK (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_UDI_TCK (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(144) | 6)
+#define PINMUX_GPIO144__FUNC_DBG_MON_A10 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_AUD_NLE_MOSI0 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_I2S2_BCK (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_UDI_TMS (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_DBG_MON_A11 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_AUD_DAT_MISO2 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_I2S2_DI2 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_UDI_TDO (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_DBG_MON_A14 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_ANT_SEL0 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_PWM_3 (MTK_PIN_NO(147) | 2)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_ANT_SEL1 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_SPI0_B_MI (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_SSPM_URXD_AO (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TP_UCTS2_AO (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_CLKM0 (MTK_PIN_NO(148) | 6)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_ANT_SEL2 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_SPI0_B_CSB (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_TP_URTS2_AO (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(149) | 6)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_ANT_SEL3 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_SPI0_B_MO (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_UCTS1 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_TP_UCTS1_AO (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_IDDIG (MTK_PIN_NO(150) | 6)
+#define PINMUX_GPIO150__FUNC_SCL9 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_ANT_SEL4 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_SPI0_B_CLK (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_URTS1 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_TP_URTS1_AO (MTK_PIN_NO(151) | 5)
+#define PINMUX_GPIO151__FUNC_USB_DRVVBUS (MTK_PIN_NO(151) | 6)
+#define PINMUX_GPIO151__FUNC_SDA9 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_ANT_SEL5 (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_SPI1_B_MI (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_CLKM3 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_TP_URXD1_AO (MTK_PIN_NO(152) | 5)
+#define PINMUX_GPIO152__FUNC_SCP_SPI1_B_MI (MTK_PIN_NO(152) | 6)
+#define PINMUX_GPIO152__FUNC_SCL8 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_ANT_SEL6 (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_SPI1_B_CSB (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_SRCLKENAI0 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_PWM_0 (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_TP_UTXD1_AO (MTK_PIN_NO(153) | 5)
+#define PINMUX_GPIO153__FUNC_SCP_SPI1_B_CS (MTK_PIN_NO(153) | 6)
+#define PINMUX_GPIO153__FUNC_SDA8 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_ANT_SEL7 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_SPI1_B_MO (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_SRCLKENAI1 (MTK_PIN_NO(154) | 3)
+#define PINMUX_GPIO154__FUNC_TP_URXD2_AO (MTK_PIN_NO(154) | 5)
+#define PINMUX_GPIO154__FUNC_SCP_SPI1_B_MO (MTK_PIN_NO(154) | 6)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_ANT_SEL8 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_SPI1_B_CLK (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_MD_INT0 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_TP_UTXD2_AO (MTK_PIN_NO(155) | 5)
+#define PINMUX_GPIO155__FUNC_SCP_SPI1_B_CK (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_A15 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_CONN_TOP_CLK (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_AUXIF_CLK0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_DBG_MON_A16 (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_CONN_TOP_DATA (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_AUXIF_ST0 (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_DBG_MON_A17 (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_CONN_HRST_B (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_DBG_MON_A18 (MTK_PIN_NO(158) | 7)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_CONN_WB_PTA (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_DBG_MON_A19 (MTK_PIN_NO(159) | 7)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_CONN_BT_CLK (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_AUXIF_CLK1 (MTK_PIN_NO(160) | 2)
+#define PINMUX_GPIO160__FUNC_DBG_MON_A20 (MTK_PIN_NO(160) | 7)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_CONN_BT_DATA (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_AUXIF_ST1 (MTK_PIN_NO(161) | 2)
+#define PINMUX_GPIO161__FUNC_DBG_MON_A21 (MTK_PIN_NO(161) | 7)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_DBG_MON_A22 (MTK_PIN_NO(162) | 7)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_DBG_MON_A23 (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_DBG_MON_A24 (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_CONN_WF_CTRL3 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_DBG_MON_A25 (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_CONN_WF_CTRL4 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_DBG_MON_A26 (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_MSDC0_CMD (MTK_PIN_NO(167) | 1)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_MSDC0_DAT0 (MTK_PIN_NO(168) | 1)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_MSDC0_DAT2 (MTK_PIN_NO(169) | 1)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_MSDC0_DAT4 (MTK_PIN_NO(170) | 1)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_MSDC0_DAT6 (MTK_PIN_NO(171) | 1)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_MSDC0_DAT1 (MTK_PIN_NO(172) | 1)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_MSDC0_DAT5 (MTK_PIN_NO(173) | 1)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_MSDC0_DAT7 (MTK_PIN_NO(174) | 1)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_MSDC0_DSL (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_ANT_SEL9 (MTK_PIN_NO(175) | 2)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_MSDC0_CLK (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_ANT_SEL10 (MTK_PIN_NO(176) | 2)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_MSDC0_DAT3 (MTK_PIN_NO(177) | 1)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_MSDC0_RSTB (MTK_PIN_NO(178) | 1)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(179) | 1)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(180) | 1)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_SRCLKENA0 (MTK_PIN_NO(181) | 1)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_SRCLKENA1 (MTK_PIN_NO(182) | 1)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_WATCHDOG (MTK_PIN_NO(183) | 1)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(184) | 1)
+#define PINMUX_GPIO184__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(184) | 2)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(186) | 1)
+#define PINMUX_GPIO186__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(186) | 2)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_RTC32K_CK (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(189) | 1)
+#define PINMUX_GPIO189__FUNC_I2S1_MCK (MTK_PIN_NO(189) | 3)
+#define PINMUX_GPIO189__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(189) | 6)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(190) | 1)
+#define PINMUX_GPIO190__FUNC_I2S1_BCK (MTK_PIN_NO(190) | 3)
+#define PINMUX_GPIO190__FUNC_DBG_MON_A6 (MTK_PIN_NO(190) | 7)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(191) | 1)
+#define PINMUX_GPIO191__FUNC_I2S1_LRCK (MTK_PIN_NO(191) | 3)
+#define PINMUX_GPIO191__FUNC_DBG_MON_A7 (MTK_PIN_NO(191) | 7)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_I2S1_DO (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(192) | 6)
+#define PINMUX_GPIO192__FUNC_DBG_MON_A8 (MTK_PIN_NO(192) | 7)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(193) | 1)
+#define PINMUX_GPIO193__FUNC_VOW_DAT_MISO (MTK_PIN_NO(193) | 2)
+#define PINMUX_GPIO193__FUNC_I2S2_LRCK (MTK_PIN_NO(193) | 3)
+#define PINMUX_GPIO193__FUNC_UDI_TDI (MTK_PIN_NO(193) | 5)
+#define PINMUX_GPIO193__FUNC_DBG_MON_A12 (MTK_PIN_NO(193) | 7)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(194) | 1)
+#define PINMUX_GPIO194__FUNC_VOW_CLK_MISO (MTK_PIN_NO(194) | 2)
+#define PINMUX_GPIO194__FUNC_I2S2_DI (MTK_PIN_NO(194) | 3)
+#define PINMUX_GPIO194__FUNC_UDI_NTRST (MTK_PIN_NO(194) | 5)
+#define PINMUX_GPIO194__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(194) | 6)
+#define PINMUX_GPIO194__FUNC_DBG_MON_A13 (MTK_PIN_NO(194) | 7)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(195) | 3)
+#define PINMUX_GPIO195__FUNC_VPU_UDI_TCK (MTK_PIN_NO(195) | 4)
+#define PINMUX_GPIO195__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(195) | 5)
+#define PINMUX_GPIO195__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(195) | 6)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_CMMCLK4 (MTK_PIN_NO(196) | 1)
+#define PINMUX_GPIO196__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(196) | 3)
+#define PINMUX_GPIO196__FUNC_VPU_UDI_TDI (MTK_PIN_NO(196) | 4)
+#define PINMUX_GPIO196__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(196) | 5)
+#define PINMUX_GPIO196__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(196) | 6)
+
+#define PINMUX_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define PINMUX_GPIO197__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(197) | 3)
+#define PINMUX_GPIO197__FUNC_VPU_UDI_TDO (MTK_PIN_NO(197) | 4)
+#define PINMUX_GPIO197__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(197) | 5)
+#define PINMUX_GPIO197__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(197) | 6)
+
+#define PINMUX_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define PINMUX_GPIO198__FUNC_SCL7 (MTK_PIN_NO(198) | 1)
+
+#define PINMUX_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define PINMUX_GPIO199__FUNC_SDA7 (MTK_PIN_NO(199) | 1)
+
+#define PINMUX_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define PINMUX_GPIO200__FUNC_URXD1 (MTK_PIN_NO(200) | 1)
+#define PINMUX_GPIO200__FUNC_ADSP_URXD0 (MTK_PIN_NO(200) | 2)
+#define PINMUX_GPIO200__FUNC_TP_URXD1_AO (MTK_PIN_NO(200) | 3)
+#define PINMUX_GPIO200__FUNC_SSPM_URXD_AO (MTK_PIN_NO(200) | 4)
+#define PINMUX_GPIO200__FUNC_TP_URXD2_AO (MTK_PIN_NO(200) | 5)
+#define PINMUX_GPIO200__FUNC_MBISTREADEN_TRIGGER (MTK_PIN_NO(200) | 6)
+
+#define PINMUX_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define PINMUX_GPIO201__FUNC_UTXD1 (MTK_PIN_NO(201) | 1)
+#define PINMUX_GPIO201__FUNC_ADSP_UTXD0 (MTK_PIN_NO(201) | 2)
+#define PINMUX_GPIO201__FUNC_TP_UTXD1_AO (MTK_PIN_NO(201) | 3)
+#define PINMUX_GPIO201__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(201) | 4)
+#define PINMUX_GPIO201__FUNC_TP_UTXD2_AO (MTK_PIN_NO(201) | 5)
+#define PINMUX_GPIO201__FUNC_MBISTWRITEEN_TRIGGER (MTK_PIN_NO(201) | 6)
+
+#define PINMUX_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define PINMUX_GPIO202__FUNC_PWM_3 (MTK_PIN_NO(202) | 1)
+#define PINMUX_GPIO202__FUNC_CLKM3 (MTK_PIN_NO(202) | 2)
+
+#define PINMUX_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0)
+
+#define PINMUX_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0)
+
+#define PINMUX_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0)
+
+#define PINMUX_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0)
+
+#define PINMUX_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0)
+
+#define PINMUX_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0)
+
+#define PINMUX_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0)
+
+#endif /* __MT6779_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h
index 625718042413..2d2a8c737822 100644
--- a/include/dt-bindings/pinctrl/omap.h
+++ b/include/dt-bindings/pinctrl/omap.h
@@ -65,7 +65,7 @@
#define DM814X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
#define DM816X_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
#define AM33XX_IOPAD(pa, val) OMAP_IOPAD_OFFSET((pa), 0x0800) (val)
-#define AM33XX_PADCONF(pa, dir, mux) OMAP_IOPAD_OFFSET((pa), 0x0800) ((dir) | (mux))
+#define AM33XX_PADCONF(pa, conf, mux) OMAP_IOPAD_OFFSET((pa), 0x0800) (conf) (mux)
/*
* Macros to allow using the offset from the padconf physical address
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index dc146e44228b..5e61eaf73bdd 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -55,6 +55,7 @@
#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
#define RPMH_REGULATOR_LEVEL_SVS 128
+#define RPMH_REGULATOR_LEVEL_SVS_L0 144
#define RPMH_REGULATOR_LEVEL_SVS_L1 192
#define RPMH_REGULATOR_LEVEL_SVS_L2 224
#define RPMH_REGULATOR_LEVEL_NOM 256
diff --git a/include/dt-bindings/power/r8a774e1-sysc.h b/include/dt-bindings/power/r8a774e1-sysc.h
new file mode 100644
index 000000000000..7edb8161db36
--- /dev/null
+++ b/include/dt-bindings/power/r8a774e1-sysc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A774E1_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A774E1_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A774E1_PD_CA57_CPU0 0
+#define R8A774E1_PD_CA57_CPU1 1
+#define R8A774E1_PD_CA57_CPU2 2
+#define R8A774E1_PD_CA57_CPU3 3
+#define R8A774E1_PD_CA53_CPU0 5
+#define R8A774E1_PD_CA53_CPU1 6
+#define R8A774E1_PD_CA53_CPU2 7
+#define R8A774E1_PD_CA53_CPU3 8
+#define R8A774E1_PD_A3VP 9
+#define R8A774E1_PD_CA57_SCU 12
+#define R8A774E1_PD_A3VC 14
+#define R8A774E1_PD_3DG_A 17
+#define R8A774E1_PD_3DG_B 18
+#define R8A774E1_PD_3DG_C 19
+#define R8A774E1_PD_3DG_D 20
+#define R8A774E1_PD_CA53_SCU 21
+#define R8A774E1_PD_3DG_E 22
+#define R8A774E1_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A774E1_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A774E1_SYSC_H__ */
diff --git a/include/dt-bindings/regulator/dlg,da9211-regulator.h b/include/dt-bindings/regulator/dlg,da9211-regulator.h
new file mode 100644
index 000000000000..cdce2d54c8ba
--- /dev/null
+++ b/include/dt-bindings/regulator/dlg,da9211-regulator.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_REGULATOR_DLG_DA9211_H
+#define _DT_BINDINGS_REGULATOR_DLG_DA9211_H
+
+/*
+ * These buck mode constants may be used to specify values in device tree
+ * properties (e.g. regulator-initial-mode, regulator-allowed-modes).
+ * A description of the following modes is in the manufacturer's datasheet.
+ */
+
+#define DA9211_BUCK_MODE_SLEEP 1
+#define DA9211_BUCK_MODE_SYNC 2
+#define DA9211_BUCK_MODE_AUTO 3
+
+#endif
diff --git a/include/dt-bindings/regulator/mediatek,mt6397-regulator.h b/include/dt-bindings/regulator/mediatek,mt6397-regulator.h
new file mode 100644
index 000000000000..99869a8665cf
--- /dev/null
+++ b/include/dt-bindings/regulator/mediatek,mt6397-regulator.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_REGULATOR_MEDIATEK_MT6397_H_
+#define _DT_BINDINGS_REGULATOR_MEDIATEK_MT6397_H_
+
+/*
+ * Buck mode constants which may be used in devicetree properties (e.g.
+ * regulator-initial-mode, regulator-allowed-modes).
+ * See the manufacturer's datasheet for more information on these modes.
+ */
+
+#define MT6397_BUCK_MODE_AUTO 0
+#define MT6397_BUCK_MODE_FORCE_PWM 1
+
+#endif
diff --git a/include/dt-bindings/reset/actions,s500-reset.h b/include/dt-bindings/reset/actions,s500-reset.h
new file mode 100644
index 000000000000..f5d94176d10b
--- /dev/null
+++ b/include/dt-bindings/reset/actions,s500-reset.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Device Tree binding constants for Actions Semi S500 Reset Management Unit
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Copyright (c) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_ACTIONS_S500_RESET_H
+#define __DT_BINDINGS_ACTIONS_S500_RESET_H
+
+#define RESET_DMAC 0
+#define RESET_NORIF 1
+#define RESET_DDR 2
+#define RESET_NANDC 3
+#define RESET_SD0 4
+#define RESET_SD1 5
+#define RESET_PCM1 6
+#define RESET_DE 7
+#define RESET_LCD 8
+#define RESET_SD2 9
+#define RESET_DSI 10
+#define RESET_CSI 11
+#define RESET_BISP 12
+#define RESET_KEY 13
+#define RESET_GPIO 14
+#define RESET_AUDIO 15
+#define RESET_PCM0 16
+#define RESET_VDE 17
+#define RESET_VCE 18
+#define RESET_GPU3D 19
+#define RESET_NIC301 20
+#define RESET_LENS 21
+#define RESET_PERIPHRESET 22
+#define RESET_USB2_0 23
+#define RESET_TVOUT 24
+#define RESET_HDMI 25
+#define RESET_HDCP2TX 26
+#define RESET_UART6 27
+#define RESET_UART0 28
+#define RESET_UART1 29
+#define RESET_UART2 30
+#define RESET_SPI0 31
+#define RESET_SPI1 32
+#define RESET_SPI2 33
+#define RESET_SPI3 34
+#define RESET_I2C0 35
+#define RESET_I2C1 36
+#define RESET_USB3 37
+#define RESET_UART3 38
+#define RESET_UART4 39
+#define RESET_UART5 40
+#define RESET_I2C2 41
+#define RESET_I2C3 42
+#define RESET_ETHERNET 43
+#define RESET_CHIPID 44
+#define RESET_USB2_1 45
+#define RESET_WD0RESET 46
+#define RESET_WD1RESET 47
+#define RESET_WD2RESET 48
+#define RESET_WD3RESET 49
+#define RESET_DBG0RESET 50
+#define RESET_DBG1RESET 51
+#define RESET_DBG2RESET 52
+#define RESET_DBG3RESET 53
+
+#endif /* __DT_BINDINGS_ACTIONS_S500_RESET_H */
diff --git a/include/dt-bindings/reset/ti-syscon.h b/include/dt-bindings/reset/ti-syscon.h
index 6d696d2d1508..eacc0f18083e 100644
--- a/include/dt-bindings/reset/ti-syscon.h
+++ b/include/dt-bindings/reset/ti-syscon.h
@@ -2,7 +2,7 @@
/*
* TI Syscon Reset definitions
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __DT_BINDINGS_RESET_TI_SYSCON_H__
diff --git a/include/dt-bindings/sound/qcom,q6asm.h b/include/dt-bindings/sound/qcom,q6asm.h
index 1eb77d87c2e8..f59d74f14395 100644
--- a/include/dt-bindings/sound/qcom,q6asm.h
+++ b/include/dt-bindings/sound/qcom,q6asm.h
@@ -19,4 +19,8 @@
#define MSM_FRONTEND_DAI_MULTIMEDIA15 14
#define MSM_FRONTEND_DAI_MULTIMEDIA16 15
+#define Q6ASM_DAI_TX_RX 0
+#define Q6ASM_DAI_TX 1
+#define Q6ASM_DAI_RX 2
+
#endif /* __DT_BINDINGS_Q6_ASM_H__ */
diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h
index 8a21d6a613ab..c47dc5405f79 100644
--- a/include/keys/asymmetric-parser.h
+++ b/include/keys/asymmetric-parser.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Asymmetric public-key cryptography data parser
*
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
index 21407815d9c3..d55171f640a0 100644
--- a/include/keys/asymmetric-subtype.h
+++ b/include/keys/asymmetric-subtype.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Asymmetric public-key cryptography key subtype
*
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 91cfd9bd9385..a29d3ff2e7e8 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Asymmetric Public-key cryptography key type interface
*
- * See Documentation/crypto/asymmetric-keys.txt
+ * See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 47e61e1d5337..59f3144f009a 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/kref.h>
struct kunit_resource;
@@ -23,13 +24,19 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *);
/**
* struct kunit_resource - represents a *test managed resource*
- * @allocation: for the user to store arbitrary data.
+ * @data: for the user to store arbitrary data.
* @free: a user supplied function to free the resource. Populated by
- * kunit_alloc_resource().
+ *	  kunit_add_resource().
*
* Represents a *test managed resource*, a resource which will automatically be
* cleaned up at the end of a test case.
*
+ * Resources are reference counted, so if a resource is retrieved via
+ * kunit_alloc_and_get_resource() or kunit_find_resource(), the caller must
+ * call kunit_put_resource() to drop the reference count when finished with
+ * it.  Note that kunit_alloc_resource() does not require a corresponding
+ * kunit_put_resource() call, because it does not hand back the resource
+ * itself.
+ *
* Example:
*
* .. code-block:: c
@@ -42,9 +49,9 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *);
* static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
* {
* struct kunit_kmalloc_params *params = context;
- * res->allocation = kmalloc(params->size, params->gfp);
+ * res->data = kmalloc(params->size, params->gfp);
*
- * if (!res->allocation)
+ * if (!res->data)
* return -ENOMEM;
*
* return 0;
@@ -52,30 +59,32 @@ typedef void (*kunit_resource_free_t)(struct kunit_resource *);
*
* static void kunit_kmalloc_free(struct kunit_resource *res)
* {
- * kfree(res->allocation);
+ * kfree(res->data);
* }
*
* void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
* {
* struct kunit_kmalloc_params params;
- * struct kunit_resource *res;
*
* params.size = size;
* params.gfp = gfp;
*
- * res = kunit_alloc_resource(test, kunit_kmalloc_init,
+ * return kunit_alloc_resource(test, kunit_kmalloc_init,
* kunit_kmalloc_free, &params);
- * if (res)
- * return res->allocation;
- *
- * return NULL;
* }
+ *
+ * Resources can also be named, with lookup and removal done by name via
+ * kunit_add_named_resource(), kunit_find_named_resource() and
+ * kunit_destroy_named_resource().  Resource names must be unique within
+ * the test instance.
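+ *
+ * For example, a sketch of how the named-resource calls above might be
+ * combined (illustrative only; example_named_resource(), do_something()
+ * and the "example-name" string are placeholders, not part of the API):
+ *
+ * .. code-block:: c
+ *
+ *	static void example_named_resource(struct kunit *test)
+ *	{
+ *		static struct kunit_resource res;
+ *		static int value;
+ *		struct kunit_resource *found;
+ *
+ *		kunit_add_named_resource(test, NULL, NULL, &res,
+ *					 "example-name", &value);
+ *
+ *		found = kunit_find_named_resource(test, "example-name");
+ *		if (found) {
+ *			do_something(found->data);
+ *			kunit_put_resource(found);
+ *		}
+ *	}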
*/
struct kunit_resource {
- void *allocation;
- kunit_resource_free_t free;
+ void *data;
+ const char *name; /* optional name */
/* private: internal use only. */
+ kunit_resource_free_t free;
+ struct kref refcount;
struct list_head node;
};
@@ -284,6 +293,79 @@ struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
void *context);
/**
+ * kunit_get_resource() - Hold resource for use.  Most users should not need
+ *			  to call this directly, since resources retrieved via
+ *			  kunit_find_resource*() are already held.
+ * @res: resource
+ */
+static inline void kunit_get_resource(struct kunit_resource *res)
+{
+ kref_get(&res->refcount);
+}
+
+/*
+ * Called when refcount reaches zero via kunit_put_resource();
+ * should not be called directly.
+ */
+static inline void kunit_release_resource(struct kref *kref)
+{
+ struct kunit_resource *res = container_of(kref, struct kunit_resource,
+ refcount);
+
+ /* If free function is defined, resource was dynamically allocated. */
+ if (res->free) {
+ res->free(res);
+ kfree(res);
+ }
+}
+
+/**
+ * kunit_put_resource() - Drop the reference taken on a retrieved resource
+ *			  when the caller is done with it.  The resource list
+ *			  maintains a reference count on resources, so once no
+ *			  users hold a reference and the resource has been
+ *			  removed from the resource list, it is freed via the
+ *			  associated free function (if any).  Only needed
+ *			  after kunit_alloc_and_get_resource() or
+ *			  kunit_find_resource().
+ * @res: resource
+ */
+static inline void kunit_put_resource(struct kunit_resource *res)
+{
+ kref_put(&res->refcount, kunit_release_resource);
+}
+
+/**
+ * kunit_add_resource() - Add a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the resource data (if
+ *	  needed).  If none is supplied, the resource data value is simply
+ *	  set to @data.  If an init function is supplied, @data is passed
+ *	  to it instead.
+ * @free: a user-supplied function to free the resource (if needed).
+ * @res: the resource to be added.
+ * @data: value to pass to init function or set in resource data field.
+ */
+int kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data);
+
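+/*
+ * For example (an illustrative sketch, not part of the original change;
+ * "res" and "value" are placeholder names), a statically allocated resource
+ * can be registered without init/free callbacks, in which case the data
+ * pointer is stored directly in the resource:
+ *
+ *	static struct kunit_resource res;
+ *	static int value;
+ *
+ *	kunit_add_resource(test, NULL, NULL, &res, &value);
+ */
+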
+/**
+ * kunit_add_named_resource() - Add a named *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the resource data, if needed.
+ * @free: a user-supplied function to free the resource data, if needed.
+ * @res: the resource to be added.
+ * @name: name to be set for the resource; must be unique within the test.
+ * @data: value to pass to init function or set in resource data field.
+ */
+int kunit_add_named_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ const char *name,
+ void *data);
+
+/**
* kunit_alloc_resource() - Allocates a *test managed resource*.
* @test: The test context object.
* @init: a user supplied function to initialize the resource.
@@ -295,7 +377,7 @@ struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
* cleaned up at the end of a test case. See &struct kunit_resource for an
* example.
*
- * NOTE: KUnit needs to allocate memory for each kunit_resource object. You must
+ * Note: KUnit needs to allocate memory for a kunit_resource object. You must
* specify an @internal_gfp that is compatible with the use context of your
* resource.
*/
@@ -307,54 +389,122 @@ static inline void *kunit_alloc_resource(struct kunit *test,
{
struct kunit_resource *res;
- res = kunit_alloc_and_get_resource(test, init, free, internal_gfp,
- context);
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
- if (res)
- return res->allocation;
+ if (!kunit_add_resource(test, init, free, res, context))
+ return res->data;
return NULL;
}
typedef bool (*kunit_resource_match_t)(struct kunit *test,
- const void *res,
+ struct kunit_resource *res,
void *match_data);
/**
* kunit_resource_instance_match() - Match a resource with the same instance.
* @test: Test case to which the resource belongs.
- * @res: The data stored in kunit_resource->allocation.
+ * @res: The resource.
* @match_data: The resource pointer to match against.
*
* An instance of kunit_resource_match_t that matches a resource whose
* allocation matches @match_data.
*/
static inline bool kunit_resource_instance_match(struct kunit *test,
- const void *res,
+ struct kunit_resource *res,
void *match_data)
{
- return res == match_data;
+ return res->data == match_data;
}
/**
- * kunit_resource_destroy() - Find a kunit_resource and destroy it.
+ * kunit_resource_name_match() - Match a resource with the same name.
+ * @test: Test case to which the resource belongs.
+ * @res: The resource.
+ * @match_name: The name to match against.
+ */
+static inline bool kunit_resource_name_match(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_name)
+{
+ return res->name && strcmp(res->name, match_name) == 0;
+}
+
+/**
+ * kunit_find_resource() - Find a resource using match function/data.
+ * @test: Test case to which the resource belongs.
+ * @match: match function to be applied to resources/match data.
+ * @match_data: data to be used in matching.
+ */
+static inline struct kunit_resource *
+kunit_find_resource(struct kunit *test,
+ kunit_resource_match_t match,
+ void *match_data)
+{
+ struct kunit_resource *res, *found = NULL;
+
+ spin_lock(&test->lock);
+
+ list_for_each_entry_reverse(res, &test->resources, node) {
+ if (match(test, res, (void *)match_data)) {
+ found = res;
+ kunit_get_resource(found);
+ break;
+ }
+ }
+
+ spin_unlock(&test->lock);
+
+ return found;
+}
+
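+/*
+ * Illustrative sketch (not part of the original change): a caller that
+ * retrieves a resource via kunit_find_resource() holds a reference and
+ * must drop it with kunit_put_resource() when done; "ptr" and use_data()
+ * below are placeholders:
+ *
+ *	res = kunit_find_resource(test, kunit_resource_instance_match, ptr);
+ *	if (res) {
+ *		use_data(res->data);
+ *		kunit_put_resource(res);
+ *	}
+ */
+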
+/**
+ * kunit_find_named_resource() - Find a resource using match name.
+ * @test: Test case to which the resource belongs.
+ * @name: match name.
+ */
+static inline struct kunit_resource *
+kunit_find_named_resource(struct kunit *test,
+ const char *name)
+{
+ return kunit_find_resource(test, kunit_resource_name_match,
+ (void *)name);
+}
+
+/**
+ * kunit_destroy_resource() - Find a kunit_resource and destroy it.
* @test: Test case to which the resource belongs.
* @match: Match function. Returns whether a given resource matches @match_data.
- * @free: Must match free on the kunit_resource to free.
* @match_data: Data passed into @match.
*
- * Free the latest kunit_resource of @test for which @free matches the
- * kunit_resource_free_t associated with the resource and for which @match
- * returns true.
- *
* RETURNS:
* 0 if kunit_resource is found and freed, -ENOENT if not found.
*/
-int kunit_resource_destroy(struct kunit *test,
+int kunit_destroy_resource(struct kunit *test,
kunit_resource_match_t match,
- kunit_resource_free_t free,
void *match_data);
+static inline int kunit_destroy_named_resource(struct kunit *test,
+ const char *name)
+{
+ return kunit_destroy_resource(test, kunit_resource_name_match,
+ (void *)name);
+}
+
+/**
+ * kunit_remove_resource() - remove resource from the resource list
+ *			     associated with the test.
+ * @test: The test context object.
+ * @res: The resource to be removed.
+ *
+ * Note that the resource will not be immediately freed since it is likely
+ * the caller has a reference to it via kunit_alloc_and_get_resource() or
+ * kunit_find_resource(); in this case a final call to kunit_put_resource()
+ * is required.
+ */
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
+
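+/*
+ * Illustrative sketch (not part of the original change): a resource that was
+ * looked up by name can be removed from the list and then released; the
+ * "example-name" string is a placeholder:
+ *
+ *	found = kunit_find_named_resource(test, "example-name");
+ *	if (found) {
+ *		kunit_remove_resource(test, found);
+ *		kunit_put_resource(found);
+ *	}
+ */
+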
/**
* kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
* @test: The test context object.
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index d120e6c323e7..51c19381108c 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -26,16 +26,9 @@ enum kvm_arch_timer_regs {
struct arch_timer_context {
struct kvm_vcpu *vcpu;
- /* Registers: control register, timer value */
- u32 cnt_ctl;
- u64 cnt_cval;
-
/* Timer IRQ */
struct kvm_irq_level irq;
- /* Virtual offset */
- u64 cntvoff;
-
/* Emulated Timer (may be unused) */
struct hrtimer hrtimer;
@@ -71,7 +64,7 @@ int kvm_timer_hyp_init(bool);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
@@ -109,4 +102,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timer_regs treg,
u64 val);
+/* Needed for tracing */
+u32 timer_get_ctl(struct arch_timer_context *ctxt);
+u64 timer_get_cval(struct arch_timer_context *ctxt);
+
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d661cd0ee64d..1e4cdc6c7ae2 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -905,6 +905,13 @@ static inline int acpi_dma_configure(struct device *dev,
return 0;
}
+static inline int acpi_dma_configure_id(struct device *dev,
+ enum dev_dma_attr attr,
+ const u32 *input_id)
+{
+ return 0;
+}
+
#define ACPI_PTR(_ptr) (NULL)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -1143,16 +1150,27 @@ struct acpi_probe_entry {
kernel_ulong_t driver_data;
};
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
+ valid, data, fn) \
+ static const struct acpi_probe_entry __acpi_probe_##name \
+ __used __section(__##table##_acpi_probe_table) = { \
+ .id = table_id, \
+ .type = subtable, \
+ .subtable_valid = valid, \
+ .probe_table = fn, \
+ .driver_data = data, \
+ }
+
+#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \
+ subtable, valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
- __used __section(__##table##_acpi_probe_table) \
- = { \
+ __used __section(__##table##_acpi_probe_table) = { \
.id = table_id, \
.type = subtable, \
.subtable_valid = valid, \
- .probe_table = (acpi_tbl_table_handler)fn, \
- .driver_data = data, \
- }
+ .probe_subtbl = fn, \
+ .driver_data = data, \
+ }
#define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table
#define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 8e7e2ec37f1b..20a32120bb88 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -28,27 +28,29 @@ void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
void acpi_iort_init(void);
-u32 iort_msi_map_rid(struct device *dev, u32 req_id);
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+u32 iort_msi_map_id(struct device *dev, u32 id);
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+ enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
-const struct iommu_ops *iort_iommu_configure(struct device *dev);
+const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
+ const u32 *id_in);
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
#else
static inline void acpi_iort_init(void) { }
-static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
-{ return req_id; }
-static inline struct irq_domain *iort_get_device_domain(struct device *dev,
- u32 req_id)
+static inline u32 iort_msi_map_id(struct device *dev, u32 id)
+{ return id; }
+static inline struct irq_domain *iort_get_device_domain(
+ struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
u64 *size) { }
-static inline const struct iommu_ops *iort_iommu_configure(
- struct device *dev)
+static inline const struct iommu_ops *iort_iommu_configure_id(
+ struct device *dev, const u32 *id_in)
{ return NULL; }
static inline
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h
deleted file mode 100644
index 421b0fa90d6a..000000000000
--- a/include/linux/amba/clcd-regs.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef AMBA_CLCD_REGS_H
-#define AMBA_CLCD_REGS_H
-
-/*
- * CLCD Controller Internal Register addresses
- */
-#define CLCD_TIM0 0x00000000
-#define CLCD_TIM1 0x00000004
-#define CLCD_TIM2 0x00000008
-#define CLCD_TIM3 0x0000000c
-#define CLCD_UBAS 0x00000010
-#define CLCD_LBAS 0x00000014
-
-#define CLCD_PL110_IENB 0x00000018
-#define CLCD_PL110_CNTL 0x0000001c
-#define CLCD_PL110_STAT 0x00000020
-#define CLCD_PL110_INTR 0x00000024
-#define CLCD_PL110_UCUR 0x00000028
-#define CLCD_PL110_LCUR 0x0000002C
-
-#define CLCD_PL111_CNTL 0x00000018
-#define CLCD_PL111_IENB 0x0000001c
-#define CLCD_PL111_RIS 0x00000020
-#define CLCD_PL111_MIS 0x00000024
-#define CLCD_PL111_ICR 0x00000028
-#define CLCD_PL111_UCUR 0x0000002c
-#define CLCD_PL111_LCUR 0x00000030
-
-#define CLCD_PALL 0x00000200
-#define CLCD_PALETTE 0x00000200
-
-#define TIM2_PCD_LO_MASK GENMASK(4, 0)
-#define TIM2_PCD_LO_BITS 5
-#define TIM2_CLKSEL (1 << 5)
-#define TIM2_ACB_MASK GENMASK(10, 6)
-#define TIM2_IVS (1 << 11)
-#define TIM2_IHS (1 << 12)
-#define TIM2_IPC (1 << 13)
-#define TIM2_IOE (1 << 14)
-#define TIM2_BCD (1 << 26)
-#define TIM2_PCD_HI_MASK GENMASK(31, 27)
-#define TIM2_PCD_HI_BITS 5
-#define TIM2_PCD_HI_SHIFT 27
-
-#define CNTL_LCDEN (1 << 0)
-#define CNTL_LCDBPP1 (0 << 1)
-#define CNTL_LCDBPP2 (1 << 1)
-#define CNTL_LCDBPP4 (2 << 1)
-#define CNTL_LCDBPP8 (3 << 1)
-#define CNTL_LCDBPP16 (4 << 1)
-#define CNTL_LCDBPP16_565 (6 << 1)
-#define CNTL_LCDBPP16_444 (7 << 1)
-#define CNTL_LCDBPP24 (5 << 1)
-#define CNTL_LCDBW (1 << 4)
-#define CNTL_LCDTFT (1 << 5)
-#define CNTL_LCDMONO8 (1 << 6)
-#define CNTL_LCDDUAL (1 << 7)
-#define CNTL_BGR (1 << 8)
-#define CNTL_BEBO (1 << 9)
-#define CNTL_BEPO (1 << 10)
-#define CNTL_LCDPWR (1 << 11)
-#define CNTL_LCDVCOMP(x) ((x) << 12)
-#define CNTL_LDMAFIFOTIME (1 << 15)
-#define CNTL_WATERMARK (1 << 16)
-
-/* ST Microelectronics variant bits */
-#define CNTL_ST_1XBPP_444 0x0
-#define CNTL_ST_1XBPP_5551 (1 << 17)
-#define CNTL_ST_1XBPP_565 (1 << 18)
-#define CNTL_ST_CDWID_12 0x0
-#define CNTL_ST_CDWID_16 (1 << 19)
-#define CNTL_ST_CDWID_18 (1 << 20)
-#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20))
-#define CNTL_ST_CEAEN (1 << 21)
-#define CNTL_ST_LCDBPP24_PACKED (6 << 1)
-
-#endif /* AMBA_CLCD_REGS_H */
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
deleted file mode 100644
index b6e0cbeaf533..000000000000
--- a/include/linux/amba/clcd.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel.
- *
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-#include <linux/fb.h>
-#include <linux/amba/clcd-regs.h>
-
-enum {
- /* individual formats */
- CLCD_CAP_RGB444 = (1 << 0),
- CLCD_CAP_RGB5551 = (1 << 1),
- CLCD_CAP_RGB565 = (1 << 2),
- CLCD_CAP_RGB888 = (1 << 3),
- CLCD_CAP_BGR444 = (1 << 4),
- CLCD_CAP_BGR5551 = (1 << 5),
- CLCD_CAP_BGR565 = (1 << 6),
- CLCD_CAP_BGR888 = (1 << 7),
-
- /* connection layouts */
- CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444,
- CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551,
- CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565,
- CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888,
-
- /* red/blue ordering */
- CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 |
- CLCD_CAP_RGB565 | CLCD_CAP_RGB888,
- CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 |
- CLCD_CAP_BGR565 | CLCD_CAP_BGR888,
-
- CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB,
-};
-
-struct backlight_device;
-
-struct clcd_panel {
- struct fb_videomode mode;
- signed short width; /* width in mm */
- signed short height; /* height in mm */
- u32 tim2;
- u32 tim3;
- u32 cntl;
- u32 caps;
- unsigned int bpp:8,
- fixedtimings:1,
- grayscale:1;
- unsigned int connector;
- struct backlight_device *backlight;
- /*
- * If the B/R lines are switched between the CLCD
- * and the panel we need to know this and not try to
- * compensate with the BGR bit in the control register.
- */
- bool bgr_connection;
-};
-
-struct clcd_regs {
- u32 tim0;
- u32 tim1;
- u32 tim2;
- u32 tim3;
- u32 cntl;
- unsigned long pixclock;
-};
-
-struct clcd_fb;
-
-/*
- * the board-type specific routines
- */
-struct clcd_board {
- const char *name;
-
- /*
- * Optional. Hardware capability flags.
- */
- u32 caps;
-
- /*
- * Optional. Check whether the var structure is acceptable
- * for this display.
- */
- int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var);
-
- /*
- * Compulsory. Decode fb->fb.var into regs->*. In the case of
- * fixed timing, set regs->* to the register values required.
- */
- void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs);
-
- /*
- * Optional. Disable any extra display hardware.
- */
- void (*disable)(struct clcd_fb *);
-
- /*
- * Optional. Enable any extra display hardware.
- */
- void (*enable)(struct clcd_fb *);
-
- /*
- * Setup platform specific parts of CLCD driver
- */
- int (*setup)(struct clcd_fb *);
-
- /*
- * mmap the framebuffer memory
- */
- int (*mmap)(struct clcd_fb *, struct vm_area_struct *);
-
- /*
- * Remove platform specific parts of CLCD driver
- */
- void (*remove)(struct clcd_fb *);
-};
-
-struct amba_device;
-struct clk;
-
-/* this data structure describes each frame buffer device we find */
-struct clcd_fb {
- struct fb_info fb;
- struct amba_device *dev;
- struct clk *clk;
- struct clcd_panel *panel;
- struct clcd_board *board;
- void *board_data;
- void __iomem *regs;
- u16 off_ienb;
- u16 off_cntl;
- u32 clcd_cntl;
- u32 cmap[16];
- bool clk_enabled;
-};
-
-static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val, cpl;
-
- /*
- * Program the CLCD controller registers and start the CLCD
- */
- val = ((var->xres / 16) - 1) << 2;
- val |= (var->hsync_len - 1) << 8;
- val |= (var->right_margin - 1) << 16;
- val |= (var->left_margin - 1) << 24;
- regs->tim0 = val;
-
- val = var->yres;
- if (fb->panel->cntl & CNTL_LCDDUAL)
- val /= 2;
- val -= 1;
- val |= (var->vsync_len - 1) << 10;
- val |= var->lower_margin << 16;
- val |= var->upper_margin << 24;
- regs->tim1 = val;
-
- val = fb->panel->tim2;
- val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
- val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
-
- cpl = var->xres_virtual;
- if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */
- /* / 1 */;
- else if (!var->grayscale) /* STN color */
- cpl = cpl * 8 / 3;
- else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */
- cpl /= 8;
- else /* STN monochrome, 4bit */
- cpl /= 4;
-
- regs->tim2 = val | ((cpl - 1) << 16);
-
- regs->tim3 = fb->panel->tim3;
-
- val = fb->panel->cntl;
- if (var->grayscale)
- val |= CNTL_LCDBW;
-
- if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) {
- /*
- * if board and panel supply capabilities, we can support
- * changing BGR/RGB depending on supplied parameters. Here
- * we switch to what the framebuffer is providing if need
- * be, so if the framebuffer is BGR but the display connection
- * is RGB (first case) we switch it around. Vice versa mutatis
- * mutandis if the framebuffer is RGB but the display connection
- * is BGR, we flip it around.
- */
- if (var->red.offset == 0)
- val &= ~CNTL_BGR;
- else
- val |= CNTL_BGR;
- if (fb->panel->bgr_connection)
- val ^= CNTL_BGR;
- }
-
- switch (var->bits_per_pixel) {
- case 1:
- val |= CNTL_LCDBPP1;
- break;
- case 2:
- val |= CNTL_LCDBPP2;
- break;
- case 4:
- val |= CNTL_LCDBPP4;
- break;
- case 8:
- val |= CNTL_LCDBPP8;
- break;
- case 16:
- /*
- * PL110 cannot choose between 5551 and 565 modes in its
- * control register. It is possible to use 565 with
- * custom external wiring.
- */
- if (amba_part(fb->dev) == 0x110 ||
- var->green.length == 5)
- val |= CNTL_LCDBPP16;
- else if (var->green.length == 6)
- val |= CNTL_LCDBPP16_565;
- else
- val |= CNTL_LCDBPP16_444;
- break;
- case 32:
- val |= CNTL_LCDBPP24;
- break;
- }
-
- regs->cntl = val;
- regs->pixclock = var->pixclock;
-}
-
-static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
-{
- var->xres_virtual = var->xres = (var->xres + 15) & ~15;
- var->yres_virtual = var->yres = (var->yres + 1) & ~1;
-
-#define CHECK(e,l,h) (var->e < l || var->e > h)
- if (CHECK(right_margin, (5+1), 256) || /* back porch */
- CHECK(left_margin, (5+1), 256) || /* front porch */
- CHECK(hsync_len, (5+1), 256) ||
- var->xres > 4096 ||
- var->lower_margin > 255 || /* back porch */
- var->upper_margin > 255 || /* front porch */
- var->vsync_len > 32 ||
- var->yres > 1024)
- return -EINVAL;
-#undef CHECK
-
- /* single panel mode: PCD = max(PCD, 1) */
- /* dual panel mode: PCD = max(PCD, 5) */
-
- /*
- * You can't change the grayscale setting, and
- * we can only do non-interlaced video.
- */
- if (var->grayscale != fb->fb.var.grayscale ||
- (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
- return -EINVAL;
-
-#define CHECK(e) (var->e != fb->fb.var.e)
- if (fb->panel->fixedtimings &&
- (CHECK(xres) ||
- CHECK(yres) ||
- CHECK(bits_per_pixel) ||
- CHECK(pixclock) ||
- CHECK(left_margin) ||
- CHECK(right_margin) ||
- CHECK(upper_margin) ||
- CHECK(lower_margin) ||
- CHECK(hsync_len) ||
- CHECK(vsync_len) ||
- CHECK(sync)))
- return -EINVAL;
-#undef CHECK
-
- var->nonstd = 0;
- var->accel_flags = 0;
-
- return 0;
-}
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 0566cb3314ef..69b1dabe39dc 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -39,8 +39,8 @@ static inline unsigned long topology_get_thermal_pressure(int cpu)
return per_cpu(thermal_pressure, cpu);
}
-void arch_set_thermal_pressure(struct cpumask *cpus,
- unsigned long th_pressure);
+void topology_set_thermal_pressure(const struct cpumask *cpus,
+ unsigned long th_pressure);
struct cpu_topology {
int thread_id;
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 56d6a5c6e353..15c706fb0a37 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -71,6 +71,11 @@
ARM_SMCCC_SMC_32, \
0, 1)
+#define ARM_SMCCC_ARCH_SOC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 2)
+
#define ARM_SMCCC_ARCH_WORKAROUND_1 \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_32, \
@@ -81,6 +86,28 @@
ARM_SMCCC_SMC_32, \
0, 0x7fff)
+/* Paravirtualised time calls (defined by ARM DEN0057A) */
+#define ARM_SMCCC_HV_PV_TIME_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x20)
+
+#define ARM_SMCCC_HV_PV_TIME_ST \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x21)
+
+/*
+ * Return codes defined in ARM DEN 0070A
+ * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
+ */
+#define SMCCC_RET_SUCCESS 0
+#define SMCCC_RET_NOT_SUPPORTED -1
+#define SMCCC_RET_NOT_REQUIRED -2
+#define SMCCC_RET_INVALID_PARAMETER -3
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -332,15 +359,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
/*
- * Return codes defined in ARM DEN 0070A
- * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
- */
-#define SMCCC_RET_SUCCESS 0
-#define SMCCC_RET_NOT_SUPPORTED -1
-#define SMCCC_RET_NOT_REQUIRED -2
-#define SMCCC_RET_INVALID_PARAMETER -3
-
-/*
* Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED.
* Used when the SMCCC conduit is not defined. The empty asm statement
* avoids compiler warnings about unused variables.
@@ -385,18 +403,5 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
method; \
})
-/* Paravirtualised time calls (defined by ARM DEN0057A) */
-#define ARM_SMCCC_HV_PV_TIME_FEATURES \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
- ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_STANDARD_HYP, \
- 0x20)
-
-#define ARM_SMCCC_HV_PV_TIME_ST \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
- ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_STANDARD_HYP, \
- 0x21)
-
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 75e582b8d2d9..4c328fef403c 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -36,7 +36,7 @@ struct dma_chan_ref {
/**
* async_tx_flags - modifiers for the async_* calls
* @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
- * the destination address is not a source. The asynchronous case handles this
+ * destination address is not a source. The asynchronous case handles this
* implicitly, the synchronous case needs to zero the destination block.
* @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
* also one of the source addresses. In the synchronous case the destination
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 8124815eb121..5d5ff2203fa2 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -176,11 +176,6 @@ struct atm_dev {
#define ATM_OF_IMMED 1 /* Attempt immediate delivery */
#define ATM_OF_INRATE 2 /* Attempt in-rate delivery */
-
-/*
- * ioctl, getsockopt, and setsockopt are optional and can be set to NULL.
- */
-
struct atmdev_ops { /* only send is required */
void (*dev_close)(struct atm_dev *dev);
int (*open)(struct atm_vcc *vcc);
@@ -190,10 +185,6 @@ struct atmdev_ops { /* only send is required */
int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
void __user *arg);
#endif
- int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,int optlen);
- int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
- void __user *optval,unsigned int optlen);
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
void (*phy_put)(struct atm_dev *dev,unsigned char value,
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 3fcd9ee49734..b3d859831a31 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <uapi/linux/audit.h>
+#include <uapi/linux/netfilter/nf_tables.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -98,6 +99,23 @@ enum audit_nfcfgop {
AUDIT_XT_OP_REGISTER,
AUDIT_XT_OP_REPLACE,
AUDIT_XT_OP_UNREGISTER,
+ AUDIT_NFT_OP_TABLE_REGISTER,
+ AUDIT_NFT_OP_TABLE_UNREGISTER,
+ AUDIT_NFT_OP_CHAIN_REGISTER,
+ AUDIT_NFT_OP_CHAIN_UNREGISTER,
+ AUDIT_NFT_OP_RULE_REGISTER,
+ AUDIT_NFT_OP_RULE_UNREGISTER,
+ AUDIT_NFT_OP_SET_REGISTER,
+ AUDIT_NFT_OP_SET_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_REGISTER,
+ AUDIT_NFT_OP_SETELEM_UNREGISTER,
+ AUDIT_NFT_OP_GEN_REGISTER,
+ AUDIT_NFT_OP_OBJ_REGISTER,
+ AUDIT_NFT_OP_OBJ_UNREGISTER,
+ AUDIT_NFT_OP_OBJ_RESET,
+ AUDIT_NFT_OP_FLOWTABLE_REGISTER,
+ AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_INVALID,
};
extern int is_audit_feature_set(int which);
@@ -274,7 +292,7 @@ extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
extern void __audit_syscall_exit(int ret_success, long ret_value);
extern struct filename *__audit_reusename(const __user char *uptr);
extern void __audit_getname(struct filename *name);
-
+extern void __audit_getcwd(void);
extern void __audit_inode(struct filename *name, const struct dentry *dentry,
unsigned int flags);
extern void __audit_file(const struct file *);
@@ -333,6 +351,11 @@ static inline void audit_getname(struct filename *name)
if (unlikely(!audit_dummy_context()))
__audit_getname(name);
}
+static inline void audit_getcwd(void)
+{
+ if (unlikely(audit_context()))
+ __audit_getcwd();
+}
static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
unsigned int aflags) {
@@ -386,7 +409,7 @@ extern void __audit_fanotify(unsigned int response);
extern void __audit_tk_injoffset(struct timespec64 offset);
extern void __audit_ntp_log(const struct audit_ntp_data *ad);
extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
- enum audit_nfcfgop op);
+ enum audit_nfcfgop op, gfp_t gfp);
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
@@ -524,10 +547,10 @@ static inline void audit_ntp_log(const struct audit_ntp_data *ad)
static inline void audit_log_nfcfg(const char *name, u8 af,
unsigned int nentries,
- enum audit_nfcfgop op)
+ enum audit_nfcfgop op, gfp_t gfp)
{
if (audit_enabled)
- __audit_log_nfcfg(name, af, nentries, op);
+ __audit_log_nfcfg(name, af, nentries, op, gfp);
}
extern int audit_n_rules;
@@ -561,13 +584,7 @@ static inline struct filename *audit_reusename(const __user char *name)
}
static inline void audit_getname(struct filename *name)
{ }
-static inline void __audit_inode(struct filename *name,
- const struct dentry *dentry,
- unsigned int flags)
-{ }
-static inline void __audit_inode_child(struct inode *parent,
- const struct dentry *dentry,
- const unsigned char type)
+static inline void audit_getcwd(void)
{ }
static inline void audit_inode(struct filename *name,
const struct dentry *dentry,
@@ -665,7 +682,7 @@ static inline void audit_ptrace(struct task_struct *t)
static inline void audit_log_nfcfg(const char *name, u8 af,
unsigned int nentries,
- enum audit_nfcfgop op)
+ enum audit_nfcfgop op, gfp_t gfp)
{ }
#define audit_n_rules 0
@@ -677,9 +694,4 @@ static inline bool audit_loginuid_set(struct task_struct *tsk)
return uid_valid(audit_get_loginuid(tsk));
}
-static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
-{
- audit_log_n_string(ab, buf, strlen(buf));
-}
-
#endif
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 90a7e844a098..fff9367a6348 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -33,8 +33,6 @@ enum wb_congested_state {
WB_sync_congested, /* The sync queue is getting full */
};
-typedef int (congested_fn)(void *, int);
-
enum wb_stat_item {
WB_RECLAIMABLE,
WB_WRITEBACK,
@@ -88,26 +86,6 @@ struct wb_completion {
struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
/*
- * For cgroup writeback, multiple wb's may map to the same blkcg. Those
- * wb's can operate mostly independently but should share the congested
- * state. To facilitate such sharing, the congested state is tracked using
- * the following struct which is created on demand, indexed by blkcg ID on
- * its bdi, and refcounted.
- */
-struct bdi_writeback_congested {
- unsigned long state; /* WB_[a]sync_congested flags */
- refcount_t refcnt; /* nr of attached wb's and blkg */
-
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
- * on bdi unregistration. For memcg-wb
- * internal use only! */
- int blkcg_id; /* ID of the associated blkcg */
- struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
-#endif
-};
-
-/*
* Each wb (bdi_writeback) can perform writeback operations, is measured
* and throttled, independently. Without cgroup writeback, each bdi
* (bdi_writeback) is served by its embedded bdi->wb.
@@ -140,7 +118,7 @@ struct bdi_writeback {
struct percpu_counter stat[NR_WB_STAT_ITEMS];
- struct bdi_writeback_congested *congested;
+ unsigned long congested; /* WB_[a]sync_congested flags */
unsigned long bw_time_stamp; /* last time write bw is updated */
unsigned long dirtied_stamp;
@@ -190,8 +168,6 @@ struct backing_dev_info {
struct list_head bdi_list;
unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
unsigned long io_pages; /* max allowed IO size */
- congested_fn *congested_fn; /* Function pointer if device is md/dm */
- void *congested_data; /* Pointer to aux data for congested func */
struct kref refcnt; /* Reference counter for the structure */
unsigned int capabilities; /* Device capabilities */
@@ -208,11 +184,8 @@ struct backing_dev_info {
struct list_head wb_list; /* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
- struct rb_root cgwb_congested_tree; /* their congested states */
struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
-#else
- struct bdi_writeback_congested *wb_congested;
#endif
wait_queue_head_t wb_waitq;
@@ -232,18 +205,8 @@ enum {
BLK_RW_SYNC = 1,
};
-void clear_wb_congested(struct bdi_writeback_congested *congested, int sync);
-void set_wb_congested(struct bdi_writeback_congested *congested, int sync);
-
-static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
- clear_wb_congested(bdi->wb.congested, sync);
-}
-
-static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
-{
- set_wb_congested(bdi->wb.congested, sync);
-}
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
+void set_bdi_congested(struct backing_dev_info *bdi, int sync);
struct wb_lock_cookie {
bool locked;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 6b3504bf7a42..0b06b2d26c9a 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -169,11 +169,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
- struct backing_dev_info *bdi = wb->bdi;
-
- if (bdi->congested_fn)
- return bdi->congested_fn(bdi->congested_data, cong_bits);
- return wb->congested->state & cong_bits;
+ return wb->congested & cong_bits;
}
long congestion_wait(int sync, long timeout);
@@ -224,9 +220,6 @@ static inline int bdi_sched_wait(void *word)
#ifdef CONFIG_CGROUP_WRITEBACK
-struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
-void wb_congested_put(struct bdi_writeback_congested *congested);
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
@@ -404,19 +397,6 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
return false;
}
-static inline struct bdi_writeback_congested *
-wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
-{
- refcount_inc(&bdi->wb_congested->refcnt);
- return bdi->wb_congested;
-}
-
-static inline void wb_congested_put(struct bdi_writeback_congested *congested)
-{
- if (refcount_dec_and_test(&congested->refcnt))
- kfree(congested);
-}
-
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
return &bdi->wb;
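
With congested_fn/congested_data and struct bdi_writeback_congested gone, the congested state is a plain bitmask in the wb, toggled through the now out-of-line set_bdi_congested()/clear_bdi_congested(). A minimal sketch of how a stacking driver might flip those bits — the busy condition is hypothetical, only the two helpers and the BLK_RW_* constants come from these headers:

#include <linux/backing-dev.h>

/* Illustrative only: mark the bdi congested while the (hypothetical)
 * device is busy, and clear it again afterwards. */
static void example_update_congestion(struct backing_dev_info *bdi, bool busy)
{
	if (busy) {
		set_bdi_congested(bdi, BLK_RW_SYNC);
		set_bdi_congested(bdi, BLK_RW_ASYNC);
	} else {
		clear_bdi_congested(bdi, BLK_RW_SYNC);
		clear_bdi_congested(bdi, BLK_RW_ASYNC);
	}
}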
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 56e4580d4f55..614653e07e3a 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -14,113 +14,336 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
-/* Notes on locking:
- *
- * backlight_device->ops_lock is an internal backlight lock protecting the
- * ops pointer and no code outside the core should need to touch it.
- *
- * Access to update_status() is serialised by the update_lock mutex since
- * most drivers seem to need this and historically get it wrong.
- *
- * Most drivers don't need locking on their get_brightness() method.
- * If yours does, you need to implement it in the driver. You can use the
- * update_lock mutex if appropriate.
+/**
+ * enum backlight_update_reason - what method was used to update backlight
*
- * Any other use of the locks below is probably wrong.
+ * A driver indicates the method (reason) used for updating the backlight
+ * when calling backlight_force_update().
*/
-
enum backlight_update_reason {
+ /**
+ * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key.
+ */
BACKLIGHT_UPDATE_HOTKEY,
+
+ /**
+ * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs.
+ */
BACKLIGHT_UPDATE_SYSFS,
};
+/**
+ * enum backlight_type - the type of backlight control
+ *
+ * The type of interface used to control the backlight.
+ */
enum backlight_type {
+ /**
+ * @BACKLIGHT_RAW:
+ *
+ * The backlight is controlled using hardware registers.
+ */
BACKLIGHT_RAW = 1,
+
+ /**
+ * @BACKLIGHT_PLATFORM:
+ *
+ * The backlight is controlled using a platform-specific interface.
+ */
BACKLIGHT_PLATFORM,
+
+ /**
+ * @BACKLIGHT_FIRMWARE:
+ *
+ * The backlight is controlled using a standard firmware interface.
+ */
BACKLIGHT_FIRMWARE,
+
+ /**
+ * @BACKLIGHT_TYPE_MAX: Number of entries.
+ */
BACKLIGHT_TYPE_MAX,
};
+/**
+ * enum backlight_notification - the type of notification
+ *
+ * The type of notification sent to receivers that registered for
+ * notifications using backlight_register_notifier().
+ */
enum backlight_notification {
+ /**
+ * @BACKLIGHT_REGISTERED: The backlight device is registered.
+ */
BACKLIGHT_REGISTERED,
+
+ /**
+ * @BACKLIGHT_UNREGISTERED: The backlight device is unregistered.
+ */
BACKLIGHT_UNREGISTERED,
};
+/**
+ * enum backlight_scale - the type of scale used for brightness values
+ *
+ * The type of scale used for brightness values.
+ */
enum backlight_scale {
+ /**
+ * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown.
+ */
BACKLIGHT_SCALE_UNKNOWN = 0,
+
+ /**
+ * @BACKLIGHT_SCALE_LINEAR: The scale is linear.
+ *
+ * With a linear scale, each step increases the brightness by the same amount.
+ */
BACKLIGHT_SCALE_LINEAR,
+
+ /**
+ * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear.
+ *
+ * This is often used when the brightness values are adjusted to match
+ * the relative perception of the eye, which demands a non-linear scale.
+ */
BACKLIGHT_SCALE_NON_LINEAR,
};
struct backlight_device;
struct fb_info;
+/**
+ * struct backlight_ops - backlight operations
+ *
+ * The backlight operations are specified when the backlight device is registered.
+ */
struct backlight_ops {
+ /**
+ * @options: Configure how operations are called from the core.
+ *
+ * The options parameter is used to adjust the behaviour of the core.
+ * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called
+ * upon suspend and resume.
+ */
unsigned int options;
#define BL_CORE_SUSPENDRESUME (1 << 0)
- /* Notify the backlight driver some property has changed */
+ /**
+ * @update_status: Operation called when properties have changed.
+ *
+ * Notify the backlight driver that some property has changed.
+ * The update_status operation is protected by the update_lock.
+ *
+ * The backlight driver is expected to use backlight_is_blank()
+ * to check if the display is blanked and set brightness accordingly.
+ * update_status() is called when any of the properties has changed.
+ *
+ * RETURNS:
+ *
+ * 0 on success, negative error code if any failure occurred.
+ */
int (*update_status)(struct backlight_device *);
- /* Return the current backlight brightness (accounting for power,
- fb_blank etc.) */
+
+ /**
+ * @get_brightness: Return the current backlight brightness.
+ *
+ * The driver may implement this as a readback from the HW.
+ * This operation is optional and if not present then the current
+ * brightness property value is used.
+ *
+ * RETURNS:
+ *
+ * A brightness value which is 0 or a positive number.
+ * On failure a negative error code is returned.
+ */
int (*get_brightness)(struct backlight_device *);
- /* Check if given framebuffer device is the one bound to this backlight;
- return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
- int (*check_fb)(struct backlight_device *, struct fb_info *);
+
+ /**
+ * @check_fb: Check the framebuffer device.
+ *
+ * Check if the given framebuffer device is the one bound to this backlight.
+ * This operation is optional; if not implemented, the fbdev is assumed to
+ * always be the one bound to the backlight.
+ *
+ * RETURNS:
+ *
+ * If info is NULL or the info matches the fbdev bound to the backlight return true.
+ * If info does not match the fbdev bound to the backlight return false.
+ */
+ int (*check_fb)(struct backlight_device *bd, struct fb_info *info);
};
-/* This structure defines all the properties of a backlight */
+/**
+ * struct backlight_properties - backlight properties
+ *
+ * This structure defines all the properties of a backlight.
+ */
struct backlight_properties {
- /* Current User requested brightness (0 - max_brightness) */
+ /**
+ * @brightness: The current brightness requested by the user.
+ *
+ * The backlight core makes sure the range is (0 to max_brightness)
+ * when the brightness is set via the sysfs attribute:
+ * /sys/class/backlight/<backlight>/brightness.
+ *
+ * This value can be set in the backlight_properties passed
+ * to devm_backlight_device_register() to set a default brightness
+ * value.
+ */
int brightness;
- /* Maximal value for brightness (read-only) */
+
+ /**
+ * @max_brightness: The maximum brightness value.
+ *
+ * This value must be set in the backlight_properties passed to
+ * devm_backlight_device_register() and shall not be modified by the
+ * driver after registration.
+ */
int max_brightness;
- /* Current FB Power mode (0: full on, 1..3: power saving
- modes; 4: full off), see FB_BLANK_XXX */
+
+ /**
+ * @power: The current power mode.
+ *
+ * User space can configure the power mode using the sysfs
+ * attribute: /sys/class/backlight/<backlight>/bl_power
+ * When the power property is updated update_status() is called.
+ *
+ * The possible values are: (0: full on, 1 to 3: power saving
+ * modes; 4: full off), see FB_BLANK_XXX.
+ *
+ * When the backlight device is enabled @power is set
+ * to FB_BLANK_UNBLANK. When the backlight device is disabled
+ * @power is set to FB_BLANK_POWERDOWN.
+ */
int power;
- /* FB Blanking active? (values as for power) */
- /* Due to be removed, please use (state & BL_CORE_FBBLANK) */
+
+ /**
+ * @fb_blank: The power state from the FBIOBLANK ioctl.
+ *
+ * When the FBIOBLANK ioctl is called @fb_blank is set to the
+ * blank parameter and the update_status() operation is called.
+ *
+ * When the backlight device is enabled @fb_blank is set
+ * to FB_BLANK_UNBLANK. When the backlight device is disabled
+ * @fb_blank is set to FB_BLANK_POWERDOWN.
+ *
+ * Backlight drivers should avoid using this property. It has been
+ * replaced by state & BL_CORE_FBBLANK (although most drivers should
+ * use backlight_is_blank() as the preferred means to get the blank
+ * state).
+ *
+ * fb_blank is deprecated and will be removed.
+ */
int fb_blank;
- /* Backlight type */
+
+ /**
+ * @type: The type of backlight supported.
+ *
+ * The backlight type allows userspace to make appropriate
+ * policy decisions based on the backlight type.
+ *
+ * This value must be set in the backlight_properties
+ * passed to devm_backlight_device_register().
+ */
enum backlight_type type;
- /* Flags used to signal drivers of state changes */
+
+ /**
+ * @state: The state of the backlight core.
+ *
+ * The state is a bitmask. BL_CORE_FBBLANK is set when the display
+ * is expected to be blank. BL_CORE_SUSPENDED is set when the
+ * driver is suspended.
+ *
+ * Backlight drivers are expected to use backlight_is_blank()
+ * in their update_status() operation rather than reading the
+ * state property.
+ *
+ * The state is maintained by the core and drivers may not modify it.
+ */
unsigned int state;
- /* Type of the brightness scale (linear, non-linear, ...) */
- enum backlight_scale scale;
#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
+ /**
+ * @scale: The type of the brightness scale.
+ */
+ enum backlight_scale scale;
};
+/**
+ * struct backlight_device - backlight device data
+ *
+ * This structure holds all data required by a backlight device.
+ */
struct backlight_device {
- /* Backlight properties */
+ /**
+ * @props: Backlight properties
+ */
struct backlight_properties props;
- /* Serialise access to update_status method */
+ /**
+ * @update_lock: The lock used when calling the update_status() operation.
+ *
+ * update_lock is an internal backlight lock that serialises access
+ * to the update_status() operation. The backlight core holds the update_lock
+ * when calling the update_status() operation. The update_lock shall not
+ * be used by backlight drivers.
+ */
struct mutex update_lock;
- /* This protects the 'ops' field. If 'ops' is NULL, the driver that
- registered this device has been unloaded, and if class_get_devdata()
- points to something in the body of that driver, it is also invalid. */
+ /**
+ * @ops_lock: The lock used around everything related to backlight_ops.
+ *
+ * ops_lock is an internal backlight lock that protects the ops pointer
+ * and is used around all accesses to ops and when the operations are
+ * invoked. The ops_lock shall not be used by backlight drivers.
+ */
struct mutex ops_lock;
+
+ /**
+ * @ops: Pointer to the backlight operations.
+ *
+ * If ops is NULL, the driver that registered this device has been unloaded,
+ * and if class_get_devdata() points to something in the body of that driver,
+ * it is also invalid.
+ */
const struct backlight_ops *ops;
- /* The framebuffer notifier block */
+ /**
+ * @fb_notif: The framebuffer notifier block
+ */
struct notifier_block fb_notif;
- /* list entry of all registered backlight devices */
+ /**
+ * @entry: List entry of all registered backlight devices
+ */
struct list_head entry;
+ /**
+ * @dev: Parent device.
+ */
struct device dev;
- /* Multiple framebuffers may share one backlight device */
+ /**
+ * @fb_bl_on: The state of the individual fbdevs.
+ *
+ * Multiple fbdevs may share one backlight device. fb_bl_on records
+ * the state of each individual fbdev.
+ */
bool fb_bl_on[FB_MAX];
+ /**
+ * @use_count: The number of uses of fb_bl_on.
+ */
int use_count;
};
+/**
+ * backlight_update_status - force an update of the backlight device status
+ * @bd: the backlight device
+ */
static inline int backlight_update_status(struct backlight_device *bd)
{
int ret = -ENOENT;
@@ -166,49 +389,83 @@ static inline int backlight_disable(struct backlight_device *bd)
}
/**
- * backlight_put - Drop backlight reference
- * @bd: the backlight device to put
+ * backlight_is_blank - Return true if display is expected to be blank
+ * @bd: the backlight device
+ *
+ * Display is expected to be blank if any of these is true::
+ *
+ * 1) if power is not UNBLANK
+ * 2) if fb_blank is not UNBLANK
+ * 3) if state indicates BLANK or SUSPENDED
+ *
+ * Returns true if display is expected to be blank, false otherwise.
*/
-static inline void backlight_put(struct backlight_device *bd)
+static inline bool backlight_is_blank(const struct backlight_device *bd)
{
- if (bd)
- put_device(&bd->dev);
+ return bd->props.power != FB_BLANK_UNBLANK ||
+ bd->props.fb_blank != FB_BLANK_UNBLANK ||
+ bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
}
-extern struct backlight_device *backlight_device_register(const char *name,
- struct device *dev, void *devdata, const struct backlight_ops *ops,
- const struct backlight_properties *props);
-extern struct backlight_device *devm_backlight_device_register(
- struct device *dev, const char *name, struct device *parent,
- void *devdata, const struct backlight_ops *ops,
- const struct backlight_properties *props);
-extern void backlight_device_unregister(struct backlight_device *bd);
-extern void devm_backlight_device_unregister(struct device *dev,
- struct backlight_device *bd);
-extern void backlight_force_update(struct backlight_device *bd,
- enum backlight_update_reason reason);
-extern int backlight_register_notifier(struct notifier_block *nb);
-extern int backlight_unregister_notifier(struct notifier_block *nb);
-extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+/**
+ * backlight_get_brightness - Returns the current brightness value
+ * @bd: the backlight device
+ *
+ * Returns the current brightness value, taking into consideration the current
+ * state. If backlight_is_blank() returns true then return 0 as brightness
+ * otherwise return the current brightness property value.
+ *
+ * Backlight drivers are expected to use this function in their update_status()
+ * operation to get the brightness value.
+ */
+static inline int backlight_get_brightness(const struct backlight_device *bd)
+{
+ if (backlight_is_blank(bd))
+ return 0;
+ else
+ return bd->props.brightness;
+}
+
+struct backlight_device *
+backlight_device_register(const char *name, struct device *dev, void *devdata,
+ const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+struct backlight_device *
+devm_backlight_device_register(struct device *dev, const char *name,
+ struct device *parent, void *devdata,
+ const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+void backlight_device_unregister(struct backlight_device *bd);
+void devm_backlight_device_unregister(struct device *dev,
+ struct backlight_device *bd);
+void backlight_force_update(struct backlight_device *bd,
+ enum backlight_update_reason reason);
+int backlight_register_notifier(struct notifier_block *nb);
+int backlight_unregister_notifier(struct notifier_block *nb);
struct backlight_device *backlight_device_get_by_name(const char *name);
-extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
+struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+int backlight_device_set_brightness(struct backlight_device *bd,
+ unsigned long brightness);
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
+/**
+ * bl_get_data - access devdata
+ * @bl_dev: pointer to backlight device
+ *
+ * When a backlight device is registered, the driver can supply a
+ * void * devdata. bl_get_data() returns a pointer to that devdata.
+ *
+ * RETURNS:
+ *
+ * pointer to devdata stored while registering the backlight device.
+ */
static inline void * bl_get_data(struct backlight_device *bl_dev)
{
return dev_get_drvdata(&bl_dev->dev);
}
-struct generic_bl_info {
- const char *name;
- int max_intensity;
- int default_intensity;
- int limit_mask;
- void (*set_bl_intensity)(int intensity);
- void (*kick_battery)(void);
-};
-
#ifdef CONFIG_OF
struct backlight_device *of_find_backlight_by_node(struct device_node *node);
#else
@@ -220,14 +477,8 @@ of_find_backlight_by_node(struct device_node *node)
#endif
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-struct backlight_device *of_find_backlight(struct device *dev);
struct backlight_device *devm_of_find_backlight(struct device *dev);
#else
-static inline struct backlight_device *of_find_backlight(struct device *dev)
-{
- return NULL;
-}
-
static inline struct backlight_device *
devm_of_find_backlight(struct device *dev)
{
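
The new kernel-doc above points drivers at backlight_is_blank() and backlight_get_brightness() instead of open-coding power/fb_blank/state checks. A minimal sketch of a driver using them, assuming a hypothetical example_bl device with a write_level() callback; only the backlight API itself comes from this header:

#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical driver state; write_level() programs the hardware. */
struct example_bl {
	struct device *dev;
	void (*write_level)(struct example_bl *ebl, int level);
};

static int example_bl_update_status(struct backlight_device *bd)
{
	struct example_bl *ebl = bl_get_data(bd);

	/* backlight_get_brightness() folds power, fb_blank and state into
	 * one value: 0 when backlight_is_blank() is true, otherwise the
	 * requested brightness. */
	ebl->write_level(ebl, backlight_get_brightness(bd));
	return 0;
}

static const struct backlight_ops example_bl_ops = {
	.update_status	= example_bl_update_status,
};

static int example_bl_register(struct example_bl *ebl)
{
	const struct backlight_properties props = {
		.type		= BACKLIGHT_RAW,	/* must be set */
		.max_brightness	= 255,
		.brightness	= 128,			/* default level */
	};
	struct backlight_device *bd;

	bd = devm_backlight_device_register(ebl->dev, "example-backlight",
					    ebl->dev, ebl, &example_bl_ops,
					    &props);
	if (IS_ERR(bd))
		return PTR_ERR(bd);

	return backlight_update_status(bd);
}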
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 4a20b7517dd0..0571701ab1c5 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -45,17 +45,18 @@ struct linux_binprm {
#ifdef __alpha__
unsigned int taso:1;
#endif
- struct file * executable; /* Executable to pass to the interpreter */
- struct file * interpreter;
- struct file * file;
+ struct file *executable; /* Executable to pass to the interpreter */
+ struct file *interpreter;
+ struct file *file;
struct cred *cred; /* new credentials */
int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
unsigned int per_clear; /* bits to clear in current->personality */
int argc, envc;
- const char * filename; /* Name of binary as seen by procps */
- const char * interp; /* Name of the binary really executed. Most
+ const char *filename; /* Name of binary as seen by procps */
+ const char *interp; /* Name of the binary really executed. Most
of the time same as filename, but could be
different for binfmt_{misc,script} */
+ const char *fdpath; /* generated filename for execveat */
unsigned interp_flags;
int execfd; /* File descriptor of the executable */
unsigned long loader, exec;
@@ -134,13 +135,7 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
-extern int do_execve(struct filename *,
- const char __user * const __user *,
- const char __user * const __user *);
-extern int do_execveat(int, struct filename *,
- const char __user * const __user *,
- const char __user * const __user *,
- int);
-int do_execve_file(struct file *file, void *__argv, void *__envp);
+int kernel_execve(const char *filename,
+ const char *const *argv, const char *const *envp);
#endif /* _LINUX_BINFMTS_H */
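
The do_execve()/do_execveat()/do_execve_file() trio is replaced by kernel_execve(), which takes plain kernel pointers rather than __user argument arrays. A minimal sketch of the kind of in-kernel caller this serves; the path and environment below are illustrative, not taken from the patch:

#include <linux/binfmts.h>

static int example_run_init(void)
{
	static const char *argv[] = { "/sbin/init", NULL };
	static const char *envp[] = { "HOME=/", "TERM=linux", NULL };

	/* Only makes sense from a kernel thread that is about to become
	 * a user process (e.g. init bring-up or a usermode helper). */
	return kernel_execve(argv[0], argv, envp);
}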
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 91676d4b2dfe..c6d765382926 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -8,8 +8,6 @@
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
-
-#ifdef CONFIG_BLOCK
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
@@ -491,21 +489,12 @@ do { \
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
-#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
-#else
-static inline void bio_associate_blkg_from_page(struct bio *bio,
- struct page *page) { }
-#endif
-
#ifdef CONFIG_BLK_CGROUP
-void bio_disassociate_blkg(struct bio *bio);
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
-static inline void bio_disassociate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
struct cgroup_subsys_state *css)
@@ -824,5 +813,4 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
bio->bi_opf |= REQ_NOWAIT;
}
-#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 48ea093ff04c..4e035aca6f7e 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -77,7 +77,7 @@
*/
#define FIELD_FIT(_mask, _val) \
({ \
- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \
!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
})
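
With the hunk above, FIELD_FIT() no longer feeds _val into the compile-time field check, so an oversized value (even a compile-time constant) is reported by the macro's runtime test instead of breaking the build. A small sketch with a made-up register field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_SPEED_MASK	GENMASK(7, 4)	/* hypothetical field */

static int example_set_speed(u32 *reg, u32 speed)
{
	/* Reject values that do not fit into bits 7:4. */
	if (!FIELD_FIT(EXAMPLE_SPEED_MASK, speed))
		return -EINVAL;

	*reg &= ~EXAMPLE_SPEED_MASK;
	*reg |= FIELD_PREP(EXAMPLE_SPEED_MASK, speed);
	return 0;
}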
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index a57ebe2f00ab..c8fc9792ac77 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -109,12 +109,6 @@ struct blkcg_gq {
struct hlist_node blkcg_node;
struct blkcg *blkcg;
- /*
- * Each blkg gets congested separately and the congestion state is
- * propagated to the matching bdi_writeback_congested.
- */
- struct bdi_writeback_congested *wb_congested;
-
/* all non-root blkcg_gq's are guaranteed to have access to parent */
struct blkcg_gq *parent;
@@ -183,10 +177,6 @@ extern bool blkcg_debug_stats;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q);
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);
@@ -481,32 +471,6 @@ static inline bool blkg_tryget(struct blkcg_gq *blkg)
}
/**
- * blkg_tryget_closest - try and get a blkg ref on the closet blkg
- * @blkg: blkg to get
- *
- * This needs to be called rcu protected. As the failure mode here is to walk
- * up the blkg tree, this ensure that the blkg->parent pointers are always
- * valid. This returns the blkg that it ended up taking a reference on or %NULL
- * if no reference was taken.
- */
-static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
-{
- struct blkcg_gq *ret_blkg = NULL;
-
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- while (blkg) {
- if (blkg_tryget(blkg)) {
- ret_blkg = blkg;
- break;
- }
- blkg = blkg->parent;
- }
-
- return ret_blkg;
-}
-
-/**
* blkg_put - put a blkg reference
* @blkg: blkg to put
*/
@@ -547,14 +511,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
-#ifdef CONFIG_BLK_DEV_THROTTLING
-extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
- struct bio *bio);
-#else
-static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
- struct bio *bio) { return false; }
-#endif
-
bool __blkcg_punt_bio_submit(struct bio *bio);
static inline bool blkcg_punt_bio_submit(struct bio *bio)
@@ -570,65 +526,6 @@ static inline void blkcg_bio_issue_init(struct bio *bio)
bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
- struct bio *bio)
-{
- struct blkcg_gq *blkg;
- bool throtl = false;
-
- rcu_read_lock();
-
- if (!bio->bi_blkg) {
- char b[BDEVNAME_SIZE];
-
- WARN_ONCE(1,
- "no blkg associated for bio on block-device: %s\n",
- bio_devname(bio, b));
- bio_associate_blkg(bio);
- }
-
- blkg = bio->bi_blkg;
-
- throtl = blk_throtl_bio(q, blkg, bio);
-
- if (!throtl) {
- struct blkg_iostat_set *bis;
- int rwd, cpu;
-
- if (op_is_discard(bio->bi_opf))
- rwd = BLKG_IOSTAT_DISCARD;
- else if (op_is_write(bio->bi_opf))
- rwd = BLKG_IOSTAT_WRITE;
- else
- rwd = BLKG_IOSTAT_READ;
-
- cpu = get_cpu();
- bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
- u64_stats_update_begin(&bis->sync);
-
- /*
- * If the bio is flagged with BIO_CGROUP_ACCT it means this is a
- * split bio and we would have already accounted for the size of
- * the bio.
- */
- if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
- bio_set_flag(bio, BIO_CGROUP_ACCT);
- bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
- }
- bis->cur.ios[rwd]++;
-
- u64_stats_update_end(&bis->sync);
- if (cgroup_subsys_on_dfl(io_cgrp_subsys))
- cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
- put_cpu();
- }
-
- blkcg_bio_issue_init(bio);
-
- rcu_read_unlock();
- return !throtl;
-}
-
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
@@ -702,6 +599,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
+void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
@@ -755,8 +653,7 @@ static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
- struct bio *bio) { return true; }
+static inline void blk_cgroup_bio_start(struct bio *bio) { }
#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d6fcae17da5a..9d2d5ad367a4 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -267,27 +267,9 @@ struct blk_mq_queue_data {
bool last;
};
-typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
- const struct blk_mq_queue_data *);
-typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
-typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
-typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
-typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
-typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
-typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
- unsigned int, unsigned int);
-typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
- unsigned int);
-
typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
-typedef int (poll_fn)(struct blk_mq_hw_ctx *);
-typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
-typedef bool (busy_fn)(struct request_queue *);
-typedef void (complete_fn)(struct request *);
-typedef void (cleanup_rq_fn)(struct request *);
/**
* struct blk_mq_ops - Callback functions that implements block driver
@@ -297,7 +279,8 @@ struct blk_mq_ops {
/**
* @queue_rq: Queue a new request from block IO.
*/
- queue_rq_fn *queue_rq;
+ blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
+ const struct blk_mq_queue_data *);
/**
* @commit_rqs: If a driver uses bd->last to judge when to submit
@@ -306,7 +289,7 @@ struct blk_mq_ops {
* purpose of kicking the hardware (which the last request otherwise
* would have done).
*/
- commit_rqs_fn *commit_rqs;
+ void (*commit_rqs)(struct blk_mq_hw_ctx *);
/**
* @get_budget: Reserve budget before queue request, once .queue_rq is
@@ -314,37 +297,38 @@ struct blk_mq_ops {
* reserved budget. Also we have to handle failure case
* of .get_budget for avoiding I/O deadlock.
*/
- get_budget_fn *get_budget;
+ bool (*get_budget)(struct request_queue *);
+
/**
* @put_budget: Release the reserved budget.
*/
- put_budget_fn *put_budget;
+ void (*put_budget)(struct request_queue *);
/**
* @timeout: Called on request timeout.
*/
- timeout_fn *timeout;
+ enum blk_eh_timer_return (*timeout)(struct request *, bool);
/**
* @poll: Called to poll for completion of a specific tag.
*/
- poll_fn *poll;
+ int (*poll)(struct blk_mq_hw_ctx *);
/**
* @complete: Mark the request as complete.
*/
- complete_fn *complete;
+ void (*complete)(struct request *);
/**
* @init_hctx: Called when the block layer side of a hardware queue has
* been set up, allowing the driver to allocate/init matching
* structures.
*/
- init_hctx_fn *init_hctx;
+ int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
/**
* @exit_hctx: Ditto for exit/teardown.
*/
- exit_hctx_fn *exit_hctx;
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
/**
* @init_request: Called for every command allocated by the block layer
@@ -353,11 +337,13 @@ struct blk_mq_ops {
* Tag greater than or equal to queue_depth is for setting up
* flush request.
*/
- init_request_fn *init_request;
+ int (*init_request)(struct blk_mq_tag_set *set, struct request *,
+ unsigned int, unsigned int);
/**
* @exit_request: Ditto for exit/teardown.
*/
- exit_request_fn *exit_request;
+ void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
+ unsigned int);
/**
* @initialize_rq_fn: Called from inside blk_get_request().
@@ -368,18 +354,18 @@ struct blk_mq_ops {
* @cleanup_rq: Called before freeing one request which isn't completed
* yet, and usually for freeing the driver private data.
*/
- cleanup_rq_fn *cleanup_rq;
+ void (*cleanup_rq)(struct request *);
/**
* @busy: If set, returns whether or not this queue currently is busy.
*/
- busy_fn *busy;
+ bool (*busy)(struct request_queue *);
/**
* @map_queues: This allows drivers specify their own queue mapping by
* overriding the setup-time function that builds the mq_map.
*/
- map_queues_fn *map_queues;
+ int (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
/**
@@ -447,8 +433,6 @@ enum {
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
/* allocate from reserved pool */
BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
- /* allocate internal/sched tag */
- BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2),
/* set RQF_PREEMPT */
BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3),
};
@@ -503,8 +487,8 @@ void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-bool blk_mq_complete_request(struct request *rq);
-void blk_mq_force_complete_rq(struct request *rq);
+void blk_mq_complete_request(struct request *rq);
+bool blk_mq_complete_request_remote(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
struct bio *bio, unsigned int nr_segs);
bool blk_mq_queue_stopped(struct request_queue *q);
@@ -537,6 +521,15 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q);
unsigned int blk_mq_rq_cpu(struct request *rq);
+bool __blk_should_fake_timeout(struct request_queue *q);
+static inline bool blk_should_fake_timeout(struct request_queue *q)
+{
+ if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
+ test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+ return __blk_should_fake_timeout(q);
+ return false;
+}
+
/**
* blk_mq_rq_from_pdu - cast a PDU to a request
* @pdu: the PDU (Protocol Data Unit) to be casted
@@ -589,6 +582,6 @@ static inline void blk_mq_cleanup_rq(struct request *rq)
rq->q->mq_ops->cleanup_rq(rq);
}
-blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio);
+blk_qc_t blk_mq_submit_bio(struct bio *bio);
#endif
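
The blk_mq_ops conversion above only replaces the typedef'ed callback types with inline function-pointer prototypes; drivers keep using designated initializers. A minimal sketch of the most common callback, with a hypothetical function name and no real hardware handling:

#include <linux/blk-mq.h>

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* A real driver would hand the request to the hardware here and
	 * complete it from its IRQ handler; completing inline keeps the
	 * sketch self-contained. */
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};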
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index ccb895f911b1..4ecf4fed171f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,12 +14,39 @@ struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
-struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;
+struct block_device {
+ dev_t bd_dev; /* not a kdev_t - it's a search key */
+ int bd_openers;
+ struct inode * bd_inode; /* will die */
+ struct super_block * bd_super;
+ struct mutex bd_mutex; /* open/close mutex */
+ void * bd_claiming;
+ void * bd_holder;
+ int bd_holders;
+ bool bd_write_holder;
+#ifdef CONFIG_SYSFS
+ struct list_head bd_holder_disks;
+#endif
+ struct block_device * bd_contains;
+ u8 bd_partno;
+ struct hd_struct * bd_part;
+ /* number of times partitions within this device have been opened. */
+ unsigned bd_part_count;
+ int bd_invalidated;
+ struct gendisk * bd_disk;
+ struct backing_dev_info *bd_bdi;
+
+ /* The counter of freeze processes */
+ int bd_fsfreeze_count;
+ /* Mutex for freeze */
+ struct mutex bd_fsfreeze_mutex;
+} __randomize_layout;
+
/*
* Block error status values. See block/blk-core:blk_errors for the details.
* Alpha cannot write a byte atomically, so we need to use 32-bit value.
@@ -300,12 +327,8 @@ enum req_opf {
REQ_OP_DISCARD = 3,
/* securely erase sectors */
REQ_OP_SECURE_ERASE = 5,
- /* reset a zone write pointer */
- REQ_OP_ZONE_RESET = 6,
/* write the same sector many times */
REQ_OP_WRITE_SAME = 7,
- /* reset all the zone present on the device */
- REQ_OP_ZONE_RESET_ALL = 8,
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = 9,
/* Open a zone */
@@ -316,6 +339,10 @@ enum req_opf {
REQ_OP_ZONE_FINISH = 12,
/* write data at the current zone write pointer */
REQ_OP_ZONE_APPEND = 13,
+ /* reset a zone write pointer */
+ REQ_OP_ZONE_RESET = 15,
+ /* reset all the zone present on the device */
+ REQ_OP_ZONE_RESET_ALL = 17,
/* SCSI passthrough using struct scsi_request */
REQ_OP_SCSI_IN = 32,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 57241417ff2f..bb5636cc17b9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -4,9 +4,6 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>
-
-#ifdef CONFIG_BLOCK
-
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
@@ -289,8 +286,6 @@ static inline unsigned short req_get_ioprio(struct request *req)
struct blk_queue_ctx;
-typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-
struct bio_vec;
enum blk_eh_timer_return {
@@ -311,11 +306,14 @@ enum blk_queue_state {
/*
* Zoned block device models (zoned limit).
+ *
+ * Note: This needs to be ordered from the least to the most severe
+ * restrictions for the inheritance in blk_stack_limits() to work.
*/
enum blk_zoned_model {
- BLK_ZONED_NONE, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
+ BLK_ZONED_NONE = 0, /* Regular block device */
+ BLK_ZONED_HA, /* Host-aware zoned block device */
+ BLK_ZONED_HM, /* Host-managed zoned block device */
};
struct queue_limits {
@@ -401,8 +399,6 @@ struct request_queue {
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
- make_request_fn *make_request_fn;
-
const struct blk_mq_ops *mq_ops;
/* sw queues */
@@ -520,6 +516,8 @@ struct request_queue {
unsigned int nr_zones;
unsigned long *conv_zones_bitmap;
unsigned long *seq_zones_wlock;
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */
/*
@@ -528,9 +526,9 @@ struct request_queue {
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
+ struct mutex debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace __rcu *blk_trace;
- struct mutex blk_trace_mutex;
#endif
/*
* for flush operations
@@ -574,8 +572,9 @@ struct request_queue {
struct list_head tag_set_list;
struct bio_set bio_split;
-#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
+
+#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
#endif
@@ -584,8 +583,6 @@ struct request_queue {
size_t cmd_size;
- struct work_struct release_work;
-
#define BLK_MAX_WRITE_HINTS 5
u64 write_hints[BLK_MAX_WRITE_HINTS];
};
@@ -730,6 +727,28 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
return true;
return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
}
+
+static inline void blk_queue_max_open_zones(struct request_queue *q,
+ unsigned int max_open_zones)
+{
+ q->max_open_zones = max_open_zones;
+}
+
+static inline unsigned int queue_max_open_zones(const struct request_queue *q)
+{
+ return q->max_open_zones;
+}
+
+static inline void blk_queue_max_active_zones(struct request_queue *q,
+ unsigned int max_active_zones)
+{
+ q->max_active_zones = max_active_zones;
+}
+
+static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+{
+ return q->max_active_zones;
+}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
@@ -745,6 +764,14 @@ static inline unsigned int blk_queue_zone_no(struct request_queue *q,
{
return 0;
}
+static inline unsigned int queue_max_open_zones(const struct request_queue *q)
+{
+ return 0;
+}
+static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+{
+ return 0;
+}
#endif /* CONFIG_BLK_DEV_ZONED */
static inline bool rq_is_sync(struct request *rq)
@@ -861,8 +888,7 @@ static inline void rq_flush_dcache_pages(struct request *rq)
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-extern blk_qc_t generic_make_request(struct bio *bio);
-extern blk_qc_t direct_make_request(struct bio *bio);
+blk_qc_t submit_bio_noacct(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
@@ -876,7 +902,7 @@ extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_queue_split(struct request_queue *, struct bio **);
+extern void blk_queue_split(struct bio **);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
unsigned int, void __user *);
@@ -1079,7 +1105,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
extern bool blk_update_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
-extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
/*
@@ -1114,11 +1139,8 @@ extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
-extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
- sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
-extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
@@ -1166,13 +1188,13 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
extern void blk_dump_rq_flags(struct request *, char *);
-extern long nr_blockdev_pages(void);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id);
+struct request_queue *blk_alloc_queue(int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
+#ifdef CONFIG_BLOCK
/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
@@ -1190,6 +1212,7 @@ struct blk_plug {
struct list_head cb_list; /* md requires an unplug callback */
unsigned short rq_count;
bool multiple_queues;
+ bool nowait;
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
@@ -1232,9 +1255,47 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
!list_empty(&plug->cb_list));
}
+int blkdev_issue_flush(struct block_device *, gfp_t);
+long nr_blockdev_pages(void);
+#else /* CONFIG_BLOCK */
+struct blk_plug {
+};
+
+static inline void blk_start_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_finish_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_flush_plug(struct task_struct *task)
+{
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+ return false;
+}
+
+static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline long nr_blockdev_pages(void)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
+
extern void blk_io_schedule(void);
-int blkdev_issue_flush(struct block_device *, gfp_t);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
@@ -1491,6 +1552,24 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
return 0;
}
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return queue_max_open_zones(q);
+ return 0;
+}
+
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return queue_max_active_zones(q);
+ return 0;
+}
+
static inline int queue_dma_alignment(const struct request_queue *q)
{
return q ? q->dma_alignment : 511;
@@ -1516,7 +1595,7 @@ static inline unsigned int blksize_bits(unsigned int size)
static inline unsigned int block_size(struct block_device *bdev)
{
- return bdev->bd_block_size;
+ return 1 << bdev->bd_inode->i_blkbits;
}
int kblockd_schedule_work(struct work_struct *work);
@@ -1746,6 +1825,7 @@ static inline void blk_ksm_unregister(struct request_queue *q) { }
struct block_device_operations {
+ blk_qc_t (*submit_bio) (struct bio *bio);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
@@ -1753,8 +1833,6 @@ struct block_device_operations {
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
- /* ->media_changed() is DEPRECATED, use ->check_events() instead */
- int (*media_changed) (struct gendisk *);
void (*unlock_native_capacity) (struct gendisk *);
int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
@@ -1834,52 +1912,6 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
}
#endif /* CONFIG_BLK_DEV_ZONED */
-#else /* CONFIG_BLOCK */
-
-struct block_device;
-
-/*
- * stubs for when the block layer is configured out
- */
-#define buffer_heads_over_limit 0
-
-static inline long nr_blockdev_pages(void)
-{
- return 0;
-}
-
-struct blk_plug {
-};
-
-static inline void blk_start_plug(struct blk_plug *plug)
-{
-}
-
-static inline void blk_finish_plug(struct blk_plug *plug)
-{
-}
-
-static inline void blk_flush_plug(struct task_struct *task)
-{
-}
-
-static inline void blk_schedule_flush_plug(struct task_struct *task)
-{
-}
-
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
-{
- return false;
-}
-
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
-{
- return 0;
-}
-
-#endif /* CONFIG_BLOCK */
-
static inline void blk_wake_io_task(struct task_struct *waiter)
{
/*
@@ -1893,7 +1925,6 @@ static inline void blk_wake_io_task(struct task_struct *waiter)
wake_up_process(waiter);
}
-#ifdef CONFIG_BLOCK
unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
unsigned int op);
void disk_end_io_acct(struct gendisk *disk, unsigned int op,
@@ -1919,6 +1950,53 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
}
-#endif /* CONFIG_BLOCK */
+int bdev_read_only(struct block_device *bdev);
+int set_blocksize(struct block_device *bdev, int size);
+
+const char *bdevname(struct block_device *bdev, char *buffer);
+struct block_device *lookup_bdev(const char *);
+
+void blkdev_show(struct seq_file *seqf, off_t offset);
+
+#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
+#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
+#ifdef CONFIG_BLOCK
+#define BLKDEV_MAJOR_MAX 512
+#else
+#define BLKDEV_MAJOR_MAX 0
+#endif
+
+int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
+struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+ void *holder);
+struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
+int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
+ void *holder);
+void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
+ void *holder);
+void blkdev_put(struct block_device *bdev, fmode_t mode);
+
+struct block_device *I_BDEV(struct inode *inode);
+struct block_device *bdget(dev_t);
+struct block_device *bdgrab(struct block_device *bdev);
+void bdput(struct block_device *);
+
+#ifdef CONFIG_BLOCK
+void invalidate_bdev(struct block_device *bdev);
+int sync_blockdev(struct block_device *bdev);
+#else
+static inline void invalidate_bdev(struct block_device *bdev)
+{
+}
+static inline int sync_blockdev(struct block_device *bdev)
+{
+ return 0;
+}
#endif
+int fsync_bdev(struct block_device *bdev);
+
+struct super_block *freeze_bdev(struct block_device *bdev);
+int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+
+#endif /* _LINUX_BLKDEV_H */
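
For bio-based drivers the former make_request_fn now lives in block_device_operations as ->submit_bio, generic_make_request() becomes submit_bio_noacct(), and blk_alloc_queue() only takes a NUMA node. A minimal sketch with hypothetical names; the bio is completed immediately instead of being mapped to real storage:

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/numa.h>

static blk_qc_t example_submit_bio(struct bio *bio)
{
	/* blk_queue_split() no longer takes the request_queue. */
	blk_queue_split(&bio);

	/* A real driver would transfer the bio data here. */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= example_submit_bio,
};

static struct request_queue *example_alloc_queue(void)
{
	/* No make_request argument any more; the driver is reached
	 * through the gendisk's fops->submit_bio instead. */
	return blk_alloc_queue(NUMA_NO_NODE);
}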
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index c66c545e161a..64f367044e25 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -46,7 +46,8 @@ struct bpf_cgroup_storage {
};
struct bpf_cgroup_storage_map *map;
struct bpf_cgroup_storage_key key;
- struct list_head list;
+ struct list_head list_map;
+ struct list_head list_cg;
struct rb_node node;
struct rcu_head rcu;
};
@@ -78,6 +79,9 @@ struct cgroup_bpf {
struct list_head progs[MAX_BPF_ATTACH_TYPE];
u32 flags[MAX_BPF_ATTACH_TYPE];
+ /* list of cgroup shared storages */
+ struct list_head storages;
+
/* temp storage for effective prog array used by prog_attach/detach */
struct bpf_prog_array *inactive;
@@ -161,6 +165,9 @@ static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}
+struct bpf_cgroup_storage *
+cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
+ void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
@@ -169,7 +176,6 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
-void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
@@ -210,6 +216,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
+ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
+
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
@@ -380,8 +389,6 @@ static inline void bpf_cgroup_storage_set(
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
-static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
- struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
@@ -401,6 +408,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h
index 47d5b0c708c9..722f799c1a2e 100644
--- a/include/linux/bpf-netns.h
+++ b/include/linux/bpf-netns.h
@@ -8,6 +8,7 @@
enum netns_bpf_attach_type {
NETNS_BPF_INVALID = -1,
NETNS_BPF_FLOW_DISSECTOR = 0,
+ NETNS_BPF_SK_LOOKUP,
MAX_NETNS_BPF_ATTACH_TYPE
};
@@ -17,6 +18,8 @@ to_netns_bpf_attach_type(enum bpf_attach_type attach_type)
switch (attach_type) {
case BPF_FLOW_DISSECTOR:
return NETNS_BPF_FLOW_DISSECTOR;
+ case BPF_SK_LOOKUP:
+ return NETNS_BPF_SK_LOOKUP;
default:
return NETNS_BPF_INVALID;
}
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9750a1902ee5..55f694b63164 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -33,10 +33,21 @@ struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
+struct bpf_iter_aux_info;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
+typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
+ struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
+struct bpf_iter_seq_info {
+ const struct seq_operations *seq_ops;
+ bpf_iter_init_seq_priv_t init_seq_private;
+ bpf_iter_fini_seq_priv_t fini_seq_private;
+ u32 seq_priv_size;
+};
+
/* map is generic key/value storage optionally accesible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
@@ -92,6 +103,13 @@ struct bpf_map_ops {
int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
struct poll_table_struct *pts);
+
+ /* BTF name and id of struct allocated by map_alloc */
+ const char * const map_btf_name;
+ int *map_btf_id;
+
+ /* bpf_iter info used to open a seq_file */
+ const struct bpf_iter_seq_info *iter_seq_info;
};
struct bpf_map_memory {
@@ -245,6 +263,7 @@ enum bpf_arg_type {
ARG_PTR_TO_INT, /* pointer to int */
ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
+ ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */
ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
@@ -261,6 +280,7 @@ enum bpf_return_type {
RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */
+ RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -283,6 +303,12 @@ struct bpf_func_proto {
enum bpf_arg_type arg_type[5];
};
int *btf_id; /* BTF ids of arguments */
+ bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is
+ * valid. Often used if more
+ * than one btf id is permitted
+ * for this argument.
+ */
+ int *ret_btf_id; /* return value btf_id */
};
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -330,6 +356,10 @@ enum bpf_reg_type {
PTR_TO_BTF_ID_OR_NULL, /* reg points to kernel struct or NULL */
PTR_TO_MEM, /* reg points to valid memory region */
PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */
+ PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */
+ PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
+ PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */
+ PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
};
/* The information passed from prog-specific *_is_valid_access
@@ -656,6 +686,7 @@ struct bpf_jit_poke_descriptor {
struct bpf_ctx_arg_aux {
u32 offset;
enum bpf_reg_type reg_type;
+ u32 btf_id;
};
struct bpf_prog_aux {
@@ -670,6 +701,8 @@ struct bpf_prog_aux {
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
u32 attach_btf_id; /* in-kernel BTF type id to attach to */
u32 ctx_arg_info_size;
+ u32 max_rdonly_access;
+ u32 max_rdwr_access;
const struct bpf_ctx_arg_aux *ctx_arg_info;
struct bpf_prog *linked_prog;
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
@@ -748,6 +781,33 @@ struct bpf_array_aux {
struct work_struct work;
};
+struct bpf_link {
+ atomic64_t refcnt;
+ u32 id;
+ enum bpf_link_type type;
+ const struct bpf_link_ops *ops;
+ struct bpf_prog *prog;
+ struct work_struct work;
+};
+
+struct bpf_link_ops {
+ void (*release)(struct bpf_link *link);
+ void (*dealloc)(struct bpf_link *link);
+ int (*detach)(struct bpf_link *link);
+ int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog);
+ void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
+ int (*fill_link_info)(const struct bpf_link *link,
+ struct bpf_link_info *info);
+};
+
+struct bpf_link_primer {
+ struct bpf_link *link;
+ struct file *file;
+ int fd;
+ u32 id;
+};
+
struct bpf_struct_ops_value;
struct btf_type;
struct btf_member;
@@ -917,6 +977,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
struct bpf_prog *old_prog);
+int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
+int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
+ struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt);
@@ -1101,6 +1164,7 @@ int generic_map_delete_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
+struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
extern int sysctl_unprivileged_bpf_disabled;
@@ -1109,6 +1173,11 @@ static inline bool bpf_allow_ptr_leaks(void)
return perfmon_capable();
}
+static inline bool bpf_allow_ptr_to_map_access(void)
+{
+ return perfmon_capable();
+}
+
static inline bool bpf_bypass_spec_v1(void)
{
return perfmon_capable();
@@ -1122,32 +1191,6 @@ static inline bool bpf_bypass_spec_v4(void)
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
-struct bpf_link {
- atomic64_t refcnt;
- u32 id;
- enum bpf_link_type type;
- const struct bpf_link_ops *ops;
- struct bpf_prog *prog;
- struct work_struct work;
-};
-
-struct bpf_link_primer {
- struct bpf_link *link;
- struct file *file;
- int fd;
- u32 id;
-};
-
-struct bpf_link_ops {
- void (*release)(struct bpf_link *link);
- void (*dealloc)(struct bpf_link *link);
- int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
- struct bpf_prog *old_prog);
- void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
- int (*fill_link_info)(const struct bpf_link *link,
- struct bpf_link_info *info);
-};
-
void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
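The usual attach pattern built on these helpers looks roughly like the sketch below; struct foo_link and foo_link_ops are illustrative assumptions, and error handling is trimmed to the essentials:

/* Sketch: init -> prime -> attach -> settle (or plain kfree before priming). */
struct foo_link {
	struct bpf_link link;
	/* target-specific state */
};

static int foo_link_attach(struct bpf_prog *prog)
{
	struct bpf_link_primer primer;
	struct foo_link *fl;
	int err;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	bpf_link_init(&fl->link, BPF_LINK_TYPE_UNSPEC, &foo_link_ops, prog);

	err = bpf_link_prime(&fl->link, &primer);
	if (err) {
		kfree(fl);		/* not primed yet: plain free is fine */
		return err;
	}

	/* ... attach to the target object here ... */

	return bpf_link_settle(&primer);	/* returns the link fd */
}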
@@ -1167,18 +1210,23 @@ int bpf_obj_get_user(const char __user *pathname, int flags);
extern int bpf_iter_ ## target(args); \
int __init bpf_iter_ ## target(args) { return 0; }
-typedef int (*bpf_iter_init_seq_priv_t)(void *private_data);
-typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
+struct bpf_iter_aux_info {
+ struct bpf_map *map;
+};
+
+typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
+ union bpf_iter_link_info *linfo,
+ struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
const char *target;
- const struct seq_operations *seq_ops;
- bpf_iter_init_seq_priv_t init_seq_private;
- bpf_iter_fini_seq_priv_t fini_seq_private;
- u32 seq_priv_size;
+ bpf_iter_attach_target_t attach_target;
+ bpf_iter_detach_target_t detach_target;
u32 ctx_arg_info_size;
struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
+ const struct bpf_iter_seq_info *seq_info;
};
struct bpf_iter_meta {
@@ -1187,6 +1235,13 @@ struct bpf_iter_meta {
u64 seq_num;
};
+struct bpf_iter__bpf_map_elem {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct bpf_map *, map);
+ __bpf_md_ptr(void *, key);
+ __bpf_md_ptr(void *, value);
+};
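A BPF-side iterator program receiving this context could look like the sketch below (built with clang for the bpf target). The section name, a u32-keyed/u64-valued map, and the BPF_SEQ_PRINTF macro from libbpf/selftest headers are assumptions here:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("iter/bpf_map_elem")
int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	if (!key || !val)		/* NULL on the final call */
		return 0;

	BPF_SEQ_PRINTF(seq, "%u: %llu\n", *key, *val);
	return 0;
}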
+
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
@@ -1256,6 +1311,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx);
+bool cpu_map_prog_allowed(struct bpf_map *map);
/* Return map's NUMA node specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
@@ -1348,6 +1404,35 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}
+static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops,
+ struct bpf_prog *prog)
+{
+}
+
+static inline int bpf_link_prime(struct bpf_link *link,
+ struct bpf_link_primer *primer)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_link_settle(struct bpf_link_primer *primer)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
+{
+}
+
+static inline void bpf_link_inc(struct bpf_link *link)
+{
+}
+
+static inline void bpf_link_put(struct bpf_link *link)
+{
+}
+
static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
return -EOPNOTSUPP;
@@ -1416,6 +1501,11 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
return 0;
}
+static inline bool cpu_map_prog_allowed(struct bpf_map *map)
+{
+ return false;
+}
+
static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
enum bpf_prog_type type)
{
@@ -1616,6 +1706,9 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
+extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
@@ -1638,6 +1731,11 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index a18ae82a298a..a52a5688418e 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -64,6 +64,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
#ifdef CONFIG_INET
BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
struct sk_reuseport_md, struct sk_reuseport_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup,
+ struct bpf_sk_lookup, struct bpf_sk_lookup_kern)
#endif
#if defined(CONFIG_BPF_JIT)
BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index ca08db4ffb5f..53c7bd568c5d 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -379,6 +379,7 @@ struct bpf_verifier_env {
u32 used_map_cnt; /* number of used maps */
u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
+ bool allow_ptr_to_map_access;
bool bpf_capable;
bool bypass_spec_v1;
bool bypass_spec_v4;
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index d815622cd31e..2ae3c8e1d83c 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -3,22 +3,23 @@
#define _LINUX_BPFILTER_H
#include <uapi/linux/bpfilter.h>
-#include <linux/umh.h>
+#include <linux/usermode_driver.h>
+#include <linux/sockptr.h>
struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen);
int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
+void bpfilter_umh_cleanup(struct umd_info *info);
+
struct bpfilter_umh_ops {
- struct umh_info info;
+ struct umd_info info;
/* since ip_getsockopt() can run in parallel, serialize access to umh */
struct mutex lock;
- int (*sockopt)(struct sock *sk, int optname,
- char __user *optval,
+ int (*sockopt)(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen, bool is_set);
int (*start)(void);
- bool stop;
};
extern struct bpfilter_umh_ops bpfilter_ops;
#endif
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
new file mode 100644
index 000000000000..4867d549e3c1
--- /dev/null
+++ b/include/linux/btf_ids.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_BTF_IDS_H
+#define _LINUX_BTF_IDS_H
+
+#ifdef CONFIG_DEBUG_INFO_BTF
+
+#include <linux/compiler.h> /* for __PASTE */
+
+/*
+ * Following macros help to define lists of BTF IDs placed
+ * in .BTF_ids section. They are initially filled with zeros
+ * (during compilation) and resolved later during the
+ * linking phase by the resolve_btfids tool.
+ *
+ * Any change in list layout must be reflected in the
+ * resolve_btfids tool's logic.
+ */
+
+#define BTF_IDS_SECTION ".BTF_ids"
+
+#define ____BTF_ID(symbol) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".local " #symbol " ; \n" \
+".type " #symbol ", STT_OBJECT; \n" \
+".size " #symbol ", 4; \n" \
+#symbol ": \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define __BTF_ID(symbol) \
+ ____BTF_ID(symbol)
+
+#define __ID(prefix) \
+ __PASTE(prefix, __COUNTER__)
+
+/*
+ * The BTF_ID macro defines a unique symbol for each ID, pointing
+ * to 4 zero bytes.
+ */
+#define BTF_ID(prefix, name) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__))
+
+/*
+ * The BTF_ID_LIST macro defines a pure (unsorted) list
+ * of BTF IDs, with the following layout:
+ *
+ * BTF_ID_LIST(list1)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ *
+ * list1:
+ * __BTF_ID__type1__name1__1:
+ * .zero 4
+ * __BTF_ID__type2__name2__2:
+ * .zero 4
+ *
+ */
+#define __BTF_ID_LIST(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " " #name "; \n" \
+#name ":; \n" \
+".popsection; \n"); \
+
+#define BTF_ID_LIST(name) \
+__BTF_ID_LIST(name, local) \
+extern u32 name[];
+
+#define BTF_ID_LIST_GLOBAL(name) \
+__BTF_ID_LIST(name, globl)
+
+/*
+ * The BTF_ID_UNUSED macro defines 4 zero bytes.
+ * It's used when we want to define an 'unused' entry
+ * in a BTF_ID_LIST, like:
+ *
+ * BTF_ID_LIST(bpf_skb_output_btf_ids)
+ * BTF_ID(struct, sk_buff)
+ * BTF_ID_UNUSED
+ * BTF_ID(struct, task_struct)
+ */
+
+#define BTF_ID_UNUSED \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#else
+
+#define BTF_ID_LIST(name) static u32 name[5];
+#define BTF_ID(prefix, name)
+#define BTF_ID_UNUSED
+#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+
+#endif /* CONFIG_DEBUG_INFO_BTF */
+
+#ifdef CONFIG_NET
+/* Define a list of socket types which can be the argument for
+ * skc_to_*_sock() helpers. All these socket types should have
+ * struct sock_common as the first member of their memory layout.
+ */
+#define BTF_SOCK_TYPE_xxx \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
+
+enum {
+#define BTF_SOCK_TYPE(name, str) name,
+BTF_SOCK_TYPE_xxx
+#undef BTF_SOCK_TYPE
+MAX_BTF_SOCK_TYPE,
+};
+
+extern u32 btf_sock_ids[];
+#endif
+
+#endif
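Putting the pieces together, a caller could declare and consume a list roughly like this; the foo_* names are illustrative, and the socket ids follow the same pattern under CONFIG_NET:

BTF_ID_LIST(foo_btf_ids)
BTF_ID(struct, sk_buff)
BTF_ID(struct, task_struct)

static bool foo_is_known(u32 id)
{
	/* resolve_btfids has patched the zero placeholders at link time */
	return id == foo_btf_ids[0] || id == foo_btf_ids[1];
}

static bool foo_is_tcp6(u32 id)
{
	return id == btf_sock_ids[BTF_SOCK_TYPE_TCP6];
}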
diff --git a/include/linux/btree.h b/include/linux/btree.h
index 68f858c831b1..243ee544397a 100644
--- a/include/linux/btree.h
+++ b/include/linux/btree.h
@@ -10,7 +10,7 @@
*
* A B+Tree is a data structure for looking up arbitrary (currently allowing
* unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure
- * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not
+ * is described at https://en.wikipedia.org/wiki/B-tree, we currently do not
* use binary search to find the key on lookups.
*
* Each B+Tree consists of a head, that contains bookkeeping information and
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 22fb11e2d2e0..6b47f94378c5 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -406,6 +406,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+#define buffer_heads_over_limit 0
#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index b4345b38a6be..1e7fe311cabe 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -261,6 +261,12 @@ static inline bool bpf_capable(void)
return capable(CAP_BPF) || capable(CAP_SYS_ADMIN);
}
+static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns)
+{
+ return ns_capable(ns, CAP_CHECKPOINT_RESTORE) ||
+ ns_capable(ns, CAP_SYS_ADMIN);
+}
+
/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 8543fa59da72..f48d0a31deae 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -73,7 +73,6 @@ struct cdrom_device_ops {
int (*drive_status) (struct cdrom_device_info *, int);
unsigned int (*check_events) (struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
- int (*media_changed) (struct cdrom_device_info *, int);
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);
int (*select_speed) (struct cdrom_device_info *, int);
@@ -107,7 +106,6 @@ extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
fmode_t mode, unsigned int cmd, unsigned long arg);
extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing);
-extern int cdrom_media_changed(struct cdrom_device_info *);
extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi);
extern void unregister_cdrom(struct cdrom_device_info *cdi);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 39e6f4c57580..fcd84e8d88f4 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -58,7 +58,7 @@
* because 10.2.z (jewel) did not care if its peers advertised this
* feature bit.
*
- * - In the second phase we stop advertising the the bit and call it
+ * - In the second phase we stop advertising the bit and call it
* RETIRED. This can normally be done in the *next* major release
* following the one in which we marked the feature DEPRECATED. In
* the above example, for 12.0.z (luminous) we can say:
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index ebf5ba62b772..455e9b9e2adf 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -130,6 +130,7 @@ struct ceph_dir_layout {
#define CEPH_MSG_CLIENT_REQUEST 24
#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
#define CEPH_MSG_CLIENT_REPLY 26
+#define CEPH_MSG_CLIENT_METRICS 29
#define CEPH_MSG_CLIENT_CAPS 0x310
#define CEPH_MSG_CLIENT_LEASE 0x311
#define CEPH_MSG_CLIENT_SNAP 0x312
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index e5ed1c541e7f..c8645f0b797d 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -282,6 +282,7 @@ extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
extern struct kmem_cache *ceph_dir_file_cachep;
extern struct kmem_cache *ceph_mds_request_cachep;
+extern mempool_t *ceph_wb_pagevec_pool;
/* ceph_common.c */
extern bool libceph_compatible(void *data);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index c60b59e9291b..83fa08a06507 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -404,7 +404,7 @@ void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc);
&__oreq->r_ops[__whch].typ.fld; \
})
-extern void osd_req_op_init(struct ceph_osd_request *osd_req,
+struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index bd1ee9039558..03a5de5f99f4 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -189,7 +189,7 @@ struct clk_duty {
 * and >= numerator). Return 0 on success, otherwise a negative error code.
*
* @init: Perform platform-specific initialization magic.
- * This is not not used by any of the basic clock types.
+ * This is not used by any of the basic clock types.
 * This callback exists for HW which needs to perform some
 * initialisation magic for CCF to get an accurate view of the
 * clock. It may also be used when dynamic resource allocation is
@@ -1096,7 +1096,6 @@ int clk_hw_get_parent_index(struct clk_hw *hw);
int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
-unsigned long __clk_get_flags(struct clk *clk);
unsigned long clk_hw_get_flags(const struct clk_hw *hw);
#define clk_hw_can_set_rate_parent(hw) \
(clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 49a53a137610..a4f82e836a7c 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -59,6 +59,7 @@
#define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */
#define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */
#define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */
+#define AT91_PMC_PLL_UPDT_ID_MSK (0xf) /* PLL ID mask */
#define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */
#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */
@@ -136,6 +137,8 @@
#define AT91_PMC_PLLADIV2_ON (1 << 12)
#define AT91_PMC_H32MXDIV BIT(24)
+#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */
+
#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
#define AT91_PMC_USBS_PLLA (0 << 0)
@@ -174,6 +177,7 @@
#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */
#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */
#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */
+#define AT91_PMC_MCKXRDY (1 << 26) /* Master Clock x [x=1..4] Ready Status */
#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
#define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
deleted file mode 100644
index 4b0a69863656..000000000000
--- a/include/linux/clock_cooling.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/include/linux/clock_cooling.h
- *
- * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
- *
- * Copyright (C) 2013 Texas Instruments Inc.
- * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * Highly based on cpufreq_cooling.c.
- * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
- * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
- */
-
-#ifndef __CPU_COOLING_H__
-#define __CPU_COOLING_H__
-
-#include <linux/of.h>
-#include <linux/thermal.h>
-#include <linux/cpumask.h>
-
-#ifdef CONFIG_CLOCK_THERMAL
-/**
- * clock_cooling_register - function to create clock cooling device.
- * @dev: struct device pointer to the device used as clock cooling device.
- * @clock_name: string containing the clock used as cooling mechanism.
- */
-struct thermal_cooling_device *
-clock_cooling_register(struct device *dev, const char *clock_name);
-
-/**
- * clock_cooling_unregister - function to remove clock cooling device.
- * @cdev: thermal cooling device pointer.
- */
-void clock_cooling_unregister(struct thermal_cooling_device *cdev);
-
-unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
- unsigned long freq);
-#else /* !CONFIG_CLOCK_THERMAL */
-static inline struct thermal_cooling_device *
-clock_cooling_register(struct device *dev, const char *clock_name)
-{
- return NULL;
-}
-static inline
-void clock_cooling_unregister(struct thermal_cooling_device *cdev)
-{
-}
-static inline
-unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
- unsigned long freq)
-{
- return THERMAL_CSTATE_INVALID;
-}
-#endif /* CONFIG_CLOCK_THERMAL */
-
-#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 6fa0eea3f530..25a521d299c1 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -85,11 +85,13 @@ static inline unsigned long compact_gap(unsigned int order)
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
+extern unsigned int sysctl_compaction_proactiveness;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_compact_unevictable_allowed;
+extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, unsigned int alloc_flags,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e90100c0de72..d38c4d7e83bd 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -737,10 +737,6 @@ asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
unsigned flags, struct sockaddr __user *addr,
int __user *addrlen);
-asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
- char __user *optval, unsigned int optlen);
-asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
- char __user *optval, int __user *optlen);
asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
unsigned flags);
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
@@ -855,7 +851,6 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
/* obsolete: fs/readdir.c */
asmlinkage long compat_sys_old_readdir(unsigned int fd,
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 5e55302e3bf6..cee0c728d39a 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -5,8 +5,6 @@
/* Compiler specific definitions for Clang compiler */
-#define uninitialized_var(x) x = *(&(x))
-
/* same as gcc, this was present in clang-2.6 so we can assume it works
* with any version that can compile the kernel
*/
@@ -42,7 +40,7 @@
#endif
/*
- * Not all versions of clang implement the the type-generic versions
+ * Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
* __has_builtin allowing us to avoid awkward version
* checks. Unfortunately, we don't know which version of gcc clang
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 0b1dc61f3955..7a3769040d7d 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -59,12 +59,6 @@
(typeof(ptr)) (__ptr + (off)); \
})
-/*
- * A trick to suppress uninitialized variable warning without generating any
- * code
- */
-#define uninitialized_var(x) x = x
-
#ifdef CONFIG_RETPOLINE
#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 204e76856435..6810d80acb0b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -120,65 +120,12 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)
-#ifdef CONFIG_DEBUG_ENTRY
-/* Begin/end of an instrumentation safe region */
-#define instrumentation_begin() ({ \
- asm volatile("%c0: nop\n\t" \
- ".pushsection .discard.instr_begin\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
-})
-
-/*
- * Because instrumentation_{begin,end}() can nest, objtool validation considers
- * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
- * When the value is greater than 0, we consider instrumentation allowed.
- *
- * There is a problem with code like:
- *
- * noinstr void foo()
- * {
- * instrumentation_begin();
- * ...
- * if (cond) {
- * instrumentation_begin();
- * ...
- * instrumentation_end();
- * }
- * bar();
- * instrumentation_end();
- * }
- *
- * If instrumentation_end() would be an empty label, like all the other
- * annotations, the inner _end(), which is at the end of a conditional block,
- * would land on the instruction after the block.
- *
- * If we then consider the sum of the !cond path, we'll see that the call to
- * bar() is with a 0-value, even though, we meant it to happen with a positive
- * value.
- *
- * To avoid this, have _end() be a NOP instruction, this ensures it will be
- * part of the condition block and does not escape.
- */
-#define instrumentation_end() ({ \
- asm volatile("%c0: nop\n\t" \
- ".pushsection .discard.instr_end\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
-})
-#endif /* CONFIG_DEBUG_ENTRY */
-
#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif
-#ifndef instrumentation_begin
-#define instrumentation_begin() do { } while(0)
-#define instrumentation_end() do { } while(0)
-#endif
-
#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
@@ -230,28 +177,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
-/*
- * Prevent the compiler from merging or refetching reads or writes. The
- * compiler is also forbidden from reordering successive instances of
- * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
- * particular ordering. One way to make the compiler aware of ordering is to
- * put the two invocations of READ_ONCE or WRITE_ONCE in different C
- * statements.
- *
- * These two macros will also work on aggregate data types like structs or
- * unions.
- *
- * Their two major use cases are: (1) Mediating communication between
- * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
- */
-#include <asm/barrier.h>
-#include <linux/kasan-checks.h>
-#include <linux/kcsan-checks.h>
-
/**
* data_race - mark an expression as containing intentional data races
*
@@ -272,65 +197,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
__v; \
})
-/*
- * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
- * atomicity or dependency ordering guarantees. Note that this may result
- * in tears!
- */
-#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
-
-#define __READ_ONCE_SCALAR(x) \
-({ \
- __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \
- smp_read_barrier_depends(); \
- (typeof(x))__x; \
-})
-
-#define READ_ONCE(x) \
-({ \
- compiletime_assert_rwonce_type(x); \
- __READ_ONCE_SCALAR(x); \
-})
-
-#define __WRITE_ONCE(x, val) \
-do { \
- *(volatile typeof(x) *)&(x) = (val); \
-} while (0)
-
-#define WRITE_ONCE(x, val) \
-do { \
- compiletime_assert_rwonce_type(x); \
- __WRITE_ONCE(x, val); \
-} while (0)
-
-static __no_sanitize_or_inline
-unsigned long __read_once_word_nocheck(const void *addr)
-{
- return __READ_ONCE(*(unsigned long *)addr);
-}
-
-/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
- * word from memory atomically but without telling KASAN/KCSAN. This is
- * usually used by unwinding code when walking the stack of a running process.
- */
-#define READ_ONCE_NOCHECK(x) \
-({ \
- unsigned long __x; \
- compiletime_assert(sizeof(x) == sizeof(__x), \
- "Unsupported access size for READ_ONCE_NOCHECK()."); \
- __x = __read_once_word_nocheck(&(x)); \
- smp_read_barrier_depends(); \
- (typeof(x))__x; \
-})
-
-static __no_kasan_or_inline
-unsigned long read_word_at_a_time(const void *addr)
-{
- kasan_check_read(addr, 1);
- return *(unsigned long *)addr;
-}
-
#endif /* __KERNEL__ */
/*
@@ -354,57 +220,6 @@ static inline void *offset_to_ptr(const int *off)
#endif /* __ASSEMBLY__ */
-/* Compile time object size, -1 for unknown */
-#ifndef __compiletime_object_size
-# define __compiletime_object_size(obj) -1
-#endif
-#ifndef __compiletime_warning
-# define __compiletime_warning(message)
-#endif
-#ifndef __compiletime_error
-# define __compiletime_error(message)
-#endif
-
-#ifdef __OPTIMIZE__
-# define __compiletime_assert(condition, msg, prefix, suffix) \
- do { \
- extern void prefix ## suffix(void) __compiletime_error(msg); \
- if (!(condition)) \
- prefix ## suffix(); \
- } while (0)
-#else
-# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
-#endif
-
-#define _compiletime_assert(condition, msg, prefix, suffix) \
- __compiletime_assert(condition, msg, prefix, suffix)
-
-/**
- * compiletime_assert - break build and emit msg if condition is false
- * @condition: a compile-time constant condition to check
- * @msg: a message to emit if condition is false
- *
- * In tradition of POSIX assert, this macro will break the build if the
- * supplied condition is *false*, emitting the supplied error message if the
- * compiler has support to do so.
- */
-#define compiletime_assert(condition, msg) \
- _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
-
-#define compiletime_assert_atomic_type(t) \
- compiletime_assert(__native_word(t), \
- "Need native word sized stores/loads for atomicity.")
-
-/*
- * Yes, this permits 64-bit accesses on 32-bit architectures. These will
- * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
- * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
- * (e.g. a virtual address) and a strong prevailing wind.
- */
-#define compiletime_assert_rwonce_type(t) \
- compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
- "Unsupported access size for {READ,WRITE}_ONCE().")
-
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
@@ -414,4 +229,6 @@ static inline void *offset_to_ptr(const int *off)
*/
#define prevent_tail_call_optimization() mb()
+#include <asm/rwonce.h>
+
#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index c8f03d2969df..6122efdad6ad 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -37,6 +37,7 @@
# define __GCC4_has_attribute___copy__ 0
# define __GCC4_has_attribute___designated_init__ 0
# define __GCC4_has_attribute___externally_visible__ 1
+# define __GCC4_has_attribute___no_caller_saved_registers__ 0
# define __GCC4_has_attribute___noclone__ 1
# define __GCC4_has_attribute___nonstring__ 0
# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
@@ -177,6 +178,18 @@
#define __mode(x) __attribute__((__mode__(x)))
/*
+ * Optional: only supported since gcc >= 7
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/x86-Function-Attributes.html#index-no_005fcaller_005fsaved_005fregisters-function-attribute_002c-x86
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-caller-saved-registers
+ */
+#if __has_attribute(__no_caller_saved_registers__)
+# define __no_caller_saved_registers __attribute__((__no_caller_saved_registers__))
+#else
+# define __no_caller_saved_registers
+#endif
+
+/*
* Optional: not supported by clang
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 01dd58c74d80..4b33cb385f96 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -5,48 +5,54 @@
#ifndef __ASSEMBLY__
#ifdef __CHECKER__
+/* address spaces */
# define __kernel __attribute__((address_space(0)))
# define __user __attribute__((noderef, address_space(__user)))
-# define __safe __attribute__((safe))
-# define __force __attribute__((force))
-# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(__iomem)))
+# define __percpu __attribute__((noderef, address_space(__percpu)))
+# define __rcu __attribute__((noderef, address_space(__rcu)))
+extern void __chk_user_ptr(const volatile void __user *);
+extern void __chk_io_ptr(const volatile void __iomem *);
+/* context/locking */
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu __attribute__((noderef, address_space(__percpu)))
-# define __rcu __attribute__((noderef, address_space(__rcu)))
+/* other */
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __safe __attribute__((safe))
# define __private __attribute__((noderef))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
+/* address spaces */
+# define __kernel
# ifdef STRUCTLEAK_PLUGIN
-# define __user __attribute__((user))
+# define __user __attribute__((user))
# else
# define __user
# endif
-# define __kernel
-# define __safe
-# define __force
-# define __nocast
# define __iomem
-# define __chk_user_ptr(x) (void)0
-# define __chk_io_ptr(x) (void)0
-# define __builtin_warning(x, y...) (1)
+# define __percpu
+# define __rcu
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+/* context/locking */
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
+# define __acquire(x) (void)0
+# define __release(x) (void)0
# define __cond_lock(x,c) (c)
-# define __percpu
-# define __rcu
+/* other */
+# define __force
+# define __nocast
+# define __safe
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
+# define __builtin_warning(x, y...) (1)
#endif /* __CHECKER__ */
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
@@ -275,6 +281,47 @@ struct ftrace_likely_data {
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+/* Compile time object size, -1 for unknown */
+#ifndef __compiletime_object_size
+# define __compiletime_object_size(obj) -1
+#endif
+#ifndef __compiletime_warning
+# define __compiletime_warning(message)
+#endif
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+#endif
+
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix) \
+ do { \
+ extern void prefix ## suffix(void) __compiletime_error(msg); \
+ if (!(condition)) \
+ prefix ## suffix(); \
+ } while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+ __compiletime_assert(condition, msg, prefix, suffix)
+
+/**
+ * compiletime_assert - break build and emit msg if condition is false
+ * @condition: a compile-time constant condition to check
+ * @msg: a message to emit if condition is false
+ *
+ * In the tradition of POSIX assert, this macro will break the build if the
+ * supplied condition is *false*, emitting the supplied error message if the
+ * compiler has support to do so.
+ */
+#define compiletime_assert(condition, msg) \
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+
+#define compiletime_assert_atomic_type(t) \
+ compiletime_assert(__native_word(t), \
+ "Need native word sized stores/loads for atomicity.")
+
/* Helpers for emitting diagnostics in pragmas. */
#ifndef __diag
#define __diag(string)
diff --git a/include/linux/console.h b/include/linux/console.h
index 75dd20650fbe..0670d3491e0e 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -24,17 +24,13 @@ struct module;
struct tty_struct;
struct notifier_block;
-/*
- * this is what the terminal answers to a ESC-Z or csi0c query.
- */
-#define VT100ID "\033[?1;2c"
-#define VT102ID "\033[?6c"
-
enum con_scroll {
SM_UP,
SM_DOWN,
};
+enum vc_intensity;
+
/**
* struct consw - callbacks for consoles
*
@@ -74,8 +70,9 @@ struct consw {
void (*con_scrolldelta)(struct vc_data *vc, int lines);
int (*con_set_origin)(struct vc_data *vc);
void (*con_save_screen)(struct vc_data *vc);
- u8 (*con_build_attr)(struct vc_data *vc, u8 color, u8 intensity,
- u8 blink, u8 underline, u8 reverse, u8 italic);
+ u8 (*con_build_attr)(struct vc_data *vc, u8 color,
+ enum vc_intensity intensity,
+ bool blink, bool underline, bool reverse, bool italic);
void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
u16 *(*con_screen_pos)(struct vc_data *vc, int offset);
unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position,
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 24d4c16e3ae0..153734816b49 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -21,6 +21,43 @@ struct uni_pagedir;
struct uni_screen;
#define NPAR 16
+#define VC_TABSTOPS_COUNT 256U
+
+enum vc_intensity {
+ VCI_HALF_BRIGHT,
+ VCI_NORMAL,
+ VCI_BOLD,
+ VCI_MASK = 0x3,
+};
+
+/**
+ * struct vc_state -- state of a VC
+ * @x: cursor's x-position
+ * @y: cursor's y-position
+ * @color: foreground & background colors
+ * @Gx_charset: what's G0/G1 slot set to (like GRAF_MAP, LAT1_MAP)
+ * @charset: what character set to use (0=G0 or 1=G1)
+ * @intensity: see enum vc_intensity for values
+ * @reverse: reversed foreground/background colors
+ *
+ * These members are defined separately from struct vc_data as we save &
+ * restore them at times.
+ */
+struct vc_state {
+ unsigned int x, y;
+
+ unsigned char color;
+
+ unsigned char Gx_charset[2];
+ unsigned int charset : 1;
+
+ /* attribute flags */
+ enum vc_intensity intensity;
+ bool italic;
+ bool underline;
+ bool blink;
+ bool reverse;
+};
/*
* Example: vc_data of a console that was scrolled 3 lines down.
@@ -57,6 +94,8 @@ struct uni_screen;
struct vc_data {
struct tty_port port; /* Upper level data */
+ struct vc_state state, saved_state;
+
unsigned short vc_num; /* Console number */
unsigned int vc_cols; /* [#] Console size */
unsigned int vc_rows;
@@ -73,8 +112,6 @@ struct vc_data {
/* attributes for all characters on screen */
unsigned char vc_attr; /* Current attributes */
unsigned char vc_def_color; /* Default colors */
- unsigned char vc_color; /* Foreground & background */
- unsigned char vc_s_color; /* Saved foreground & background */
unsigned char vc_ulcolor; /* Color for underline mode */
unsigned char vc_itcolor;
unsigned char vc_halfcolor; /* Color for half intensity mode */
@@ -82,8 +119,6 @@ struct vc_data {
unsigned int vc_cursor_type;
unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */
unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */
- unsigned int vc_x, vc_y; /* Cursor position */
- unsigned int vc_saved_x, vc_saved_y;
unsigned long vc_pos; /* Cursor address */
/* fonts */
unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
@@ -98,8 +133,6 @@ struct vc_data {
int vt_newvt;
wait_queue_head_t paste_wait;
/* mode flags */
- unsigned int vc_charset : 1; /* Character set G0 / G1 */
- unsigned int vc_s_charset : 1; /* Saved character set */
unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */
unsigned int vc_toggle_meta : 1; /* Toggle high bit? */
unsigned int vc_decscnm : 1; /* Screen Mode */
@@ -107,17 +140,6 @@ struct vc_data {
unsigned int vc_decawm : 1; /* Autowrap Mode */
unsigned int vc_deccm : 1; /* Cursor Visible */
unsigned int vc_decim : 1; /* Insert Mode */
- /* attribute flags */
- unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */
- unsigned int vc_italic:1;
- unsigned int vc_underline : 1;
- unsigned int vc_blink : 1;
- unsigned int vc_reverse : 1;
- unsigned int vc_s_intensity : 2; /* saved rendition */
- unsigned int vc_s_italic:1;
- unsigned int vc_s_underline : 1;
- unsigned int vc_s_blink : 1;
- unsigned int vc_s_reverse : 1;
/* misc */
unsigned int vc_priv : 3;
unsigned int vc_need_wrap : 1;
@@ -126,13 +148,9 @@ struct vc_data {
unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */
unsigned char vc_utf_count;
int vc_utf_char;
- unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */
+ DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. */
unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */
unsigned short * vc_translate;
- unsigned char vc_G0_charset;
- unsigned char vc_G1_charset;
- unsigned char vc_saved_G0;
- unsigned char vc_saved_G1;
unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
@@ -149,24 +167,29 @@ struct vc {
struct work_struct SAK_work;
/* might add scrmem, kbd at some time,
- to have everything in one place - the disadvantage
- would be that vc_cons etc can no longer be static */
+ to have everything in one place */
};
extern struct vc vc_cons [MAX_NR_CONSOLES];
extern void vc_SAK(struct work_struct *work);
-#define CUR_DEF 0
-#define CUR_NONE 1
-#define CUR_UNDERLINE 2
-#define CUR_LOWER_THIRD 3
-#define CUR_LOWER_HALF 4
-#define CUR_TWO_THIRDS 5
-#define CUR_BLOCK 6
-#define CUR_HWMASK 0x0f
-#define CUR_SWMASK 0xfff0
-
-#define CUR_DEFAULT CUR_UNDERLINE
+#define CUR_MAKE(size, change, set) ((size) | ((change) << 8) | \
+ ((set) << 16))
+#define CUR_SIZE(c) ((c) & 0x00000f)
+# define CUR_DEF 0
+# define CUR_NONE 1
+# define CUR_UNDERLINE 2
+# define CUR_LOWER_THIRD 3
+# define CUR_LOWER_HALF 4
+# define CUR_TWO_THIRDS 5
+# define CUR_BLOCK 6
+#define CUR_SW 0x000010
+#define CUR_ALWAYS_BG 0x000020
+#define CUR_INVERT_FG_BG 0x000040
+#define CUR_FG 0x000700
+#define CUR_BG 0x007000
+#define CUR_CHANGE(c) ((c) & 0x00ff00)
+#define CUR_SET(c) (((c) & 0xff0000) >> 8)
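Purely as an illustration of the field layout encoded by these macros (no claims about the softcursor semantics beyond the bit packing):

/* CUR_MAKE packs size | change << 8 | set << 16. */
static const int cur_example = CUR_MAKE(CUR_UNDERLINE, 0x20, 0x07);

/* CUR_SIZE(cur_example)   == CUR_UNDERLINE   (bits  0..3)               */
/* CUR_CHANGE(cur_example) == 0x20 << 8       (bits  8..15, kept shifted) */
/* CUR_SET(cur_example)    == 0x07 << 8       (bits 16..23, shifted down by 8) */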
bool con_is_visible(const struct vc_data *vc);
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 981b880d5b60..d53cd331c4dd 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -5,6 +5,8 @@
#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
+#include <linux/instrumentation.h>
+
#include <asm/ptrace.h>
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index e3e9f0e3a878..58fffdecdbfd 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -48,6 +48,7 @@ enum coresight_dev_subtype_sink {
CORESIGHT_DEV_SUBTYPE_SINK_NONE,
CORESIGHT_DEV_SUBTYPE_SINK_PORT,
CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+ CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
};
enum coresight_dev_subtype_link {
@@ -179,8 +180,10 @@ struct coresight_sysfs_link {
* @enable: 'true' if component is currently part of an active path.
* @activated: 'true' only if a _sink_ has been activated. A sink can be
* activated but not yet enabled. Enabling for a _sink_
- * appens when a source has been selected for that it.
+ * happens when a source has been selected and a path is enabled
+ * from source to that sink.
* @ea: Device attribute for sink representation under PMU directory.
+ * @def_sink: cached reference to default sink found for this device.
* @ect_dev: Associated cross trigger device. Not part of the trace data
* path or connections.
* @nr_links: number of sysfs links created to other components from this
@@ -199,6 +202,7 @@ struct coresight_device {
/* sink specific fields */
bool activated; /* true only if a sink is part of a path */
struct dev_ext_attribute *ea;
+ struct coresight_device *def_sink;
/* cross trigger handling */
struct coresight_device *ect_dev;
/* sysfs links between components */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 52692587f7fe..8aa84c052fdf 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -64,6 +64,7 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
char *buf);
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 3494f6763597..8f141d4c859c 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -127,7 +127,7 @@ struct cpufreq_policy {
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
- int cached_resolved_idx;
+ unsigned int cached_resolved_idx;
/* Synchronization for frequency transitions */
bool transition_ongoing; /* Tracks transition status */
@@ -576,6 +576,22 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+int cpufreq_start_governor(struct cpufreq_policy *policy);
+void cpufreq_stop_governor(struct cpufreq_policy *policy);
+
+#define cpufreq_governor_init(__governor) \
+static int __init __governor##_init(void) \
+{ \
+ return cpufreq_register_governor(&__governor); \
+} \
+core_initcall(__governor##_init)
+
+#define cpufreq_governor_exit(__governor) \
+static void __exit __governor##_exit(void) \
+{ \
+ return cpufreq_unregister_governor(&__governor); \
+} \
+module_exit(__governor##_exit)
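A hypothetical governor would use the new boilerplate helpers at file scope, for example:

/* Sketch: registering an illustrative governor with the new macros. */
static struct cpufreq_governor cpufreq_gov_example = {
	.name	= "example",
	.owner	= THIS_MODULE,
	/* .init/.exit/.start/.stop/.limits callbacks as needed */
};

cpufreq_governor_init(cpufreq_gov_example);
cpufreq_governor_exit(cpufreq_gov_example);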
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 191772d4a4d7..3215023d4852 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -132,6 +132,7 @@ enum cpuhp_state {
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
+ CPUHP_AP_CLINT_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
CPUHP_AP_HYPERV_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
@@ -181,6 +182,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index ec2ef63771f0..b65909ae4e20 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -65,10 +65,13 @@ struct cpuidle_state {
* CPUs execute ->enter_s2idle with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
* temporarily) or attempt to change states of clock event devices.
+ *
+ * This callback may point to the same function as ->enter if all of
+ * the above requirements are met by it.
*/
- void (*enter_s2idle) (struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index);
+ int (*enter_s2idle)(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
};
/* Idle State Flags */
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 525510a9f965..6594dbc34a37 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -38,6 +38,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_OSRELEASE(value) \
vmcoreinfo_append_str("OSRELEASE=%s\n", value)
+#define VMCOREINFO_BUILD_ID(value) \
+ vmcoreinfo_append_str("BUILD-ID=%s\n", value)
#define VMCOREINFO_PAGESIZE(value) \
vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
#define VMCOREINFO_SYMBOL(name) \
@@ -64,6 +66,10 @@ extern unsigned char *vmcoreinfo_data;
extern size_t vmcoreinfo_size;
extern u32 *vmcoreinfo_note;
+/* raw contents of kernel .notes section */
+extern const void __start_notes __weak;
+extern const void __stop_notes __weak;
+
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);
void final_note(Elf_Word *buf);
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 33c16f2de7f6..2f811baf78d2 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -17,7 +17,7 @@
* The algorithm was originally described in detail in this paper
* (although the algorithm has evolved somewhat since then):
*
- * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
+ * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
*
* LGPL2
*/
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 763863dbc079..ef90e07c9635 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -16,9 +16,8 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
#include <linux/completion.h>
/*
@@ -61,8 +60,8 @@
#define CRYPTO_ALG_ASYNC 0x00000080
/*
- * Set this bit if and only if the algorithm requires another algorithm of
- * the same type to handle corner cases.
+ * Set if the algorithm (or an algorithm which it uses) requires another
+ * algorithm of the same type to handle corner cases.
*/
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
@@ -102,6 +101,38 @@
#define CRYPTO_NOLOAD 0x00008000
/*
+ * The algorithm may allocate memory during request processing, i.e. during
+ * encryption, decryption, or hashing. Users can request an algorithm with this
+ * flag unset if they can't handle memory allocation failures.
+ *
+ * This flag is currently only implemented for algorithms of type "skcipher",
+ * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
+ * have this flag set even if they allocate memory.
+ *
+ * In some edge cases, algorithms can allocate memory regardless of this flag.
+ * To avoid these cases, users must obey the following usage constraints:
+ * skcipher:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - If the data were to be divided into chunks of size
+ * crypto_skcipher_walksize() (with any remainder going at the end), no
+ * chunk can cross a page boundary or a scatterlist element boundary.
+ * aead:
+ * - The IV buffer and all scatterlist elements must be aligned to the
+ * algorithm's alignmask.
+ * - The first scatterlist element must contain all the associated data,
+ * and its pages must be !PageHighMem.
+ * - If the plaintext/ciphertext were to be divided into chunks of size
+ * crypto_aead_walksize() (with the remainder going at the end), no chunk
+ * can cross a page boundary or a scatterlist element boundary.
+ * ahash:
+ * - The result buffer must be aligned to the algorithm's alignmask.
+ * - crypto_ahash_finup() must not be used unless the algorithm implements
+ * ->finup() natively.
+ */
+#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000
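For example, a user that cannot tolerate per-request allocations can pass the flag in the mask when allocating the transform; the cipher name here is only illustrative:

static struct crypto_skcipher *alloc_nofail_cipher(void)
{
	/* type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY selects only
	 * algorithms that do not have the flag set.
	 */
	return crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
}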
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
@@ -595,6 +626,8 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
struct crypto_tfm {
u32 crt_flags;
+
+ int node;
void (*exit)(struct crypto_tfm *tfm);
diff --git a/include/linux/dasd_mod.h b/include/linux/dasd_mod.h
index d39abad2ff6e..14e6cf8c6267 100644
--- a/include/linux/dasd_mod.h
+++ b/include/linux/dasd_mod.h
@@ -4,6 +4,8 @@
#include <asm/dasd.h>
+struct gendisk;
+
extern int dasd_biodasdinfo(struct gendisk *disk, dasd_information2_t *info);
#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index a81f0c3cf352..65d975bf9390 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -89,7 +89,7 @@ extern struct dentry_stat_t dentry_stat;
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
- seqcount_t d_seq; /* per dentry seqlock */
+ seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
struct qstr d_name;
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 63cb3606dea7..851dd1f9a8a5 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -38,6 +38,11 @@ struct debugfs_regset32 {
struct device *dev; /* Optional device for Runtime PM */
};
+struct debugfs_u32_array {
+ u32 *array;
+ u32 n_elements;
+};
+
extern struct dentry *arch_debugfs_dir;
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
@@ -136,7 +141,8 @@ void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
void debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent, u32 *array, u32 elements);
+ struct dentry *parent,
+ struct debugfs_u32_array *array);
struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
struct dentry *parent,
@@ -316,8 +322,8 @@ static inline bool debugfs_initialized(void)
}
static inline void debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent, u32 *array,
- u32 elements)
+ struct dentry *parent,
+ struct debugfs_u32_array *array)
{
}
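With the new signature the wrapper struct must outlive the debugfs file, so callers typically keep it static or in driver state, for example:

static u32 foo_stats[4];

static struct debugfs_u32_array foo_stats_array = {
	.array		= foo_stats,
	.n_elements	= ARRAY_SIZE(foo_stats),
};

static void foo_debugfs_init(struct dentry *parent)
{
	debugfs_create_u32_array("stats", 0444, parent, &foo_stats_array);
}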
diff --git a/include/linux/decompress/unzstd.h b/include/linux/decompress/unzstd.h
new file mode 100644
index 000000000000..56d539ae880f
--- /dev/null
+++ b/include/linux/decompress/unzstd.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_DECOMPRESS_UNZSTD_H
+#define LINUX_DECOMPRESS_UNZSTD_H
+
+int unzstd(unsigned char *inbuf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *pos,
+ void (*error_fn)(char *x));
+#endif
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 5e016a4029d9..1d0e2ce6b6d9 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -16,7 +16,7 @@
* 3. CPU clock rate changes.
*
* Please see this thread:
- * http://lists.openwall.net/linux-kernel/2011/01/09/56
+ * https://lists.openwall.net/linux-kernel/2011/01/09/56
*/
#include <linux/kernel.h>
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 57e871a559a9..12782fbb4c25 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -31,6 +31,13 @@
#define DEVFREQ_PRECHANGE (0)
#define DEVFREQ_POSTCHANGE (1)
+/* DEVFREQ work timers */
+enum devfreq_timer {
+ DEVFREQ_TIMER_DEFERRABLE = 0,
+ DEVFREQ_TIMER_DELAYED,
+ DEVFREQ_TIMER_NUM,
+};
+
struct devfreq;
struct devfreq_governor;
@@ -70,6 +77,7 @@ struct devfreq_dev_status {
* @initial_freq: The operating frequency when devfreq_add_device() is
* called.
* @polling_ms: The polling interval in ms. 0 disables polling.
+ * @timer: The timer type: either a deferrable or a delayed timer.
* @target: The device should set its operating frequency at
* freq or lowest-upper-than-freq value. If freq is
* higher than any operable frequency, set maximum.
@@ -96,6 +104,7 @@ struct devfreq_dev_status {
struct devfreq_dev_profile {
unsigned long initial_freq;
unsigned int polling_ms;
+ enum devfreq_timer timer;
int (*target)(struct device *dev, unsigned long *freq, u32 flags);
int (*get_dev_status)(struct device *dev,
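
A hedged sketch of a driver opting into the new delayed work timer through its devfreq profile; the target callback is a placeholder and the frequencies are illustrative.

#include <linux/devfreq.h>

static int example_target(struct device *dev, unsigned long *freq, u32 flags)
{
	return 0;	/* placeholder: would program the hardware here */
}

static struct devfreq_dev_profile example_profile = {
	.initial_freq	= 400000000,
	.polling_ms	= 50,
	/* Use a delayed timer instead of the default deferrable one. */
	.timer		= DEVFREQ_TIMER_DELAYED,
	.target		= example_target,
};
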
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 79a6e37a1d6f..9df2dfca68dd 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -1,17 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* devfreq_cooling: Thermal cooling device implementation for devices using
* devfreq
*
* Copyright (C) 2014-2015 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DEVFREQ_COOLING_H__
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 73dec4b5d5be..93096e524e43 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -322,12 +322,6 @@ struct dm_target {
bool discards_supported:1;
};
-/* Each target can link one of these into the table */
-struct dm_target_callbacks {
- struct list_head list;
- int (*congested_fn) (struct dm_target_callbacks *, int);
-};
-
void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
@@ -479,11 +473,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
sector_t start, sector_t len, char *params);
/*
- * Target_ctr should call this if it needs to add any callbacks.
- */
-void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
-
-/*
* Target can use this to set the table's type.
* Can only ever be called from a target's ctr.
* Useful for "hybrid" target (supports both bio-based
diff --git a/include/linux/device.h b/include/linux/device.h
index 5efed864b387..ca18da4768e3 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -13,6 +13,7 @@
#define _DEVICE_H_
#include <linux/dev_printk.h>
+#include <linux/energy_model.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
@@ -128,8 +129,12 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
__ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+#define DEVICE_ATTR_ADMIN_RW(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+#define DEVICE_ATTR_ADMIN_RO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
@@ -145,68 +150,66 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
struct device_attribute dev_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
-extern int device_create_file(struct device *device,
- const struct device_attribute *entry);
-extern void device_remove_file(struct device *dev,
- const struct device_attribute *attr);
-extern bool device_remove_file_self(struct device *dev,
- const struct device_attribute *attr);
-extern int __must_check device_create_bin_file(struct device *dev,
+int device_create_file(struct device *device,
+ const struct device_attribute *entry);
+void device_remove_file(struct device *dev,
+ const struct device_attribute *attr);
+bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr);
+int __must_check device_create_bin_file(struct device *dev,
const struct bin_attribute *attr);
-extern void device_remove_bin_file(struct device *dev,
- const struct bin_attribute *attr);
+void device_remove_bin_file(struct device *dev,
+ const struct bin_attribute *attr);
/* device resource management */
typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
#ifdef CONFIG_DEBUG_DEVRES
-extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid, const char *name) __malloc;
+void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+ int nid, const char *name) __malloc;
#define devres_alloc(release, size, gfp) \
__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
#define devres_alloc_node(release, size, gfp, nid) \
__devres_alloc_node(release, size, gfp, nid, #release)
#else
-extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid) __malloc;
+void *devres_alloc_node(dr_release_t release, size_t size,
+ gfp_t gfp, int nid) __malloc;
static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
}
#endif
-extern void devres_for_each_res(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data,
- void (*fn)(struct device *, void *, void *),
- void *data);
-extern void devres_free(void *res);
-extern void devres_add(struct device *dev, void *res);
-extern void *devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern void *devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data);
-extern void *devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern int devres_destroy(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-extern int devres_release(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
+void devres_for_each_res(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data,
+ void (*fn)(struct device *, void *, void *),
+ void *data);
+void devres_free(void *res);
+void devres_add(struct device *dev, void *res);
+void *devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+void *devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data);
+void *devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+int devres_destroy(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+int devres_release(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
/* devres group */
-extern void * __must_check devres_open_group(struct device *dev, void *id,
- gfp_t gfp);
-extern void devres_close_group(struct device *dev, void *id);
-extern void devres_remove_group(struct device *dev, void *id);
-extern int devres_release_group(struct device *dev, void *id);
+void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
+void devres_close_group(struct device *dev, void *id);
+void devres_remove_group(struct device *dev, void *id);
+int devres_release_group(struct device *dev, void *id);
/* managed devm_k.alloc/kfree for device drivers */
-extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
-extern __printf(3, 0)
-char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
- va_list ap) __malloc;
-extern __printf(3, 4)
-char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
+void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
+__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
+ const char *fmt, va_list ap) __malloc;
+__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
+ const char *fmt, ...) __malloc;
static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
@@ -226,16 +229,14 @@ static inline void *devm_kcalloc(struct device *dev,
{
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
-extern void devm_kfree(struct device *dev, const void *p);
-extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
-extern const char *devm_kstrdup_const(struct device *dev,
- const char *s, gfp_t gfp);
-extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
- gfp_t gfp);
+void devm_kfree(struct device *dev, const void *p);
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
+void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
-extern unsigned long devm_get_free_pages(struct device *dev,
- gfp_t gfp_mask, unsigned int order);
-extern void devm_free_pages(struct device *dev, unsigned long addr);
+unsigned long devm_get_free_pages(struct device *dev,
+ gfp_t gfp_mask, unsigned int order);
+void devm_free_pages(struct device *dev, unsigned long addr);
void __iomem *devm_ioremap_resource(struct device *dev,
const struct resource *res);
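
Referring back to the DEVICE_ATTR_ADMIN_RW()/DEVICE_ATTR_ADMIN_RO() helpers added earlier in this file's diff, a hedged sketch of declaring a root-only (0600) sysfs attribute; the show/store bodies are placeholders.

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t secret_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}

static ssize_t secret_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	return count;	/* placeholder: accept and ignore */
}

/* Expands to a struct device_attribute dev_attr_secret with mode 0600. */
static DEVICE_ATTR_ADMIN_RW(secret);
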
@@ -387,34 +388,6 @@ enum device_link_state {
#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
/**
- * struct device_link - Device link representation.
- * @supplier: The device on the supplier end of the link.
- * @s_node: Hook to the supplier device's list of links to consumers.
- * @consumer: The device on the consumer end of the link.
- * @c_node: Hook to the consumer device's list of links to suppliers.
- * @status: The state of the link (with respect to the presence of drivers).
- * @flags: Link flags.
- * @rpm_active: Whether or not the consumer device is runtime-PM-active.
- * @kref: Count repeated addition of the same link.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
- * @supplier_preactivated: Supplier has been made active before consumer probe.
- */
-struct device_link {
- struct device *supplier;
- struct list_head s_node;
- struct device *consumer;
- struct list_head c_node;
- enum device_link_state status;
- u32 flags;
- refcount_t rpm_active;
- struct kref kref;
-#ifdef CONFIG_SRCU
- struct rcu_head rcu_head;
-#endif
- bool supplier_preactivated; /* Owned by consumer probe. */
-};
-
-/**
* enum dl_dev_state - Device driver presence tracking information.
* @DL_DEV_NO_DRIVER: There is no driver attached to the device.
* @DL_DEV_PROBING: A driver is probing.
@@ -524,6 +497,11 @@ struct dev_links_info {
* sync_state() callback.
* @dma_coherent: this particular device is dma coherent, even if the
* architecture supports non-coherent devices.
+ * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
+ * streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
+ * and optionally (if the coherent mask is large enough) also
+ * for dma allocations. This flag is managed by the dma ops
+ * instance from ->dma_supported.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -560,6 +538,10 @@ struct device {
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
+#ifdef CONFIG_ENERGY_MODEL
+ struct em_perf_domain *em_pd;
+#endif
+
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
struct irq_domain *msi_domain;
#endif
@@ -569,8 +551,9 @@ struct device {
#ifdef CONFIG_GENERIC_MSI_IRQ
struct list_head msi_list;
#endif
-
+#ifdef CONFIG_DMA_OPS
const struct dma_map_ops *dma_ops;
+#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as
@@ -623,6 +606,39 @@ struct device {
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_coherent:1;
#endif
+#ifdef CONFIG_DMA_OPS_BYPASS
+ bool dma_ops_bypass : 1;
+#endif
+};
+
+/**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
+ * @s_node: Hook to the supplier device's list of links to consumers.
+ * @consumer: The device on the consumer end of the link.
+ * @c_node: Hook to the consumer device's list of links to suppliers.
+ * @link_dev: Device used to expose link details in sysfs.
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @kref: Count repeated addition of the same link.
+ * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ * @supplier_preactivated: Supplier has been made active before consumer probe.
+ */
+struct device_link {
+ struct device *supplier;
+ struct list_head s_node;
+ struct device *consumer;
+ struct list_head c_node;
+ struct device link_dev;
+ enum device_link_state status;
+ u32 flags;
+ refcount_t rpm_active;
+ struct kref kref;
+#ifdef CONFIG_SRCU
+ struct rcu_head rcu_head;
+#endif
+ bool supplier_preactivated; /* Owned by consumer probe. */
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -652,8 +668,7 @@ static inline const char *dev_name(const struct device *dev)
return kobject_name(&dev->kobj);
}
-extern __printf(2, 3)
-int dev_set_name(struct device *dev, const char *name, ...);
+__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
@@ -810,39 +825,39 @@ static inline bool dev_has_sync_state(struct device *dev)
/*
* High level routines for use by the bus drivers
*/
-extern int __must_check device_register(struct device *dev);
-extern void device_unregister(struct device *dev);
-extern void device_initialize(struct device *dev);
-extern int __must_check device_add(struct device *dev);
-extern void device_del(struct device *dev);
-extern int device_for_each_child(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-extern int device_for_each_child_reverse(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-extern struct device *device_find_child(struct device *dev, void *data,
- int (*match)(struct device *dev, void *data));
-extern struct device *device_find_child_by_name(struct device *parent,
- const char *name);
-extern int device_rename(struct device *dev, const char *new_name);
-extern int device_move(struct device *dev, struct device *new_parent,
- enum dpm_order dpm_order);
-extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
-extern const char *device_get_devnode(struct device *dev,
- umode_t *mode, kuid_t *uid, kgid_t *gid,
- const char **tmp);
+int __must_check device_register(struct device *dev);
+void device_unregister(struct device *dev);
+void device_initialize(struct device *dev);
+int __must_check device_add(struct device *dev);
+void device_del(struct device *dev);
+int device_for_each_child(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
+int device_for_each_child_reverse(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
+struct device *device_find_child(struct device *dev, void *data,
+ int (*match)(struct device *dev, void *data));
+struct device *device_find_child_by_name(struct device *parent,
+ const char *name);
+int device_rename(struct device *dev, const char *new_name);
+int device_move(struct device *dev, struct device *new_parent,
+ enum dpm_order dpm_order);
+int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
+const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+ kgid_t *gid, const char **tmp);
+int device_is_dependent(struct device *dev, void *target);
static inline bool device_supports_offline(struct device *dev)
{
return dev->bus && dev->bus->offline && dev->bus->online;
}
-extern void lock_device_hotplug(void);
-extern void unlock_device_hotplug(void);
-extern int lock_device_hotplug_sysfs(void);
-extern int device_offline(struct device *dev);
-extern int device_online(struct device *dev);
-extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
-extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+void lock_device_hotplug(void);
+void unlock_device_hotplug(void);
+int lock_device_hotplug_sysfs(void);
+int device_offline(struct device *dev);
+int device_online(struct device *dev);
+void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
static inline int dev_num_vf(struct device *dev)
@@ -855,14 +870,13 @@ static inline int dev_num_vf(struct device *dev)
/*
* Root device objects for grouping under /sys/devices
*/
-extern struct device *__root_device_register(const char *name,
- struct module *owner);
+struct device *__root_device_register(const char *name, struct module *owner);
/* This is a macro to avoid include problems with THIS_MODULE */
#define root_device_register(name) \
__root_device_register(name, THIS_MODULE)
-extern void root_device_unregister(struct device *root);
+void root_device_unregister(struct device *root);
static inline void *dev_get_platdata(const struct device *dev)
{
@@ -873,33 +887,31 @@ static inline void *dev_get_platdata(const struct device *dev)
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
-extern int __must_check device_bind_driver(struct device *dev);
-extern void device_release_driver(struct device *dev);
-extern int __must_check device_attach(struct device *dev);
-extern int __must_check driver_attach(struct device_driver *drv);
-extern void device_initial_probe(struct device *dev);
-extern int __must_check device_reprobe(struct device *dev);
+int __must_check device_bind_driver(struct device *dev);
+void device_release_driver(struct device *dev);
+int __must_check device_attach(struct device *dev);
+int __must_check driver_attach(struct device_driver *drv);
+void device_initial_probe(struct device *dev);
+int __must_check device_reprobe(struct device *dev);
-extern bool device_is_bound(struct device *dev);
+bool device_is_bound(struct device *dev);
/*
* Easy functions for dynamically creating devices on the fly
*/
-extern __printf(5, 6)
-struct device *device_create(struct class *cls, struct device *parent,
- dev_t devt, void *drvdata,
- const char *fmt, ...);
-extern __printf(6, 7)
-struct device *device_create_with_groups(struct class *cls,
- struct device *parent, dev_t devt, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...);
-extern void device_destroy(struct class *cls, dev_t devt);
-
-extern int __must_check device_add_groups(struct device *dev,
- const struct attribute_group **groups);
-extern void device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
+__printf(5, 6) struct device *
+device_create(struct class *cls, struct device *parent, dev_t devt,
+ void *drvdata, const char *fmt, ...);
+__printf(6, 7) struct device *
+device_create_with_groups(struct class *cls, struct device *parent, dev_t devt,
+ void *drvdata, const struct attribute_group **groups,
+ const char *fmt, ...);
+void device_destroy(struct class *cls, dev_t devt);
+
+int __must_check device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
static inline int __must_check device_add_group(struct device *dev,
const struct attribute_group *grp)
@@ -917,14 +929,14 @@ static inline void device_remove_group(struct device *dev,
return device_remove_groups(dev, groups);
}
-extern int __must_check devm_device_add_groups(struct device *dev,
+int __must_check devm_device_add_groups(struct device *dev,
const struct attribute_group **groups);
-extern void devm_device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
-extern int __must_check devm_device_add_group(struct device *dev,
- const struct attribute_group *grp);
-extern void devm_device_remove_group(struct device *dev,
- const struct attribute_group *grp);
+void devm_device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
+int __must_check devm_device_add_group(struct device *dev,
+ const struct attribute_group *grp);
+void devm_device_remove_group(struct device *dev,
+ const struct attribute_group *grp);
/*
* Platform "fixup" functions - allow the platform to have their say
@@ -941,21 +953,21 @@ extern int (*platform_notify_remove)(struct device *dev);
* get_device - atomically increment the reference count for the device.
*
*/
-extern struct device *get_device(struct device *dev);
-extern void put_device(struct device *dev);
-extern bool kill_device(struct device *dev);
+struct device *get_device(struct device *dev);
+void put_device(struct device *dev);
+bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
-extern int devtmpfs_mount(void);
+int devtmpfs_mount(void);
#else
static inline int devtmpfs_mount(void) { return 0; }
#endif
/* drivers/base/power/shutdown.c */
-extern void device_shutdown(void);
+void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
-extern const char *dev_driver_string(const struct device *dev);
+const char *dev_driver_string(const struct device *dev);
/* Device links interface. */
struct device_link *device_link_add(struct device *consumer,
@@ -965,6 +977,9 @@ void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
+extern __printf(3, 4)
+int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index 4208f94d93f7..7b3b04ba60f3 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -67,8 +67,6 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
extern void debug_dma_dump_mappings(struct device *dev);
-extern void debug_dma_assert_idle(struct page *page);
-
#else /* CONFIG_DMA_API_DEBUG */
static inline void dma_debug_add_bus(struct bus_type *bus)
@@ -157,10 +155,6 @@ static inline void debug_dma_dump_mappings(struct device *dev)
{
}
-static inline void debug_dma_assert_idle(struct page *page)
-{
-}
-
#endif /* CONFIG_DMA_API_DEBUG */
#endif /* __DMA_DEBUG_H */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index ab2e20cba951..6e87225600ae 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -1,10 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internals of the DMA direct mapping implementation. Only for use by the
+ * DMA mapping code and IOMMU drivers.
+ */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1
#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
+#include <linux/swiotlb.h>
extern unsigned int zone_dma_bits;
@@ -67,9 +73,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
}
u64 dma_direct_get_required_mask(struct device *dev);
-gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
- u64 *phys_mask);
-bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
@@ -87,4 +90,102 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+size_t dma_direct_max_mapping_size(struct device *dev);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_unmap_sg(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
+static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+static inline void dma_direct_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ phys_addr_t paddr = dma_to_phys(dev, addr);
+
+ if (unlikely(is_swiotlb_buffer(paddr)))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_device(paddr, size, dir);
+}
+
+static inline void dma_direct_sync_single_for_cpu(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ phys_addr_t paddr = dma_to_phys(dev, addr);
+
+ if (!dev_is_dma_coherent(dev)) {
+ arch_sync_dma_for_cpu(paddr, size, dir);
+ arch_sync_dma_for_cpu_all();
+ }
+
+ if (unlikely(is_swiotlb_buffer(paddr)))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+ struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ phys_addr_t phys = page_to_phys(page) + offset;
+ dma_addr_t dma_addr = phys_to_dma(dev, phys);
+
+ if (unlikely(swiotlb_force == SWIOTLB_FORCE))
+ return swiotlb_map(dev, phys, size, dir, attrs);
+
+ if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+ if (swiotlb_force != SWIOTLB_NO_FORCE)
+ return swiotlb_map(dev, phys, size, dir, attrs);
+
+ dev_WARN_ONCE(dev, 1,
+ "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+ &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+ return DMA_MAPPING_ERROR;
+ }
+
+ if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(phys, size, dir);
+ return dma_addr;
+}
+
+static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ phys_addr_t phys = dma_to_phys(dev, addr);
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+
+ if (unlikely(is_swiotlb_buffer(phys)))
+ swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
+}
#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 3347c54f3a87..09e23adb351d 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -357,6 +357,19 @@ dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
} while (1);
}
+#ifdef CONFIG_LOCKDEP
+bool dma_fence_begin_signalling(void);
+void dma_fence_end_signalling(bool cookie);
+void __dma_fence_might_wait(void);
+#else
+static inline bool dma_fence_begin_signalling(void)
+{
+ return true;
+}
+static inline void dma_fence_end_signalling(bool cookie) {}
+static inline void __dma_fence_might_wait(void) {}
+#endif
+
int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
signed long dma_fence_default_wait(struct dma_fence *fence,
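
A hedged sketch of the new fence-signalling annotations: a driver brackets the code that must be able to signal fences without blocking, so lockdep can flag allocations or waits inside that critical section. The fence pointer is illustrative.

#include <linux/dma-fence.h>

static void example_complete_job(struct dma_fence *fence)
{
	bool cookie;

	/* Everything between begin and end is treated by lockdep as a
	 * fence-signalling critical section. */
	cookie = dma_fence_begin_signalling();
	dma_fence_signal(fence);
	dma_fence_end_signalling(cookie);
}
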
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index a33ed3954ed4..52635e91143b 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -14,7 +14,7 @@
/**
* List of possible attributes associated with a DMA mapping. The semantics
- * of each attribute should be defined in Documentation/DMA-attributes.txt.
+ * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
*/
/*
@@ -188,76 +188,10 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
-static inline bool dma_is_direct(const struct dma_map_ops *ops)
-{
- return likely(!ops);
-}
-
-/*
- * All the dma_direct_* declarations are here just for the indirect call bypass,
- * and must not be used directly drivers!
- */
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir, unsigned long attrs);
-dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
- defined(CONFIG_SWIOTLB)
-void dma_direct_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-#else
-static inline void dma_direct_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_direct_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
- defined(CONFIG_SWIOTLB)
-void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_sync_single_for_cpu(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-#else
-static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-}
-static inline void dma_direct_unmap_sg(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
-static inline void dma_direct_sync_single_for_cpu(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-size_t dma_direct_max_mapping_size(struct device *dev);
-
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
+#ifdef CONFIG_DMA_OPS
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev->dma_ops)
@@ -270,165 +204,16 @@ static inline void set_dma_ops(struct device *dev,
{
dev->dma_ops = dma_ops;
}
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
- struct page *page, size_t offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
- else
- addr = ops->map_page(dev, page, offset, size, dir, attrs);
- debug_dma_map_page(dev, page, offset, size, dir, addr);
-
- return addr;
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_unmap_page(dev, addr, size, dir, attrs);
- else if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir);
-}
-
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- int ents;
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
- else
- ents = ops->map_sg(dev, sg, nents, dir, attrs);
- BUG_ON(ents < 0);
- debug_dma_map_sg(dev, sg, nents, ents, dir);
-
- return ents;
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- debug_dma_unmap_sg(dev, sg, nents, dir);
- if (dma_is_direct(ops))
- dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
- else if (ops->unmap_sg)
- ops->unmap_sg(dev, sg, nents, dir, attrs);
-}
-
-static inline dma_addr_t dma_map_resource(struct device *dev,
- phys_addr_t phys_addr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr = DMA_MAPPING_ERROR;
-
- BUG_ON(!valid_dma_direction(dir));
-
- /* Don't allow RAM to be mapped */
- if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
- return DMA_MAPPING_ERROR;
-
- if (dma_is_direct(ops))
- addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
- else if (ops->map_resource)
- addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
-
- debug_dma_map_resource(dev, phys_addr, size, dir, addr);
- return addr;
-}
-
-static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (!dma_is_direct(ops) && ops->unmap_resource)
- ops->unmap_resource(dev, addr, size, dir, attrs);
- debug_dma_unmap_resource(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_sync_single_for_cpu(dev, addr, size, dir);
- else if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr, size, dir);
- debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_sync_single_for_device(dev, addr, size, dir);
- else if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr, size, dir);
- debug_dma_sync_single_for_device(dev, addr, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
+#else /* CONFIG_DMA_OPS */
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
- else if (ops->sync_sg_for_cpu)
- ops->sync_sg_for_cpu(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+ return NULL;
}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (dma_is_direct(ops))
- dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
- else if (ops->sync_sg_for_device)
- ops->sync_sg_for_device(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
}
+#endif /* CONFIG_DMA_OPS */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
@@ -439,6 +224,28 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs);
+dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir);
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir);
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -715,8 +522,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);
-void *dma_alloc_from_pool(struct device *dev, size_t size,
- struct page **ret_page, gfp_t flags);
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+ void **cpu_addr, gfp_t flags,
+ bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);
int
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index ee50d10f052b..d44a77e8a7e3 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,8 +46,6 @@
#include <linux/rcupdate.h>
extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
/**
* struct dma_resv_list - a list of shared fences
@@ -71,7 +69,7 @@ struct dma_resv_list {
*/
struct dma_resv {
struct ww_mutex lock;
- seqcount_t seq;
+ seqcount_ww_mutex_t seq;
struct dma_fence __rcu *fence_excl;
struct dma_resv_list __rcu *fence;
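
For orientation, a hedged sketch of the write side after the seqcount_ww_mutex_t conversion above: updates still go through the existing helpers, but lockdep now verifies that the reservation ww_mutex is held around them. Reference handling is omitted and the function name is illustrative.

#include <linux/dma-resv.h>

static void example_set_exclusive(struct dma_resv *obj, struct dma_fence *fence)
{
	if (dma_resv_lock(obj, NULL))
		return;		/* cannot fail with a NULL acquire context */

	/* The helper bumps obj->seq internally; with seqcount_ww_mutex_t,
	 * lockdep checks that obj->lock is held around the update. */
	dma_resv_add_excl_fence(obj, fence);

	dma_resv_unlock(obj);
}
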
diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
index 61d5cc0ad601..1962f75fa2d3 100644
--- a/include/linux/dma/k3-psil.h
+++ b/include/linux/dma/k3-psil.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
*/
#ifndef K3_PSIL_H_
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index caadbab1632a..5eb34ad973a7 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
*/
#ifndef K3_UDMA_GLUE_H_
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
index 579356ae447e..5896441ee604 100644
--- a/include/linux/dma/ti-cppi5.h
+++ b/include/linux/dma/ti-cppi5.h
@@ -2,7 +2,7 @@
/*
* CPPI5 descriptors interface
*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
*/
#ifndef __TI_CPPI5_H__
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 6283917edd90..6fbd5c99e30c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -39,6 +39,7 @@ enum dma_status {
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
+ DMA_OUT_OF_ORDER,
};
/**
@@ -61,6 +62,9 @@ enum dma_transaction_type {
DMA_SLAVE,
DMA_CYCLIC,
DMA_INTERLEAVE,
+ DMA_COMPLETION_NO_ORDER,
+ DMA_REPEAT,
+ DMA_LOAD_EOT,
/* last transaction type for creation of the capabilities mask */
DMA_TX_TYPE_END,
};
@@ -162,7 +166,7 @@ struct dma_interleaved_template {
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
* this transaction
* @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
- * acknowledges receipt, i.e. has has a chance to establish any dependency
+ * acknowledges receipt, i.e. has a chance to establish any dependency
* chains
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
@@ -176,6 +180,16 @@ struct dma_interleaved_template {
* @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
* data and the descriptor should be in different format from normal
* data descriptors.
+ * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically
+ * repeated when it ends until a transaction is issued on the same channel
+ * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to
+ * interleaved transactions and is ignored for all other transaction types.
+ * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any
+ * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the
+ * repeated transaction ends. Not setting this flag when the previously queued
+ * transaction is marked with DMA_PREP_REPEAT will cause the new transaction
+ * to never be processed and stay in the issued queue forever. The flag is
+ * ignored if the previous transaction is not a repeated transaction.
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -186,6 +200,8 @@ enum dma_ctrl_flags {
DMA_PREP_FENCE = (1 << 5),
DMA_CTRL_REUSE = (1 << 6),
DMA_PREP_CMD = (1 << 7),
+ DMA_PREP_REPEAT = (1 << 8),
+ DMA_PREP_LOAD_EOT = (1 << 9),
};
/**
@@ -465,7 +481,11 @@ enum dma_residue_granularity {
* Since the enum dma_transfer_direction is not defined as bit flag for
* each type, the dma controller should set BIT(<TYPE>) and same
* should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
* @cmd_pause: true, if pause is supported (i.e. for reading residue or
* for resume later)
* @cmd_resume: true, if resume is supported
@@ -478,7 +498,9 @@ struct dma_slave_caps {
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool cmd_pause;
bool cmd_resume;
bool cmd_terminate;
@@ -769,7 +791,11 @@ struct dma_filter {
* Since the enum dma_transfer_direction is not defined as bit flag for
* each type, the dma controller should set BIT(<TYPE>) and same
* should be checked by controller as well
+ * @min_burst: min burst capability per-transfer
* @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ * DMA transaction with no software intervention for reinitialization.
+ * Zero value means unlimited number of entries.
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
@@ -789,6 +815,8 @@ struct dma_filter {
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
* @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_caps: May be used to override the generic DMA slave capabilities
+ * with per-channel specific ones
* @device_config: Pushes a new configuration to a channel, return 0 or an error
* code
* @device_pause: Pauses any transfer happening on a channel. Returns
@@ -839,7 +867,9 @@ struct dma_device {
u32 src_addr_widths;
u32 dst_addr_widths;
u32 directions;
+ u32 min_burst;
u32 max_burst;
+ u32 max_sg_burst;
bool descriptor_reuse;
enum dma_residue_granularity residue_granularity;
@@ -887,6 +917,8 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t dst, u64 data,
unsigned long flags);
+ void (*device_caps)(struct dma_chan *chan,
+ struct dma_slave_caps *caps);
int (*device_config)(struct dma_chan *chan,
struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
@@ -980,6 +1012,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
{
if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
return NULL;
+ if (flags & DMA_PREP_REPEAT &&
+ !test_bit(DMA_REPEAT, chan->device->cap_mask.bits))
+ return NULL;
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
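
A hedged sketch of issuing a repeated interleaved transfer with the new DMA_PREP_REPEAT / DMA_PREP_LOAD_EOT flags; the interleaved template setup is elided and the error codes are illustrative.

#include <linux/dmaengine.h>

static int example_start_repeated_frame(struct dma_chan *chan,
					struct dma_interleaved_template *xt)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Repeat this frame until a later descriptor carrying
	 * DMA_PREP_LOAD_EOT replaces it at a frame boundary. */
	desc = dmaengine_prep_interleaved_dma(chan, xt,
			DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
	if (!desc)
		return -EINVAL;	/* e.g. channel lacks DMA_REPEAT capability */

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
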
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index d7bf029df737..65565820328a 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -48,6 +48,7 @@ struct dmar_drhd_unit {
u16 segment; /* PCI domain */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
+ u8 gfx_dedicated:1; /* graphic dedicated */
struct intel_iommu *iommu;
};
diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h
new file mode 100644
index 000000000000..5a3470bcc8a7
--- /dev/null
+++ b/include/linux/dsa/loop.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DSA_LOOP_H
+#define DSA_LOOP_H
+
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <net/dsa.h>
+
+struct dsa_loop_vlan {
+ u16 members;
+ u16 untagged;
+};
+
+struct dsa_loop_mib_entry {
+ char name[ETH_GSTRING_LEN];
+ unsigned long val;
+};
+
+enum dsa_loop_mib_counters {
+ DSA_LOOP_PHY_READ_OK,
+ DSA_LOOP_PHY_READ_ERR,
+ DSA_LOOP_PHY_WRITE_OK,
+ DSA_LOOP_PHY_WRITE_ERR,
+ __DSA_LOOP_CNT_MAX,
+};
+
+struct dsa_loop_port {
+ struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX];
+ u16 pvid;
+ int mtu;
+};
+
+struct dsa_loop_priv {
+ struct mii_bus *bus;
+ unsigned int port_base;
+ struct dsa_loop_vlan vlans[VLAN_N_VID];
+ struct net_device *netdev;
+ struct dsa_loop_port ports[DSA_MAX_PORTS];
+};
+
+#endif /* DSA_LOOP_H */
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index abcd5fde30eb..aa9ff9e1c0b3 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -80,7 +80,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
static struct _ddebug __aligned(8) \
- __attribute__((section("__verbose"))) name = { \
+ __section(__dyndbg) name = { \
.modname = KBUILD_MODNAME, \
.function = __func__, \
.filename = __FILE__, \
@@ -133,7 +133,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
/*
* "Factory macro" for generating a call to func, guarded by a
- * DYNAMIC_DEBUG_BRANCH. The dynamic debug decriptor will be
+ * DYNAMIC_DEBUG_BRANCH. The dynamic debug descriptor will be
* initialized using the fmt argument. The function will be called with
* the address of the descriptor as first argument, followed by all
* the varargs. Note that fmt is repeated in invocations of this
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 99fc06f0afc1..407c2f281b64 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -38,6 +38,8 @@
#ifdef __KERNEL__
+#include <asm/bug.h>
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 6eb7d55d7c3d..15e8f3d8a895 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -595,27 +595,6 @@ struct mem_ctl_info {
: NULL)
/**
- * edac_get_dimm_by_index - Get DIMM info at @index from a memory
- * controller
- *
- * @mci: MC descriptor struct mem_ctl_info
- * @index: index in the memory controller's DIMM array
- *
- * Returns a struct dimm_info * or NULL on failure.
- */
-static inline struct dimm_info *
-edac_get_dimm_by_index(struct mem_ctl_info *mci, int index)
-{
- if (index < 0 || index >= mci->tot_dimms)
- return NULL;
-
- if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
- return NULL;
-
- return mci->dimms[index];
-}
-
-/**
* edac_get_dimm - Get DIMM info from a memory controller given by
* [layer0,layer1,layer2] position
*
@@ -650,6 +629,12 @@ static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
if (mci->n_layers > 2)
index = index * mci->layers[2].size + layer2;
- return edac_get_dimm_by_index(mci, index);
+ if (index < 0 || index >= mci->tot_dimms)
+ return NULL;
+
+ if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
+ return NULL;
+
+ return mci->dimms[index];
}
#endif /* _LINUX_EDAC_H_ */
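
The removed edac_get_dimm_by_index() helper is now open-coded inside edac_get_dimm(); callers are unaffected. A hedged sketch of the caller-visible behaviour, with illustrative layer indices:

#include <linux/edac.h>

static bool example_dimm_present(struct mem_ctl_info *mci, int layer0, int layer1)
{
	/* Returns NULL when the position is out of range for this controller. */
	struct dimm_info *dimm = edac_get_dimm(mci, layer0, layer1, 0);

	return dimm != NULL;
}
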
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 05c47f857383..73db1ae04cef 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -606,7 +606,11 @@ extern void *efi_get_pal_addr (void);
extern void efi_map_pal_code (void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
+#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
+#else
+static inline void efi_enter_virtual_mode (void) {}
+#endif
#ifdef CONFIG_X86
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index 7a37f4ce9fd2..10485f0c9740 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -32,10 +32,6 @@ struct compat_elf_prstatus
struct old_timeval32 pr_cutime;
struct old_timeval32 pr_cstime;
compat_elf_gregset_t pr_reg;
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- compat_ulong_t pr_exec_fdpic_loadmap;
- compat_ulong_t pr_interp_fdpic_loadmap;
-#endif
compat_int_t pr_fpvalid;
};
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 4cad0e784b28..46c3d691f677 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -5,12 +5,65 @@
#include <linux/user.h>
#include <linux/bug.h>
#include <linux/sched/task_stack.h>
-
-#include <asm/elf.h>
-#include <uapi/linux/elfcore.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/fs.h>
+#include <linux/elf.h>
struct coredump_params;
+struct elf_siginfo
+{
+ int si_signo; /* signal number */
+ int si_code; /* extra code */
+ int si_errno; /* errno */
+};
+
+/*
+ * Definitions to generate Intel SVR4-like core files.
+ * These mostly have the same names as the SVR4 types with "elf_"
+ * tacked on the front to prevent clashes with linux definitions,
+ * and the typedef forms have been avoided. This is mostly like
+ * the SVR4 structure, but more Linuxy, with things that Linux does
+ * not support and which gdb doesn't really use excluded.
+ */
+struct elf_prstatus
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned long pr_sigpend; /* Set of pending signals */
+ unsigned long pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct __kernel_old_timeval pr_utime; /* User time */
+ struct __kernel_old_timeval pr_stime; /* System time */
+ struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
+ struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
+ elf_gregset_t pr_reg; /* GP registers */
+ int pr_fpvalid; /* True if math co-processor being used. */
+};
+
+#define ELF_PRARGSZ (80) /* Number of chars for args */
+
+struct elf_prpsinfo
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned long pr_flag; /* flags */
+ __kernel_uid_t pr_uid;
+ __kernel_gid_t pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
{
#ifdef ELF_CORE_COPY_REGS
@@ -51,13 +104,6 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif
}
-#ifdef ELF_CORE_COPY_XFPREGS
-static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
-{
- return ELF_CORE_COPY_XFPREGS(t, xfpu);
-}
-#endif
-
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index ade6486a3382..b67a51c574b9 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
+#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/rcupdate.h>
@@ -10,13 +11,15 @@
#include <linux/types.h>
/**
- * em_cap_state - Capacity state of a performance domain
- * @frequency: The CPU frequency in KHz, for consistency with CPUFreq
- * @power: The power consumed by 1 CPU at this level, in milli-watts
+ * em_perf_state - Performance state of a performance domain
+ * @frequency: The frequency in KHz, for consistency with CPUFreq
+ * @power: The power consumed at this level, in milli-watts (by 1 CPU or
+ * by a registered device). It can be a total power: static and
+ * dynamic.
* @cost: The cost coefficient associated with this level, used during
* energy calculation. Equal to: power * max_frequency / frequency
*/
-struct em_cap_state {
+struct em_perf_state {
unsigned long frequency;
unsigned long power;
unsigned long cost;
@@ -24,102 +27,119 @@ struct em_cap_state {
/**
* em_perf_domain - Performance domain
- * @table: List of capacity states, in ascending order
- * @nr_cap_states: Number of capacity states
- * @cpus: Cpumask covering the CPUs of the domain
+ * @table: List of performance states, in ascending order
+ * @nr_perf_states: Number of performance states
+ * @cpus: Cpumask covering the CPUs of the domain. It's here
+ * for performance reasons to avoid potential cache
+ * misses during energy calculations in the scheduler
+ * and it simplifies allocating/freeing that memory region.
*
- * A "performance domain" represents a group of CPUs whose performance is
- * scaled together. All CPUs of a performance domain must have the same
- * micro-architecture. Performance domains often have a 1-to-1 mapping with
- * CPUFreq policies.
+ * In case of CPU device, a "performance domain" represents a group of CPUs
+ * whose performance is scaled together. All CPUs of a performance domain
+ * must have the same micro-architecture. Performance domains often have
+ * a 1-to-1 mapping with CPUFreq policies. In case of other devices the @cpus
+ * field is unused.
*/
struct em_perf_domain {
- struct em_cap_state *table;
- int nr_cap_states;
+ struct em_perf_state *table;
+ int nr_perf_states;
unsigned long cpus[];
};
+#define em_span_cpus(em) (to_cpumask((em)->cpus))
+
#ifdef CONFIG_ENERGY_MODEL
-#define EM_CPU_MAX_POWER 0xFFFF
+#define EM_MAX_POWER 0xFFFF
struct em_data_callback {
/**
- * active_power() - Provide power at the next capacity state of a CPU
- * @power : Active power at the capacity state in mW (modified)
- * @freq : Frequency at the capacity state in kHz (modified)
- * @cpu : CPU for which we do this operation
+ * active_power() - Provide power at the next performance state of
+ * a device
+ * @power : Active power at the performance state in mW
+ * (modified)
+ * @freq : Frequency at the performance state in kHz
+ * (modified)
+ * @dev : Device for which we do this operation (can be a CPU)
*
- * active_power() must find the lowest capacity state of 'cpu' above
+ * active_power() must find the lowest performance state of 'dev' above
* 'freq' and update 'power' and 'freq' to the matching active power
* and frequency.
*
- * The power is the one of a single CPU in the domain, expressed in
- * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER]
- * range.
+ * In case of CPUs, the power is the one of a single CPU in the domain,
+ * expressed in milli-watts. It is expected to fit in the
+ * [0, EM_MAX_POWER] range.
*
* Return 0 on success.
*/
- int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
+ int (*active_power)(unsigned long *power, unsigned long *freq,
+ struct device *dev);
};
#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
struct em_perf_domain *em_cpu_get(int cpu);
-int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
- struct em_data_callback *cb);
+struct em_perf_domain *em_pd_get(struct device *dev);
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ struct em_data_callback *cb, cpumask_t *span);
+void em_dev_unregister_perf_domain(struct device *dev);
/**
- * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain
+ * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
+ performance domain
* @pd : performance domain for which energy has to be estimated
* @max_util : highest utilization among CPUs of the domain
* @sum_util : sum of the utilization of all CPUs in the domain
*
+ * This function must be used only for CPU devices. There is no validation,
+ * i.e. whether the EM is of CPU type and has a cpumask allocated. It is
+ * called from the scheduler code quite frequently and that is why there
+ * are no checks.
+ *
* Return: the sum of the energy consumed by the CPUs of the domain assuming
* a capacity state satisfying the max utilization of the domain.
*/
-static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util)
{
unsigned long freq, scale_cpu;
- struct em_cap_state *cs;
+ struct em_perf_state *ps;
int i, cpu;
/*
- * In order to predict the capacity state, map the utilization of the
- * most utilized CPU of the performance domain to a requested frequency,
- * like schedutil.
+ * In order to predict the performance state, map the utilization of
+ * the most utilized CPU of the performance domain to a requested
+ * frequency, like schedutil.
*/
cpu = cpumask_first(to_cpumask(pd->cpus));
scale_cpu = arch_scale_cpu_capacity(cpu);
- cs = &pd->table[pd->nr_cap_states - 1];
- freq = map_util_freq(max_util, cs->frequency, scale_cpu);
+ ps = &pd->table[pd->nr_perf_states - 1];
+ freq = map_util_freq(max_util, ps->frequency, scale_cpu);
/*
- * Find the lowest capacity state of the Energy Model above the
+ * Find the lowest performance state of the Energy Model above the
* requested frequency.
*/
- for (i = 0; i < pd->nr_cap_states; i++) {
- cs = &pd->table[i];
- if (cs->frequency >= freq)
+ for (i = 0; i < pd->nr_perf_states; i++) {
+ ps = &pd->table[i];
+ if (ps->frequency >= freq)
break;
}
/*
- * The capacity of a CPU in the domain at that capacity state (cs)
+ * The capacity of a CPU in the domain at the performance state (ps)
* can be computed as:
*
- * cs->freq * scale_cpu
- * cs->cap = -------------------- (1)
+ * ps->freq * scale_cpu
+ * ps->cap = -------------------- (1)
* cpu_max_freq
*
* So, ignoring the costs of idle states (which are not available in
- * the EM), the energy consumed by this CPU at that capacity state is
- * estimated as:
+ * the EM), the energy consumed by this CPU at that performance state
+ * is estimated as:
*
- * cs->power * cpu_util
+ * ps->power * cpu_util
* cpu_nrg = -------------------- (2)
- * cs->cap
+ * ps->cap
*
- * since 'cpu_util / cs->cap' represents its percentage of busy time.
+ * since 'cpu_util / ps->cap' represents its percentage of busy time.
*
* NOTE: Although the result of this computation actually is in
* units of power, it can be manipulated as an energy value
@@ -129,55 +149,64 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
* By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
* of two terms:
*
- * cs->power * cpu_max_freq cpu_util
+ * ps->power * cpu_max_freq cpu_util
* cpu_nrg = ------------------------ * --------- (3)
- * cs->freq scale_cpu
+ * ps->freq scale_cpu
*
- * The first term is static, and is stored in the em_cap_state struct
- * as 'cs->cost'.
+ * The first term is static, and is stored in the em_perf_state struct
+ * as 'ps->cost'.
*
* Since all CPUs of the domain have the same micro-architecture, they
- * share the same 'cs->cost', and the same CPU capacity. Hence, the
+ * share the same 'ps->cost', and the same CPU capacity. Hence, the
* total energy of the domain (which is the simple sum of the energy of
* all of its CPUs) can be factorized as:
*
- * cs->cost * \Sum cpu_util
+ * ps->cost * \Sum cpu_util
* pd_nrg = ------------------------ (4)
* scale_cpu
*/
- return cs->cost * sum_util / scale_cpu;
+ return ps->cost * sum_util / scale_cpu;
}
/**
- * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain
+ * em_pd_nr_perf_states() - Get the number of performance states of a perf.
+ * domain
* @pd : performance domain for which this must be done
*
- * Return: the number of capacity states in the performance domain table
+ * Return: the number of performance states in the performance domain table
*/
-static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
- return pd->nr_cap_states;
+ return pd->nr_perf_states;
}
#else
struct em_data_callback {};
#define EM_DATA_CB(_active_power_cb) { }
-static inline int em_register_perf_domain(cpumask_t *span,
- unsigned int nr_states, struct em_data_callback *cb)
+static inline
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+ struct em_data_callback *cb, cpumask_t *span)
{
return -EINVAL;
}
+static inline void em_dev_unregister_perf_domain(struct device *dev)
+{
+}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
return NULL;
}
-static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+static inline struct em_perf_domain *em_pd_get(struct device *dev)
+{
+ return NULL;
+}
+static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util)
{
return 0;
}
-static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
return 0;
}
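
A standalone sketch (not part of the patch) that works the cost formula above with
made-up numbers, to make the estimate in (4) concrete. All values are assumptions
chosen only for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long power = 200;		/* mW at this performance state */
	unsigned long freq = 1500000;		/* state frequency in kHz */
	unsigned long max_freq = 2000000;	/* domain max frequency in kHz */
	unsigned long scale_cpu = 1024;		/* arch_scale_cpu_capacity() */
	unsigned long sum_util = 600;		/* summed utilization of the domain */

	/* Pre-computed at registration time: ps->cost = power * max_freq / freq */
	unsigned long cost = power * max_freq / freq;		/* = 266 */

	/* Estimate (4): pd_nrg = ps->cost * sum_util / scale_cpu */
	unsigned long pd_nrg = cost * sum_util / scale_cpu;	/* = 155 */

	printf("cost=%lu estimated energy=%lu\n", cost, pd_nrg);
	return 0;
}
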
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
new file mode 100644
index 000000000000..efebbffcd5cc
--- /dev/null
+++ b/include/linux/entry-common.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_ENTRYCOMMON_H
+#define __LINUX_ENTRYCOMMON_H
+
+#include <linux/tracehook.h>
+#include <linux/syscalls.h>
+#include <linux/seccomp.h>
+#include <linux/sched.h>
+
+#include <asm/entry-common.h>
+
+/*
+ * Define dummy _TIF work flags if not defined by the architecture or for
+ * disabled functionality.
+ */
+#ifndef _TIF_SYSCALL_EMU
+# define _TIF_SYSCALL_EMU (0)
+#endif
+
+#ifndef _TIF_SYSCALL_TRACEPOINT
+# define _TIF_SYSCALL_TRACEPOINT (0)
+#endif
+
+#ifndef _TIF_SECCOMP
+# define _TIF_SECCOMP (0)
+#endif
+
+#ifndef _TIF_SYSCALL_AUDIT
+# define _TIF_SYSCALL_AUDIT (0)
+#endif
+
+#ifndef _TIF_PATCH_PENDING
+# define _TIF_PATCH_PENDING (0)
+#endif
+
+#ifndef _TIF_UPROBE
+# define _TIF_UPROBE (0)
+#endif
+
+/*
+ * TIF flags handled in syscall_enter_from_user_mode()
+ */
+#ifndef ARCH_SYSCALL_ENTER_WORK
+# define ARCH_SYSCALL_ENTER_WORK (0)
+#endif
+
+#define SYSCALL_ENTER_WORK \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_EMU | \
+ ARCH_SYSCALL_ENTER_WORK)
+
+/*
+ * TIF flags handled in syscall_exit_to_user_mode()
+ */
+#ifndef ARCH_SYSCALL_EXIT_WORK
+# define ARCH_SYSCALL_EXIT_WORK (0)
+#endif
+
+#define SYSCALL_EXIT_WORK \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK)
+
+/*
+ * TIF flags handled in exit_to_user_mode_loop()
+ */
+#ifndef ARCH_EXIT_TO_USER_MODE_WORK
+# define ARCH_EXIT_TO_USER_MODE_WORK (0)
+#endif
+
+#define EXIT_TO_USER_MODE_WORK \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | \
+ ARCH_EXIT_TO_USER_MODE_WORK)
+
+/**
+ * arch_check_user_regs - Architecture specific sanity check for user mode regs
+ * @regs: Pointer to current's pt_regs
+ *
+ * Defaults to an empty implementation. Can be replaced by architecture
+ * specific code.
+ *
+ * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
+ * section. Use __always_inline so the compiler cannot push it out of line
+ * and make it instrumentable.
+ */
+static __always_inline void arch_check_user_regs(struct pt_regs *regs);
+
+#ifndef arch_check_user_regs
+static __always_inline void arch_check_user_regs(struct pt_regs *regs) {}
+#endif
+
+/**
+ * arch_syscall_enter_tracehook - Wrapper around tracehook_report_syscall_entry()
+ * @regs: Pointer to current's pt_regs
+ *
+ * Returns: 0 on success or an error code to skip the syscall.
+ *
+ * Defaults to tracehook_report_syscall_entry(). Can be replaced by
+ * architecture specific code.
+ *
+ * Invoked from syscall_enter_from_user_mode()
+ */
+static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs);
+
+#ifndef arch_syscall_enter_tracehook
+static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs)
+{
+ return tracehook_report_syscall_entry(regs);
+}
+#endif
+
+/**
+ * syscall_enter_from_user_mode - Check and handle work before invoking
+ * a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct and the subsequent functions can be
+ * instrumented.
+ *
+ * Returns: The original or a modified syscall number
+ *
+ * If the returned syscall number is -1 then the syscall should be
+ * skipped. In this case the caller may invoke syscall_set_error() or
+ * syscall_set_return_value() first. If neither of those are called and -1
+ * is returned, then the syscall will fail with ENOSYS.
+ *
+ * The following functionality is handled here:
+ *
+ * 1) Establish state (lockdep, RCU (context tracking), tracing)
+ * 2) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
+ * __secure_computing(), trace_sys_enter()
+ * 3) Invocation of audit_syscall_entry()
+ */
+long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
+
+/**
+ * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Defaults to local_irq_enable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
+
+#ifndef local_irq_enable_exit_to_user
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
+{
+ local_irq_enable();
+}
+#endif
+
+/**
+ * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
+ *
+ * Defaults to local_irq_disable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_disable_exit_to_user(void);
+
+#ifndef local_irq_disable_exit_to_user
+static inline void local_irq_disable_exit_to_user(void)
+{
+ local_irq_disable();
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
+ * to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_loop() with interrupts enabled
+ *
+ * Defaults to NOOP. Can be supplied by architecture specific code.
+ */
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_work
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_prepare - Architecture specific preparation for
+ * exit to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ */
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_prepare
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode - Architecture specific final work before
+ * exit to user mode.
+ *
+ * Invoked from exit_to_user_mode() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ *
+ * This needs to be __always_inline because it is non-instrumentable code
+ * invoked after context tracking switched to user mode.
+ *
+ * An architecture implementation must not do anything complex, no locking
+ * etc. The main purpose is for speculation mitigations.
+ */
+static __always_inline void arch_exit_to_user_mode(void);
+
+#ifndef arch_exit_to_user_mode
+static __always_inline void arch_exit_to_user_mode(void) { }
+#endif
+
+/**
+ * arch_do_signal - Architecture specific signal delivery function
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from exit_to_user_mode_loop().
+ */
+void arch_do_signal(struct pt_regs *regs);
+
+/**
+ * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit()
+ * @regs: Pointer to current's pt_regs
+ * @step: Indicator for single step
+ *
+ * Defaults to tracehook_report_syscall_exit(). Can be replaced by
+ * architecture specific code.
+ *
+ * Invoked from syscall_exit_to_user_mode()
+ */
+static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step);
+
+#ifndef arch_syscall_exit_tracehook
+static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
+{
+ tracehook_report_syscall_exit(regs, step);
+}
+#endif
+
+/**
+ * syscall_exit_to_user_mode - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts enabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific syscall and ret
+ * from fork code.
+ *
+ * The call order is:
+ * 1) One-time syscall exit work:
+ * - rseq syscall exit
+ * - audit
+ * - syscall tracing
+ * - tracehook (single stepping)
+ *
+ * 2) Preparatory work
+ * - Exit to user mode loop (common TIF handling). Invokes
+ * arch_exit_to_user_mode_work() for architecture specific TIF work
+ * - Architecture specific one time work arch_exit_to_user_mode_prepare()
+ * - Address limit and lockdep checks
+ *
+ * 3) Final transition (lockdep, tracing, context tracking, RCU). Invokes
+ * arch_exit_to_user_mode() to handle e.g. speculation mitigations
+ */
+void syscall_exit_to_user_mode(struct pt_regs *regs);
+
+/**
+ * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
+ * @regs: Pointer to currents pt_regs
+ *
+ * Invoked from architecture specific entry code with interrupts disabled.
+ * Can only be called when the interrupt entry came from user mode. The
+ * calling code must be non-instrumentable. When the function returns all
+ * state is correct and the subsequent functions can be instrumented.
+ *
+ * The function establishes state (lockdep, RCU (context tracking), tracing)
+ */
+void irqentry_enter_from_user_mode(struct pt_regs *regs);
+
+/**
+ * irqentry_exit_to_user_mode - Interrupt exit work
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts disabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific interrupt
+ * handling code.
+ *
+ * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
+ * Interrupt exit does not invoke #1, which is the syscall specific one time
+ * work.
+ */
+void irqentry_exit_to_user_mode(struct pt_regs *regs);
+
+#ifndef irqentry_state
+typedef struct irqentry_state {
+ bool exit_rcu;
+} irqentry_state_t;
+#endif
+
+/**
+ * irqentry_enter - Handle state tracking on ordinary interrupt entries
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes:
+ * - lockdep irqflag state tracking as low level ASM entry disabled
+ * interrupts.
+ *
+ * - Context tracking if the exception hit user mode.
+ *
+ * - The hardirq tracer to keep the state consistent as low level ASM
+ * entry disabled interrupts.
+ *
+ * As a precondition, this requires that the entry came from user mode,
+ * idle, or a kernel context in which RCU is watching.
+ *
+ * For kernel mode entries RCU handling is done conditionally. If RCU is
+ * watching then the only RCU requirement is to check whether the tick has
+ * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
+ * invoked on entry and rcu_irq_exit() on exit.
+ *
+ * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
+ * solves the problem of kernel mode pagefaults which can schedule, which
+ * is not possible after invoking rcu_irq_enter() without undoing it.
+ *
+ * For user mode entries irqentry_enter_from_user_mode() is invoked to
+ * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
+ * would not be possible.
+ *
+ * Returns: An opaque object that must be passed to irqentry_exit()
+ */
+irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
+ *
+ * Conditional reschedule with additional sanity checks.
+ */
+void irqentry_exit_cond_resched(void);
+
+/**
+ * irqentry_exit - Handle return from exception that used irqentry_enter()
+ * @regs: Pointer to pt_regs (exception entry regs)
+ * @state: Return value from matching call to irqentry_enter()
+ *
+ * Depending on the return target (kernel/user) this runs the necessary
+ * preemption and work checks if possible and required and returns to
+ * the caller with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to irqentry_enter().
+ */
+void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
+
+#endif
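
A minimal sketch of how an architecture's C entry code might glue these helpers
together. The regs->syscall_nr field and the invoke_syscall() dispatcher are
assumptions for illustration only, not part of this header:

/* Hypothetical arch glue for the generic syscall entry/exit helpers. */
noinstr void arch_syscall_entry(struct pt_regs *regs)
{
	long nr = syscall_enter_from_user_mode(regs, regs->syscall_nr);

	instrumentation_begin();
	if (nr >= 0)
		invoke_syscall(regs, nr);	/* arch specific dispatch (assumed) */
	instrumentation_end();

	syscall_exit_to_user_mode(regs);	/* returns with interrupts disabled */
}
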
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
new file mode 100644
index 000000000000..0cef17afb41a
--- /dev/null
+++ b/include/linux/entry-kvm.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_ENTRYKVM_H
+#define __LINUX_ENTRYKVM_H
+
+#include <linux/entry-common.h>
+
+/* Transfer to guest mode work */
+#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+
+#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
+# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
+#endif
+
+#define XFER_TO_GUEST_MODE_WORK \
+ (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
+
+struct kvm_vcpu;
+
+/**
+ * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
+ * mode work handling function.
+ * @vcpu: Pointer to current's VCPU data
+ * @ti_work: Cached TIF flags gathered in xfer_to_guest_mode_handle_work()
+ *
+ * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
+ * replaced by architecture specific code.
+ */
+static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
+ unsigned long ti_work);
+
+#ifndef arch_xfer_to_guest_mode_handle_work
+static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
+ unsigned long ti_work)
+{
+ return 0;
+}
+#endif
+
+/**
+ * xfer_to_guest_mode_handle_work - Check and handle pending work which needs
+ * to be handled before going to guest mode
+ * @vcpu: Pointer to current's VCPU data
+ *
+ * Returns: 0 or an error code
+ */
+int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+
+/**
+ * __xfer_to_guest_mode_work_pending - Check if work is pending
+ *
+ * Returns: True if work pending, False otherwise.
+ *
+ * Bare variant of xfer_to_guest_mode_work_pending(). Can be called from
+ * interrupt enabled code for racy quick checks with care.
+ */
+static inline bool __xfer_to_guest_mode_work_pending(void)
+{
+ unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+
+ return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
+}
+
+/**
+ * xfer_to_guest_mode_work_pending - Check if work is pending which needs to be
+ * handled before returning to guest mode
+ *
+ * Returns: True if work pending, False otherwise.
+ *
+ * Has to be invoked with interrupts disabled before the transition to
+ * guest mode.
+ */
+static inline bool xfer_to_guest_mode_work_pending(void)
+{
+ lockdep_assert_irqs_disabled();
+ return __xfer_to_guest_mode_work_pending();
+}
+#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+
+#endif
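
A sketch of a hypothetical vcpu run loop wired to these helpers, under the
assumption that the pending-work check is done with interrupts disabled right
before entering the guest:

/* Hypothetical run-loop step; guest entry itself is elided. */
static int vcpu_enter_guest_once(struct kvm_vcpu *vcpu)
{
	int ret;

	local_irq_disable();
	while (xfer_to_guest_mode_work_pending()) {
		local_irq_enable();
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (ret)
			return ret;	/* e.g. signal pending: back to userspace */
		local_irq_disable();
	}

	/* ... switch to guest mode with interrupts disabled ... */

	local_irq_enable();
	return 0;
}
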
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index a23b26eab479..969a80211df6 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -86,6 +86,22 @@ struct net_device;
u32 ethtool_op_get_link(struct net_device *dev);
int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+
+/**
+ * struct ethtool_link_ext_state_info - link extended state and substate.
+ */
+struct ethtool_link_ext_state_info {
+ enum ethtool_link_ext_state link_ext_state;
+ union {
+ enum ethtool_link_ext_substate_autoneg autoneg;
+ enum ethtool_link_ext_substate_link_training link_training;
+ enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch;
+ enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
+ enum ethtool_link_ext_substate_cable_issue cable_issue;
+ u8 __link_ext_substate;
+ };
+};
+
/**
* ethtool_rxfh_indir_default - get default value for RX flow hash indirection
* @index: Index in RX flow hash indirection table
@@ -245,6 +261,11 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* @get_link: Report whether physical link is up. Will only be called if
* the netdev is up. Should usually be set to ethtool_op_get_link(),
* which uses netif_carrier_ok().
+ * @get_link_ext_state: Report link extended state. Should set link_ext_state and
+ * link_ext_substate (a link_ext_substate of 0 means the substate is unknown and
+ * no ext_substate attribute is attached to the netlink message). If both
+ * link_ext_state and link_ext_substate are unknown, return -ENODATA. If not
+ * implemented, link_ext_state and link_ext_substate will not be sent to userspace.
* @get_eeprom: Read data from the device EEPROM.
* Should fill in the magic field. Don't need to check len for zero
* or wraparound. Fill in the data argument with the eeprom values
@@ -384,6 +405,8 @@ struct ethtool_ops {
void (*set_msglevel)(struct net_device *, u32);
int (*nway_reset)(struct net_device *);
u32 (*get_link)(struct net_device *);
+ int (*get_link_ext_state)(struct net_device *,
+ struct ethtool_link_ext_state_info *);
int (*get_eeprom_len)(struct net_device *);
int (*get_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
@@ -479,5 +502,37 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd,
u32 *dev_speed, u8 *dev_duplex);
+struct netlink_ext_ack;
+struct phy_device;
+struct phy_tdr_config;
+
+/**
+ * struct ethtool_phy_ops - Optional PHY device options
+ * @get_sset_count: Get number of strings that @get_strings will write.
+ * @get_strings: Return a set of strings that describe the requested objects
+ * @get_stats: Return extended statistics about the PHY device.
+ * @start_cable_test: Start a cable test
+ * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
+ *
+ * All operations are optional (i.e. the function pointer may be set to %NULL)
+ * and callers must take this into account. Callers must hold the RTNL lock.
+ */
+struct ethtool_phy_ops {
+ int (*get_sset_count)(struct phy_device *dev);
+ int (*get_strings)(struct phy_device *dev, u8 *data);
+ int (*get_stats)(struct phy_device *dev,
+ struct ethtool_stats *stats, u64 *data);
+ int (*start_cable_test)(struct phy_device *phydev,
+ struct netlink_ext_ack *extack);
+ int (*start_cable_test_tdr)(struct phy_device *phydev,
+ struct netlink_ext_ack *extack,
+ const struct phy_tdr_config *config);
+};
+
+/**
+ * ethtool_set_ethtool_phy_ops - Set the ethtool_phy_ops singleton
+ * @ops: Ethtool PHY operations to set
+ */
+void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
#endif /* _LINUX_ETHTOOL_H */
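
A sketch of a driver-side @get_link_ext_state implementation. foo_an_failed() is
a made-up helper; the ETHTOOL_LINK_EXT_* values are assumed to be the matching
UAPI enum members:

static int foo_get_link_ext_state(struct net_device *dev,
				  struct ethtool_link_ext_state_info *info)
{
	if (!foo_an_failed(dev))		/* hypothetical device query */
		return -ENODATA;		/* reason for link-down unknown */

	info->link_ext_state = ETHTOOL_LINK_EXT_STATE_AUTONEG;
	info->autoneg = ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED;
	return 0;
}

/* Hooked up as  .get_link_ext_state = foo_get_link_ext_state  in ethtool_ops. */
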
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index d896b8657085..3ceb72b67a7a 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -178,7 +178,7 @@ struct fid {
* get_name:
* @get_name should find a name for the given @child in the given @parent
* directory. The name should be stored in the @name (with the
- * understanding that it is already pointing to a a %NAME_MAX+1 sized
+ * understanding that it is already pointing to a %NAME_MAX+1 sized
* buffer. get_name() should return %0 on success, a negative error code
* or error. @get_name will be called without @parent->i_mutex held.
*
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index b79fa9bb7359..3e9c56ee651f 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -18,8 +18,10 @@
#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \
FAN_CLASS_PRE_CONTENT)
-#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \
- FAN_REPORT_TID | FAN_REPORT_FID | \
+#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME)
+
+#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | FANOTIFY_FID_BITS | \
+ FAN_REPORT_TID | \
FAN_CLOEXEC | FAN_NONBLOCK | \
FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3b4b2f0c6994..850f79e9a7cb 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -400,8 +400,6 @@ struct fb_tile_ops {
#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */
#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */
-#define FBINFO_MISC_USEREVENT 0x10000 /* event request
- from userspace */
#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
/* A driver may set this flag to indicate that it does want a set_par to be
@@ -506,8 +504,9 @@ struct fb_info {
};
static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
- struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct)
- + max_num * sizeof(struct aperture), GFP_KERNEL);
+ struct apertures_struct *a;
+
+ a = kzalloc(struct_size(a, ranges, max_num), GFP_KERNEL);
if (!a)
return NULL;
a->count = max_num;
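
The kzalloc() change above swaps an open-coded size calculation for struct_size(),
which saturates instead of silently wrapping on overflow. A userspace sketch of the
same guard, using only standard C:

#include <stddef.h>
#include <stdint.h>

/* Roughly what struct_size(a, ranges, max_num) protects against. */
static size_t flex_array_size_checked(size_t base, size_t elem, size_t n)
{
	if (n && elem > (SIZE_MAX - base) / n)
		return SIZE_MAX;	/* saturate; the allocation then fails */
	return base + n * elem;
}
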
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index f07c55ea0c22..a32bf47c593e 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -22,6 +22,7 @@
* as this is the granularity returned by copy_fdset().
*/
#define NR_OPEN_DEFAULT BITS_PER_LONG
+#define NR_OPEN_MAX ~0U
struct fdtable {
unsigned int max_fds;
@@ -109,7 +110,7 @@ struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
void reset_files_struct(struct files_struct *);
int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
+struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
@@ -121,7 +122,10 @@ extern void __fd_install(struct files_struct *files,
unsigned int fd, struct file *file);
extern int __close_fd(struct files_struct *files,
unsigned int fd);
+extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
extern int __close_fd_get_file(unsigned int fd, struct file **res);
+extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
+ struct files_struct **new_fdp);
extern struct kmem_cache *files_cachep;
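
dup_fd() now takes a cap on how many descriptors to copy. A sketch of the
unlimited case (the caller itself is hypothetical) passes NR_OPEN_MAX to keep
the old "copy everything" behaviour:

/* Copy the whole descriptor table, as a fork-style caller would. */
static struct files_struct *copy_all_fds(struct files_struct *oldf)
{
	int error;
	struct files_struct *newf = dup_fd(oldf, NR_OPEN_MAX, &error);

	return newf ? newf : ERR_PTR(error);
}
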
diff --git a/include/linux/file.h b/include/linux/file.h
index 122f80084a3e..225982792fa2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -9,6 +9,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/posix_types.h>
+#include <linux/errno.h>
struct file;
@@ -91,6 +92,24 @@ extern void put_unused_fd(unsigned int fd);
extern void fd_install(unsigned int fd, struct file *file);
+extern int __receive_fd(int fd, struct file *file, int __user *ufd,
+ unsigned int o_flags);
+static inline int receive_fd_user(struct file *file, int __user *ufd,
+ unsigned int o_flags)
+{
+ if (ufd == NULL)
+ return -EFAULT;
+ return __receive_fd(-1, file, ufd, o_flags);
+}
+static inline int receive_fd(struct file *file, unsigned int o_flags)
+{
+ return __receive_fd(-1, file, NULL, o_flags);
+}
+static inline int receive_fd_replace(int fd, struct file *file, unsigned int o_flags)
+{
+ return __receive_fd(fd, file, NULL, o_flags);
+}
+
extern void flush_delayed_fput(void);
extern void __fput_sync(struct file *);
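
A sketch of handing an already-opened struct file to the current task with the new
receive_fd() helper; the caller and its error handling are simplified assumptions:

/* Install 'filp' as a new close-on-exec descriptor in the current task. */
static int give_file_to_current(struct file *filp)
{
	int fd = receive_fd(filp, O_CLOEXEC);

	if (fd < 0)
		pr_debug("receive_fd failed: %d\n", fd);
	return fd;
}
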
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0b0144752d78..0a355b005bf4 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -20,6 +20,7 @@
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
+#include <linux/sockptr.h>
#include <crypto/sha.h>
#include <net/sch_generic.h>
@@ -502,13 +503,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
offsetof(TYPE, MEMBER); \
})
-#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
u16 len;
compat_uptr_t filter; /* struct sock_filter * */
};
-#endif
struct sock_fprog_kern {
u16 len;
@@ -534,7 +533,8 @@ struct bpf_prog {
is_func:1, /* program is a bpf function */
kprobe_override:1, /* Do we override a kprobe? */
has_callchain_buf:1, /* callchain buffer allocated? */
- enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */
+ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
+ call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
enum bpf_prog_type type; /* Type of BPF program */
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
@@ -1278,4 +1278,153 @@ struct bpf_sockopt_kern {
s32 retval;
};
+int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
+
+struct bpf_sk_lookup_kern {
+ u16 family;
+ u16 protocol;
+ struct {
+ __be32 saddr;
+ __be32 daddr;
+ } v4;
+ struct {
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ } v6;
+ __be16 sport;
+ u16 dport;
+ struct sock *selected_sk;
+ bool no_reuseport;
+};
+
+extern struct static_key_false bpf_sk_lookup_enabled;
+
+/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
+ *
+ * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
+ * SK_DROP. Their meaning is as follows:
+ *
+ * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
+ * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
+ * SK_DROP : terminate lookup with -ECONNREFUSED
+ *
+ * This macro aggregates return values and selected sockets from
+ * multiple BPF programs according to the following rules, in order:
+ *
+ * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
+ * macro result is SK_PASS and last ctx.selected_sk is used.
+ * 2. If any program returned SK_DROP return value,
+ * macro result is SK_DROP.
+ * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
+ *
+ * Caller must ensure that the prog array is non-NULL, and that the
+ * array as well as the programs it contains remain valid.
+ */
+#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \
+ ({ \
+ struct bpf_sk_lookup_kern *_ctx = &(ctx); \
+ struct bpf_prog_array_item *_item; \
+ struct sock *_selected_sk = NULL; \
+ bool _no_reuseport = false; \
+ struct bpf_prog *_prog; \
+ bool _all_pass = true; \
+ u32 _ret; \
+ \
+ migrate_disable(); \
+ _item = &(array)->items[0]; \
+ while ((_prog = READ_ONCE(_item->prog))) { \
+ /* restore most recent selection */ \
+ _ctx->selected_sk = _selected_sk; \
+ _ctx->no_reuseport = _no_reuseport; \
+ \
+ _ret = func(_prog, _ctx); \
+ if (_ret == SK_PASS && _ctx->selected_sk) { \
+ /* remember last non-NULL socket */ \
+ _selected_sk = _ctx->selected_sk; \
+ _no_reuseport = _ctx->no_reuseport; \
+ } else if (_ret == SK_DROP && _all_pass) { \
+ _all_pass = false; \
+ } \
+ _item++; \
+ } \
+ _ctx->selected_sk = _selected_sk; \
+ _ctx->no_reuseport = _no_reuseport; \
+ migrate_enable(); \
+ _all_pass || _selected_sk ? SK_PASS : SK_DROP; \
+ })
+
+static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const u16 dport,
+ struct sock **psk)
+{
+ struct bpf_prog_array *run_array;
+ struct sock *selected_sk = NULL;
+ bool no_reuseport = false;
+
+ rcu_read_lock();
+ run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+ if (run_array) {
+ struct bpf_sk_lookup_kern ctx = {
+ .family = AF_INET,
+ .protocol = protocol,
+ .v4.saddr = saddr,
+ .v4.daddr = daddr,
+ .sport = sport,
+ .dport = dport,
+ };
+ u32 act;
+
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+ if (act == SK_PASS) {
+ selected_sk = ctx.selected_sk;
+ no_reuseport = ctx.no_reuseport;
+ } else {
+ selected_sk = ERR_PTR(-ECONNREFUSED);
+ }
+ }
+ rcu_read_unlock();
+ *psk = selected_sk;
+ return no_reuseport;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 dport,
+ struct sock **psk)
+{
+ struct bpf_prog_array *run_array;
+ struct sock *selected_sk = NULL;
+ bool no_reuseport = false;
+
+ rcu_read_lock();
+ run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+ if (run_array) {
+ struct bpf_sk_lookup_kern ctx = {
+ .family = AF_INET6,
+ .protocol = protocol,
+ .v6.saddr = saddr,
+ .v6.daddr = daddr,
+ .sport = sport,
+ .dport = dport,
+ };
+ u32 act;
+
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+ if (act == SK_PASS) {
+ selected_sk = ctx.selected_sk;
+ no_reuseport = ctx.no_reuseport;
+ } else {
+ selected_sk = ERR_PTR(-ECONNREFUSED);
+ }
+ }
+ rcu_read_unlock();
+ *psk = selected_sk;
+ return no_reuseport;
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
#endif /* __LINUX_FILTER_H__ */
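
A sketch of a protocol lookup path consulting the SK_LOOKUP hook before falling
back to the regular lookup. htable_lookup() is a stand-in, not a real kernel
function, and the reuseport handling is only hinted at:

static struct sock *inet_lookup_sketch(struct net *net, __be32 saddr,
				       __be16 sport, __be32 daddr, u16 hnum)
{
	struct sock *sk;
	bool no_reuseport;

	no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport,
					    daddr, hnum, &sk);
	if (IS_ERR(sk))
		return NULL;		/* SK_DROP: terminate the lookup */
	if (sk)
		return sk;		/* BPF selected a socket; no_reuseport
					 * says whether reuseport selection
					 * would be skipped in real code */
	return htable_lookup(net, saddr, sport, daddr, hnum);
}
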
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 3fa418a4ca67..22c76571a294 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -14,9 +14,11 @@
#include <linux/firmware/imx/svc/misc.h>
#include <linux/firmware/imx/svc/pm.h>
+#include <linux/firmware/imx/svc/rm.h>
int imx_scu_enable_general_irq_channel(struct device *dev);
int imx_scu_irq_register_notifier(struct notifier_block *nb);
int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_soc_init(struct device *dev);
#endif /* _SC_SCI_H */
diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h
new file mode 100644
index 000000000000..456b6a59d29b
--- /dev/null
+++ b/include/linux/firmware/imx/svc/rm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2020 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Resource Management (RM) function. This includes functions for
+ * partitioning resources, pads, and memory regions.
+ *
+ * RM_SVC (SVC) Resource Management Service
+ *
+ * Module for the Resource Management (RM) service.
+ */
+
+#ifndef _SC_RM_API_H
+#define _SC_RM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC RM function calls.
+ */
+enum imx_sc_rm_func {
+ IMX_SC_RM_FUNC_UNKNOWN = 0,
+ IMX_SC_RM_FUNC_PARTITION_ALLOC = 1,
+ IMX_SC_RM_FUNC_SET_CONFIDENTIAL = 31,
+ IMX_SC_RM_FUNC_PARTITION_FREE = 2,
+ IMX_SC_RM_FUNC_GET_DID = 26,
+ IMX_SC_RM_FUNC_PARTITION_STATIC = 3,
+ IMX_SC_RM_FUNC_PARTITION_LOCK = 4,
+ IMX_SC_RM_FUNC_GET_PARTITION = 5,
+ IMX_SC_RM_FUNC_SET_PARENT = 6,
+ IMX_SC_RM_FUNC_MOVE_ALL = 7,
+ IMX_SC_RM_FUNC_ASSIGN_RESOURCE = 8,
+ IMX_SC_RM_FUNC_SET_RESOURCE_MOVABLE = 9,
+ IMX_SC_RM_FUNC_SET_SUBSYS_RSRC_MOVABLE = 28,
+ IMX_SC_RM_FUNC_SET_MASTER_ATTRIBUTES = 10,
+ IMX_SC_RM_FUNC_SET_MASTER_SID = 11,
+ IMX_SC_RM_FUNC_SET_PERIPHERAL_PERMISSIONS = 12,
+ IMX_SC_RM_FUNC_IS_RESOURCE_OWNED = 13,
+ IMX_SC_RM_FUNC_GET_RESOURCE_OWNER = 33,
+ IMX_SC_RM_FUNC_IS_RESOURCE_MASTER = 14,
+ IMX_SC_RM_FUNC_IS_RESOURCE_PERIPHERAL = 15,
+ IMX_SC_RM_FUNC_GET_RESOURCE_INFO = 16,
+ IMX_SC_RM_FUNC_MEMREG_ALLOC = 17,
+ IMX_SC_RM_FUNC_MEMREG_SPLIT = 29,
+ IMX_SC_RM_FUNC_MEMREG_FRAG = 32,
+ IMX_SC_RM_FUNC_MEMREG_FREE = 18,
+ IMX_SC_RM_FUNC_FIND_MEMREG = 30,
+ IMX_SC_RM_FUNC_ASSIGN_MEMREG = 19,
+ IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS = 20,
+ IMX_SC_RM_FUNC_IS_MEMREG_OWNED = 21,
+ IMX_SC_RM_FUNC_GET_MEMREG_INFO = 22,
+ IMX_SC_RM_FUNC_ASSIGN_PAD = 23,
+ IMX_SC_RM_FUNC_SET_PAD_MOVABLE = 24,
+ IMX_SC_RM_FUNC_IS_PAD_OWNED = 25,
+ IMX_SC_RM_FUNC_DUMP = 27,
+};
+
+#if IS_ENABLED(CONFIG_IMX_SCU)
+bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
+#else
+static inline bool
+imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
+{
+ return true;
+}
+#endif
+#endif
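
A sketch of a consumer using imx_sc_rm_is_resource_owned() to skip resources owned
by another partition; IMX_SC_R_EXAMPLE is a placeholder resource ID, not a real one:

static int example_check_ownership(struct imx_sc_ipc *ipc)
{
	if (!imx_sc_rm_is_resource_owned(ipc, IMX_SC_R_EXAMPLE))
		return -ENODEV;		/* owned by another partition */
	/* ... continue with device setup ... */
	return 0;
}
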
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index 682dbf694007..c3e5ab014caf 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -361,3 +361,46 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER 15
#define INTEL_SIP_SMC_RSU_RETRY_COUNTER \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_DCMF_VERSION
+ *
+ * Sync call used by service driver at EL1 to query DCMF (Decision
+ * Configuration Management Firmware) version from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_DCMF_VERSION
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 dcmf1 | dcmf0
+ * a2 dcmf3 | dcmf2
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION 16
+#define INTEL_SIP_SMC_RSU_DCMF_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_MAX_RETRY
+ *
+ * Sync call used by service driver at EL1 to query max retry value from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_MAX_RETRY
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 max retry value
+ *
+ * Or
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18
+#define INTEL_SIP_SMC_RSU_MAX_RETRY \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY)
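
Assuming each DCMF version occupies 32 bits of the 64-bit return registers
described above (a1 carrying dcmf1|dcmf0, a2 carrying dcmf3|dcmf2), a caller
might unpack them as follows; the helper itself is hypothetical:

static void rsu_unpack_dcmf(unsigned long a1, unsigned long a2, u32 ver[4])
{
	ver[0] = lower_32_bits(a1);	/* dcmf0 */
	ver[1] = upper_32_bits(a1);	/* dcmf1 */
	ver[2] = lower_32_bits(a2);	/* dcmf2 */
	ver[3] = upper_32_bits(a2);	/* dcmf3 */
}
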
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 64213c3e82f5..a93d85932eb9 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -54,18 +54,17 @@
* Flag bit for COMMAND_RECONFIG
*
* COMMAND_RECONFIG_FLAG_PARTIAL:
- * Set to FPGA configuration type (full or partial), the default
- * is full reconfig.
+ * Set to FPGA configuration type (full or partial).
*/
-#define COMMAND_RECONFIG_FLAG_PARTIAL 0
+#define COMMAND_RECONFIG_FLAG_PARTIAL 1
/**
* Timeout settings for service clients:
* timeout value used in Stratix10 FPGA manager driver.
* timeout value used in RSU driver
*/
-#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 100
-#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 240
+#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300
+#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720
#define SVC_RSU_REQUEST_TIMEOUT_MS 300
struct stratix10_svc_chan;
@@ -99,6 +98,12 @@ struct stratix10_svc_chan;
*
* @COMMAND_RSU_RETRY: query firmware for the current image's retry counter,
* return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_MAX_RETRY: query firmware for the max retry value,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
*/
enum stratix10_svc_command_code {
COMMAND_NOOP = 0,
@@ -110,6 +115,8 @@ enum stratix10_svc_command_code {
COMMAND_RSU_UPDATE,
COMMAND_RSU_NOTIFY,
COMMAND_RSU_RETRY,
+ COMMAND_RSU_MAX_RETRY,
+ COMMAND_RSU_DCMF_VERSION,
};
/**
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 21f5aa0b217f..27828145ca09 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -207,6 +207,17 @@ static inline long freezable_schedule_timeout_interruptible(long timeout)
return __retval;
}
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
+{
+ long __retval;
+
+ freezer_do_not_count();
+ __retval = schedule_timeout_interruptible(timeout);
+ freezer_count_unsafe();
+ return __retval;
+}
+
/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
@@ -285,6 +296,9 @@ static inline void set_freezable(void) {}
#define freezable_schedule_timeout_interruptible(timeout) \
schedule_timeout_interruptible(timeout)
+#define freezable_schedule_timeout_interruptible_unsafe(timeout) \
+ schedule_timeout_interruptible(timeout)
+
#define freezable_schedule_timeout_killable(timeout) \
schedule_timeout_killable(timeout)
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 6d775984905b..b07d88c92bb2 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -10,7 +10,7 @@
/*
* Return code to denote that requested number of
* frontswap pages are unused(moved to page cache).
- * Used in in shmem_unuse and try_to_unuse.
+ * Used in shmem_unuse and try_to_unuse.
*/
#define FRONTSWAP_PAGES_UNUSED 2
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f5abba86107d..e019ea2f1347 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -175,6 +175,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File does not contribute to nr_files count */
#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
+/* File supports async buffered reads */
+#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000)
+
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
* that indicates that they should check the contents of the iovec are
@@ -315,6 +318,8 @@ enum rw_hint {
#define IOCB_SYNC (1 << 5)
#define IOCB_WRITE (1 << 6)
#define IOCB_NOWAIT (1 << 7)
+/* iocb->ki_waitq is valid */
+#define IOCB_WAITQ (1 << 8)
#define IOCB_NOIO (1 << 9)
struct kiocb {
@@ -329,7 +334,10 @@ struct kiocb {
int ki_flags;
u16 ki_hint;
u16 ki_ioprio; /* See linux/ioprio.h */
- unsigned int ki_cookie; /* for ->iopoll */
+ union {
+ unsigned int ki_cookie; /* for ->iopoll */
+ struct wait_page_queue *ki_waitq; /* for async buffered IO */
+ };
randomized_struct_fields_end
};
@@ -471,45 +479,6 @@ struct address_space {
* must be enforced here for CRIS, to let the least significant bit
* of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
*/
-struct request_queue;
-
-struct block_device {
- dev_t bd_dev; /* not a kdev_t - it's a search key */
- int bd_openers;
- struct inode * bd_inode; /* will die */
- struct super_block * bd_super;
- struct mutex bd_mutex; /* open/close mutex */
- void * bd_claiming;
- void * bd_holder;
- int bd_holders;
- bool bd_write_holder;
-#ifdef CONFIG_SYSFS
- struct list_head bd_holder_disks;
-#endif
- struct block_device * bd_contains;
- unsigned bd_block_size;
- u8 bd_partno;
- struct hd_struct * bd_part;
- /* number of times partitions within this device have been opened. */
- unsigned bd_part_count;
- int bd_invalidated;
- struct gendisk * bd_disk;
- struct request_queue * bd_queue;
- struct backing_dev_info *bd_bdi;
- struct list_head bd_list;
- /*
- * Private data. You must have bd_claim'ed the block_device
- * to use this. NOTE: bd_claim allows an owner to claim
- * the same device multiple times, the owner must take special
- * care to not mess up bd_private for that case.
- */
- unsigned long bd_private;
-
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
-} __randomize_layout;
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
#define PAGECACHE_TAG_DIRTY XA_MARK_0
@@ -549,6 +518,16 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
up_read(&mapping->i_mmap_rwsem);
}
+static inline void i_mmap_assert_locked(struct address_space *mapping)
+{
+ lockdep_assert_held(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_assert_write_locked(struct address_space *mapping)
+{
+ lockdep_assert_held_write(&mapping->i_mmap_rwsem);
+}
+
/*
* Might pages of this file be mapped into userspace?
*/
@@ -559,7 +538,7 @@ static inline int mapping_mapped(struct address_space *mapping)
/*
* Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+ * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
@@ -908,8 +887,6 @@ static inline unsigned imajor(const struct inode *inode)
return MAJOR(inode->i_rdev);
}
-extern struct block_device *I_BDEV(struct inode *inode);
-
struct fown_struct {
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
@@ -1381,6 +1358,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_NODIRATIME 2048 /* Do not update directory access times */
#define SB_SILENT 32768
#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */
#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define SB_I_VERSION (1<<23) /* Update inode I_version field */
#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
@@ -1744,6 +1722,10 @@ int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
void *);
+int vfs_fchown(struct file *file, uid_t user, gid_t group);
+int vfs_fchmod(struct file *file, umode_t mode);
+int vfs_utimes(const struct path *path, struct timespec64 *times);
+
extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
@@ -1775,14 +1757,6 @@ struct dir_context {
loff_t pos;
};
-struct block_device_operations;
-
-/* These macros are for out of kernel modules to test that
- * the kernel supports the unlocked_ioctl and compat_ioctl
- * fields in struct file_operations. */
-#define HAVE_COMPAT_IOCTL 1
-#define HAVE_UNLOCKED_IOCTL 1
-
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
@@ -1982,27 +1956,27 @@ struct super_operations {
/*
* Inode flags - they have no relation to superblock flags now
*/
-#define S_SYNC 1 /* Writes are synced at once */
-#define S_NOATIME 2 /* Do not update access times */
-#define S_APPEND 4 /* Append-only file */
-#define S_IMMUTABLE 8 /* Immutable file */
-#define S_DEAD 16 /* removed, but still open directory */
-#define S_NOQUOTA 32 /* Inode is not counted to quota */
-#define S_DIRSYNC 64 /* Directory modifications are synchronous */
-#define S_NOCMTIME 128 /* Do not update file c/mtime */
-#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
-#define S_PRIVATE 512 /* Inode is fs-internal */
-#define S_IMA 1024 /* Inode has an associated IMA struct */
-#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
-#define S_NOSEC 4096 /* no suid or xattr security attributes */
+#define S_SYNC (1 << 0) /* Writes are synced at once */
+#define S_NOATIME (1 << 1) /* Do not update access times */
+#define S_APPEND (1 << 2) /* Append-only file */
+#define S_IMMUTABLE (1 << 3) /* Immutable file */
+#define S_DEAD (1 << 4) /* removed, but still open directory */
+#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */
+#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */
+#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */
+#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */
+#define S_PRIVATE (1 << 9) /* Inode is fs-internal */
+#define S_IMA (1 << 10) /* Inode has an associated IMA struct */
+#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */
+#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */
#ifdef CONFIG_FS_DAX
-#define S_DAX 8192 /* Direct Access, avoiding the page cache */
+#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */
#else
-#define S_DAX 0 /* Make all the DAX code disappear */
+#define S_DAX 0 /* Make all the DAX code disappear */
#endif
-#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */
-#define S_CASEFOLD 32768 /* Casefolded file */
-#define S_VERITY 65536 /* Verity file (using fs/verity/) */
+#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
+#define S_CASEFOLD (1 << 15) /* Casefolded file */
+#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -2264,18 +2238,9 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-#ifdef CONFIG_BLOCK
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
-#else
-static inline struct dentry *mount_bdev(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int))
-{
- return ERR_PTR(-ENODEV);
-}
-#endif
extern struct dentry *mount_single(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -2284,14 +2249,7 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type,
int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
void generic_shutdown_super(struct super_block *sb);
-#ifdef CONFIG_BLOCK
void kill_block_super(struct super_block *sb);
-#else
-static inline void kill_block_super(struct super_block *sb)
-{
- BUG();
-}
-#endif
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
@@ -2581,93 +2539,16 @@ extern struct kmem_cache *names_cachep;
#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-#ifdef CONFIG_BLOCK
-extern int register_blkdev(unsigned int, const char *);
-extern void unregister_blkdev(unsigned int, const char *);
-extern struct block_device *bdget(dev_t);
-extern struct block_device *bdgrab(struct block_device *bdev);
-extern void bd_set_size(struct block_device *, loff_t size);
-extern void bd_forget(struct inode *inode);
-extern void bdput(struct block_device *);
-extern void invalidate_bdev(struct block_device *);
-extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
-extern int sync_blockdev(struct block_device *bdev);
-extern struct super_block *freeze_bdev(struct block_device *);
-extern void emergency_thaw_all(void);
-extern void emergency_thaw_bdev(struct super_block *sb);
-extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
-extern int fsync_bdev(struct block_device *);
-
extern struct super_block *blockdev_superblock;
-
static inline bool sb_is_blkdev_sb(struct super_block *sb)
{
- return sb == blockdev_superblock;
-}
-#else
-static inline void bd_forget(struct inode *inode) {}
-static inline int sync_blockdev(struct block_device *bdev) { return 0; }
-static inline void invalidate_bdev(struct block_device *bdev) {}
-
-static inline struct super_block *freeze_bdev(struct block_device *sb)
-{
- return NULL;
-}
-
-static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
- return 0;
+ return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
}
-static inline int emergency_thaw_bdev(struct super_block *sb)
-{
- return 0;
-}
-
-static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
-{
-}
-
-static inline bool sb_is_blkdev_sb(struct super_block *sb)
-{
- return false;
-}
-#endif
+void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
-#ifdef CONFIG_BLOCK
-extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
-extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
-extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder);
-extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
- void *holder);
-extern struct block_device *bd_start_claiming(struct block_device *bdev,
- void *holder);
-extern void bd_finish_claiming(struct block_device *bdev,
- struct block_device *whole, void *holder);
-extern void bd_abort_claiming(struct block_device *bdev,
- struct block_device *whole, void *holder);
-extern void blkdev_put(struct block_device *bdev, fmode_t mode);
-
-#ifdef CONFIG_SYSFS
-extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
-extern void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk);
-#else
-static inline int bd_link_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
- return 0;
-}
-static inline void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
-}
-#endif
-#endif
/* fs/char_dev.c */
#define CHRDEV_MAJOR_MAX 512
@@ -2698,31 +2579,12 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
__unregister_chrdev(major, 0, 256, name);
}
-/* fs/block_dev.c */
-#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
-#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
-
-#ifdef CONFIG_BLOCK
-#define BLKDEV_MAJOR_MAX 512
-extern const char *bdevname(struct block_device *bdev, char *buffer);
-extern struct block_device *lookup_bdev(const char *);
-extern void blkdev_show(struct seq_file *,off_t);
-
-#else
-#define BLKDEV_MAJOR_MAX 0
-#endif
-
extern void init_special_inode(struct inode *, umode_t, dev_t);
/* Invalid inode operations -- fs/bad_inode.c */
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
-#ifdef CONFIG_BLOCK
-extern int revalidate_disk(struct gendisk *);
-extern int check_disk_change(struct block_device *);
-extern int __invalidate_device(struct block_device *, bool);
-#endif
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end);
@@ -2798,7 +2660,7 @@ static inline void filemap_set_wb_err(struct address_space *mapping, int err)
}
/**
- * filemap_check_wb_error - has an error occurred since the mark was sampled?
+ * filemap_check_wb_err - has an error occurred since the mark was sampled?
* @mapping: mapping to check for writeback errors
* @since: previously-sampled errseq_t
*
@@ -2827,7 +2689,7 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
/**
* file_sample_sb_err - sample the current errseq_t to test for later errors
- * @mapping: mapping to be sampled
+ * @file: file pointer to be sampled
*
* Grab the most current superblock-level errseq_t value for the given
* struct file.
@@ -3098,6 +2960,21 @@ extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
+/*
+ * Userspace may rely on the inode number being non-zero. For example, glibc
+ * simply ignores files with zero i_ino in unlink() and other places.
+ *
+ * As an additional complication, if userspace was compiled with
+ * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the
+ * lower 32 bits, so we need to check that those aren't zero explicitly. With
+ * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but
+ * better safe than sorry.
+ */
+static inline bool is_zero_ino(ino_t ino)
+{
+ return (u32)ino == 0;
+}
+
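A brief sketch of the kind of caller the comment above has in mind; the fs_alloc_ino() helper and its atomic counter are hypothetical, not part of this patch.

/* Hypothetical inode-number allocator that never hands out a "zero" ino. */
static ino_t fs_alloc_ino(atomic64_t *counter)
{
	ino_t ino;

	do {
		ino = (ino_t)atomic64_inc_return(counter);
	} while (is_zero_ino(ino));	/* the low 32 bits must not be 0 either */

	return ino;
}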
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
@@ -3123,10 +3000,6 @@ static inline void remove_inode_hash(struct inode *inode)
extern void inode_sb_list_add(struct inode *inode);
-#ifdef CONFIG_BLOCK
-extern int bdev_read_only(struct block_device *);
-#endif
-extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
@@ -3439,22 +3312,28 @@ static inline int iocb_flags(struct file *file)
static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
+ int kiocb_flags = 0;
+
+ if (!flags)
+ return 0;
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
if (flags & RWF_NOWAIT) {
if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- ki->ki_flags |= IOCB_NOWAIT;
+ kiocb_flags |= IOCB_NOWAIT | IOCB_NOIO;
}
if (flags & RWF_HIPRI)
- ki->ki_flags |= IOCB_HIPRI;
+ kiocb_flags |= IOCB_HIPRI;
if (flags & RWF_DSYNC)
- ki->ki_flags |= IOCB_DSYNC;
+ kiocb_flags |= IOCB_DSYNC;
if (flags & RWF_SYNC)
- ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+ kiocb_flags |= (IOCB_DSYNC | IOCB_SYNC);
if (flags & RWF_APPEND)
- ki->ki_flags |= IOCB_APPEND;
+ kiocb_flags |= IOCB_APPEND;
+
+ ki->ki_flags |= kiocb_flags;
return 0;
}
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index cf1015abfbf2..783b48dedb72 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -9,7 +9,7 @@
struct fs_struct {
int users;
spinlock_t lock;
- seqcount_t seq;
+ seqcount_spinlock_t seq;
int umask;
int in_exec;
struct path root, pwd;
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 2862ca5fea33..991ff8575d0e 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -69,12 +69,20 @@ struct fscrypt_operations {
bool (*has_stable_inodes)(struct super_block *sb);
void (*get_ino_and_lblk_bits)(struct super_block *sb,
int *ino_bits_ret, int *lblk_bits_ret);
+ int (*get_num_devices)(struct super_block *sb);
+ void (*get_devices)(struct super_block *sb,
+ struct request_queue **devs);
};
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
{
- /* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */
- return READ_ONCE(inode->i_crypt_info) != NULL;
+ /*
+ * Pairs with the cmpxchg_release() in fscrypt_get_encryption_info().
+ * I.e., another task may publish ->i_crypt_info concurrently, executing
+ * a RELEASE barrier. We need to use smp_load_acquire() here to safely
+ * ACQUIRE the memory the other task published.
+ */
+ return smp_load_acquire(&inode->i_crypt_info);
}
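As a hedged illustration of the publish/lookup pairing the comment describes (not the actual fscrypt code), with foo_info, foo_setup() and obj->info standing in for the real names:

/* Writer side: initialize fully, then publish with a RELEASE barrier. */
static int foo_publish_info(struct foo_obj *obj)
{
	struct foo_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;
	foo_setup(info);
	/* cmpxchg_release(): foo_setup()'s stores are visible before the pointer */
	if (cmpxchg_release(&obj->info, NULL, info) != NULL)
		kfree(info);		/* another task published first */
	return 0;
}

/* Reader side: smp_load_acquire() pairs with the cmpxchg_release() above. */
static struct foo_info *foo_get_info(const struct foo_obj *obj)
{
	return smp_load_acquire(&obj->info);
}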
/**
@@ -231,9 +239,9 @@ static inline void fscrypt_set_ops(struct super_block *sb,
}
#else /* !CONFIG_FS_ENCRYPTION */
-static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
{
- return false;
+ return NULL;
}
static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
@@ -537,6 +545,99 @@ static inline void fscrypt_set_ops(struct super_block *sb,
#endif /* !CONFIG_FS_ENCRYPTION */
+/* inline_crypt.c */
+#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
+
+bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode);
+
+void fscrypt_set_bio_crypt_ctx(struct bio *bio,
+ const struct inode *inode, u64 first_lblk,
+ gfp_t gfp_mask);
+
+void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
+ const struct buffer_head *first_bh,
+ gfp_t gfp_mask);
+
+bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
+ u64 next_lblk);
+
+bool fscrypt_mergeable_bio_bh(struct bio *bio,
+ const struct buffer_head *next_bh);
+
+#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
+
+static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
+{
+ return false;
+}
+
+static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
+ const struct inode *inode,
+ u64 first_lblk, gfp_t gfp_mask) { }
+
+static inline void fscrypt_set_bio_crypt_ctx_bh(
+ struct bio *bio,
+ const struct buffer_head *first_bh,
+ gfp_t gfp_mask) { }
+
+static inline bool fscrypt_mergeable_bio(struct bio *bio,
+ const struct inode *inode,
+ u64 next_lblk)
+{
+ return true;
+}
+
+static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
+ const struct buffer_head *next_bh)
+{
+ return true;
+}
+#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
+
+/**
+ * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline
+ * encryption
+ * @inode: an inode. If encrypted, its key must be set up.
+ *
+ * Return: true if the inode requires file contents encryption and if the
+ * encryption should be done in the block layer via blk-crypto rather
+ * than in the filesystem layer.
+ */
+static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode)
+{
+ return fscrypt_needs_contents_encryption(inode) &&
+ __fscrypt_inode_uses_inline_crypto(inode);
+}
+
+/**
+ * fscrypt_inode_uses_fs_layer_crypto() - test whether an inode uses fs-layer
+ * encryption
+ * @inode: an inode. If encrypted, its key must be set up.
+ *
+ * Return: true if the inode requires file contents encryption and if the
+ * encryption should be done in the filesystem layer rather than in the
+ * block layer via blk-crypto.
+ */
+static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
+{
+ return fscrypt_needs_contents_encryption(inode) &&
+ !__fscrypt_inode_uses_inline_crypto(inode);
+}
+
+/**
+ * fscrypt_has_encryption_key() - check whether an inode has had its key set up
+ * @inode: the inode to check
+ *
+ * Return: %true if the inode has had its encryption key set up, else %false.
+ *
+ * Usually this should be preceded by fscrypt_get_encryption_info() to try to
+ * set up the key first.
+ */
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return fscrypt_get_info(inode) != NULL;
+}
+
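A rough sketch of the write-path decision these helpers enable; fs_submit_data_bio() and fs_encrypt_in_place() are made-up names, not part of this API.

/* Hypothetical data write path: pick blk-crypto or fs-layer encryption. */
static void fs_submit_data_bio(struct bio *bio, struct inode *inode,
			       u64 first_lblk)
{
	if (fscrypt_inode_uses_inline_crypto(inode)) {
		/* blk-crypto encrypts; attach the key/IV context to the bio */
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOIO);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		/* encrypt the payload in the filesystem before submission */
		fs_encrypt_in_place(bio, inode);	/* hypothetical helper */
	}
	submit_bio(bio);
}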
/**
* fscrypt_require_key() - require an inode's encryption key
* @inode: the inode we need the key for
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
index 4875dd38af7e..2d9203314865 100644
--- a/include/linux/fsl/enetc_mdio.h
+++ b/include/linux/fsl/enetc_mdio.h
@@ -15,6 +15,7 @@
#define ENETC_PCS_IF_MODE_SGMII_EN BIT(0)
#define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1)
#define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+#define ENETC_PCS_IF_MODE_DUPLEX_HALF BIT(3)
/* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset
* Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 2b5f8366dbe1..a428c61ead6e 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -339,7 +339,7 @@ struct fsl_mc_io {
* This field is only meaningful if the
* FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
*/
- spinlock_t spinlock; /* serializes mc_send_command() */
+ raw_spinlock_t spinlock; /* serializes mc_send_command() */
};
};
@@ -433,6 +433,11 @@ extern struct device_type fsl_mc_bus_dpmcp_type;
extern struct device_type fsl_mc_bus_dpmac_type;
extern struct device_type fsl_mc_bus_dprtc_type;
extern struct device_type fsl_mc_bus_dpseci_type;
+extern struct device_type fsl_mc_bus_dpdmux_type;
+extern struct device_type fsl_mc_bus_dpdcei_type;
+extern struct device_type fsl_mc_bus_dpaiop_type;
+extern struct device_type fsl_mc_bus_dpci_type;
+extern struct device_type fsl_mc_bus_dpdmai_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
@@ -454,6 +459,11 @@ static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dpsw_type;
}
+static inline bool is_fsl_mc_bus_dpdmux(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmux_type;
+}
+
static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev)
{
return mc_dev->dev.type == &fsl_mc_bus_dpbp_type;
@@ -484,6 +494,26 @@ static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
}
+static inline bool is_fsl_mc_bus_dpdcei(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdcei_type;
+}
+
+static inline bool is_fsl_mc_bus_dpaiop(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpaiop_type;
+}
+
+static inline bool is_fsl_mc_bus_dpci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpci_type;
+}
+
+static inline bool is_fsl_mc_bus_dpdmai(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type;
+}
+
/*
* Data Path Buffer Pool (DPBP) API
* Contains initialization APIs and runtime control APIs for DPBP
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 5ab28f6c7d26..f8acddcf54fb 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -23,19 +23,14 @@
* have changed (i.e. renamed over).
*
* Unlike fsnotify_parent(), the event will be reported regardless of the
- * FS_EVENT_ON_CHILD mask on the parent inode.
+ * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only
+ * the child is interested and not the parent.
*/
static inline void fsnotify_name(struct inode *dir, __u32 mask,
struct inode *child,
const struct qstr *name, u32 cookie)
{
- fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie);
- /*
- * Send another flavor of the event without child inode data and
- * without the specific event type (e.g. FS_CREATE|FS_IS_DIR).
- * The name is relative to the dir inode the event is reported to.
- */
- fsnotify(dir, FS_DIR_MODIFY, dir, FSNOTIFY_EVENT_INODE, name, 0);
+ fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie);
}
static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
@@ -44,38 +39,55 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0);
}
-/*
- * Simple wrappers to consolidate calls fsnotify_parent()/fsnotify() when
- * an event is on a file/dentry.
- */
-static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
+static inline void fsnotify_inode(struct inode *inode, __u32 mask)
+{
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0);
+}
+
+/* Notify this dentry's parent about a child's events. */
+static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
+ const void *data, int data_type)
{
struct inode *inode = d_inode(dentry);
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode)) {
mask |= FS_ISDIR;
- fsnotify_parent(dentry, mask, inode, FSNOTIFY_EVENT_INODE);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ /* sb/mount marks are not interested in name of directory */
+ if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
+ goto notify_child;
+ }
+
+ /* disconnected dentry cannot notify parent */
+ if (IS_ROOT(dentry))
+ goto notify_child;
+
+ return __fsnotify_parent(dentry, mask, data, data_type);
+
+notify_child:
+ return fsnotify(mask, data, data_type, NULL, NULL, inode, 0);
+}
+
+/*
+ * Simple wrappers to consolidate calls to fsnotify_parent() when an event
+ * is on a file/dentry.
+ */
+static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
+{
+ fsnotify_parent(dentry, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE);
}
static inline int fsnotify_file(struct file *file, __u32 mask)
{
const struct path *path = &file->f_path;
- struct inode *inode = file_inode(file);
- int ret;
if (file->f_mode & FMODE_NONOTIFY)
return 0;
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- ret = fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
- if (ret)
- return ret;
-
- return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
/* Simple call site for access decisions */
@@ -108,12 +120,7 @@ static inline int fsnotify_perm(struct file *file, int mask)
*/
static inline void fsnotify_link_count(struct inode *inode)
{
- __u32 mask = FS_ATTRIB;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(inode, FS_ATTRIB);
}
/*
@@ -128,7 +135,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = FS_MOVED_FROM;
__u32 new_dir_mask = FS_MOVED_TO;
- __u32 mask = FS_MOVE_SELF;
const struct qstr *new_name = &moved->d_name;
if (old_dir == new_dir)
@@ -137,7 +143,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
if (isdir) {
old_dir_mask |= FS_ISDIR;
new_dir_mask |= FS_ISDIR;
- mask |= FS_ISDIR;
}
fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie);
@@ -145,9 +150,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
if (target)
fsnotify_link_count(target);
-
- if (source)
- fsnotify(source, mask, source, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(source, FS_MOVE_SELF);
audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE);
}
@@ -172,12 +175,7 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
*/
static inline void fsnotify_inoderemove(struct inode *inode)
{
- __u32 mask = FS_DELETE_SELF;
-
- if (S_ISDIR(inode->i_mode))
- mask |= FS_ISDIR;
-
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ fsnotify_inode(inode, FS_DELETE_SELF);
__fsnotify_inode_delete(inode);
}
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index f0c506405b54..f8529a3a2923 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -47,11 +47,13 @@
#define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
-#define FS_DIR_MODIFY 0x00080000 /* Directory entry was modified */
#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
-/* This inode cares about things that happen to its children. Always set for
- * dnotify and inotify. */
+/*
+ * Set on inode mark that cares about things that happen to its children.
+ * Always set for dnotify and inotify.
+ * Set on inode/sb/mount marks that care about parent/name info.
+ */
#define FS_EVENT_ON_CHILD 0x08000000
#define FS_DN_RENAME 0x10000000 /* file renamed */
@@ -67,21 +69,28 @@
* The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
* when a directory entry inside a child subdir changes.
*/
-#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | \
- FS_DIR_MODIFY)
+#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE)
#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
FS_OPEN_EXEC_PERM)
/*
- * This is a list of all events that may get sent to a parent based on fs event
- * happening to inodes inside that directory.
+ * This is a list of all events that may get sent to a parent that is watching
+ * with flag FS_EVENT_ON_CHILD based on fs event on a child of that directory.
*/
#define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \
FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \
FS_OPEN | FS_OPEN_EXEC)
+/*
+ * This is a list of all events that may get sent with the parent inode as the
+ * @to_tell argument of fsnotify().
+ * It may include events that can be sent to an inode/sb/mount mark, but cannot
+ * be sent to a parent watching children.
+ */
+#define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD)
+
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
FS_EVENTS_POSS_ON_CHILD | \
@@ -108,18 +117,41 @@ struct mem_cgroup;
* these operations for each relevant group.
*
* handle_event - main call for a group to handle an fs event
+ * @group: group to notify
+ * @mask: event type and flags
+ * @data: object that event happened on
+ * @data_type: type of object for fanotify_data_XXX() accessors
+ * @dir: optional directory associated with event -
+ * if @file_name is not NULL, this is the directory that
+ * @file_name is relative to
+ * @file_name: optional file name associated with event
+ * @cookie: inotify rename cookie
+ * @iter_info: array of marks from this group that are interested in the event
+ *
+ * handle_inode_event - simple variant of handle_event() for groups that only
+ *			have inode marks and don't have an ignore mask
+ * @mark: mark to notify
+ * @mask: event type and flags
+ * @inode: inode that event happened on
+ * @dir: optional directory associated with event -
+ * if @file_name is not NULL, this is the directory that
+ * @file_name is relative to.
+ * @file_name: optional file name associated with event
+ *
* free_group_priv - called when a group refcnt hits 0 to clean up the private union
* freeing_mark - called when a mark is being destroyed for some reason. The group
- * MUST be holding a reference on each mark and that reference must be
- * dropped in this function. inotify uses this function to send
- * userspace messages that marks have been removed.
+ * MUST be holding a reference on each mark and that reference must be
+ * dropped in this function. inotify uses this function to send
+ * userspace messages that marks have been removed.
*/
struct fsnotify_ops {
- int (*handle_event)(struct fsnotify_group *group,
- struct inode *inode,
- u32 mask, const void *data, int data_type,
+ int (*handle_event)(struct fsnotify_group *group, u32 mask,
+ const void *data, int data_type, struct inode *dir,
const struct qstr *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info);
+ int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask,
+ struct inode *inode, struct inode *dir,
+ const struct qstr *file_name);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event)(struct fsnotify_event *event);
@@ -220,12 +252,11 @@ enum fsnotify_data_type {
FSNOTIFY_EVENT_INODE,
};
-static inline const struct inode *fsnotify_data_inode(const void *data,
- int data_type)
+static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
{
switch (data_type) {
case FSNOTIFY_EVENT_INODE:
- return data;
+ return (struct inode *)data;
case FSNOTIFY_EVENT_PATH:
return d_inode(((const struct path *)data)->dentry);
default:
@@ -246,6 +277,7 @@ static inline const struct path *fsnotify_data_path(const void *data,
enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_INODE,
+ FSNOTIFY_OBJ_TYPE_CHILD,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
FSNOTIFY_OBJ_TYPE_COUNT,
@@ -253,6 +285,7 @@ enum fsnotify_obj_type {
};
#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
+#define FSNOTIFY_OBJ_TYPE_CHILD_FL (1U << FSNOTIFY_OBJ_TYPE_CHILD)
#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB)
#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
@@ -297,6 +330,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
}
FSNOTIFY_ITER_FUNCS(inode, INODE)
+FSNOTIFY_ITER_FUNCS(child, CHILD)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
FSNOTIFY_ITER_FUNCS(sb, SB)
@@ -377,15 +411,29 @@ struct fsnotify_mark {
/* called from the vfs helpers */
/* main fsnotify call to send events */
-extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data,
- int data_type, const struct qstr *name, u32 cookie);
-extern int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+extern int fsnotify(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ struct inode *inode, u32 cookie);
+extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
int data_type);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
+static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
+{
+ /* FS_EVENT_ON_CHILD is set on marks that want parent/name info */
+ if (!(mask & FS_EVENT_ON_CHILD))
+ return 0;
+ /*
+ * This object might be watched by a mark that cares about parent/name
+ * info, does it care about the specific set of events that can be
+ * reported with parent/name info?
+ */
+ return mask & FS_EVENTS_POSS_TO_PARENT;
+}
+
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
/* FS_EVENT_ON_CHILD is set if the inode may care */
@@ -535,13 +583,14 @@ static inline void fsnotify_init_event(struct fsnotify_event *event,
#else
-static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data,
- int data_type, const struct qstr *name, u32 cookie)
+static inline int fsnotify(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ struct inode *inode, u32 cookie)
{
return 0;
}
-static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
+static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask,
const void *data, int data_type)
{
return 0;
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index 78201a6d35f6..c1144a450392 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -115,8 +115,13 @@ struct fsverity_operations {
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
- /* pairs with the cmpxchg() in fsverity_set_info() */
- return READ_ONCE(inode->i_verity_info);
+ /*
+ * Pairs with the cmpxchg_release() in fsverity_set_info().
+ * I.e., another task may publish ->i_verity_info concurrently,
+ * executing a RELEASE barrier. We need to use smp_load_acquire() here
+ * to safely ACQUIRE the memory the other task published.
+ */
+ return smp_load_acquire(&inode->i_verity_info);
}
/* enable.c */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index e339dac91ee6..ce2c06f72e86 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -58,9 +58,6 @@ struct ftrace_direct_func;
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym);
-int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
- char *type, char *name,
- char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
@@ -68,6 +65,13 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
{
return NULL;
}
+#endif
+
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported);
+#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
char *type, char *name,
char *module_name, int *exported)
@@ -76,7 +80,6 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val
}
#endif
-
#ifdef CONFIG_FUNCTION_TRACER
extern int ftrace_enabled;
@@ -207,6 +210,7 @@ struct ftrace_ops {
struct ftrace_ops_hash old_hash;
unsigned long trampoline;
unsigned long trampoline_size;
+ struct list_head list;
#endif
};
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index 02393c0c98f9..bfd00320c7f3 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -44,7 +44,7 @@
struct genradix_root;
struct __genradix {
- struct genradix_root __rcu *root;
+ struct genradix_root *root;
};
/*
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 392aad5e29a2..4ab853461dff 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -19,13 +19,12 @@
#include <linux/blk_types.h>
#include <asm/local.h>
-#ifdef CONFIG_BLOCK
-
#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
#define disk_to_dev(disk) (&(disk)->part0.__dev)
#define part_to_dev(part) (&((part)->__dev))
+extern const struct device_type disk_type;
extern struct device_type part_type;
extern struct class block_class;
@@ -337,12 +336,9 @@ static inline void set_capacity(struct gendisk *disk, sector_t size)
disk->part0.nr_sects = size;
}
-extern dev_t blk_lookup_devt(const char *name, int partno);
-
int bdev_disk_changed(struct block_device *bdev, bool invalidate);
int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
int blk_drop_partitions(struct block_device *bdev);
-extern void printk_all_partitions(void);
extern struct gendisk *__alloc_disk_node(int minors, int node_id);
extern struct kobject *get_disk_and_module(struct gendisk *disk);
@@ -373,10 +369,40 @@ extern void blk_unregister_region(dev_t devt, unsigned long range);
#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
-#else /* CONFIG_BLOCK */
+int register_blkdev(unsigned int major, const char *name);
+void unregister_blkdev(unsigned int major, const char *name);
-static inline void printk_all_partitions(void) { }
+int revalidate_disk(struct gendisk *disk);
+int check_disk_change(struct block_device *bdev);
+int __invalidate_device(struct block_device *bdev, bool kill_dirty);
+void bd_set_size(struct block_device *bdev, loff_t size);
+/* for drivers/char/raw.c: */
+int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
+long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
+
+#ifdef CONFIG_SYSFS
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
+#else
+static inline int bd_link_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
+{
+ return 0;
+}
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
+{
+}
+#endif /* CONFIG_SYSFS */
+
+#ifdef CONFIG_BLOCK
+void printk_all_partitions(void);
+dev_t blk_lookup_devt(const char *name, int partno);
+#else /* CONFIG_BLOCK */
+static inline void printk_all_partitions(void)
+{
+}
static inline dev_t blk_lookup_devt(const char *name, int partno)
{
dev_t devt = MKDEV(0, 0);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c4f272af7af5..d1cef5c2715c 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -474,6 +474,22 @@ struct gpio_chip {
extern const char *gpiochip_is_requested(struct gpio_chip *gc,
unsigned int offset);
+/**
+ * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
+ * @chip: the chip to query
+ * @i: loop variable
+ * @base: first GPIO in the range
+ * @size: amount of GPIOs to check starting from @base
+ * @label: label of current GPIO
+ */
+#define for_each_requested_gpio_in_range(chip, i, base, size, label) \
+ for (i = 0; i < size; i++) \
+ if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else
+
+/* Iterates over all requested GPIOs of the given @chip */
+#define for_each_requested_gpio(chip, i, label) \
+ for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label)
+
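A short usage sketch for the new iterator; the dump helper below is hypothetical.

/* Hypothetical debug helper: list every requested line of a chip. */
static void my_dump_requested_gpios(struct gpio_chip *chip)
{
	const char *label;
	unsigned int i;

	for_each_requested_gpio(chip, i, label)
		pr_debug("gpio offset %u in use by \"%s\"\n", i, label);
}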
/* add/remove chips */
extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct lock_class_key *lock_key,
@@ -481,25 +497,25 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
/**
* gpiochip_add_data() - register a gpio_chip
- * @chip: the chip to register, with chip->base initialized
+ * @gc: the chip to register, with gc->base initialized
* @data: driver-private data associated with this chip
*
* Context: potentially before irqs will work
*
* When gpiochip_add_data() is called very early during boot, so that GPIOs
- * can be freely used, the chip->parent device must be registered before
+ * can be freely used, the gc->parent device must be registered before
* the gpio framework's arch_initcall(). Otherwise sysfs initialization
* for GPIOs will fail rudely.
*
* gpiochip_add_data() must only be called after gpiolib initialization,
* ie after core_initcall().
*
- * If chip->base is negative, this requests dynamic assignment of
+ * If gc->base is negative, this requests dynamic assignment of
* a range of valid GPIOs.
*
* Returns:
* A negative errno if the chip can't be registered, such as because the
- * chip->base is invalid or already associated with a different chip.
+ * gc->base is invalid or already associated with a different chip.
* Otherwise it returns zero as a success code.
*/
#ifdef CONFIG_LOCKDEP
@@ -509,8 +525,16 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gpiochip_add_data_with_key(gc, data, &lock_key, \
&request_key); \
})
+#define devm_gpiochip_add_data(dev, gc, data) ({ \
+ static struct lock_class_key lock_key; \
+ static struct lock_class_key request_key; \
+ devm_gpiochip_add_data_with_key(dev, gc, data, &lock_key, \
+ &request_key); \
+ })
#else
#define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL)
+#define devm_gpiochip_add_data(dev, gc, data) \
+ devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL)
#endif /* CONFIG_LOCKDEP */
static inline int gpiochip_add(struct gpio_chip *gc)
@@ -518,8 +542,9 @@ static inline int gpiochip_add(struct gpio_chip *gc)
return gpiochip_add_data(gc, NULL);
}
extern void gpiochip_remove(struct gpio_chip *gc);
-extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc,
- void *data);
+extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data,
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
extern struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *gc, void *data));
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
index 4c1e6b34e824..ad76f3d0a6ba 100644
--- a/include/linux/gpio/regmap.h
+++ b/include/linux/gpio/regmap.h
@@ -8,7 +8,7 @@ struct gpio_regmap;
struct irq_domain;
struct regmap;
-#define GPIO_REGMAP_ADDR_ZERO ((unsigned long)(-1))
+#define GPIO_REGMAP_ADDR_ZERO ((unsigned int)(-1))
#define GPIO_REGMAP_ADDR(addr) ((addr) ? : GPIO_REGMAP_ADDR_ZERO)
/**
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 03c9fece7d43..754f67ac4326 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -111,32 +111,42 @@ extern void rcu_nmi_exit(void);
/*
* nmi_enter() can nest up to 15 times; see NMI_BITS.
*/
-#define nmi_enter() \
+#define __nmi_enter() \
do { \
+ lockdep_off(); \
arch_nmi_enter(); \
printk_nmi_enter(); \
- lockdep_off(); \
BUG_ON(in_nmi() == NMI_MASK); \
__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
- rcu_nmi_enter(); \
+ } while (0)
+
+#define nmi_enter() \
+ do { \
+ __nmi_enter(); \
lockdep_hardirq_enter(); \
+ rcu_nmi_enter(); \
instrumentation_begin(); \
ftrace_nmi_enter(); \
instrumentation_end(); \
} while (0)
+#define __nmi_exit() \
+ do { \
+ BUG_ON(!in_nmi()); \
+ __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
+ printk_nmi_exit(); \
+ arch_nmi_exit(); \
+ lockdep_on(); \
+ } while (0)
+
#define nmi_exit() \
do { \
instrumentation_begin(); \
ftrace_nmi_exit(); \
instrumentation_end(); \
- lockdep_hardirq_exit(); \
rcu_nmi_exit(); \
- BUG_ON(!in_nmi()); \
- __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
- lockdep_on(); \
- printk_nmi_exit(); \
- arch_nmi_exit(); \
+ lockdep_hardirq_exit(); \
+ __nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index 78b6ea5fa8ba..f6c666730b8c 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -173,9 +173,9 @@ static inline void hash_del_rcu(struct hlist_node *node)
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
-#define hash_for_each_possible_rcu(name, obj, member, key) \
+#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
- member)
+ member, ## cond)
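A hedged example of the new optional lockdep expression; the table, lock and object type below are assumptions for illustration.

static DEFINE_SPINLOCK(obj_lock);
static DEFINE_HASHTABLE(obj_table, 4);

struct my_obj {
	u32 key;
	struct hlist_node node;
};

/* Lookup that is legal either under rcu_read_lock() or with obj_lock held. */
static struct my_obj *my_lookup(u32 key)
{
	struct my_obj *obj;

	hash_for_each_possible_rcu(obj_table, obj, node, key,
				   lockdep_is_held(&obj_lock))
		if (obj->key == key)
			return obj;
	return NULL;
}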
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 50c31f1a0a2d..9850d59d6f1c 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -57,6 +57,7 @@ enum hdmi_infoframe_type {
#define HDMI_SPD_INFOFRAME_SIZE 25
#define HDMI_AUDIO_INFOFRAME_SIZE 10
#define HDMI_DRM_INFOFRAME_SIZE 26
+#define HDMI_VENDOR_INFOFRAME_SIZE 4
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index d6e82e3de027..14e6202ce47f 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -73,7 +73,7 @@ static inline void kunmap(struct page *page)
* no global lock is needed and because the kmap code must perform a global TLB
* invalidation when the kmap pool wraps.
*
- * However when holding an atomic kmap is is not legal to sleep, so atomic
+ * However when holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index f4a09ed223ac..866a0fa104c4 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -37,16 +37,17 @@
* will fail. Must be combined with HMM_PFN_REQ_FAULT.
*/
enum hmm_pfn_flags {
- /* Output flags */
+ /* Output fields and flags */
HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
+ HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),
/* Input flags */
HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
- HMM_PFN_FLAGS = HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR,
+ HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
};
/*
@@ -62,6 +63,25 @@ static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
}
/*
+ * hmm_pfn_to_map_order() - return the CPU mapping size order
+ *
+ * This is optionally useful to optimize processing of the pfn result
+ * array. It indicates that the page starts at the order aligned VA and is
+ * 1<<order bytes long. Every pfn within a high order page will have the
+ * same pfn flags, both access protections and the map_order. The caller must
+ * be careful with edge cases as the start and end VA of the given page may
+ * extend past the range used with hmm_range_fault().
+ *
+ * This must be called under the caller 'user_lock' after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
+ */
+static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
+{
+ return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
+}
+
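A sketch of the optimization the comment above hints at: walking the output array in 1<<order strides. handle_pfn() is hypothetical, and a real caller must also cope with a range start that is not order-aligned.

/* Hypothetical consumer of hmm_range_fault() output. */
static void walk_hmm_pfns(const unsigned long *hmm_pfns, unsigned long npages)
{
	unsigned long i = 0;

	while (i < npages) {
		unsigned long pfn = hmm_pfns[i];
		unsigned long step = 1;

		if (pfn & HMM_PFN_VALID) {
			handle_pfn(pfn);		/* hypothetical */
			/* every pfn of a high order page carries the same flags */
			step = 1UL << hmm_pfn_to_map_order(pfn);
		}
		i += min(step, npages - i);
	}
}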
+/*
* struct hmm_range - track invalidation lock on virtual address range
*
* @notifier: a mmu_interval_notifier that includes the start/end
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index a3a568bf9686..20c885d0bddc 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -328,10 +328,12 @@ int host1x_client_resume(struct host1x_client *client);
struct tegra_mipi_device;
-struct tegra_mipi_device *tegra_mipi_request(struct device *device);
+struct tegra_mipi_device *tegra_mipi_request(struct device *device,
+ struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_calibrate(struct tegra_mipi_device *device);
+int tegra_mipi_wait(struct tegra_mipi_device *device);
#endif
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 15c8ac313678..107cedd7019a 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
+#include <linux/seqlock.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>
@@ -159,7 +160,7 @@ struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
unsigned int index;
clockid_t clockid;
- seqcount_t seq;
+ seqcount_raw_spinlock_t seq;
struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 71f20776b06c..8a8bc46a2432 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, unsigned long old_end,
+ unsigned long new_addr,
pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
@@ -181,13 +181,6 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
-#ifdef CONFIG_DEBUG_VM
-#define transparent_hugepage_debug_cow() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
-#else /* CONFIG_DEBUG_VM */
-#define transparent_hugepage_debug_cow() 0
-#endif /* CONFIG_DEBUG_VM */
extern unsigned long thp_get_unmapped_area(struct file *filp,
unsigned long addr, unsigned long len, unsigned long pgoff,
@@ -265,9 +258,36 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
else
return NULL;
}
-static inline int hpage_nr_pages(struct page *page)
+
+/**
+ * thp_head - Head page of a transparent huge page.
+ * @page: Any page (tail, head or regular) found in the page cache.
+ */
+static inline struct page *thp_head(struct page *page)
{
- if (unlikely(PageTransHuge(page)))
+ return compound_head(page);
+}
+
+/**
+ * thp_order - Order of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ */
+static inline unsigned int thp_order(struct page *page)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ if (PageHead(page))
+ return HPAGE_PMD_ORDER;
+ return 0;
+}
+
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ if (PageHead(page))
return HPAGE_PMD_NR;
return 1;
}
@@ -324,9 +344,21 @@ static inline struct list_head *page_deferred_list(struct page *page)
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
-static inline int hpage_nr_pages(struct page *page)
+static inline struct page *thp_head(struct page *page)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ return page;
+}
+
+static inline unsigned int thp_order(struct page *page)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ return 0;
+}
+
+static inline int thp_nr_pages(struct page *page)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
return 1;
}
@@ -450,4 +482,15 @@ static inline bool thp_migration_supported(void)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+/**
+ * thp_size - Size of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ *
+ * Return: Number of bytes in this page.
+ */
+static inline unsigned long thp_size(struct page *page)
+{
+ return PAGE_SIZE << thp_order(page);
+}
+
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 50650d0d01b9..d5cc5f802dd4 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
+#include <linux/gfp.h>
struct ctl_table;
struct user_struct;
@@ -164,7 +165,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -203,8 +205,9 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
return NULL;
}
-static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
- pte_t *ptep)
+static inline int huge_pmd_unshare(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long *addr, pte_t *ptep)
{
return 0;
}
@@ -504,13 +507,10 @@ struct huge_bootmem_page {
struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
- nodemask_t *nmask);
+ nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
-struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
- int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
@@ -692,6 +692,27 @@ static inline bool hugepage_movable_supported(struct hstate *h)
return true;
}
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+ if (hugepage_movable_supported(h))
+ return GFP_HIGHUSER_MOVABLE;
+ else
+ return GFP_HIGHUSER;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+ gfp_t modified_mask = htlb_alloc_mask(h);
+
+ /* Some callers might want to enforce node */
+ modified_mask |= (gfp_mask & __GFP_THISNODE);
+
+ modified_mask |= (gfp_mask & __GFP_NOWARN);
+
+ return modified_mask;
+}
+
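A small sketch of the intended use: a migration-target allocation that keeps the caller's __GFP_THISNODE/__GFP_NOWARN policy. The wrapper name is hypothetical.

/* Hypothetical helper: allocate a hugetlb migration target on a given node. */
static struct page *my_alloc_hugetlb_target(struct hstate *h, int nid,
					    nodemask_t *nmask, gfp_t gfp)
{
	gfp_t mask = htlb_modify_alloc_mask(h, gfp);

	return alloc_huge_page_nodemask(h, nid, nmask, mask);
}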
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
@@ -759,13 +780,9 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
return NULL;
}
-static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
-{
- return NULL;
-}
-
static inline struct page *
-alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
+alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
{
return NULL;
}
@@ -878,6 +895,16 @@ static inline bool hugepage_movable_supported(struct hstate *h)
return false;
}
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+ return 0;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+ return 0;
+}
+
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index d7d4250cd1e4..78dd7035d1e5 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -72,7 +72,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
void *context);
extern int register_perf_hw_breakpoint(struct perf_event *bp);
-extern int __register_perf_hw_breakpoint(struct perf_event *bp);
extern void unregister_hw_breakpoint(struct perf_event *bp);
extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
@@ -119,8 +118,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
void *context) { return NULL; }
static inline int
register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
-static inline int
-__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
static inline void
unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 40df3103e890..38100e80360a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -803,17 +803,15 @@ struct vmbus_channel {
u64 sig_event;
/*
- * Starting with win8, this field will be used to specify
- * the target virtual processor on which to deliver the interrupt for
- * the host to guest communication.
- * Prior to win8, incoming channel interrupts would only
- * be delivered on cpu 0. Setting this value to 0 would
- * preserve the earlier behavior.
+ * Starting with win8, this field will be used to specify the
+ * target CPU on which to deliver the interrupt for the host
+ * to guest communication.
+ *
+ * Prior to win8, incoming channel interrupts would only be
+ * delivered on CPU 0. Setting this value to 0 would preserve
+ * the earlier behavior.
*/
- u32 target_vp;
- /* The corresponding CPUID in the guest */
u32 target_cpu;
- int numa_node;
/*
* Support for sub-channels. For high performance devices,
* it will be useful to have multiple sub-channels to support
@@ -843,12 +841,6 @@ struct vmbus_channel {
void (*chn_rescind_callback)(struct vmbus_channel *channel);
/*
- * The spinlock to protect the structure. It is being used to protect
- * test-and-set access to various attributes of the structure as well
- * as all sc_list operations.
- */
- spinlock_t lock;
- /*
* All Sub-channels of a primary channel are linked here.
*/
struct list_head sc_list;
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index ee328cf80bd9..fc55ea41d323 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -231,7 +231,6 @@ enum i2c_alert_protocol {
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
- * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
@@ -290,8 +289,6 @@ struct i2c_driver {
int (*detect)(struct i2c_client *client, struct i2c_board_info *info);
const unsigned short *address_list;
struct list_head clients;
-
- bool disable_i2c_core_irq_mapping;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -609,6 +606,14 @@ struct i2c_timings {
* may configure padmux here for SDA/SCL line or something else they want.
* @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery.
* @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery.
+ * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins.
+ * Optional.
+ * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned
+ * to the I2C bus. Optional. Populated internally for GPIO recovery, if
+ * state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid.
+ * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as
+ * GPIOs. Optional. Populated internally for GPIO recovery, if this state
+ * is called "gpio" or "recovery" and pinctrl is valid.
*/
struct i2c_bus_recovery_info {
int (*recover_bus)(struct i2c_adapter *adap);
@@ -625,6 +630,9 @@ struct i2c_bus_recovery_info {
/* gpio recovery */
struct gpio_desc *scl_gpiod;
struct gpio_desc *sda_gpiod;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_gpio;
};
int i2c_recover_bus(struct i2c_adapter *adap);
@@ -1001,7 +1009,7 @@ static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
int index, struct i2c_board_info *info)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
{
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 81ca84ce3119..0af4d210ee31 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -15,6 +15,7 @@
#include <linux/skbuff.h>
#include <uapi/linux/icmp.h>
+#include <uapi/linux/errqueue.h>
static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
@@ -35,4 +36,8 @@ static inline bool icmp_is_err(int type)
return false;
}
+void ip_icmp_error_rfc4884(const struct sk_buff *skb,
+ struct sock_ee_data_rfc4884 *out,
+ int thlen, int off);
+
#endif /* _LINUX_ICMP_H */
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 33d379602314..1b3371ae8193 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -13,12 +13,32 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#include <linux/netdevice.h>
#if IS_ENABLED(CONFIG_IPV6)
-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr);
+#if IS_BUILTIN(CONFIG_IPV6)
+void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct in6_addr *force_saddr);
+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+{
+ icmp6_send(skb, type, code, info, NULL);
+}
+static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+{
+ BUILD_BUG_ON(fn != icmp6_send);
+ return 0;
+}
+#else
+extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+#endif
+
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 9f732499ea88..c47f43e65a2f 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2561,6 +2561,8 @@ enum ieee80211_statuscode {
/* 802.11ai */
WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
+ WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126,
+ WLAN_STATUS_SAE_PK = 127,
};
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index b3a8d3054af0..6479a38e52fa 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -49,6 +49,7 @@ struct br_ip_list {
#define BR_ISOLATED BIT(16)
#define BR_MRP_AWARE BIT(17)
#define BR_MRP_LOST_CONT BIT(18)
+#define BR_MRP_LOST_IN_CONT BIT(19)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
index 7bc961defa87..caa8bb279a34 100644
--- a/include/linux/iio/common/cros_ec_sensors_core.h
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -42,6 +42,10 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p);
* @resp: motion sensor response structure
* @type: type of motion sensor
* @loc: location where the motion sensor is placed
+ * @range_updated: True if the range of the sensor has been
+ * updated.
+ * @curr_range: If updated, the current range value.
+ * It will be reapplied at every resume.
* @calib: calibration parameters. Note that trigger
* captured data will always provide the calibrated
* data
@@ -65,6 +69,9 @@ struct cros_ec_sensors_core_state {
enum motionsensor_type type;
enum motionsensor_location loc;
+ bool range_updated;
+ int curr_range;
+
struct calib_data {
s16 offset;
u16 scale;
@@ -114,7 +121,9 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int val, int val2, long mask);
-/* List of extended channel specification for all sensors */
+extern const struct dev_pm_ops cros_ec_sensors_pm_ops;
+
+/* List of extended channel specification for all sensors. */
extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
extern const struct attribute *cros_ec_sensor_fifo_attributes[];
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
new file mode 100644
index 000000000000..f2e94196d31f
--- /dev/null
+++ b/include/linux/iio/iio-opaque.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _INDUSTRIAL_IO_OPAQUE_H_
+#define _INDUSTRIAL_IO_OPAQUE_H_
+
+/**
+ * struct iio_dev_opaque - industrial I/O device opaque information
+ * @indio_dev: public industrial I/O device information
+ * @event_interface: event chrdevs associated with interrupt lines
+ * @buffer_list: list of all buffers currently attached
+ * @channel_attr_list: keep track of automatically created channel
+ * attributes
+ * @chan_attr_group: group for all attrs in base directory
+ * @debugfs_dentry: device specific debugfs dentry
+ * @cached_reg_addr: cached register address for debugfs reads
+ * @read_buf: read buffer to be used for the initial reg read
+ * @read_buf_len: data length in @read_buf
+ */
+struct iio_dev_opaque {
+ struct iio_dev indio_dev;
+ struct iio_event_interface *event_interface;
+ struct list_head buffer_list;
+ struct list_head channel_attr_list;
+ struct attribute_group chan_attr_group;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dentry;
+ unsigned cached_reg_addr;
+ char read_buf[20];
+ unsigned int read_buf_len;
+#endif
+};
+
+#define to_iio_dev_opaque(indio_dev) \
+ container_of(indio_dev, struct iio_dev_opaque, indio_dev)
+
+#endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index a1be82e74c93..e2df67a3b9ab 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -488,9 +488,7 @@ struct iio_buffer_setup_ops {
* @currentmode: [DRIVER] current operating mode
* @dev: [DRIVER] device structure, should be assigned a parent
* and owner
- * @event_interface: [INTERN] event chrdevs associated with interrupt lines
* @buffer: [DRIVER] any buffer present
- * @buffer_list: [INTERN] list of all buffers currently attached
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
* @mlock: [INTERN] lock used to prevent simultaneous device state
* changes
@@ -506,9 +504,6 @@ struct iio_buffer_setup_ops {
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
* @num_channels: [DRIVER] number of channels specified in @channels.
- * @channel_attr_list: [INTERN] keep track of automatically created channel
- * attributes
- * @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
* @label: [DRIVER] unique name to identify which device this is
* @info: [DRIVER] callbacks and constant info from driver
@@ -520,8 +515,8 @@ struct iio_buffer_setup_ops {
* @groups: [INTERN] attribute groups
* @groupcounter: [INTERN] index of next attribute group
* @flags: [INTERN] file ops related flags including busy flag.
- * @debugfs_dentry: [INTERN] device specific debugfs dentry.
- * @cached_reg_addr: [INTERN] cached register address for debugfs reads.
+ * @priv: [DRIVER] reference to driver's private information
+ * **MUST** be accessed **ONLY** via iio_priv() helper
*/
struct iio_dev {
int id;
@@ -531,10 +526,7 @@ struct iio_dev {
int currentmode;
struct device dev;
- struct iio_event_interface *event_interface;
-
struct iio_buffer *buffer;
- struct list_head buffer_list;
int scan_bytes;
struct mutex mlock;
@@ -551,8 +543,6 @@ struct iio_dev {
struct iio_chan_spec const *channels;
int num_channels;
- struct list_head channel_attr_list;
- struct attribute_group chan_attr_group;
const char *name;
const char *label;
const struct iio_info *info;
@@ -565,12 +555,7 @@ struct iio_dev {
int groupcounter;
unsigned long flags;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_dentry;
- unsigned cached_reg_addr;
- char read_buf[20];
- unsigned int read_buf_len;
-#endif
+ void *priv;
};
const struct iio_chan_spec
@@ -649,6 +634,26 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
}
+/**
+ * iio_device_set_parent() - assign parent device to the IIO device object
+ * @indio_dev: IIO device structure
+ * @parent: reference to parent device object
+ *
+ * This utility must be called between IIO device allocation
+ * (via devm_iio_device_alloc()) & IIO device registration
+ * (via {devm_}iio_device_register()).
+ * By default, the device allocation will also assign a parent device to
+ * the IIO device object. In cases where devm_iio_device_alloc() is used,
+ * sometimes the parent device must be different from the device used to
+ * manage the allocation.
+ * In that case, this helper should be used to change the parent, hence the
+ * requirement to call this between allocation & registration.
+ **/
+static inline void iio_device_set_parent(struct iio_dev *indio_dev,
+ struct device *parent)
+{
+ indio_dev->dev.parent = parent;
+}
/**
* iio_device_set_drvdata() - Set device driver data
@@ -669,28 +674,23 @@ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
*
* Returns the data previously set with iio_device_set_drvdata()
*/
-static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev)
+static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
{
return dev_get_drvdata(&indio_dev->dev);
}
/* Can we make this smaller? */
#define IIO_ALIGN L1_CACHE_BYTES
-struct iio_dev *iio_device_alloc(int sizeof_priv);
+struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
+/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
- return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN);
-}
-
-static inline struct iio_dev *iio_priv_to_dev(void *priv)
-{
- return (struct iio_dev *)((char *)priv -
- ALIGN(sizeof(struct iio_dev), IIO_ALIGN));
+ return indio_dev->priv;
}
void iio_device_free(struct iio_dev *indio_dev);
-struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
+struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
const char *fmt, ...);
/**
@@ -709,10 +709,7 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
* @indio_dev: IIO device structure for device
**/
#if defined(CONFIG_DEBUG_FS)
-static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
-{
- return indio_dev->debugfs_dentry;
-}
+struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
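
Editor's note: with iio_priv() reduced to an indio_dev->priv lookup and the parent passed at allocation time, a driver probe now follows the pattern sketched below. This is illustrative only; the driver name and state structure are invented (assumes linux/platform_device.h and linux/iio/iio.h):

struct my_adc_state {			/* hypothetical driver state */
	int last_raw;
};

static int my_adc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct my_adc_state *st;

	/* the parent device is now a parameter of the allocation */
	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);	/* simply returns indio_dev->priv */
	st->last_raw = 0;

	indio_dev->name = "my-adc";

	return devm_iio_device_register(&pdev->dev, indio_dev);
}
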
diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h
index c3c6ba5ec423..3aa2f132dd67 100644
--- a/include/linux/iio/trigger_consumer.h
+++ b/include/linux/iio/trigger_consumer.h
@@ -50,11 +50,4 @@ irqreturn_t iio_pollfunc_store_time(int irq, void *p);
void iio_trigger_notify_done(struct iio_trigger *trig);
-/*
- * Two functions for common case where all that happens is a pollfunc
- * is attached and detached from a trigger
- */
-int iio_triggered_buffer_postenable(struct iio_dev *indio_dev);
-int iio_triggered_buffer_predisable(struct iio_dev *indio_dev);
-
#endif
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 9164e1534ec9..d15100de6cdd 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -25,7 +25,7 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
extern void ima_post_path_mknod(struct dentry *dentry);
extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
-extern void ima_kexec_cmdline(const void *buf, int size);
+extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size);
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
@@ -103,7 +103,7 @@ static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
return -EOPNOTSUPP;
}
-static inline void ima_kexec_cmdline(const void *buf, int size) {}
+static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
#endif /* CONFIG_IMA */
#ifndef CONFIG_IMA_KEXEC
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index 00d7e8e919c6..54c02c84906a 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -23,6 +23,16 @@
likely(f == f2) ? f2(__VA_ARGS__) : \
INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
})
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+ likely(f == f3) ? f3(__VA_ARGS__) : \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
+ })
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+ likely(f == f4) ? f4(__VA_ARGS__) : \
+ INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
+ })
#define INDIRECT_CALLABLE_DECLARE(f) f
#define INDIRECT_CALLABLE_SCOPE
@@ -30,6 +40,8 @@
#else
#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALLABLE_DECLARE(f)
#define INDIRECT_CALLABLE_SCOPE static
#endif
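
Editor's note: the new 3- and 4-way variants follow the same pattern as INDIRECT_CALL_1/2: list the most likely targets first so the common case becomes a direct call when retpolines are enabled. A sketch with made-up handlers (assumes linux/skbuff.h):

static int handle_a(struct sk_buff *skb) { return 0; }
static int handle_b(struct sk_buff *skb) { return 0; }
static int handle_c(struct sk_buff *skb) { return 1; }

static int deliver(int (*cb)(struct sk_buff *skb), struct sk_buff *skb)
{
	/* compares cb against handle_a, then handle_b, then handle_c,
	 * falling back to an indirect call only when none match */
	return INDIRECT_CALL_3(cb, handle_a, handle_b, handle_c, skb);
}
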
diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h
new file mode 100644
index 000000000000..92045d18cbfc
--- /dev/null
+++ b/include/linux/init_syscalls.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+int __init init_mount(const char *dev_name, const char *dir_name,
+ const char *type_page, unsigned long flags, void *data_page);
+int __init init_umount(const char *name, int flags);
+int __init init_chdir(const char *filename);
+int __init init_chroot(const char *filename);
+int __init init_chown(const char *filename, uid_t user, gid_t group, int flags);
+int __init init_chmod(const char *filename, umode_t mode);
+int __init init_eaccess(const char *filename);
+int __init init_stat(const char *filename, struct kstat *stat, int flags);
+int __init init_mknod(const char *filename, umode_t mode, unsigned int dev);
+int __init init_link(const char *oldname, const char *newname);
+int __init init_symlink(const char *oldname, const char *newname);
+int __init init_unlink(const char *pathname);
+int __init init_mkdir(const char *pathname, umode_t mode);
+int __init init_rmdir(const char *pathname);
+int __init init_utimes(char *filename, struct timespec64 *ts);
+int __init init_dup(struct file *file);
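
Editor's note: these are __init-only replacements for the corresponding syscalls used while setting up the early rootfs. A sketch of the intended call pattern; the paths and function name are examples only:

static int __init example_prepare_root(void)
{
	int err;

	err = init_mkdir("/root", 0700);
	if (err)
		return err;

	err = init_mount("/dev/sda1", "/root", "ext4", 0, NULL);
	if (err)
		return err;

	return init_chdir("/root");
}
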
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index aa5914355728..8db6f8c8030b 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -2,12 +2,6 @@
#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
-/* 1 = load ramdisk, 0 = don't load */
-extern int rd_doload;
-
-/* 1 = prompt for ramdisk, 0 = don't prompt */
-extern int rd_prompt;
-
/* starting block # of image */
extern int rd_image_start;
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
new file mode 100644
index 000000000000..93e2ad67fc10
--- /dev/null
+++ b/include/linux/instrumentation.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_INSTRUMENTATION_H
+#define __LINUX_INSTRUMENTATION_H
+
+#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION)
+
+/* Begin/end of an instrumentation safe region */
+#define instrumentation_begin() ({ \
+ asm volatile("%c0: nop\n\t" \
+ ".pushsection .discard.instr_begin\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
+})
+
+/*
+ * Because instrumentation_{begin,end}() can nest, objtool validation considers
+ * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
+ * When the value is greater than 0, we consider instrumentation allowed.
+ *
+ * There is a problem with code like:
+ *
+ * noinstr void foo()
+ * {
+ * instrumentation_begin();
+ * ...
+ * if (cond) {
+ * instrumentation_begin();
+ * ...
+ * instrumentation_end();
+ * }
+ * bar();
+ * instrumentation_end();
+ * }
+ *
+ * If instrumentation_end() would be an empty label, like all the other
+ * annotations, the inner _end(), which is at the end of a conditional block,
+ * would land on the instruction after the block.
+ *
+ * If we then consider the sum of the !cond path, we'll see that the call to
+ * bar() is with a 0-value, even though, we meant it to happen with a positive
+ * value.
+ *
+ * To avoid this, have _end() be a NOP instruction: this ensures it will be
+ * part of the conditional block and does not escape.
+ */
+#define instrumentation_end() ({ \
+ asm volatile("%c0: nop\n\t" \
+ ".pushsection .discard.instr_end\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
+})
+#else
+# define instrumentation_begin() do { } while(0)
+# define instrumentation_end() do { } while(0)
+#endif
+
+#endif /* __LINUX_INSTRUMENTATION_H */
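
Editor's note: the usage pattern these annotations (and the objtool checks) expect, sketched with invented function names: a noinstr entry function brackets the region in which instrumentable code is allowed.

static void do_instrumentable_work(void)
{
	/* ordinary, instrumentable kernel code */
}

noinstr void example_entry_work(void)
{
	/* no instrumentable code may run here */

	instrumentation_begin();
	/* tracing, KASAN, KCSAN, ... are permitted in this region */
	do_instrumentable_work();
	instrumentation_end();

	/* back to the non-instrumentable part of the entry path */
}
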
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 3e8fa1c7a1e6..b1ed2f25f7c0 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -381,8 +381,7 @@ enum {
#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
-#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
-#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
+#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
@@ -600,6 +599,8 @@ struct intel_iommu {
struct iommu_device iommu; /* IOMMU core code handle */
int node;
u32 flags; /* Software defined flags */
+
+ struct dmar_drhd_unit *drhd;
};
/* PCI domain-device relationship */
@@ -705,7 +706,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u32 pasid, u16 qdep, u64 addr,
- unsigned int size_order, u64 granu);
+ unsigned int size_order);
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
int pasid);
@@ -728,6 +729,7 @@ void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct dmar_domain *find_domain(struct device *dev);
struct device_domain_info *get_domain_info(struct device *dev);
+struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
#ifdef CONFIG_INTEL_IOMMU_SVM
extern void intel_svm_check(struct intel_iommu *iommu);
@@ -740,6 +742,9 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void intel_svm_unbind(struct iommu_sva *handle);
int intel_svm_get_pasid(struct iommu_sva *handle);
+int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
+ struct iommu_page_response *msg);
+
struct svm_dev_ops;
struct intel_svm_dev {
@@ -766,8 +771,6 @@ struct intel_svm {
struct list_head devs;
struct list_head list;
};
-
-extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
#endif
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index efb3ce892c20..3582176a1eca 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -29,6 +29,7 @@ enum rapl_domain_reg_id {
RAPL_DOMAIN_REG_PERF,
RAPL_DOMAIN_REG_POLICY,
RAPL_DOMAIN_REG_INFO,
+ RAPL_DOMAIN_REG_PL4,
RAPL_DOMAIN_REG_MAX,
};
@@ -38,12 +39,14 @@ enum rapl_primitives {
ENERGY_COUNTER,
POWER_LIMIT1,
POWER_LIMIT2,
+ POWER_LIMIT4,
FW_LOCK,
PL1_ENABLE, /* power limit 1, aka long term */
PL1_CLAMP, /* allow frequency to go below OS request */
PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
PL2_CLAMP,
+ PL4_ENABLE, /* power limit 4, aka max peak power */
TIME_WINDOW1, /* long term */
TIME_WINDOW2, /* short term */
@@ -65,7 +68,7 @@ struct rapl_domain_data {
unsigned long timestamp;
};
-#define NR_POWER_LIMITS (2)
+#define NR_POWER_LIMITS (3)
struct rapl_power_limit {
struct powercap_zone_constraint *constraint;
int prim_id; /* primitive ID used to enable */
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 0c494534b4d3..4735518de515 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -41,6 +41,7 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
* @xlate: provider-specific callback for mapping nodes from phandle arguments
* @dev: the device this interconnect provider belongs to
* @users: count of active users
+ * @inter_set: whether inter-provider pairs will be configured with @set
* @data: pointer to private data
*/
struct icc_provider {
@@ -53,6 +54,7 @@ struct icc_provider {
struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
struct device *dev;
int users;
+ bool inter_set;
void *data;
};
@@ -103,6 +105,7 @@ void icc_node_del(struct icc_node *node);
int icc_nodes_remove(struct icc_provider *provider);
int icc_provider_add(struct icc_provider *provider);
int icc_provider_del(struct icc_provider *provider);
+struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec);
#else
@@ -117,7 +120,7 @@ static inline struct icc_node *icc_node_create(int id)
return ERR_PTR(-ENOTSUPP);
}
-void icc_node_destroy(int id)
+static inline void icc_node_destroy(int id)
{
}
@@ -126,16 +129,16 @@ static inline int icc_link_create(struct icc_node *node, const int dst_id)
return -ENOTSUPP;
}
-int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
return -ENOTSUPP;
}
-void icc_node_add(struct icc_node *node, struct icc_provider *provider)
+static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
}
-void icc_node_del(struct icc_node *node)
+static inline void icc_node_del(struct icc_node *node)
{
}
@@ -154,6 +157,11 @@ static inline int icc_provider_del(struct icc_provider *provider)
return -ENOTSUPP;
}
+static inline struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
#endif /* CONFIG_INTERCONNECT */
#endif /* __LINUX_INTERCONNECT_PROVIDER_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5db970b6615a..f9aee3538461 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -585,6 +585,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
/* Tasklets --- multithreaded analogue of BHs.
+ This API is deprecated. Please consider using threaded IRQs instead:
+ https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
+
   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.
@@ -608,16 +611,42 @@ struct tasklet_struct
struct tasklet_struct *next;
unsigned long state;
atomic_t count;
- void (*func)(unsigned long);
+ bool use_callback;
+ union {
+ void (*func)(unsigned long data);
+ void (*callback)(struct tasklet_struct *t);
+ };
unsigned long data;
};
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
+
+#define DECLARE_TASKLET_DISABLED(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
+ container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+#define DECLARE_TASKLET_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .func = _func, \
+}
+
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .func = _func, \
+}
enum
{
@@ -686,6 +715,8 @@ extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
+extern void tasklet_setup(struct tasklet_struct *t,
+ void (*callback)(struct tasklet_struct *));
/*
* Autoprobing for irqs:
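
Editor's note: with the callback-based API, the tasklet handler receives a pointer to the tasklet itself and recovers the driver state via from_tasklet(); the old unsigned-long data cookie is no longer needed. A sketch with an invented driver structure:

struct my_dev {
	struct tasklet_struct bh;
	/* ... device state ... */
};

static void my_dev_bh(struct tasklet_struct *t)
{
	struct my_dev *md = from_tasklet(md, t, bh);

	/* bottom-half work on md */
}

static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *md = data;

	tasklet_schedule(&md->bh);
	return IRQ_HANDLED;
}

static void my_dev_init(struct my_dev *md)
{
	tasklet_setup(&md->bh, my_dev_bh);
}
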
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index ae21b72cce85..f32522bb3aa5 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -57,7 +57,7 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#ifndef ioread64_hi_lo
#define ioread64_hi_lo ioread64_hi_lo
-static inline u64 ioread64_hi_lo(void __iomem *addr)
+static inline u64 ioread64_hi_lo(const void __iomem *addr)
{
u32 low, high;
@@ -79,7 +79,7 @@ static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
#ifndef ioread64be_hi_lo
#define ioread64be_hi_lo ioread64be_hi_lo
-static inline u64 ioread64be_hi_lo(void __iomem *addr)
+static inline u64 ioread64be_hi_lo(const void __iomem *addr)
{
u32 low, high;
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index faaa842dbdb9..448a21435dba 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -57,7 +57,7 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
#ifndef ioread64_lo_hi
#define ioread64_lo_hi ioread64_lo_hi
-static inline u64 ioread64_lo_hi(void __iomem *addr)
+static inline u64 ioread64_lo_hi(const void __iomem *addr)
{
u32 low, high;
@@ -79,7 +79,7 @@ static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
#ifndef ioread64be_lo_hi
#define ioread64be_lo_hi ioread64be_lo_hi
-static inline u64 ioread64be_lo_hi(void __iomem *addr)
+static inline u64 ioread64be_lo_hi(const void __iomem *addr)
{
u32 low, high;
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 53d53c6c2be9..23285ba645db 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -155,7 +155,7 @@ struct io_pgtable_cfg {
*/
struct io_pgtable_ops {
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 5f0b7859d2eb..fee209efb756 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -31,12 +31,6 @@
* if the IOMMU page table format is equivalent.
*/
#define IOMMU_PRIV (1 << 5)
-/*
- * Non-coherent masters can use this page protection flag to set cacheable
- * memory attributes for only a transparent outer level of cache, also known as
- * the last-level or system cache.
- */
-#define IOMMU_SYS_CACHE_ONLY (1 << 6)
struct iommu_ops;
struct iommu_group;
@@ -457,22 +451,6 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
-/**
- * iommu_map_sgtable - Map the given buffer to the IOMMU domain
- * @domain: The IOMMU domain to perform the mapping
- * @iova: The start address to map the buffer
- * @sgt: The sg_table object describing the buffer
- * @prot: IOMMU protection bits
- *
- * Creates a mapping at @iova for the buffer described by a scatterlist
- * stored in the given sg_table object in the provided IOMMU domain.
- */
-static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
- unsigned long iova, struct sg_table *sgt, int prot)
-{
- return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
-}
-
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
@@ -1079,6 +1057,22 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
}
#endif /* CONFIG_IOMMU_API */
+/**
+ * iommu_map_sgtable - Map the given buffer to the IOMMU domain
+ * @domain: The IOMMU domain to perform the mapping
+ * @iova: The start address to map the buffer
+ * @sgt: The sg_table object describing the buffer
+ * @prot: IOMMU protection bits
+ *
+ * Creates a mapping at @iova for the buffer described by a scatterlist
+ * stored in the given sg_table object in the provided IOMMU domain.
+ */
+static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+ unsigned long iova, struct sg_table *sgt, int prot)
+{
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+}
+
#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
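
Editor's note: iommu_map_sgtable() is only moved outside the CONFIG_IOMMU_API block here; its usage is unchanged. A minimal sketch, with error handling trimmed and the IOVA assumed to be allocated by the caller:

static int example_map_buffer(struct iommu_domain *domain,
			      unsigned long iova, struct sg_table *sgt)
{
	size_t mapped;

	mapped = iommu_map_sgtable(domain, iova, sgt,
				   IOMMU_READ | IOMMU_WRITE);

	return mapped ? 0 : -ENOMEM;
}
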
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 2cb445a8fc9e..a44789d027cc 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -223,7 +223,7 @@ struct ipv6_pinfo {
/*
* Packed in 16bits.
- * Omit one shift by by putting the signed field at MSB.
+ * Omit one shift by putting the signed field at MSB.
*/
#if defined(__BIG_ENDIAN_BITFIELD)
__s16 hop_limit:9;
@@ -283,6 +283,7 @@ struct ipv6_pinfo {
autoflowlabel:1,
autoflowlabel_set:1,
mc_all:1,
+ recverr_rfc4884:1,
rtalert_isolate:1;
__u8 min_hopcount;
__u8 tclass;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8d5bc2c237d7..1b7f4dfee35b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -213,6 +213,8 @@ struct irq_data {
* required
* IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
* from actual interrupt context.
+ * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
+ * irq_chip::irq_set_affinity() when deactivated.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -237,6 +239,7 @@ enum {
IRQD_CAN_RESERVE = (1 << 26),
IRQD_MSI_NOMASK_QUIRK = (1 << 27),
IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
+ IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -421,6 +424,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
}
+static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+}
+
+static inline bool irqd_affinity_on_activate(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
+}
+
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 950e4b2458f0..67351aac65ef 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -12,7 +12,9 @@
#define _LINUX_IRQCHIP_H
#include <linux/acpi.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
/*
* This macro must be used by the different irqchip drivers to declare
@@ -26,6 +28,28 @@
*/
#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+extern int platform_irqchip_probe(struct platform_device *pdev);
+
+#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \
+static const struct of_device_id drv_name##_irqchip_match_table[] = {
+
+#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, .data = fn },
+
+#define IRQCHIP_PLATFORM_DRIVER_END(drv_name) \
+ {}, \
+}; \
+MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \
+static struct platform_driver drv_name##_driver = { \
+ .probe = platform_irqchip_probe, \
+ .driver = { \
+ .name = #drv_name, \
+ .owner = THIS_MODULE, \
+ .of_match_table = drv_name##_irqchip_match_table, \
+ .suppress_bind_attrs = true, \
+ }, \
+}; \
+builtin_platform_driver(drv_name##_driver)
+
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their version and their initialization function.
@@ -39,8 +63,9 @@
* @fn: initialization function
*/
#define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \
- ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \
- subtable, validate, data, fn)
+ ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(irqchip, name, \
+ ACPI_SIG_MADT, subtable, \
+ validate, data, fn)
#ifdef CONFIG_IRQCHIP
void irqchip_init(void);
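
Editor's note: the new macros let an irqchip be registered as a platform driver while reusing an of_irq_init-style init function. A sketch with a made-up compatible string and driver name:

static int my_intc_init(struct device_node *node, struct device_node *parent)
{
	/* map registers, create the irq domain, etc. */
	return 0;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(my_intc)
IRQCHIP_MATCH("vendor,my-intc", my_intc_init)
IRQCHIP_PLATFORM_DRIVER_END(my_intc)
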
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 6c36b6cc3edf..f6d092fdb93d 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -19,7 +19,6 @@
#define GICD_CLRSPI_NSR 0x0048
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
-#define GICD_SEIR 0x0068
#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
@@ -119,14 +118,11 @@
#define GICR_WAKER 0x0014
#define GICR_SETLPIR 0x0040
#define GICR_CLRLPIR 0x0048
-#define GICR_SEIR GICD_SEIR
#define GICR_PROPBASER 0x0070
#define GICR_PENDBASER 0x0078
#define GICR_INVLPIR 0x00A0
#define GICR_INVALLR 0x00B0
#define GICR_SYNCR 0x00C0
-#define GICR_MOVLPIR 0x0100
-#define GICR_MOVALLR 0x0110
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h
index a158b97242c7..f2b11d1df23d 100644
--- a/include/linux/irqchip/arm-vic.h
+++ b/include/linux/irqchip/arm-vic.h
@@ -9,17 +9,6 @@
#include <linux/types.h>
-#define VIC_RAW_STATUS 0x08
-#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */
-#define VIC_INT_ENABLE_CLEAR 0x14
-
-struct device_node;
-struct pt_regs;
-
-void __vic_init(void __iomem *base, int parent_irq, int irq_start,
- u32 vic_sources, u32 resume_sources, struct device_node *node);
void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-int vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
- u32 vic_sources, u32 resume_sources);
#endif
diff --git a/include/linux/irqchip/irq-bcm2836.h b/include/linux/irqchip/irq-bcm2836.h
index f224d6f9e550..ac5719d8f56b 100644
--- a/include/linux/irqchip/irq-bcm2836.h
+++ b/include/linux/irqchip/irq-bcm2836.h
@@ -30,7 +30,7 @@
*/
#define LOCAL_MAILBOX_INT_CONTROL0 0x050
/*
- * The CPU's interrupt status register. Bits are defined by the the
+ * The CPU's interrupt status register. Bits are defined by the
* LOCAL_IRQ_* bits below.
*/
#define LOCAL_IRQ_PENDING0 0x060
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
index 216e5adf80ce..dca379c0d7eb 100644
--- a/include/linux/irqchip/irq-omap-intc.h
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -2,7 +2,7 @@
/**
* irq-omap-intc.h - INTC Idle Functions
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Felipe Balbi <balbi@ti.com>
*/
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 8f2820c5e69e..5745491303e0 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -22,7 +22,6 @@ struct pt_regs;
* @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler
- * @preflow_handler: handler called before the flow handler (currently used by sparc)
* @action: the irq action chain
* @status_use_accessors: status information
* @core_internal_state__do_not_mess_with_it: core internal status information
@@ -58,9 +57,6 @@ struct irq_desc {
struct irq_data irq_data;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
-#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
- irq_preflow_handler_t preflow_handler;
-#endif
struct irqaction *action; /* IRQ action list */
unsigned int status_use_accessors;
unsigned int core_internal_state__do_not_mess_with_it;
@@ -268,15 +264,4 @@ irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
}
}
-#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
-static inline void
-__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- desc->preflow_handler = handler;
-}
-#endif
-
#endif
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 6384d2813ded..bd5c55755447 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -14,6 +14,7 @@
#include <linux/typecheck.h>
#include <asm/irqflags.h>
+#include <asm/percpu.h>
/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
@@ -31,18 +32,35 @@
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
+
+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+};
+
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
+
extern void trace_hardirqs_on_prepare(void);
extern void trace_hardirqs_off_finish(void);
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
-# define lockdep_hardirq_context(p) ((p)->hardirq_context)
+# define lockdep_hardirq_context() (this_cpu_read(hardirq_context))
# define lockdep_softirq_context(p) ((p)->softirq_context)
-# define lockdep_hardirqs_enabled(p) ((p)->hardirqs_enabled)
+# define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled))
# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
-# define lockdep_hardirq_enter() \
-do { \
- if (!current->hardirq_context++) \
- current->hardirq_threaded = 0; \
+# define lockdep_hardirq_enter() \
+do { \
+ if (this_cpu_inc_return(hardirq_context) == 1) \
+ current->hardirq_threaded = 0; \
} while (0)
# define lockdep_hardirq_threaded() \
do { \
@@ -50,7 +68,7 @@ do { \
} while (0)
# define lockdep_hardirq_exit() \
do { \
- current->hardirq_context--; \
+ this_cpu_dec(hardirq_context); \
} while (0)
# define lockdep_softirq_enter() \
do { \
@@ -104,9 +122,9 @@ do { \
# define trace_hardirqs_off_finish() do { } while (0)
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
-# define lockdep_hardirq_context(p) 0
+# define lockdep_hardirq_context() 0
# define lockdep_softirq_context(p) 0
-# define lockdep_hardirqs_enabled(p) 0
+# define lockdep_hardirqs_enabled() 0
# define lockdep_softirqs_enabled(p) 0
# define lockdep_hardirq_enter() do { } while (0)
# define lockdep_hardirq_threaded() do { } while (0)
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index 1e6f4e7123d6..c30f454a9518 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -10,6 +10,5 @@
struct irq_desc;
struct irq_data;
typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
-typedef void (*irq_preflow_handler_t)(struct irq_data *data);
#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d56128df2aff..08f904943ab2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -27,6 +27,7 @@
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/bit_spinlock.h>
+#include <linux/blkdev.h>
#include <crypto/hash.h>
#endif
@@ -1380,7 +1381,7 @@ extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
extern int jbd2_journal_invalidatepage(journal_t *,
struct page *, unsigned int, unsigned int);
-extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page);
extern int jbd2_journal_stop(handle_t *);
extern int jbd2_journal_flush (journal_t *);
extern void jbd2_journal_lock_updates (journal_t *);
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index ba2f6a9776b6..19ddd43aee68 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
*
- * http://burtleburtle.net/bob/hash/
+ * https://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 3526c0aee954..32809624d422 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -68,7 +68,7 @@
* Lacking toolchain and or architecture support, static keys fall back to a
* simple conditional branch.
*
- * Additional babbling in: Documentation/static-keys.txt
+ * Additional babbling in: Documentation/staging/static-keys.rst
*/
#ifndef __ASSEMBLY__
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 82522e996c76..087fba34b209 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -38,7 +38,6 @@ extern void kasan_disable_current(void);
void kasan_unpoison_shadow(const void *address, size_t size);
void kasan_unpoison_task_stack(struct task_struct *task);
-void kasan_unpoison_stack_above_sp_to(const void *watermark);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
@@ -101,7 +100,6 @@ void kasan_restore_multi_shot(bool enabled);
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
-static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
@@ -174,11 +172,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
+void kasan_record_aux_stack(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
+static inline void kasan_record_aux_stack(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 7b0b9c44f5f3..c5f6c1dcf7e3 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -337,11 +337,13 @@ static inline void __kcsan_disable_current(void) { }
* release_for_reuse(obj);
* }
*
- * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
- * checking if a clear scope where no concurrent accesses are expected exists.
+ * Note:
*
- * Note: For cases where the object is freed, `KASAN <kasan.html>`_ is a better
- * fit to detect use-after-free bugs.
+ * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
+ * checking if a clear scope where no concurrent accesses are expected exists.
+ *
+ * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
+ * fit to detect use-after-free bugs.
*
* @var: variable to assert on
*/
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 82d91547d122..500def620d8f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -17,7 +17,6 @@
#include <asm/byteorder.h>
#include <asm/div64.h>
#include <uapi/linux/kernel.h>
-#include <asm/div64.h>
#define STACK_MAGIC 0xdeadbeef
@@ -34,6 +33,7 @@
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
/* generic data direction definitions */
@@ -321,8 +321,7 @@ void panic(const char *fmt, ...) __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
-void print_oops_end_marker(void);
-extern int oops_may_print(void);
+extern bool oops_may_print(void);
void do_exit(long error_code) __noreturn;
void complete_and_exit(struct completion *, long) __noreturn;
@@ -346,7 +345,7 @@ int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the simple_strtoull. Return code must be checked.
+ * Preferred over simple_strtoul(). Return code must be checked.
*/
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
@@ -374,7 +373,7 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the simple_strtoull. Return code must be checked.
+ * Preferred over simple_strtol(). Return code must be checked.
*/
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
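
Editor's note: the documentation change above amounts to "always test the return value instead of parsing blindly as with simple_strtoul()". A typical, purely illustrative sysfs store handler:

static ssize_t threshold_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);	/* return code must be checked */
	if (ret)
		return ret;

	/* ... apply val ... */
	return count;
}
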
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index ea67910ae6b7..9e93bef52968 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -183,17 +183,24 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len);
-void * __weak arch_kexec_kernel_image_load(struct kimage *image);
-int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
-int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
+/* Architectures may override the below functions */
+int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len);
+void *arch_kexec_kernel_image_load(struct kimage *image);
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab);
+int arch_kexec_apply_relocations(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab);
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#ifdef CONFIG_KEXEC_SIG
+int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
+ unsigned long buf_len);
+#endif
+int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 6cba088bee24..ea30529fba08 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -59,7 +59,6 @@ enum kobject_action {
KOBJ_OFFLINE,
KOBJ_BIND,
KOBJ_UNBIND,
- KOBJ_MAX
};
struct kobject {
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 6adf90f248d7..9be1bff4f586 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -227,7 +227,6 @@ extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
-extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
@@ -242,6 +241,7 @@ struct kprobe_insn_cache {
struct mutex mutex;
void *(*alloc)(void); /* allocate insn page */
void (*free)(void *); /* free insn page */
+ const char *sym; /* symbol for insn pages */
struct list_head pages; /* list of kprobe_insn_page */
size_t insn_size; /* size of instruction slot */
int nr_garbage;
@@ -272,6 +272,10 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \
}
+#define KPROBE_INSN_PAGE_SYM "kprobe_insn_page"
+#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page"
+int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
+ unsigned long *value, char *type, char *sym);
#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name) \
static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
@@ -377,6 +381,11 @@ void dump_kprobe(struct kprobe *kp);
void *alloc_insn_page(void);
void free_insn_page(void *page);
+int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *sym);
+
+int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
+ char *type, char *sym);
#else /* !CONFIG_KPROBES: */
static inline int kprobes_built_in(void)
@@ -439,6 +448,11 @@ static inline bool within_kprobe_blacklist(unsigned long addr)
{
return true;
}
+static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *sym)
+{
+ return -ERANGE;
+}
#endif /* CONFIG_KPROBES */
static inline int disable_kretprobe(struct kretprobe *rp)
{
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 42d2e6ac35f2..a12b5523cc18 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -23,6 +23,7 @@
#include <linux/time.h>
#include <linux/jiffies.h>
+#include <asm/bug.h>
/* Nanosecond scalar representation for kernel time values */
typedef s64 ktime_t;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d564855243d8..a23076765b4c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -211,8 +211,8 @@ struct kvm_async_pf {
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
- unsigned long hva, struct kvm_arch_async_pf *arch);
+bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
@@ -774,6 +774,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
@@ -816,6 +817,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
+int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
+void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
+void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
+#endif
+
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
struct kvm_vcpu *except,
unsigned long *vcpu_bitmap, cpumask_var_t tmp);
@@ -1439,4 +1447,12 @@ int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
uintptr_t data, const char *name,
struct task_struct **thread_ptr);
+#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
+{
+ vcpu->run->exit_reason = KVM_EXIT_INTR;
+ vcpu->stat.signal_exits++;
+}
+#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+
#endif
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index dc1da020305b..dac047abdba7 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -42,7 +42,7 @@ struct kvm_kernel_irqfd {
wait_queue_entry_t wait;
/* Update side is protected by irqfds.lock */
struct kvm_kernel_irq_routing_entry irq_entry;
- seqcount_t irq_entry_sc;
+ seqcount_spinlock_t irq_entry_sc;
/* Used for level IRQ fast-path */
int gsi;
struct work_struct inject;
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 68e84cf42a3f..a7580f69dda0 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -20,6 +20,8 @@ enum kvm_mr_change;
#include <linux/types.h>
+#include <asm/kvm_types.h>
+
/*
* Address types:
*
@@ -58,4 +60,21 @@ struct gfn_to_pfn_cache {
bool dirty;
};
+#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+/*
+ * Memory caches are used to preallocate memory ahead of various MMU flows,
+ * e.g. page fault handlers. Gracefully handling allocation failures deep in
+ * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
+ * holding MMU locks. Note, these caches act more like prefetch buffers than
+ * classical caches, i.e. objects are not returned to the cache on being freed.
+ */
+struct kvm_mmu_memory_cache {
+ int nobjs;
+ gfp_t gfp_zero;
+ struct kmem_cache *kmem_cache;
+ void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
+};
+#endif
+
+
#endif /* __KVM_TYPES_H__ */
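
Editor's note: for architectures defining KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, the intended flow is to top up the cache where sleeping is allowed and draw from it under the MMU lock. A sketch; the vcpu->arch field name and the top-up count are assumptions for illustration:

static int example_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_memory_cache *mc = &vcpu->arch.mmu_page_cache;
	void *pt_page;
	int r;

	r = kvm_mmu_topup_memory_cache(mc, 4);	/* may sleep / reclaim */
	if (r)
		return r;

	/* ... with the MMU lock held ... */
	pt_page = kvm_mmu_memory_cache_alloc(mc);	/* cannot fail after topup */

	/* ... install the new page table page ... */
	return 0;
}
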
diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h
new file mode 100644
index 000000000000..5116f9a866cc
--- /dev/null
+++ b/include/linux/led-class-multicolor.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* LED Multicolor class interface
+ * Copyright (C) 2019-20 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef _LINUX_MULTICOLOR_LEDS_H_INCLUDED
+#define _LINUX_MULTICOLOR_LEDS_H_INCLUDED
+
+#include <linux/leds.h>
+#include <dt-bindings/leds/common.h>
+
+struct mc_subled {
+ unsigned int color_index;
+ unsigned int brightness;
+ unsigned int intensity;
+ unsigned int channel;
+};
+
+struct led_classdev_mc {
+ /* led class device */
+ struct led_classdev led_cdev;
+ unsigned int num_colors;
+
+ struct mc_subled *subled_info;
+};
+
+static inline struct led_classdev_mc *lcdev_to_mccdev(
+ struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct led_classdev_mc, led_cdev);
+}
+
+#if IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR)
+/**
+ * led_classdev_multicolor_register_ext - register a new object of led_classdev
+ * class with support for multicolor LEDs
+ * @parent: the multicolor LED to register
+ * @mcled_cdev: the led_classdev_mc structure for this device
+ * @init_data: the LED class multicolor device initialization data
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data);
+
+static inline int led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
+}
+
+/**
+ * led_classdev_multicolor_unregister - unregisters an object of led_classdev
+ * class with support for multicolor LEDs
+ * @mcled_cdev: the multicolor LED to unregister
+ *
+ * Unregisters an object previously registered via
+ * led_classdev_multicolor_register().
+ */
+void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev);
+
+/* Calculate brightness for the monochrome LED cluster */
+int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
+ enum led_brightness brightness);
+
+int devm_led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data);
+
+static inline int devm_led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev,
+ NULL);
+}
+
+void devm_led_classdev_multicolor_unregister(struct device *parent,
+ struct led_classdev_mc *mcled_cdev);
+#else
+
+static inline int led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data)
+{
+ return -EINVAL;
+}
+
+static inline int led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
+}
+
+static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {};
+static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
+ enum led_brightness brightness)
+{
+ return -EINVAL;
+}
+
+static inline int devm_led_classdev_multicolor_register_ext(struct device *parent,
+ struct led_classdev_mc *mcled_cdev,
+ struct led_init_data *init_data)
+{
+ return -EINVAL;
+}
+
+static inline int devm_led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev,
+ NULL);
+}
+
+static inline void devm_led_classdev_multicolor_unregister(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{};
+
+#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */
+#endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */
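
Editor's note: a sketch of how a driver could register a three-channel RGB cluster with the new class; all driver names are invented and hardware access is only hinted at in comments:

static int my_rgb_set(struct led_classdev *cdev, enum led_brightness b)
{
	struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
	int i;

	led_mc_calc_color_components(mc, b);
	for (i = 0; i < mc->num_colors; i++) {
		/* write mc->subled_info[i].brightness to channel i */
	}
	return 0;
}

static int my_rgb_probe(struct device *dev)
{
	struct led_classdev_mc *mc;
	struct mc_subled *subled;

	mc = devm_kzalloc(dev, sizeof(*mc), GFP_KERNEL);
	subled = devm_kcalloc(dev, 3, sizeof(*subled), GFP_KERNEL);
	if (!mc || !subled)
		return -ENOMEM;

	subled[0].color_index = LED_COLOR_ID_RED;
	subled[1].color_index = LED_COLOR_ID_GREEN;
	subled[2].color_index = LED_COLOR_ID_BLUE;

	mc->subled_info = subled;
	mc->num_colors = 3;
	mc->led_cdev.brightness_set_blocking = my_rgb_set;

	return devm_led_classdev_multicolor_register(dev, mc);
}
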
diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h
index 5eb111f38803..420b61e5a213 100644
--- a/include/linux/leds-ti-lmu-common.h
+++ b/include/linux/leds-ti-lmu-common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
// TI LMU Common Core
-// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+// Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
#ifndef _TI_LMU_COMMON_H_
#define _TI_LMU_COMMON_H_
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 2451962d1ec5..6a8d6409c993 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -57,6 +57,10 @@ struct led_init_data {
bool devname_mandatory;
};
+struct led_hw_trigger_type {
+ int dummy;
+};
+
struct led_classdev {
const char *name;
enum led_brightness brightness;
@@ -141,6 +145,9 @@ struct led_classdev {
void *trigger_data;
/* true if activated - deactivate routine uses it to do cleanup */
bool activated;
+
+ /* LEDs that have private triggers have this set */
+ struct led_hw_trigger_type *trigger_type;
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
@@ -345,6 +352,9 @@ struct led_trigger {
int (*activate)(struct led_classdev *led_cdev);
void (*deactivate)(struct led_classdev *led_cdev);
+ /* LED-private triggers have this set */
+ struct led_hw_trigger_type *trigger_type;
+
/* LEDs under control by this trigger (for simple triggers) */
rwlock_t leddev_list_lock;
struct list_head led_cdevs;
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 18da4059be09..01f251b6e36c 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -76,8 +76,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct device_node;
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
- unsigned long bus_dsm_mask;
unsigned long cmd_mask;
+ unsigned long dimm_family_mask;
+ unsigned long bus_family_mask;
struct module *module;
char *provider_name;
struct device_node *of_node;
@@ -85,6 +86,7 @@ struct nvdimm_bus_descriptor {
int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *data);
+ const struct nvdimm_bus_fw_ops *fw_ops;
};
struct nd_cmd_desc {
@@ -199,6 +201,49 @@ struct nvdimm_security_ops {
int (*query_overwrite)(struct nvdimm *nvdimm);
};
+enum nvdimm_fwa_state {
+ NVDIMM_FWA_INVALID,
+ NVDIMM_FWA_IDLE,
+ NVDIMM_FWA_ARMED,
+ NVDIMM_FWA_BUSY,
+ NVDIMM_FWA_ARM_OVERFLOW,
+};
+
+enum nvdimm_fwa_trigger {
+ NVDIMM_FWA_ARM,
+ NVDIMM_FWA_DISARM,
+};
+
+enum nvdimm_fwa_capability {
+ NVDIMM_FWA_CAP_INVALID,
+ NVDIMM_FWA_CAP_NONE,
+ NVDIMM_FWA_CAP_QUIESCE,
+ NVDIMM_FWA_CAP_LIVE,
+};
+
+enum nvdimm_fwa_result {
+ NVDIMM_FWA_RESULT_INVALID,
+ NVDIMM_FWA_RESULT_NONE,
+ NVDIMM_FWA_RESULT_SUCCESS,
+ NVDIMM_FWA_RESULT_NOTSTAGED,
+ NVDIMM_FWA_RESULT_NEEDRESET,
+ NVDIMM_FWA_RESULT_FAIL,
+};
+
+struct nvdimm_bus_fw_ops {
+ enum nvdimm_fwa_state (*activate_state)
+ (struct nvdimm_bus_descriptor *nd_desc);
+ enum nvdimm_fwa_capability (*capability)
+ (struct nvdimm_bus_descriptor *nd_desc);
+ int (*activate)(struct nvdimm_bus_descriptor *nd_desc);
+};
+
+struct nvdimm_fw_ops {
+ enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm);
+ enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm);
+ int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
+};
+
void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
@@ -224,14 +269,15 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq, const char *dimm_id,
- const struct nvdimm_security_ops *sec_ops);
+ const struct nvdimm_security_ops *sec_ops,
+ const struct nvdimm_fw_ops *fw_ops);
static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
unsigned long flags, unsigned long cmd_mask, int num_flush,
struct resource *flush_wpq)
{
return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
- cmd_mask, num_flush, flush_wpq, NULL, NULL);
+ cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
}
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ee8ec2e68055..1db223710b28 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -631,7 +631,6 @@ static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
return last;
}
-typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
int flags);
@@ -650,7 +649,7 @@ struct nvm_tgt_type {
int flags;
/* target entry points */
- nvm_tgt_make_rq_fn *make_rq;
+ const struct block_device_operations *bops;
nvm_tgt_capacity_fn *capacity;
/* module-specific init/teardown */
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index c664c27a29a0..f8397f300fcd 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -82,6 +82,12 @@ static inline int linkmode_equal(const unsigned long *src1,
return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static inline int linkmode_intersects(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
static inline int linkmode_subset(const unsigned long *src1,
const unsigned long *src2)
{
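
Editor's note: linkmode_intersects() complements linkmode_equal()/linkmode_subset() and answers whether two link-mode masks share any bit. A sketch testing for a common pause mode (assumes linux/ethtool.h for the mask declaration):

static bool example_advertises_pause(const unsigned long *advertising)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(pause) = { 0, };

	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, pause);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, pause);

	return linkmode_intersects(advertising, pause);
}
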
diff --git a/include/linux/list.h b/include/linux/list.h
index aff44d34f4e4..0d0d17a10d25 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -283,6 +283,24 @@ static inline int list_empty(const struct list_head *head)
}
/**
+ * list_del_init_careful - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ entry->prev = entry;
+ smp_store_release(&entry->next, entry);
+}
+
+/**
* list_empty_careful - tests whether a list is empty and not being modified
* @head: the list to test
*
@@ -297,7 +315,7 @@ static inline int list_empty(const struct list_head *head)
*/
static inline int list_empty_careful(const struct list_head *head)
{
- struct list_head *next = head->next;
+ struct list_head *next = smp_load_acquire(&head->next);
return (next == head) && (next == head->prev);
}
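
Editor's note: the new helper pairs with list_empty_careful(): the release store inside list_del_init_careful() and the acquire load inside list_empty_careful() guarantee that stores made before the delete are visible once the emptiness test succeeds. A sketch with an invented waiter structure:

struct waiter {
	struct list_head link;
	int result;
};

/* Publisher: fill in the result, then remove the waiter from the queue.
 * The release inside list_del_init_careful() orders the result store
 * before the list update. */
static void complete_waiter(struct waiter *w, int result)
{
	w->result = result;
	list_del_init_careful(&w->link);
}

/* Consumer fast path: if the entry is already off the list, the acquire
 * inside list_empty_careful() guarantees w->result is visible. */
static bool waiter_done(struct waiter *w, int *result)
{
	if (!list_empty_careful(&w->link))
		return false;

	*result = w->result;
	return true;
}
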
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 8fce5c98a4b0..62a382d1845b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -10,33 +10,16 @@
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H
+#include <linux/lockdep_types.h>
+#include <linux/smp.h>
+#include <asm/percpu.h>
+
struct task_struct;
-struct lockdep_map;
/* for sysctl */
extern int prove_locking;
extern int lock_stat;
-#define MAX_LOCKDEP_SUBCLASSES 8UL
-
-#include <linux/types.h>
-
-enum lockdep_wait_type {
- LD_WAIT_INV = 0, /* not checked, catch all */
-
- LD_WAIT_FREE, /* wait free, rcu etc.. */
- LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
-
-#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
- LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
-#else
- LD_WAIT_CONFIG = LD_WAIT_SPIN,
-#endif
- LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */
-
- LD_WAIT_MAX, /* must be last */
-};
-
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
@@ -44,147 +27,6 @@ enum lockdep_wait_type {
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
-/*
- * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
- * the total number of states... :-(
- */
-#define XXX_LOCK_USAGE_STATES (1+2*4)
-
-/*
- * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
- * cached in the instance of lockdep_map
- *
- * Currently main class (subclass == 0) and signle depth subclass
- * are cached in lockdep_map. This optimization is mainly targeting
- * on rq->lock. double_rq_lock() acquires this highly competitive with
- * single depth.
- */
-#define NR_LOCKDEP_CACHING_CLASSES 2
-
-/*
- * A lockdep key is associated with each lock object. For static locks we use
- * the lock address itself as the key. Dynamically allocated lock objects can
- * have a statically or dynamically allocated key. Dynamically allocated lock
- * keys must be registered before being used and must be unregistered before
- * the key memory is freed.
- */
-struct lockdep_subclass_key {
- char __one_byte;
-} __attribute__ ((__packed__));
-
-/* hash_entry is used to keep track of dynamically allocated keys. */
-struct lock_class_key {
- union {
- struct hlist_node hash_entry;
- struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
- };
-};
-
-extern struct lock_class_key __lockdep_no_validate__;
-
-struct lock_trace;
-
-#define LOCKSTAT_POINTS 4
-
-/*
- * The lock-class itself. The order of the structure members matters.
- * reinit_class() zeroes the key member and all subsequent members.
- */
-struct lock_class {
- /*
- * class-hash:
- */
- struct hlist_node hash_entry;
-
- /*
- * Entry in all_lock_classes when in use. Entry in free_lock_classes
- * when not in use. Instances that are being freed are on one of the
- * zapped_classes lists.
- */
- struct list_head lock_entry;
-
- /*
- * These fields represent a directed graph of lock dependencies,
- * to every node we attach a list of "forward" and a list of
- * "backward" graph nodes.
- */
- struct list_head locks_after, locks_before;
-
- const struct lockdep_subclass_key *key;
- unsigned int subclass;
- unsigned int dep_gen_id;
-
- /*
- * IRQ/softirq usage tracking bits:
- */
- unsigned long usage_mask;
- const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES];
-
- /*
- * Generation counter, when doing certain classes of graph walking,
- * to ensure that we check one node only once:
- */
- int name_version;
- const char *name;
-
- short wait_type_inner;
- short wait_type_outer;
-
-#ifdef CONFIG_LOCK_STAT
- unsigned long contention_point[LOCKSTAT_POINTS];
- unsigned long contending_point[LOCKSTAT_POINTS];
-#endif
-} __no_randomize_layout;
-
-#ifdef CONFIG_LOCK_STAT
-struct lock_time {
- s64 min;
- s64 max;
- s64 total;
- unsigned long nr;
-};
-
-enum bounce_type {
- bounce_acquired_write,
- bounce_acquired_read,
- bounce_contended_write,
- bounce_contended_read,
- nr_bounce_types,
-
- bounce_acquired = bounce_acquired_write,
- bounce_contended = bounce_contended_write,
-};
-
-struct lock_class_stats {
- unsigned long contention_point[LOCKSTAT_POINTS];
- unsigned long contending_point[LOCKSTAT_POINTS];
- struct lock_time read_waittime;
- struct lock_time write_waittime;
- struct lock_time read_holdtime;
- struct lock_time write_holdtime;
- unsigned long bounces[nr_bounce_types];
-};
-
-struct lock_class_stats lock_stats(struct lock_class *class);
-void clear_lock_stats(struct lock_class *class);
-#endif
-
-/*
- * Map the lock object (the lock instance) to the lock-class object.
- * This is embedded into specific lock instances:
- */
-struct lockdep_map {
- struct lock_class_key *key;
- struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
- const char *name;
- short wait_type_outer; /* can be taken in this context */
- short wait_type_inner; /* presents this context */
-#ifdef CONFIG_LOCK_STAT
- int cpu;
- unsigned long ip;
-#endif
-};
-
static inline void lockdep_copy_map(struct lockdep_map *to,
struct lockdep_map *from)
{
@@ -440,8 +282,6 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
-struct pin_cookie { unsigned int val; };
-
#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
@@ -520,10 +360,6 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
-/*
- * The class key takes no space if lockdep is disabled:
- */
-struct lock_class_key { };
static inline void lockdep_register_key(struct lock_class_key *key)
{
@@ -533,11 +369,6 @@ static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}
-/*
- * The lockdep_map takes no space if lockdep is disabled:
- */
-struct lockdep_map { };
-
#define lockdep_depth(tsk) (0)
#define lockdep_is_held_type(l, r) (1)
@@ -549,8 +380,6 @@ struct lockdep_map { };
#define lockdep_recursing(tsk) (0)
-struct pin_cookie { };
-
#define NIL_COOKIE (struct pin_cookie){ }
#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; })
@@ -703,38 +532,58 @@ do { \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-#define lockdep_assert_irqs_enabled() do { \
- WARN_ONCE(debug_locks && !current->lockdep_recursion && \
- !current->hardirqs_enabled, \
- "IRQs not enabled as expected\n"); \
- } while (0)
+DECLARE_PER_CPU(int, hardirqs_enabled);
+DECLARE_PER_CPU(int, hardirq_context);
-#define lockdep_assert_irqs_disabled() do { \
- WARN_ONCE(debug_locks && !current->lockdep_recursion && \
- current->hardirqs_enabled, \
- "IRQs not disabled as expected\n"); \
- } while (0)
+#define lockdep_assert_irqs_enabled() \
+do { \
+ WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled)); \
+} while (0)
-#define lockdep_assert_in_irq() do { \
- WARN_ONCE(debug_locks && !current->lockdep_recursion && \
- !current->hardirq_context, \
- "Not in hardirq as expected\n"); \
- } while (0)
+#define lockdep_assert_irqs_disabled() \
+do { \
+ WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled)); \
+} while (0)
+
+#define lockdep_assert_in_irq() \
+do { \
+ WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context)); \
+} while (0)
+
+#define lockdep_assert_preemption_enabled() \
+do { \
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
+ debug_locks && \
+ (preempt_count() != 0 || \
+ !this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
+#define lockdep_assert_preemption_disabled() \
+do { \
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
+ debug_locks && \
+ (preempt_count() == 0 && \
+ this_cpu_read(hardirqs_enabled))); \
+} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)
+
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
+
+# define lockdep_assert_preemption_enabled() do { } while (0)
+# define lockdep_assert_preemption_disabled() do { } while (0)
#endif
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
# define lockdep_assert_RT_in_threaded_ctx() do { \
WARN_ONCE(debug_locks && !current->lockdep_recursion && \
- current->hardirq_context && \
+ lockdep_hardirq_context() && \
!(current->hardirq_threaded || current->irq_config), \
"Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0)
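/*
 * Usage sketch (not part of the patch): with the asserts now based on the
 * per-CPU hardirqs_enabled/hardirq_context state, a per-CPU accessor can
 * document its context requirement directly. Names below are hypothetical
 * and <linux/percpu.h> is assumed to be included.
 */
static DEFINE_PER_CPU(unsigned long, frob_count);

static void frob_count_inc(void)
{
	lockdep_assert_preemption_disabled();	/* caller must not migrate */
	__this_cpu_inc(frob_count);
}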
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
new file mode 100644
index 000000000000..bb35b449f533
--- /dev/null
+++ b/include/linux/lockdep_types.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime locking correctness validator
+ *
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * see Documentation/locking/lockdep-design.rst for more details.
+ */
+#ifndef __LINUX_LOCKDEP_TYPES_H
+#define __LINUX_LOCKDEP_TYPES_H
+
+#include <linux/types.h>
+
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+enum lockdep_wait_type {
+ LD_WAIT_INV = 0, /* not checked, catch all */
+
+ LD_WAIT_FREE, /* wait free, rcu etc.. */
+ LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+ LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
+#else
+ LD_WAIT_CONFIG = LD_WAIT_SPIN,
+#endif
+ LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */
+
+ LD_WAIT_MAX, /* must be last */
+};
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
+ */
+#define XXX_LOCK_USAGE_STATES (1+2*4)
+
+/*
+ * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+ * cached in the instance of lockdep_map
+ *
+ * Currently main class (subclass == 0) and single depth subclass
+ * are cached in lockdep_map. This optimization is mainly targeting
+ * on rq->lock. double_rq_lock() acquires this highly competitive with
+ * single depth.
+ */
+#define NR_LOCKDEP_CACHING_CLASSES 2
+
+/*
+ * A lockdep key is associated with each lock object. For static locks we use
+ * the lock address itself as the key. Dynamically allocated lock objects can
+ * have a statically or dynamically allocated key. Dynamically allocated lock
+ * keys must be registered before being used and must be unregistered before
+ * the key memory is freed.
+ */
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+/* hash_entry is used to keep track of dynamically allocated keys. */
+struct lock_class_key {
+ union {
+ struct hlist_node hash_entry;
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+ };
+};
+
+extern struct lock_class_key __lockdep_no_validate__;
+
+struct lock_trace;
+
+#define LOCKSTAT_POINTS 4
+
+/*
+ * The lock-class itself. The order of the structure members matters.
+ * reinit_class() zeroes the key member and all subsequent members.
+ */
+struct lock_class {
+ /*
+ * class-hash:
+ */
+ struct hlist_node hash_entry;
+
+ /*
+ * Entry in all_lock_classes when in use. Entry in free_lock_classes
+ * when not in use. Instances that are being freed are on one of the
+ * zapped_classes lists.
+ */
+ struct list_head lock_entry;
+
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
+ const struct lockdep_subclass_key *key;
+ unsigned int subclass;
+ unsigned int dep_gen_id;
+
+ /*
+ * IRQ/softirq usage tracking bits:
+ */
+ unsigned long usage_mask;
+ const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES];
+
+ /*
+ * Generation counter, when doing certain classes of graph walking,
+ * to ensure that we check one node only once:
+ */
+ int name_version;
+ const char *name;
+
+ short wait_type_inner;
+ short wait_type_outer;
+
+#ifdef CONFIG_LOCK_STAT
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+#endif
+} __no_randomize_layout;
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+ s64 min;
+ s64 max;
+ s64 total;
+ unsigned long nr;
+};
+
+enum bounce_type {
+ bounce_acquired_write,
+ bounce_acquired_read,
+ bounce_contended_write,
+ bounce_contended_read,
+ nr_bounce_types,
+
+ bounce_acquired = bounce_acquired_write,
+ bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+ struct lock_time read_waittime;
+ struct lock_time write_waittime;
+ struct lock_time read_holdtime;
+ struct lock_time write_holdtime;
+ unsigned long bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
+ const char *name;
+ short wait_type_outer; /* can be taken in this context */
+ short wait_type_inner; /* presents this context */
+#ifdef CONFIG_LOCK_STAT
+ int cpu;
+ unsigned long ip;
+#endif
+};
+
+struct pin_cookie { unsigned int val; };
+
+#else /* !CONFIG_LOCKDEP */
+
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+
+/*
+ * The lockdep_map takes no space if lockdep is disabled:
+ */
+struct lockdep_map { };
+
+struct pin_cookie { };
+
+#endif /* !LOCKDEP */
+
+#endif /* __LINUX_LOCKDEP_TYPES_H */
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index af998f93d256..2a8c74d99015 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -15,7 +15,7 @@
*/
/*
- * The macro LSM_HOOK is used to define the data structures required by the
+ * The macro LSM_HOOK is used to define the data structures required by
* the LSM framework using the pattern:
*
* LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 95b7c1d32062..9e2e3e63719d 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -815,7 +815,7 @@
* structure. Note that the security field was not added directly to the
* socket structure, but rather, the socket security information is stored
* in the associated inode. Typically, the inode alloc_security hook will
- * allocate and and attach security information to
+ * allocate and attach security information to
* SOCK_INODE(sock)->i_security. This hook may be used to update the
* SOCK_INODE(sock)->i_security field with additional information that
* wasn't available when the inode was allocated.
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index a4dc45fbec0a..05eea1aef5aa 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -17,6 +17,7 @@
#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
#define CMDQ_WFE_UPDATE BIT(31)
+#define CMDQ_WFE_UPDATE_VALUE BIT(16)
#define CMDQ_WFE_WAIT BIT(15)
#define CMDQ_WFE_WAIT_VALUE 0x1
@@ -59,6 +60,7 @@ enum cmdq_code {
CMDQ_CODE_JUMP = 0x10,
CMDQ_CODE_WFE = 0x20,
CMDQ_CODE_EOC = 0x40,
+ CMDQ_CODE_LOGIC = 0xa0,
};
enum cmdq_cb_status {
@@ -88,4 +90,6 @@ struct cmdq_pkt {
void *cl;
};
+u8 cmdq_get_shift_pa(struct mbox_chan *chan);
+
#endif /* __MTK_CMDQ_MAILBOX_H__ */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index af6b11d4d673..ff7b7607c8cf 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -15,10 +15,12 @@
#define MARVELL_PHY_ID_88E1149R 0x01410e50
#define MARVELL_PHY_ID_88E1240 0x01410e30
#define MARVELL_PHY_ID_88E1318S 0x01410e90
+#define MARVELL_PHY_ID_88E1340S 0x01410dc0
#define MARVELL_PHY_ID_88E1116R 0x01410e40
#define MARVELL_PHY_ID_88E1510 0x01410dd0
#define MARVELL_PHY_ID_88E1540 0x01410eb0
#define MARVELL_PHY_ID_88E1545 0x01410ea0
+#define MARVELL_PHY_ID_88E1548P 0x01410ec0
#define MARVELL_PHY_ID_88E3016 0x01410e60
#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 11a267413e8e..3381d9e33c4e 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -263,6 +263,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
}
#endif /* mul_u64_u32_div */
+u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+
#define DIV64_U64_ROUND_UP(ll, d) \
({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
@@ -279,4 +281,23 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
+/*
+ * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ *
+ * Divide signed 64bit dividend by signed 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_S64_ROUND_CLOSEST(dividend, divisor)( \
+{ \
+ s64 __x = (dividend); \
+ s32 __d = (divisor); \
+ ((__x > 0) == (__d > 0)) ? \
+ div_s64((__x + (__d / 2)), __d) : \
+ div_s64((__x - (__d / 2)), __d); \
+} \
+)
#endif /* _LINUX_MATH64_H */
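/*
 * Worked example (illustrative): DIV_S64_ROUND_CLOSEST() rounds halves away
 * from zero, following the two div_s64() branches above:
 *
 *   DIV_S64_ROUND_CLOSEST( 7,  2) == div_s64( 7 + 1,  2)  ==  4
 *   DIV_S64_ROUND_CLOSEST(-7,  2) == div_s64(-7 - 1,  2)  == -4
 *   DIV_S64_ROUND_CLOSEST(-7, -2) == div_s64(-7 + -1, -2) ==  4
 *
 * mul_u64_u64_div_u64(a, mul, div), declared above, is meant for computing
 * a * mul / div where the intermediate product may not fit in 64 bits.
 */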
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 36d2e0673d03..898cbf00332a 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -18,6 +18,7 @@
struct gpio_desc;
struct mii_bus;
+struct reset_control;
/* Multiple levels of nesting are possible. However typically this is
* limited to nested DSA like layer, a MUX layer, and the normal
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 017fae833d4a..9d925db0d355 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -77,16 +77,12 @@ struct memblock_type {
* @current_limit: physical address of the current allocation limit
* @memory: usable memory regions
* @reserved: reserved memory regions
- * @physmem: all physical memory
*/
struct memblock {
bool bottom_up; /* is bottom up direction? */
phys_addr_t current_limit;
struct memblock_type memory;
struct memblock_type reserved;
-#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
- struct memblock_type physmem;
-#endif
};
extern struct memblock memblock;
@@ -145,6 +141,30 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
+ phys_addr_t *out_start,
+ phys_addr_t *out_end)
+{
+ extern struct memblock_type physmem;
+
+ __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
+ out_start, out_end, NULL);
+}
+
+/**
+ * for_each_physmem_range - iterate through physmem areas not included in type.
+ * @i: u64 used as loop variable
+ * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_physmem_range(i, type, p_start, p_end) \
+ for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \
+ i != (u64)ULLONG_MAX; \
+ __next_physmem_range(&i, type, p_start, p_end))
+#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
+
/**
* for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
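/*
 * Usage sketch (not part of the patch): iterate over every range in the now
 * file-local physmem list; passing NULL as @type excludes nothing. The
 * function name is hypothetical.
 */
static void __init dump_physmem_ranges(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_physmem_range(i, NULL, &start, &end)
		pr_info("physmem: [%pa - %pa]\n", &start, &end);
}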
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e77197a62809..d0b036123c6a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -23,6 +23,7 @@
#include <linux/page-flags.h>
struct mem_cgroup;
+struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
@@ -31,8 +32,7 @@ struct kmem_cache;
enum memcg_stat_item {
MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
- /* XXX: why are these zone and not node counters? */
- MEMCG_KERNEL_STACK_KB,
+ MEMCG_PERCPU_B,
MEMCG_NR_STAT,
};
@@ -48,12 +48,6 @@ enum memcg_memory_event {
MEMCG_NR_MEMORY_EVENTS,
};
-enum mem_cgroup_protection {
- MEMCG_PROT_NONE,
- MEMCG_PROT_LOW,
- MEMCG_PROT_MIN,
-};
-
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
unsigned int generation;
@@ -71,8 +65,8 @@ struct mem_cgroup_id {
/*
* Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremated by the number of pages. This counter is used for
- * for trigger some periodic events. This is straightforward and better
+ * it will be incremented by the number of pages. This counter is used
+ * to trigger some periodic events. This is straightforward and better
* than using jiffies etc. to handle periodic memcg event.
*/
enum mem_cgroup_events_target {
@@ -193,6 +187,22 @@ struct memcg_cgwb_frn {
};
/*
+ * Bucket for arbitrarily byte-sized objects charged to a memory
+ * cgroup. The bucket can be reparented in one piece when the cgroup
+ * is destroyed, without having to round up the individual references
+ * of all live memory objects in the wild.
+ */
+struct obj_cgroup {
+ struct percpu_ref refcnt;
+ struct mem_cgroup *memcg;
+ atomic_t nr_charged_bytes;
+ union {
+ struct list_head list;
+ struct rcu_head rcu;
+ };
+};
+
+/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
* statistics based on the statistics developed by Rik Van Riel for clock-pro,
@@ -300,7 +310,8 @@ struct mem_cgroup {
/* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id;
enum memcg_kmem_state kmem_state;
- struct list_head kmem_caches;
+ struct obj_cgroup __rcu *objcg;
+ struct list_head objcg_list; /* list of inherited objcgs */
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -329,6 +340,13 @@ struct mem_cgroup {
extern struct mem_cgroup *root_mem_cgroup;
+static __always_inline bool memcg_stat_item_in_bytes(int idx)
+{
+ if (idx == MEMCG_PERCPU_B)
+ return true;
+ return vmstat_item_in_bytes(idx);
+}
+
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
return (memcg == root_mem_cgroup);
@@ -339,12 +357,49 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
bool in_low_reclaim)
{
if (mem_cgroup_disabled())
return 0;
+ /*
+ * There is no reclaim protection applied to a targeted reclaim.
+ * We are special casing this specific case here because
+ * mem_cgroup_protected calculation is not robust enough to keep
+ * the protection invariant for calculated effective values for
+ * parallel reclaimers with different reclaim target. This is
+ * especially a problem for tail memcgs (as they have pages on LRU)
+ * which would want to have effective values 0 for targeted reclaim
+ * but a different value for external reclaim.
+ *
+ * Example
+ * Let's have global and A's reclaim in parallel:
+ * |
+ * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
+ * |\
+ * | C (low = 1G, usage = 2.5G)
+ * B (low = 1G, usage = 0.5G)
+ *
+ * For the global reclaim
+ * A.elow = A.low
+ * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
+ * C.elow = min(C.usage, C.low)
+ *
+ * With the effective values resetting we have A reclaim
+ * A.elow = 0
+ * B.elow = B.low
+ * C.elow = C.low
+ *
+ * If the global reclaim races with A's reclaim then
+ * B.elow = C.elow = 0 (because children_low_usage > A.elow)
+ * is possible and reclaiming B would be violating the protection.
+ *
+ */
+ if (root == memcg)
+ return 0;
+
if (in_low_reclaim)
return READ_ONCE(memcg->memory.emin);
@@ -352,8 +407,36 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
READ_ONCE(memcg->memory.elow));
}
-enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
- struct mem_cgroup *memcg);
+void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg);
+
+static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
+{
+ /*
+ * The root memcg doesn't account charges, and doesn't support
+ * protection.
+ */
+ return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
+
+}
+
+static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+{
+ if (!mem_cgroup_supports_protection(memcg))
+ return false;
+
+ return READ_ONCE(memcg->memory.elow) >=
+ page_counter_read(&memcg->memory);
+}
+
+static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+{
+ if (!mem_cgroup_supports_protection(memcg))
+ return false;
+
+ return READ_ONCE(memcg->memory.emin) >=
+ page_counter_read(&memcg->memory);
+}
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
@@ -416,6 +499,33 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
return css ? container_of(css, struct mem_cgroup, css) : NULL;
}
+static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
+{
+ return percpu_ref_tryget(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_get(struct obj_cgroup *objcg)
+{
+ percpu_ref_get(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+ percpu_ref_put(&objcg->refcnt);
+}
+
+/*
+ * After the initialization objcg->memcg is always pointing at
+ * a valid memcg, but can be atomically swapped to the parent memcg.
+ *
+ * The caller must ensure that the returned memcg won't be released:
+ * e.g. acquire the rcu_read_lock or css_set_lock.
+ */
+static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
+{
+ return READ_ONCE(objcg->memcg);
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
@@ -520,7 +630,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
struct mem_cgroup_per_node *mz;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- return mz->lru_zone_size[zone_idx][lru];
+ return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
void mem_cgroup_handle_over_high(void);
@@ -679,11 +789,34 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return x;
}
+void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ int val);
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
+
void mod_memcg_obj_state(void *p, int idx, int val);
+static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+ int val)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __mod_lruvec_slab_state(p, idx, val);
+ local_irq_restore(flags);
+}
+
+static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __mod_memcg_lruvec_state(lruvec, idx, val);
+ local_irq_restore(flags);
+}
+
static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
@@ -825,16 +958,26 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
{
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
bool in_low_reclaim)
{
return 0;
}
-static inline enum mem_cgroup_protection mem_cgroup_protected(
- struct mem_cgroup *root, struct mem_cgroup *memcg)
+static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
+{
+}
+
+static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
- return MEMCG_PROT_NONE;
+ return false;
}
static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
@@ -1057,6 +1200,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
+static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+}
+
static inline void __mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
@@ -1089,6 +1237,14 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
__mod_node_page_state(page_pgdat(page), idx, val);
}
+static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+ int val)
+{
+ struct page *page = virt_to_head_page(p);
+
+ mod_node_page_state(page_pgdat(page), idx, val);
+}
+
static inline void mod_memcg_obj_state(void *p, int idx, int val)
{
}
@@ -1341,9 +1497,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
}
#endif
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
-void memcg_kmem_put_cache(struct kmem_cache *cachep);
-
#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
unsigned int nr_pages);
@@ -1351,8 +1504,12 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
+struct obj_cgroup *get_obj_cgroup_from_current(void);
+
+int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
+void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
+
extern struct static_key_false memcg_kmem_enabled_key;
-extern struct workqueue_struct *memcg_kmem_cache_wq;
extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
@@ -1368,7 +1525,19 @@ void memcg_put_cache_ids(void);
static inline bool memcg_kmem_enabled(void)
{
- return static_branch_unlikely(&memcg_kmem_enabled_key);
+ return static_branch_likely(&memcg_kmem_enabled_key);
+}
+
+static inline bool memcg_kmem_bypass(void)
+{
+ if (in_interrupt())
+ return true;
+
+ /* Allow remote memcg charging in kthread contexts. */
+ if ((!current->mm || (current->flags & PF_KTHREAD)) &&
+ !current->active_memcg)
+ return true;
+ return false;
}
static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
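/*
 * Reclaim-side sketch (not part of the patch): with the mem_cgroup_protected()
 * tri-state gone, a reclaimer lets mem_cgroup_calculate_protection() compute
 * the effective limits and then consults the new predicates. The helper and
 * parameter names below are hypothetical.
 */
static bool should_skip_memcg(struct mem_cgroup *target, struct mem_cgroup *memcg,
			      bool memcg_low_reclaim)
{
	mem_cgroup_calculate_protection(target, memcg);

	if (mem_cgroup_below_min(memcg))
		return true;		/* hard protection, never reclaim */

	if (mem_cgroup_below_low(memcg) && !memcg_low_reclaim)
		return true;		/* soft protection still honoured */

	return false;
}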
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index ea9c15b60a96..5f1c74df264d 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -6,7 +6,7 @@
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1
-
+#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
@@ -28,7 +28,7 @@ struct mm_struct;
* the process policy is used. Interrupts ignore the memory policy
* of the current process.
*
- * Locking policy for interlave:
+ * Locking policy for interleave:
* In process context there is no locking because only the process accesses
* its own state. All vma manipulation is somewhat protected by a down_read on
* mmap_lock.
@@ -152,6 +152,15 @@ extern int huge_node(struct vm_area_struct *vma,
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
const nodemask_t *mask);
+extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
+
+static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+{
+ struct mempolicy *mpol = get_task_policy(current);
+
+ return policy_nodemask(gfp, mpol);
+}
+
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
@@ -281,5 +290,10 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
static inline void mpol_put_task_policy(struct task_struct *task)
{
}
+
+static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+{
+ return NULL;
+}
#endif /* CONFIG_NUMA */
#endif
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index ab76cdd06199..4b35baa14d30 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -14,7 +14,7 @@
#define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource))
-#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _match)\
+#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, _use_of_reg, _match) \
{ \
.name = (_name), \
.resources = (_res), \
@@ -22,24 +22,32 @@
.platform_data = (_pdata), \
.pdata_size = (_pdsize), \
.of_compatible = (_compat), \
+ .of_reg = (_of_reg), \
+ .use_of_reg = (_use_of_reg), \
.acpi_match = (_match), \
.id = (_id), \
}
-#define OF_MFD_CELL(_name, _res, _pdata, _pdsize,_id, _compat) \
- MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) \
+#define OF_MFD_CELL_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL)
-#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \
- MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) \
+#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL)
-#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
- MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) \
+#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match)
-#define MFD_CELL_RES(_name, _res) \
- MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) \
+#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, NULL)
-#define MFD_CELL_NAME(_name) \
- MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) \
+#define MFD_CELL_RES(_name, _res) \
+ MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_CELL_NAME(_name) \
+ MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL)
+
+#define MFD_DEP_LEVEL_NORMAL 0
+#define MFD_DEP_LEVEL_HIGH 1
struct irq_domain;
struct property_entry;
@@ -58,6 +66,7 @@ struct mfd_cell_acpi_match {
struct mfd_cell {
const char *name;
int id;
+ int level;
int (*enable)(struct platform_device *dev);
int (*disable)(struct platform_device *dev);
@@ -78,6 +87,16 @@ struct mfd_cell {
*/
const char *of_compatible;
+ /*
+ * Address as defined in Device Tree. Used to complement 'of_compatible'
+ * (above) when matching OF nodes with devices that have identical
+ * compatible strings
+ */
+ const u64 of_reg;
+
+ /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
+ bool use_of_reg;
+
/* Matches ACPI */
const struct mfd_cell_acpi_match *acpi_match;
@@ -135,6 +154,7 @@ static inline int mfd_add_hotplug_devices(struct device *parent,
}
extern void mfd_remove_devices(struct device *parent);
+extern void mfd_remove_devices_late(struct device *parent);
extern int devm_mfd_add_devices(struct device *dev, int id,
const struct mfd_cell *cells, int n_devs,
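/*
 * Illustrative cell table (a sketch): two sub-devices of one parent share the
 * same compatible string, so the new OF_MFD_CELL_REG() variant matches each
 * cell to the DT node with the given unit address ("reg"). All names and
 * addresses below are hypothetical.
 */
static const struct mfd_cell example_cells[] = {
	OF_MFD_CELL_REG("example-pwm", NULL, NULL, 0, 0, "vendor,example-pwm", 1),
	OF_MFD_CELL_REG("example-pwm", NULL, NULL, 0, 1, "vendor,example-pwm", 2),
};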
diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
index eac48e483190..d3f126990ad0 100644
--- a/include/linux/mfd/da9055/pdata.h
+++ b/include/linux/mfd/da9055/pdata.h
@@ -35,7 +35,7 @@ struct da9055_pdata {
int *gpio_rsel;
/*
* Regulator mode control bits value (GPI offset) that
- * that controls the regulator state, 0 if not available.
+ * controls the regulator state, 0 if not available.
*/
enum gpio_select *reg_ren;
/*
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 5cd06ab26352..fa7a43f02f27 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -35,6 +35,7 @@ enum da9063_variant_codes {
PMIC_DA9063_AD = 0x3,
PMIC_DA9063_BB = 0x5,
PMIC_DA9063_CA = 0x6,
+ PMIC_DA9063_DA = 0x7,
};
/* Interrupts */
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index ba706b0e28c2..1dbabf1b3cb8 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -292,8 +292,10 @@
#define DA9063_BB_REG_GP_ID_19 0x134
/* Chip ID and variant */
-#define DA9063_REG_CHIP_ID 0x181
-#define DA9063_REG_CHIP_VARIANT 0x182
+#define DA9063_REG_DEVICE_ID 0x181
+#define DA9063_REG_VARIANT_ID 0x182
+#define DA9063_REG_CUSTOMER_ID 0x183
+#define DA9063_REG_CONFIG_ID 0x184
/*
* PMIC registers bits
@@ -929,9 +931,6 @@
#define DA9063_RTC_CLOCK 0x40
#define DA9063_OUT_32K_EN 0x80
-/* DA9063_REG_CHIP_VARIANT */
-#define DA9063_CHIP_VARIANT_SHIFT 4
-
/* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */
#define DA9063_BIO_ILIM_MASK 0x0F
#define DA9063_BMEM_ILIM_MASK 0xF0
@@ -1065,4 +1064,10 @@
#define DA9063_MON_A10_IDX_LDO9 0x04
#define DA9063_MON_A10_IDX_LDO10 0x05
+/* DA9063_REG_VARIANT_ID (addr=0x182) */
+#define DA9063_VARIANT_ID_VRC_SHIFT 0
+#define DA9063_VARIANT_ID_VRC_MASK 0x0F
+#define DA9063_VARIANT_ID_MRC_SHIFT 4
+#define DA9063_VARIANT_ID_MRC_MASK 0xF0
+
#endif /* _DA9063_REG_H */
diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h
index bbc64484c021..2cadf8897c64 100644
--- a/include/linux/mfd/hi6421-pmic.h
+++ b/include/linux/mfd/hi6421-pmic.h
@@ -5,7 +5,7 @@
* Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
* http://www.hisilicon.com
* Copyright (c) <2013-2014> Linaro Ltd.
- * http://www.linaro.org
+ * https://www.linaro.org
*
* Author: Guodong Xu <guodong.xu@linaro.org>
*/
diff --git a/include/linux/mfd/khadas-mcu.h b/include/linux/mfd/khadas-mcu.h
new file mode 100644
index 000000000000..a99ba2ed0e4e
--- /dev/null
+++ b/include/linux/mfd/khadas-mcu.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Khadas System control Microcontroller Register map
+ *
+ * Copyright (C) 2020 BayLibre SAS
+ *
+ * Author(s): Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef MFD_KHADAS_MCU_H
+#define MFD_KHADAS_MCU_H
+
+#define KHADAS_MCU_PASSWD_VEN_0_REG 0x00 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_1_REG 0x01 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_2_REG 0x02 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_3_REG 0x03 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_4_REG 0x04 /* RO */
+#define KHADAS_MCU_PASSWD_VEN_5_REG 0x05 /* RO */
+#define KHADAS_MCU_MAC_0_REG 0x06 /* RO */
+#define KHADAS_MCU_MAC_1_REG 0x07 /* RO */
+#define KHADAS_MCU_MAC_2_REG 0x08 /* RO */
+#define KHADAS_MCU_MAC_3_REG 0x09 /* RO */
+#define KHADAS_MCU_MAC_4_REG 0x0a /* RO */
+#define KHADAS_MCU_MAC_5_REG 0x0b /* RO */
+#define KHADAS_MCU_USID_0_REG 0x0c /* RO */
+#define KHADAS_MCU_USID_1_REG 0x0d /* RO */
+#define KHADAS_MCU_USID_2_REG 0x0e /* RO */
+#define KHADAS_MCU_USID_3_REG 0x0f /* RO */
+#define KHADAS_MCU_USID_4_REG 0x10 /* RO */
+#define KHADAS_MCU_USID_5_REG 0x11 /* RO */
+#define KHADAS_MCU_VERSION_0_REG 0x12 /* RO */
+#define KHADAS_MCU_VERSION_1_REG 0x13 /* RO */
+#define KHADAS_MCU_DEVICE_NO_0_REG 0x14 /* RO */
+#define KHADAS_MCU_DEVICE_NO_1_REG 0x15 /* RO */
+#define KHADAS_MCU_FACTORY_TEST_REG 0x16 /* R */
+#define KHADAS_MCU_BOOT_MODE_REG 0x20 /* RW */
+#define KHADAS_MCU_BOOT_EN_WOL_REG 0x21 /* RW */
+#define KHADAS_MCU_BOOT_EN_RTC_REG 0x22 /* RW */
+#define KHADAS_MCU_BOOT_EN_EXP_REG 0x23 /* RW */
+#define KHADAS_MCU_BOOT_EN_IR_REG 0x24 /* RW */
+#define KHADAS_MCU_BOOT_EN_DCIN_REG 0x25 /* RW */
+#define KHADAS_MCU_BOOT_EN_KEY_REG 0x26 /* RW */
+#define KHADAS_MCU_KEY_MODE_REG 0x27 /* RW */
+#define KHADAS_MCU_LED_MODE_ON_REG 0x28 /* RW */
+#define KHADAS_MCU_LED_MODE_OFF_REG 0x29 /* RW */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_REG 0x2c /* RW */
+#define KHADAS_MCU_MAC_SWITCH_REG 0x2d /* RW */
+#define KHADAS_MCU_MCU_SLEEP_MODE_REG 0x2e /* RW */
+#define KHADAS_MCU_IR_CODE1_0_REG 0x2f /* RW */
+#define KHADAS_MCU_IR_CODE1_1_REG 0x30 /* RW */
+#define KHADAS_MCU_IR_CODE1_2_REG 0x31 /* RW */
+#define KHADAS_MCU_IR_CODE1_3_REG 0x32 /* RW */
+#define KHADAS_MCU_USB_PCIE_SWITCH_REG 0x33 /* RW */
+#define KHADAS_MCU_IR_CODE2_0_REG 0x34 /* RW */
+#define KHADAS_MCU_IR_CODE2_1_REG 0x35 /* RW */
+#define KHADAS_MCU_IR_CODE2_2_REG 0x36 /* RW */
+#define KHADAS_MCU_IR_CODE2_3_REG 0x37 /* RW */
+#define KHADAS_MCU_PASSWD_USER_0_REG 0x40 /* RW */
+#define KHADAS_MCU_PASSWD_USER_1_REG 0x41 /* RW */
+#define KHADAS_MCU_PASSWD_USER_2_REG 0x42 /* RW */
+#define KHADAS_MCU_PASSWD_USER_3_REG 0x43 /* RW */
+#define KHADAS_MCU_PASSWD_USER_4_REG 0x44 /* RW */
+#define KHADAS_MCU_PASSWD_USER_5_REG 0x45 /* RW */
+#define KHADAS_MCU_USER_DATA_0_REG 0x46 /* RW 56 bytes */
+#define KHADAS_MCU_PWR_OFF_CMD_REG 0x80 /* WO */
+#define KHADAS_MCU_PASSWD_START_REG 0x81 /* WO */
+#define KHADAS_MCU_CHECK_VEN_PASSWD_REG 0x82 /* WO */
+#define KHADAS_MCU_CHECK_USER_PASSWD_REG 0x83 /* WO */
+#define KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG 0x86 /* RO */
+#define KHADAS_MCU_WOL_INIT_START_REG 0x87 /* WO */
+#define KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG 0x88 /* WO */
+
+enum {
+ KHADAS_BOARD_VIM1 = 0x1,
+ KHADAS_BOARD_VIM2,
+ KHADAS_BOARD_VIM3,
+ KHADAS_BOARD_EDGE = 0x11,
+ KHADAS_BOARD_EDGE_V,
+};
+
+/**
+ * struct khadas_mcu - Khadas MCU structure
+ * @device: device reference used for logs
+ * @regmap: register map
+ */
+struct khadas_mcu {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#endif /* MFD_KHADAS_MCU_H */
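/*
 * Access sketch (not part of the header): a cell driver handed the parent's
 * struct khadas_mcu can read MCU registers through the shared regmap (assumes
 * <linux/regmap.h>). The helper name and the byte order of the two version
 * registers are assumptions.
 */
static int khadas_mcu_read_version(struct khadas_mcu *mcu, u16 *version)
{
	unsigned int lo, hi;
	int ret;

	ret = regmap_read(mcu->regmap, KHADAS_MCU_VERSION_0_REG, &lo);
	if (ret)
		return ret;

	ret = regmap_read(mcu->regmap, KHADAS_MCU_VERSION_1_REG, &hi);
	if (ret)
		return ret;

	*version = (hi << 8) | lo;
	return 0;
}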
diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h
index edbec8350a49..5546688c7da7 100644
--- a/include/linux/mfd/lp873x.h
+++ b/include/linux/mfd/lp873x.h
@@ -1,7 +1,7 @@
/*
* Functions to access LP873X power management chip.
*
- * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h
index ce965354bbad..43716aca46fa 100644
--- a/include/linux/mfd/lp87565.h
+++ b/include/linux/mfd/lp87565.h
@@ -2,7 +2,7 @@
/*
* Functions to access LP87565 power management chip.
*
- * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MFD_LP87565_H
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index fa9595dd42ba..601cbbc10370 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -21,7 +21,6 @@
struct gpio_desc;
struct pinctrl_map;
-struct madera_codec_pdata;
/**
* struct madera_pdata - Configuration data for Madera devices
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index e798c81aec31..311f7d3d2323 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -131,7 +131,7 @@ enum max77693_pmic_reg {
#define FLASH_INT_FLED1_SHORT BIT(3)
#define FLASH_INT_OVER_CURRENT BIT(4)
-/* Fast charge timer in in hours */
+/* Fast charge timer in hours */
#define DEFAULT_FAST_CHARGE_TIMER 4
/* microamps */
#define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000
diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h
index d469aa481243..b08570ff34df 100644
--- a/include/linux/mfd/sky81452.h
+++ b/include/linux/mfd/sky81452.h
@@ -9,11 +9,9 @@
#ifndef _SKY81452_H
#define _SKY81452_H
-#include <linux/platform_data/sky81452-backlight.h>
#include <linux/regulator/machine.h>
struct sky81452_platform_data {
- struct sky81452_bl_platform_data *bl_pdata;
struct regulator_init_data *regulator_init_data;
};
diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h
deleted file mode 100644
index 83944124e886..000000000000
--- a/include/linux/mfd/smsc.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * SMSC ECE1099
- *
- * Copyright 2012 Texas Instruments Inc.
- *
- * Author: Sourav Poddar <sourav.poddar@ti.com>
- */
-
-#ifndef __LINUX_MFD_SMSC_H
-#define __LINUX_MFD_SMSC_H
-
-#include <linux/regmap.h>
-
-#define SMSC_ID_ECE1099 1
-#define SMSC_NUM_CLIENTS 2
-
-#define SMSC_BASE_ADDR 0x38
-#define OMAP_GPIO_SMSC_IRQ 151
-
-#define SMSC_MAXGPIO 32
-#define SMSC_BANK(offs) ((offs) >> 3)
-#define SMSC_BIT(offs) (1u << ((offs) & 0x7))
-
-struct smsc {
- struct device *dev;
- struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS];
- struct regmap *regmap;
- int clk;
- /* Stored chip id */
- int id;
-};
-
-struct smsc_gpio;
-struct smsc_keypad;
-
-static inline int smsc_read(struct device *child, unsigned int reg,
- unsigned int *dest)
-{
- struct smsc *smsc = dev_get_drvdata(child->parent);
-
- return regmap_read(smsc->regmap, reg, dest);
-}
-
-static inline int smsc_write(struct device *child, unsigned int reg,
- unsigned int value)
-{
- struct smsc *smsc = dev_get_drvdata(child->parent);
-
- return regmap_write(smsc->regmap, reg, value);
-}
-
-/* Registers for SMSC */
-#define SMSC_RESET 0xF5
-#define SMSC_GRP_INT 0xF9
-#define SMSC_CLK_CTRL 0xFA
-#define SMSC_WKUP_CTRL 0xFB
-#define SMSC_DEV_ID 0xFC
-#define SMSC_DEV_REV 0xFD
-#define SMSC_VEN_ID_L 0xFE
-#define SMSC_VEN_ID_H 0xFF
-
-/* CLK VALUE */
-#define SMSC_CLK_VALUE 0x13
-
-/* Registers for function GPIO INPUT */
-#define SMSC_GPIO_DATA_IN_START 0x00
-
-/* Registers for function GPIO OUPUT */
-#define SMSC_GPIO_DATA_OUT_START 0x05
-
-/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/
-#define SMSC_GPIO_INPUT_LOW 0x01
-#define SMSC_GPIO_INPUT_RISING 0x09
-#define SMSC_GPIO_INPUT_FALLING 0x11
-#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19
-#define SMSC_GPIO_OUTPUT_PP 0x21
-#define SMSC_GPIO_OUTPUT_OP 0x31
-
-#define GRP_INT_STAT 0xf9
-#define SMSC_GPI_INT 0x0f
-#define SMSC_CFG_START 0x0A
-
-/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/
-#define SMSC_GPIO_INT_STAT_START 0x32
-
-/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/
-#define SMSC_GPIO_INT_MASK_START 0x37
-
-/* Registers for SMSC function KEYPAD*/
-#define SMSC_KP_OUT 0x40
-#define SMSC_KP_IN 0x41
-#define SMSC_KP_INT_STAT 0x42
-#define SMSC_KP_INT_MASK 0x43
-
-/* Definitions for keypad */
-#define SMSC_KP_KSO 0x70
-#define SMSC_KP_KSI 0x51
-#define SMSC_KSO_ALL_LOW 0x20
-#define SMSC_KP_SET_LOW_PWR 0x0B
-#define SMSC_KP_SET_HIGH 0xFF
-#define SMSC_KSO_EVAL 0x00
-
-#endif /* __LINUX_MFD_SMSC_H */
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 605f62264825..90b20550c1c8 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -27,10 +27,15 @@
#define STM32_LPTIM_CMPOK BIT(3)
/* STM32_LPTIM_ICR - bit fields */
+#define STM32_LPTIM_ARRMCF BIT(1)
#define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3)
+/* STM32_LPTIM_IER - bit fields */
+#define STM32_LPTIM_ARRMIE BIT(1)
+
/* STM32_LPTIM_CR - bit fields */
#define STM32_LPTIM_CNTSTRT BIT(2)
+#define STM32_LPTIM_SNGSTRT BIT(1)
#define STM32_LPTIM_ENABLE BIT(0)
/* STM32_LPTIM_CFGR - bit fields */
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 483168403ae5..ffc091b77633 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -4,7 +4,7 @@
/*
* TI Touch Screen / ADC MFD driver
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
index a228ae4c88d9..e0a417e53766 100644
--- a/include/linux/mfd/tps65086.h
+++ b/include/linux/mfd/tps65086.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index b5dd108421c8..db7091824ed0 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -3,7 +3,7 @@
*
* Functions to access TPS65217 power management chip.
*
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index b0470c35162d..f4ca367e3473 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -3,7 +3,7 @@
*
* Functions to access TPS65219 power management chip.
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index b25d0297ba88..7943e413deae 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
index 491156a2359f..e99c789424e0 100644
--- a/include/linux/mic_bus.h
+++ b/include/linux/mic_bus.h
@@ -6,7 +6,7 @@
*
* Intel MIC Bus driver.
*
- * This implementation is very similar to the the virtio bus driver
+ * This implementation is very similar to the virtio bus driver
* implementation @ include/linux/virtio.h.
*/
#ifndef _MIC_BUS_H_
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 3e546cbf03dd..0f8d1583fa8e 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -10,6 +10,8 @@
typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);
+struct migration_target_control;
+
/*
* Return values from addresss_space_operations.migratepage():
* - negative errno on page migration failure;
@@ -31,34 +33,6 @@ enum migrate_reason {
/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];
-static inline struct page *new_page_nodemask(struct page *page,
- int preferred_nid, nodemask_t *nodemask)
-{
- gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
- unsigned int order = 0;
- struct page *new_page = NULL;
-
- if (PageHuge(page))
- return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
- preferred_nid, nodemask);
-
- if (PageTransHuge(page)) {
- gfp_mask |= GFP_TRANSHUGE;
- order = HPAGE_PMD_ORDER;
- }
-
- if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
- gfp_mask |= __GFP_HIGHMEM;
-
- new_page = __alloc_pages_nodemask(gfp_mask, order,
- preferred_nid, nodemask);
-
- if (new_page && PageTransHuge(new_page))
- prep_transhuge_page(new_page);
-
- return new_page;
-}
-
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
@@ -67,6 +41,7 @@ extern int migrate_page(struct address_space *mapping,
enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
+extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);
@@ -85,6 +60,9 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
free_page_t free, unsigned long private, enum migrate_mode mode,
int reason)
{ return -ENOSYS; }
+static inline struct page *alloc_migration_target(struct page *page,
+ unsigned long private)
+ { return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
{ return -EBUSY; }
@@ -180,6 +158,11 @@ static inline unsigned long migrate_pfn(unsigned long pfn)
return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
+enum migrate_vma_direction {
+ MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
+ MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
+};
+
struct migrate_vma {
struct vm_area_struct *vma;
/*
@@ -199,11 +182,14 @@ struct migrate_vma {
/*
* Set to the owner value also stored in page->pgmap->owner for
- * migrating out of device private memory. If set only device
- * private pages with this owner are migrated. If not set
- * device private pages are not migrated at all.
+ * migrating out of device private memory. The flags also need to
+ * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
+ * The caller should always set this field when using mmu notifier
+ * callbacks to avoid device MMU invalidations for device private
+ * pages that are not being migrated.
*/
- void *src_owner;
+ void *pgmap_owner;
+ unsigned long flags;
};
int migrate_vma_setup(struct migrate_vma *args);
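/*
 * Setup sketch (not part of the patch): a driver migrating its own device
 * private pages now selects them explicitly via @flags and identifies them
 * through @pgmap_owner (formerly @src_owner). The "drv", "src_pfns" and
 * "dst_pfns" names, and the surrounding error handling, are hypothetical.
 */
struct migrate_vma args = {
	.vma		= vma,
	.start		= start,
	.end		= end,
	.src		= src_pfns,
	.dst		= dst_pfns,
	.pgmap_owner	= drv,
	.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
};

if (migrate_vma_setup(&args) < 0)
	return -EINVAL;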
diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h
index 96ebaa94a92e..dacf69516002 100644
--- a/include/linux/mlx5/accel.h
+++ b/include/linux/mlx5/accel.h
@@ -126,7 +126,7 @@ enum mlx5_accel_ipsec_cap {
MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
};
-#ifdef CONFIG_MLX5_FPGA_IPSEC
+#ifdef CONFIG_MLX5_ACCEL
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
@@ -152,5 +152,5 @@ static inline int
mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
-#endif
-#endif
+#endif /* CONFIG_MLX5_ACCEL */
+#endif /* __MLX5_ACCEL_H__ */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index b5a9399e07ee..7bfb67363434 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -33,7 +33,6 @@
#ifndef MLX5_CORE_CQ_H
#define MLX5_CORE_CQ_H
-#include <rdma/ib_verbs.h>
#include <linux/mlx5/driver.h>
#include <linux/refcount.h>
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 1bc27aca648b..4d3376e20f5e 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -276,7 +276,9 @@ enum {
MLX5_MKEY_MASK_RW = 1ull << 20,
MLX5_MKEY_MASK_A = 1ull << 21,
MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
- MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
+ MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
};
enum {
@@ -458,6 +460,15 @@ enum {
MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
};
+struct mlx5_wqe_tls_static_params_seg {
+ u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
+};
+
+struct mlx5_wqe_tls_progress_params_seg {
+ __be32 tis_tir_num;
+ u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
+};
+
enum {
MLX5_SET_PORT_RESET_QKEY = 0,
MLX5_SET_PORT_GUID0 = 16,
@@ -998,7 +1009,6 @@ enum {
MLX5_MKEY_REMOTE_INVAL = 1 << 24,
MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
MLX5_MKEY_BSF_EN = 1 << 30,
- MLX5_MKEY_LEN64 = 1 << 31,
};
struct mlx5_mkey_seg {
@@ -1352,11 +1362,11 @@ enum mlx5_qcam_feature_groups {
MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
- MLX5_GET(device_virtio_emulation_cap, \
+ MLX5_GET(virtio_emulation_cap, \
(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
- MLX5_GET64(device_virtio_emulation_cap, \
+ MLX5_GET64(virtio_emulation_cap, \
(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
#define MLX5_CAP_IPSEC(mdev, cap)\
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1e6ca716635a..c145de0473bc 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -541,7 +541,7 @@ struct mlx5_priv {
/* pages stuff */
struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
- struct rb_root page_root;
+ struct xarray page_root_xa;
int fw_pages;
atomic_t reg_pages;
struct list_head free_list;
@@ -708,6 +708,9 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
+#ifdef CONFIG_MLX5_ACCEL
+ const struct mlx5_accel_ipsec_ops *ipsec_ops;
+#endif
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct mlx5_fw_tracer *tracer;
@@ -972,6 +975,7 @@ void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
@@ -1054,6 +1058,7 @@ enum {
enum {
MLX5_INTERFACE_PROTOCOL_IB = 0,
MLX5_INTERFACE_PROTOCOL_ETH = 1,
+ MLX5_INTERFACE_PROTOCOL_VDPA = 2,
};
struct mlx5_interface {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 6c5aa0a21425..92d991d93757 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -207,7 +207,10 @@ struct mlx5_flow_act {
u32 action;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
- uintptr_t esp_id;
+ union {
+ u32 ipsec_obj_id;
+ uintptr_t esp_id;
+ };
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 073b79eacc99..de1ffb4804d6 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -93,6 +93,7 @@ enum {
enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
+ MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -416,7 +417,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
u8 reformat_and_fwd_to_table[0x1];
- u8 reserved_at_1a[0x6];
+ u8 reserved_at_1a[0x2];
+ u8 ipsec_encrypt[0x1];
+ u8 ipsec_decrypt[0x1];
+ u8 reserved_at_1e[0x2];
+
u8 termination_table_raw_traffic[0x1];
u8 reserved_at_21[0x1];
u8 log_max_ft_size[0x6];
@@ -981,17 +986,40 @@ struct mlx5_ifc_device_event_cap_bits {
u8 user_unaffiliated_events[4][0x40];
};
-struct mlx5_ifc_device_virtio_emulation_cap_bits {
- u8 reserved_at_0[0x20];
+struct mlx5_ifc_virtio_emulation_cap_bits {
+ u8 desc_tunnel_offload_type[0x1];
+ u8 eth_frame_offload_type[0x1];
+ u8 virtio_version_1_0[0x1];
+ u8 device_features_bits_mask[0xd];
+ u8 event_mode[0x8];
+ u8 virtio_queue_type[0x8];
- u8 reserved_at_20[0x13];
+ u8 max_tunnel_desc[0x10];
+ u8 reserved_at_30[0x3];
u8 log_doorbell_stride[0x5];
u8 reserved_at_38[0x3];
u8 log_doorbell_bar_size[0x5];
u8 doorbell_bar_offset[0x40];
- u8 reserved_at_80[0x780];
+ u8 max_emulated_devices[0x8];
+ u8 max_num_virtio_queues[0x18];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 umem_1_buffer_param_a[0x20];
+
+ u8 umem_1_buffer_param_b[0x20];
+
+ u8 umem_2_buffer_param_a[0x20];
+
+ u8 umem_2_buffer_param_b[0x20];
+
+ u8 umem_3_buffer_param_a[0x20];
+
+ u8 umem_3_buffer_param_b[0x20];
+
+ u8 reserved_at_1c0[0x640];
};
enum {
@@ -1216,7 +1244,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
- u8 reserved_at_d0[0xb];
+ u8 relaxed_ordering_write_umr[0x1];
+ u8 relaxed_ordering_read_umr[0x1];
+ u8 reserved_at_d2[0x7];
+ u8 virtio_net_device_emualtion_manager[0x1];
+ u8 virtio_blk_device_emualtion_manager[0x1];
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
@@ -1392,7 +1424,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 bf[0x1];
u8 driver_version[0x1];
u8 pad_tx_eth_packet[0x1];
- u8 reserved_at_263[0x8];
+ u8 reserved_at_263[0x3];
+ u8 mkey_by_name[0x1];
+ u8 reserved_at_267[0x4];
+
u8 log_bf_reg_size[0x5];
u8 reserved_at_270[0x8];
@@ -2949,7 +2984,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
- struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
u8 reserved_at_0[0x8000];
};
@@ -2965,6 +3000,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
+ MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000,
+ MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000,
};
enum {
@@ -3006,7 +3043,8 @@ struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan_2;
- u8 reserved_at_120[0xe0];
+ u8 ipsec_obj_id[0x20];
+ u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -3295,15 +3333,18 @@ struct mlx5_ifc_scheduling_context_bits {
};
struct mlx5_ifc_rqtc_bits {
- u8 reserved_at_0[0xa0];
+ u8 reserved_at_0[0xa0];
- u8 reserved_at_a0[0x10];
- u8 rqt_max_size[0x10];
+ u8 reserved_at_a0[0x5];
+ u8 list_q_type[0x3];
+ u8 reserved_at_a8[0x8];
+ u8 rqt_max_size[0x10];
- u8 reserved_at_c0[0x10];
- u8 rqt_actual_size[0x10];
+ u8 rq_vhca_id_format[0x1];
+ u8 reserved_at_c1[0xf];
+ u8 rqt_actual_size[0x10];
- u8 reserved_at_e0[0x6a0];
+ u8 reserved_at_e0[0x6a0];
struct mlx5_ifc_rq_num_bits rq_num[];
};
@@ -4381,6 +4422,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
enum {
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2,
};
struct mlx5_ifc_arm_monitor_counter_in_bits {
@@ -5752,6 +5794,7 @@ enum {
MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58,
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
+ MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -7083,7 +7126,7 @@ struct mlx5_ifc_destroy_mkey_out_bits {
struct mlx5_ifc_destroy_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7714,8 +7757,10 @@ struct mlx5_ifc_create_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x8];
+ u8 input_qpn[0x18];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
u8 ece[0x20];
@@ -7779,7 +7824,7 @@ struct mlx5_ifc_create_mkey_out_bits {
struct mlx5_ifc_create_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -10337,6 +10382,40 @@ struct mlx5_ifc_create_umem_in_bits {
struct mlx5_ifc_umem_bits umem;
};
+struct mlx5_ifc_create_umem_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 umem_id[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x8];
+ u8 umem_id[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_create_uctx_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -10349,6 +10428,18 @@ struct mlx5_ifc_create_uctx_in_bits {
struct mlx5_ifc_uctx_bits uctx;
};
+struct mlx5_ifc_create_uctx_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_destroy_uctx_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -10362,6 +10453,15 @@ struct mlx5_ifc_destroy_uctx_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_destroy_uctx_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_create_sw_icm_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
struct mlx5_ifc_sw_icm_bits sw_icm;
@@ -10668,17 +10768,20 @@ struct mlx5_ifc_tls_static_params_bits {
};
struct mlx5_ifc_tls_progress_params_bits {
- u8 reserved_at_0[0x8];
- u8 tisn[0x18];
-
u8 next_record_tcp_sn[0x20];
u8 hw_resync_tcp_sn[0x20];
u8 record_tracker_state[0x2];
u8 auth_state[0x2];
- u8 reserved_at_64[0x4];
+ u8 reserved_at_44[0x4];
u8 hw_offset_record_number[0x18];
};
+enum {
+ MLX5_MTT_PERM_READ = 1 << 0,
+ MLX5_MTT_PERM_WRITE = 1 << 1,
+ MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE,
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index de9a272c9f3d..2d45a6af52a4 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -104,8 +104,11 @@ enum mlx5e_ext_link_mode {
MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8,
MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9,
MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10,
+ MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
+ MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
MLX5E_400GAUI_8 = 15,
+ MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
MLX5E_EXT_LINK_MODES_NUMBER,
};
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index b8992b861ae6..36492a1342cf 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -209,7 +209,7 @@ struct mlx5_wqe_ctrl_seg {
__be32 general_id;
__be32 imm;
__be32 umr_mkey;
- __be32 tisn;
+ __be32 tis_tir_num;
};
};
diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h
new file mode 100644
index 000000000000..d11c0b228620
--- /dev/null
+++ b/include/linux/mlx5/rsc_dump.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies inc. */
+
+#include <linux/mlx5/driver.h>
+
+#ifndef __MLX5_RSC_DUMP
+#define __MLX5_RSC_DUMP
+
+enum mlx5_sgmt_type {
+ MLX5_SGMT_TYPE_HW_CQPC,
+ MLX5_SGMT_TYPE_HW_SQPC,
+ MLX5_SGMT_TYPE_HW_RQPC,
+ MLX5_SGMT_TYPE_FULL_SRQC,
+ MLX5_SGMT_TYPE_FULL_CQC,
+ MLX5_SGMT_TYPE_FULL_EQC,
+ MLX5_SGMT_TYPE_FULL_QPC,
+ MLX5_SGMT_TYPE_SND_BUFF,
+ MLX5_SGMT_TYPE_RCV_BUFF,
+ MLX5_SGMT_TYPE_SRQ_BUFF,
+ MLX5_SGMT_TYPE_CQ_BUFF,
+ MLX5_SGMT_TYPE_EQ_BUFF,
+ MLX5_SGMT_TYPE_SX_SLICE,
+ MLX5_SGMT_TYPE_SX_SLICE_ALL,
+ MLX5_SGMT_TYPE_RDB,
+ MLX5_SGMT_TYPE_RX_SLICE_ALL,
+ MLX5_SGMT_TYPE_PRM_QUERY_QP,
+ MLX5_SGMT_TYPE_PRM_QUERY_CQ,
+ MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
+ MLX5_SGMT_TYPE_MENU,
+ MLX5_SGMT_TYPE_TERMINATE,
+
+ MLX5_SGMT_TYPE_NUM, /* Keep last */
+};
+
+struct mlx5_rsc_key {
+ enum mlx5_sgmt_type rsc;
+ int index1;
+ int index2;
+ int num_of_obj1;
+ int num_of_obj2;
+ int size;
+};
+
+struct mlx5_rsc_dump_cmd;
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+ struct mlx5_rsc_key *key);
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd);
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+ struct page *page, int *size);
+#endif /* __MLX5_RSC_DUMP */
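The new rsc_dump.h interface is an iterator: create a dump command for a resource key, pull pages until the firmware reports completion, then destroy the command. Below is a minimal usage sketch (not part of the patch), assuming that mlx5_rsc_dump_cmd_create() returns an ERR_PTR on failure and that mlx5_rsc_dump_next() returns a positive value while more data is pending and zero once the dump is complete; the segment type and index are illustrative placeholders.

/* Hedged usage sketch. Assumes <linux/err.h>, <linux/gfp.h> and
 * <linux/mlx5/rsc_dump.h>; the key below is a placeholder. */
static int example_dump_qp_context(struct mlx5_core_dev *dev, u32 qpn)
{
	struct mlx5_rsc_key key = {
		.rsc = MLX5_SGMT_TYPE_PRM_QUERY_QP,
		.index1 = qpn,
		.num_of_obj1 = 1,
	};
	struct mlx5_rsc_dump_cmd *cmd;
	struct page *page;
	int size, err;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	cmd = mlx5_rsc_dump_cmd_create(dev, &key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_free_page;
	}

	do {
		err = mlx5_rsc_dump_next(dev, cmd, page, &size);
		if (err < 0)
			break;
		/* consume 'size' bytes at page_address(page) here */
	} while (err > 0);	/* assumed: > 0 means more data pending */

	mlx5_rsc_dump_cmd_destroy(cmd);
out_free_page:
	__free_page(page);
	return err < 0 ? err : 0;
}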
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 8170da1e9f70..4db87bcfce7b 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -75,7 +75,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
- u16 vport, u8 *addr);
+ u16 vport, const u8 *addr);
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index dc7b87310c10..1983e08f5906 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -24,6 +24,7 @@
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
+#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
@@ -38,6 +39,7 @@ struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;
+struct pt_regs;
void init_mm_internals(void);
@@ -206,6 +208,8 @@ int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
loff_t *);
+int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
+ loff_t *);
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
@@ -317,8 +321,6 @@ extern unsigned int kobjsize(const void *objp);
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
-#elif defined(CONFIG_PPC)
-# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_IA64)
@@ -479,7 +481,7 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags)
{ FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }
/*
- * vm_fault is filled by the the pagefault handler and passed to the vma's
+ * vm_fault is filled by the pagefault handler and passed to the vma's
* ->fault function. The vma's ->fault is responsible for returning a bitmask
* of VM_FAULT_xxx flags that give details about how the fault was handled.
*
@@ -667,11 +669,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
-/*
- * FIXME: take this include out, include page-flags.h in
- * files which need it (119 of them)
- */
-#include <linux/page-flags.h>
#include <linux/huge_mm.h>
/*
@@ -779,6 +776,11 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
+static inline int head_mapcount(struct page *head)
+{
+ return atomic_read(compound_mapcount_ptr(head)) + 1;
+}
+
/*
* Mapcount of compound page as a whole, does not include mapped sub-pages.
*
@@ -788,7 +790,7 @@ static inline int compound_mapcount(struct page *page)
{
VM_BUG_ON_PAGE(!PageCompound(page), page);
page = compound_head(page);
- return atomic_read(compound_mapcount_ptr(page)) + 1;
+ return head_mapcount(page);
}
/*
@@ -901,22 +903,30 @@ static inline bool hpage_pincount_available(struct page *page)
return PageCompound(page) && compound_order(page) > 1;
}
+static inline int head_pincount(struct page *head)
+{
+ return atomic_read(compound_pincount_ptr(head));
+}
+
static inline int compound_pincount(struct page *page)
{
VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
page = compound_head(page);
- return atomic_read(compound_pincount_ptr(page));
+ return head_pincount(page);
}
static inline void set_compound_order(struct page *page, unsigned int order)
{
page[1].compound_order = order;
+ page[1].compound_nr = 1U << order;
}
/* Returns the number of pages in this potentially compound page. */
static inline unsigned long compound_nr(struct page *page)
{
- return 1UL << compound_order(page);
+ if (!PageHead(page))
+ return 1;
+ return page[1].compound_nr;
}
/* Returns the number of bytes in this potentially compound page. */
@@ -1057,6 +1067,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
static inline enum zone_type page_zonenum(const struct page *page)
{
+ ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
@@ -1584,6 +1595,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
extern void pagefault_out_of_memory(void);
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
/*
* Flags passed to show_mem() and show_free_areas() to suppress output in
@@ -1648,8 +1660,9 @@ int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags);
-extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long address, unsigned int flags,
+ struct pt_regs *regs);
+extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
@@ -1658,14 +1671,14 @@ void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
+ unsigned long address, unsigned int flags,
+ struct pt_regs *regs)
{
/* should never happen if there's no MMU */
BUG();
return VM_FAULT_SIGBUS;
}
-static inline int fixup_user_fault(struct task_struct *tsk,
- struct mm_struct *mm, unsigned long address,
+static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
unsigned int fault_flags, bool *unlocked)
{
/* should never happen if there's no MMU */
@@ -1691,11 +1704,11 @@ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
unsigned long addr, void *buf, int len, unsigned int gup_flags);
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
-long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
@@ -2093,51 +2106,11 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
NULL : pud_offset(p4d, address);
}
-static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
- unsigned long address,
- pgtbl_mod_mask *mod_mask)
-
-{
- if (unlikely(pgd_none(*pgd))) {
- if (__p4d_alloc(mm, pgd, address))
- return NULL;
- *mod_mask |= PGTBL_PGD_MODIFIED;
- }
-
- return p4d_offset(pgd, address);
-}
-
-static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
- unsigned long address,
- pgtbl_mod_mask *mod_mask)
-{
- if (unlikely(p4d_none(*p4d))) {
- if (__pud_alloc(mm, p4d, address))
- return NULL;
- *mod_mask |= PGTBL_P4D_MODIFIED;
- }
-
- return pud_offset(p4d, address);
-}
-
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
NULL: pmd_offset(pud, address);
}
-
-static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
- unsigned long address,
- pgtbl_mod_mask *mod_mask)
-{
- if (unlikely(pud_none(*pud))) {
- if (__pmd_alloc(mm, pud, address))
- return NULL;
- *mod_mask |= PGTBL_PUD_MODIFIED;
- }
-
- return pmd_offset(pud, address);
-}
#endif /* CONFIG_MMU */
#if USE_SPLIT_PTE_PTLOCKS
@@ -2253,11 +2226,6 @@ static inline void pgtable_pte_page_dtor(struct page *page)
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
-#define pte_alloc_kernel_track(pmd, address, mask) \
- ((unlikely(pmd_none(*(pmd))) && \
- (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\
- NULL: pte_offset_kernel(pmd, address))
-
#if USE_SPLIT_PMD_PTLOCKS
static struct page *pmd_to_page(pmd_t *pmd)
@@ -2415,9 +2383,6 @@ static inline unsigned long get_num_physpages(void)
* for_each_valid_physical_page_range()
* memblock_add_node(base, size, nid)
* free_area_init(max_zone_pfns);
- *
- * sparse_memory_present_with_active_regions() calls memory_present() for
- * each range when SPARSEMEM is enabled.
*/
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
@@ -2428,7 +2393,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
-extern void sparse_memory_present_with_active_regions(int nid);
#ifndef CONFIG_NEED_MULTIPLE_NODES
static inline int early_pfn_to_nid(unsigned long pfn)
@@ -2579,23 +2543,13 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
- struct list_head *uf);
+ unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(unsigned long start, size_t len_in, int behavior);
-static inline unsigned long
-do_mmap_pgoff(struct file *file, unsigned long addr,
- unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate,
- struct list_head *uf)
-{
- return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
-}
-
#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
@@ -2648,7 +2602,7 @@ extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
+/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
unsigned long address);
#if VM_GROWSUP
@@ -3011,14 +2965,15 @@ pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
-pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
+ struct vmem_altmap *altmap);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
-void *vmemmap_alloc_block_buf(unsigned long size, int node);
-void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
+void *vmemmap_alloc_block_buf(unsigned long size, int node,
+ struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
- int node);
+ int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
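Two prototype changes in the mm.h hunks above ripple through many callers: handle_mm_fault() now takes the faulting pt_regs so fault accounting can move into common code, and get_user_pages_remote(), pin_user_pages_remote() and fixup_user_fault() drop the task_struct argument because only the mm is needed. A hedged sketch of a hypothetical caller updated to the new gup prototype:

/* Illustrative only; example_pin_one() is not part of the patch. */
static long example_pin_one(struct mm_struct *mm, unsigned long addr,
			    struct page **page)
{
	long npinned;

	mmap_read_lock(mm);
	/* old form: get_user_pages_remote(tsk, mm, addr, 1, ...) */
	npinned = get_user_pages_remote(mm, addr, 1, FOLL_WRITE,
					page, NULL, NULL);
	mmap_read_unlock(mm);

	return npinned;	/* pages pinned, or a negative errno */
}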
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 219bef41d87c..8fc71e9d7bb0 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -48,14 +48,14 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
}
static __always_inline void add_page_to_lru_list_tail(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
- update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add_tail(&page->lru, &lruvec->lists[lru]);
}
@@ -63,7 +63,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
list_del(&page->lru);
- update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
+ update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
}
/**
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 64ede5f150dc..496c3ff97cce 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -134,6 +134,7 @@ struct page {
unsigned char compound_dtor;
unsigned char compound_order;
atomic_t compound_mapcount;
+ unsigned int compound_nr; /* 1 << compound_order */
};
struct { /* Second tail page of compound page */
unsigned long _compound_pad_1; /* compound_head */
@@ -198,7 +199,10 @@ struct page {
atomic_t _refcount;
#ifdef CONFIG_MEMCG
- struct mem_cgroup *mem_cgroup;
+ union {
+ struct mem_cgroup *mem_cgroup;
+ struct obj_cgroup **obj_cgroups;
+ };
#endif
/*
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 4b08e9c9c538..6f34c33075f9 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -57,8 +57,12 @@ extern struct percpu_counter vm_committed_as;
#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
+extern void mm_compute_batch(int overcommit_policy);
#else
#define vm_committed_as_batch 0
+static inline void mm_compute_batch(int overcommit_policy)
+{
+}
#endif
unsigned long vm_memory_committed(void);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7149bab555d7..c5b6e97cb21a 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -287,6 +287,7 @@ struct mmc_host {
#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notify;
#endif
+ struct wakeup_source *ws; /* Enable consumption of uevents */
u32 max_current_330;
u32 max_current_300;
u32 max_current_180;
@@ -351,6 +352,7 @@ struct mmc_host {
#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
+#define MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND (1 << 3) /* Can do full power cycle in suspend */
#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 15ed8ce9d394..12036619346c 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -105,6 +105,9 @@
#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668
+#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
+#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347
+
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
#define SDIO_DEVICE_ID_SIANO_NICE 0x0202
@@ -118,6 +121,10 @@
#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100
#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347
+#define SDIO_VENDOR_ID_RSI 0x041b
+#define SDIO_DEVICE_ID_RSI_9113 0x9330
+#define SDIO_DEVICE_ID_RSI_9116 0x9116
+
#define SDIO_VENDOR_ID_TI_WL1251 0x104c
#define SDIO_DEVICE_ID_TI_WL1251 0x9066
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index fc68f3570e19..b8200782dede 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -38,6 +38,10 @@ struct mmu_interval_notifier;
*
* @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
* that the mm refcount is zero and the range is no longer accessible.
+ *
+ * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
+ * a device driver to possibly ignore the invalidation if the
+ * migrate_pgmap_owner field matches the driver's device private pgmap owner.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
@@ -46,6 +50,7 @@ enum mmu_notifier_event {
MMU_NOTIFY_PROTECTION_PAGE,
MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
+ MMU_NOTIFY_MIGRATE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
@@ -264,6 +269,7 @@ struct mmu_notifier_range {
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
+ void *migrate_pgmap_owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -515,6 +521,16 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->flags = flags;
}
+static inline void mmu_notifier_range_init_migrate(
+ struct mmu_notifier_range *range, unsigned int flags,
+ struct vm_area_struct *vma, struct mm_struct *mm,
+ unsigned long start, unsigned long end, void *pgmap)
+{
+ mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
+ start, end);
+ range->migrate_pgmap_owner = pgmap;
+}
+
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
({ \
int __young; \
@@ -639,6 +655,9 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
+#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
+ pgmap) \
+ _mmu_notifier_range_init(range, start, end)
static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
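The new MMU_NOTIFY_MIGRATE event and migrate_pgmap_owner field let a device driver recognize invalidations caused by its own migrate_vma_*() calls and skip needless teardown. A hedged sketch of how a driver's invalidate_range_start() callback might use this; example_pgmap_owner stands in for whatever cookie the driver passes as the migration's pgmap owner:

/* Illustrative callback, not from the patch. */
static void *example_pgmap_owner;	/* placeholder cookie */

static int example_invalidate_range_start(struct mmu_notifier *mn,
					  const struct mmu_notifier_range *range)
{
	/* Ignore invalidations triggered by our own device-private migration. */
	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->migrate_pgmap_owner == example_pgmap_owner)
		return 0;

	/* ... otherwise unmap device copies of [range->start, range->end) ... */
	return 0;
}

static const struct mmu_notifier_ops example_mn_ops = {
	.invalidate_range_start = example_invalidate_range_start,
};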
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f6f884970511..8379432f4f2f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -88,12 +88,10 @@ static inline bool is_migrate_movable(int mt)
extern int page_group_by_mobility_disabled;
-#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
-#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
#define get_pageblock_migratetype(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- PB_migrate_end, MIGRATETYPE_MASK)
+ get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
@@ -155,10 +153,6 @@ enum zone_stat_item {
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
NR_PAGETABLE, /* used for pagetables */
- NR_KERNEL_STACK_KB, /* measured in KiB */
-#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
- NR_KERNEL_SCS_KB, /* measured in KiB */
-#endif
/* Second 128 byte cacheline */
NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
@@ -174,14 +168,20 @@ enum node_stat_item {
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
- NR_SLAB_RECLAIMABLE,
- NR_SLAB_UNRECLAIMABLE,
+ NR_SLAB_RECLAIMABLE_B,
+ NR_SLAB_UNRECLAIMABLE_B,
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_NODES,
- WORKINGSET_REFAULT,
- WORKINGSET_ACTIVATE,
- WORKINGSET_RESTORE,
+ WORKINGSET_REFAULT_BASE,
+ WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
+ WORKINGSET_REFAULT_FILE,
+ WORKINGSET_ACTIVATE_BASE,
+ WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
+ WORKINGSET_ACTIVATE_FILE,
+ WORKINGSET_RESTORE_BASE,
+ WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
+ WORKINGSET_RESTORE_FILE,
WORKINGSET_NODERECLAIM,
NR_ANON_MAPPED, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
@@ -203,10 +203,34 @@ enum node_stat_item {
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
+ NR_KERNEL_STACK_KB, /* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+ NR_KERNEL_SCS_KB, /* measured in KiB */
+#endif
NR_VM_NODE_STAT_ITEMS
};
/*
+ * Returns true if the value is measured in bytes (most vmstat values are
+ * measured in pages). This defines the API part, the internal representation
+ * might be different.
+ */
+static __always_inline bool vmstat_item_in_bytes(int idx)
+{
+ /*
+ * Global and per-node slab counters track slab pages.
+ * It's expected that changes are multiples of PAGE_SIZE.
+ * Internally values are stored in pages.
+ *
+ * Per-memcg and per-lruvec counters track memory, consumed
+ * by individual slab objects. These counters are actually
+ * byte-precise.
+ */
+ return (idx == NR_SLAB_RECLAIMABLE_B ||
+ idx == NR_SLAB_UNRECLAIMABLE_B);
+}
+
+/*
* We do arithmetic on the LRU lists in various places in the code,
* so it is important to keep the active lists LRU_ACTIVE higher in
* the array than the corresponding inactive lists, and to keep
@@ -259,8 +283,8 @@ struct lruvec {
unsigned long file_cost;
/* Non-resident age, driven by LRU movement */
atomic_long_t nonresident_age;
- /* Refaults at the time of last reclaim cycle */
- unsigned long refaults;
+ /* Refaults at the time of last reclaim cycle, anon=0, file=1 */
+ unsigned long refaults[2];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
#ifdef CONFIG_MEMCG
@@ -512,6 +536,7 @@ struct zone {
* On compaction failure, 1<<compact_defer_shift compactions
* are skipped before trying again. The number attempted since
* last failure is tracked with compact_considered.
+ * compact_order_failed is the minimum order at which compaction failed.
*/
unsigned int compact_considered;
unsigned int compact_defer_shift;
@@ -819,18 +844,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
-#ifdef CONFIG_HAVE_MEMORY_PRESENT
-void memory_present(int nid, unsigned long start, unsigned long end);
-#else
-static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
-#endif
-
-#if defined(CONFIG_SPARSEMEM)
-void memblocks_present(void);
-#else
-static inline void memblocks_present(void) {}
-#endif
-
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
@@ -1387,8 +1400,6 @@ struct mminit_pfnnid_cache {
#define early_pfn_valid(pfn) (1)
#endif
-void memory_present(int nid, unsigned long start, unsigned long end);
-
/*
* If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
* need to check pfn validity within that MAX_ORDER_NR_PAGES block.
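vmstat_item_in_bytes() above only defines the unit of a counter as seen through the API; the per-node slab counters keep their page-granular internal representation. A trivial, purely illustrative helper showing the intended use:

/* Illustrative only: byte-based items are still stored in pages internally. */
static unsigned long example_stat_api_value(enum node_stat_item item,
					    unsigned long pages)
{
	if (vmstat_item_in_bytes(item))
		return pages << PAGE_SHIFT;	/* report in bytes */
	return pages;				/* report in pages */
}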
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index e14cbe444afc..5b08a473cdba 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -251,6 +251,8 @@ struct hda_device_id {
struct sdw_device_id {
__u16 mfg_id;
__u16 part_id;
+ __u8 sdw_version;
+ __u8 class_id;
kernel_ulong_t driver_data;
};
diff --git a/include/linux/module.h b/include/linux/module.h
index 2e6670860d27..e30ed5fa33a7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -389,6 +389,7 @@ struct module {
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
const s32 *gpl_crcs;
+ bool using_gplonly_symbols;
#ifdef CONFIG_UNUSED_SYMBOLS
/* unused exported symbols. */
@@ -582,34 +583,14 @@ struct module *find_module(const char *name);
struct symsearch {
const struct kernel_symbol *start, *stop;
const s32 *crcs;
- enum {
+ enum mod_license {
NOT_GPL_ONLY,
GPL_ONLY,
WILL_BE_GPL_ONLY,
- } licence;
+ } license;
bool unused;
};
-/*
- * Search for an exported symbol by name.
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-const struct kernel_symbol *find_symbol(const char *name,
- struct module **owner,
- const s32 **crc,
- bool gplok,
- bool warn);
-
-/*
- * Walk the exported symbol table
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
- struct module *owner,
- void *data), void *data);
-
/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
symnum out of range. */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -657,7 +638,6 @@ static inline void __module_get(struct module *module)
#define symbol_put_addr(p) do { } while (0)
#endif /* CONFIG_MODULE_UNLOAD */
-int ref_module(struct module *a, struct module *b);
/* This is a #define so the string doesn't get put in every .o file */
#define module_name(mod) \
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 3ef917ff0964..1ad5aa3b86d9 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -108,7 +108,7 @@ struct kparam_array
* ".") the kernel commandline parameter. Note that - is changed to _, so
* the user can use "foo-bar=1" even for variable "foo_bar".
*
- * @perm is 0 if the the variable is not to appear in sysfs, or 0444
+ * @perm is 0 if the variable is not to appear in sysfs, or 0444
* for world-readable, 0644 for root-writable, etc. Note that if it
* is writable, you may need to use kernel_param_lock() around
* accesses (esp. charp, which can be kfreed when it changes).
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 7bd6d8af0004..5d906dfbf3ed 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -63,6 +63,9 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
+/*-- mpi-sub-ui.c --*/
+int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
+
/*-- mpi-bit.c --*/
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 9a36fad9e068..6cbbfe94348c 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -8,6 +8,7 @@
#include <net/fib_notifier.h>
#include <uapi/linux/mroute.h>
#include <linux/mroute_base.h>
+#include <linux/sockptr.h>
#ifdef CONFIG_IP_MROUTE
static inline int ip_mroute_opt(int opt)
@@ -15,7 +16,7 @@ static inline int ip_mroute_opt(int opt)
return opt >= MRT_BASE && opt <= MRT_MAX;
}
-int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
+int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
@@ -23,7 +24,7 @@ int ip_mr_init(void);
bool ipmr_rule_default(const struct fib_rule *rule);
#else
static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
- char __user *optval, unsigned int optlen)
+ sockptr_t optval, unsigned int optlen)
{
return -ENOPROTOOPT;
}
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index c4a45859f586..bc351a85ce9b 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -8,6 +8,7 @@
#include <net/net_namespace.h>
#include <uapi/linux/mroute6.h>
#include <linux/mroute_base.h>
+#include <linux/sockptr.h>
#include <net/fib_rules.h>
#ifdef CONFIG_IPV6_MROUTE
@@ -25,7 +26,7 @@ static inline int ip6_mroute_opt(int opt)
struct sock;
#ifdef CONFIG_IPV6_MROUTE
-extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
+extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
@@ -33,9 +34,8 @@ extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *ar
extern int ip6_mr_init(void);
extern void ip6_mr_cleanup(void);
#else
-static inline
-int ip6_mroute_setsockopt(struct sock *sock,
- int optname, char __user *optval, unsigned int optlen)
+static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
+ sockptr_t optval, unsigned int optlen)
{
return -ENOPROTOOPT;
}
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h
index 2dfe65964f6e..2129f7d3b6eb 100644
--- a/include/linux/mtd/hyperbus.h
+++ b/include/linux/mtd/hyperbus.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __LINUX_MTD_HYPERBUS_H__
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 0c7483843a32..af99041ceaa9 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -12,6 +12,8 @@
#include <linux/mtd/mtd.h>
+struct nand_device;
+
/**
* struct nand_memory_organization - Memory organization structure
* @bits_per_cell: number of bits per NAND cell
@@ -114,11 +116,11 @@ struct nand_page_io_req {
};
/**
- * struct nand_ecc_req - NAND ECC requirements
+ * struct nand_ecc_props - NAND ECC properties
* @strength: ECC strength
- * @step_size: ECC step/block size
+ * @step_size: Number of bytes per step
*/
-struct nand_ecc_req {
+struct nand_ecc_props {
unsigned int strength;
unsigned int step_size;
};
@@ -133,8 +135,6 @@ struct nand_bbt {
unsigned long *cache;
};
-struct nand_device;
-
/**
* struct nand_ops - NAND operations
* @erase: erase a specific block. No need to check if the block is bad before
@@ -179,7 +179,7 @@ struct nand_ops {
struct nand_device {
struct mtd_info mtd;
struct nand_memory_organization memorg;
- struct nand_ecc_req eccreq;
+ struct nand_ecc_props eccreq;
struct nand_row_converter rowconv;
struct nand_bbt bbt;
const struct nand_ops *ops;
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
index 122f3439e1af..6166e7c60869 100644
--- a/include/linux/mtd/pfow.h
+++ b/include/linux/mtd/pfow.h
@@ -19,7 +19,7 @@
/* Identification info for LPDDR chip */
#define PFOW_MANUFACTURER_ID 0x0020
#define PFOW_DEVICE_ID 0x0022
-/* Address in PFOW where prog buffer can can be found */
+/* Address in PFOW where prog buffer can be found */
#define PFOW_PROGRAM_BUFFER_OFFSET 0x0040
/* Size of program buffer in words */
#define PFOW_PROGRAM_BUFFER_SIZE 0x0042
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 65b1c1c18b41..a725b620aca2 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -492,22 +492,22 @@ struct nand_sdr_timings {
};
/**
- * enum nand_data_interface_type - NAND interface timing type
+ * enum nand_interface_type - NAND interface type
* @NAND_SDR_IFACE: Single Data Rate interface
*/
-enum nand_data_interface_type {
+enum nand_interface_type {
NAND_SDR_IFACE,
};
/**
- * struct nand_data_interface - NAND interface timing
+ * struct nand_interface_config - NAND interface timing
* @type: type of the timing
* @timings: The timing information
* @timings.mode: Timing mode as defined in the specification
* @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
*/
-struct nand_data_interface {
- enum nand_data_interface_type type;
+struct nand_interface_config {
+ enum nand_interface_type type;
struct nand_timings {
unsigned int mode;
union {
@@ -521,7 +521,7 @@ struct nand_data_interface {
* @conf: The data interface
*/
static inline const struct nand_sdr_timings *
-nand_get_sdr_timings(const struct nand_data_interface *conf)
+nand_get_sdr_timings(const struct nand_interface_config *conf)
{
if (conf->type != NAND_SDR_IFACE)
return ERR_PTR(-EINVAL);
@@ -944,11 +944,10 @@ static inline void nand_op_trace(const char *prefix,
* This method replaces chip->legacy.cmdfunc(),
* chip->legacy.{read,write}_{buf,byte,word}(),
* chip->legacy.dev_ready() and chip->legacy.waifunc().
- * @setup_data_interface: setup the data interface and timing. If
- * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
- * means the configuration should not be applied but
- * only checked.
- * This hook is optional.
+ * @setup_interface: setup the data interface and timing. If chipnr is set to
+ * %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
+ * should not be applied but only checked.
+ * This hook is optional.
*/
struct nand_controller_ops {
int (*attach_chip)(struct nand_chip *chip);
@@ -956,8 +955,8 @@ struct nand_controller_ops {
int (*exec_op)(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only);
- int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
- const struct nand_data_interface *conf);
+ int (*setup_interface)(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf);
};
/**
@@ -1028,140 +1027,138 @@ struct nand_legacy {
};
/**
- * struct nand_chip - NAND Private Flash Chip Data
- * @base: Inherit from the generic NAND device
- * @legacy: All legacy fields/hooks. If you develop a new driver,
- * don't even try to use any of these fields/hooks, and if
- * you're modifying an existing driver that is using those
- * fields/hooks, you should consider reworking the driver
- * avoid using them.
- * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
- * setting the read-retry mode. Mostly needed for MLC NAND.
- * @ecc: [BOARDSPECIFIC] ECC control structure
- * @buf_align: minimum buffer alignment required by a platform
- * @oob_poi: "poison value buffer," used for laying out OOB data
- * before writing
- * @page_shift: [INTERN] number of address bits in a page (column
- * address bits).
- * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
- * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
- * @chip_shift: [INTERN] number of address bits in one chip
- * @options: [BOARDSPECIFIC] various chip options. They can partly
- * be set to inform nand_scan about special functionality.
- * See the defines for further explanation.
- * @bbt_options: [INTERN] bad block specific options. All options used
- * here must come from bbm.h. By default, these options
- * will be copied to the appropriate nand_bbt_descr's.
- * @badblockpos: [INTERN] position of the bad block marker in the oob
- * area.
- * @badblockbits: [INTERN] minimum number of set bits in a good block's
- * bad block marker position; i.e., BBM == 11110111b is
- * not bad when badblockbits == 7
- * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
- * set to the actually used ONFI mode if the chip is
- * ONFI compliant or deduced from the datasheet if
- * the NAND chip is not ONFI compliant.
- * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
- * @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
- * @pagecache: Structure containing page cache related fields
- * @pagecache.bitflips: Number of bitflips of the cached page
- * @pagecache.page: Page number currently in the cache. -1 means no page is
- * currently cached
- * @subpagesize: [INTERN] holds the subpagesize
- * @id: [INTERN] holds NAND ID
- * @parameters: [INTERN] holds generic parameters under an easily
- * readable form.
- * @data_interface: [INTERN] NAND interface timing information
- * @cur_cs: currently selected target. -1 means no target selected,
- * otherwise we should always have cur_cs >= 0 &&
- * cur_cs < nanddev_ntargets(). NAND Controller drivers
- * should not modify this value, but they're allowed to
- * read it.
- * @read_retries: [INTERN] the number of read retry modes supported
- * @lock: lock protecting the suspended field. Also used to
- * serialize accesses to the NAND device.
- * @suspended: set to 1 when the device is suspended, 0 when it's not.
- * @suspend: [REPLACEABLE] specific NAND device suspend operation
- * @resume: [REPLACEABLE] specific NAND device resume operation
- * @bbt: [INTERN] bad block table pointer
- * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
- * lookup.
- * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
- * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial
- * bad block scan.
- * @controller: [REPLACEABLE] a pointer to a hardware controller
- * structure which is shared among multiple independent
- * devices.
- * @priv: [OPTIONAL] pointer to private chip data
- * @manufacturer: [INTERN] Contains manufacturer information
- * @manufacturer.desc: [INTERN] Contains manufacturer's description
- * @manufacturer.priv: [INTERN] Contains manufacturer private information
- * @lock_area: [REPLACEABLE] specific NAND chip lock operation
- * @unlock_area: [REPLACEABLE] specific NAND chip unlock operation
+ * struct nand_chip_ops - NAND chip operations
+ * @suspend: Suspend operation
+ * @resume: Resume operation
+ * @lock_area: Lock operation
+ * @unlock_area: Unlock operation
+ * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
+ * @choose_interface_config: Choose the best interface configuration
+ */
+struct nand_chip_ops {
+ int (*suspend)(struct nand_chip *chip);
+ void (*resume)(struct nand_chip *chip);
+ int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+ int (*choose_interface_config)(struct nand_chip *chip,
+ struct nand_interface_config *iface);
+};
+
+/**
+ * struct nand_manufacturer - NAND manufacturer structure
+ * @desc: The manufacturer description
+ * @priv: Private information for the manufacturer driver
*/
+struct nand_manufacturer {
+ const struct nand_manufacturer_desc *desc;
+ void *priv;
+};
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @base: Inherit from the generic NAND device
+ * @id: Holds NAND ID
+ * @parameters: Holds generic parameters under an easily readable form
+ * @manufacturer: Manufacturer information
+ * @ops: NAND chip operations
+ * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try
+ * to use any of these fields/hooks, and if you're modifying an
+ * existing driver that is using those fields/hooks, you should
+ * consider reworking the driver and avoid using them.
+ * @options: Various chip options. They can partly be set to inform nand_scan
+ * about special functionality. See the defines for further
+ * explanation.
+ * @current_interface_config: The currently used NAND interface configuration
+ * @best_interface_config: The best NAND interface configuration which fits both
+ * the NAND chip and NAND controller constraints. If
+ * unset, the default reset interface configuration must
+ * be used.
+ * @bbt_erase_shift: Number of address bits in a bbt entry
+ * @bbt_options: Bad block table specific options. All options used here must
+ * come from bbm.h. By default, these options will be copied to
+ * the appropriate nand_bbt_descr's.
+ * @badblockpos: Bad block marker position in the oob area
+ * @badblockbits: Minimum number of set bits in a good block's bad block marker
+ * position; i.e., BBM = 11110111b is good when badblockbits = 7
+ * @bbt_td: Bad block table descriptor for flash lookup
+ * @bbt_md: Bad block table mirror descriptor
+ * @badblock_pattern: Bad block scan pattern used for initial bad block scan
+ * @bbt: Bad block table pointer
+ * @page_shift: Number of address bits in a page (column address bits)
+ * @phys_erase_shift: Number of address bits in a physical eraseblock
+ * @chip_shift: Number of address bits in one chip
+ * @pagemask: Page number mask = number of (pages / chip) - 1
+ * @subpagesize: Holds the subpagesize
+ * @data_buf: Buffer for data, size is (page size + oobsize)
+ * @oob_poi: pointer to the OOB area covered by data_buf
+ * @pagecache: Structure containing page cache related fields
+ * @pagecache.bitflips: Number of bitflips of the cached page
+ * @pagecache.page: Page number currently in the cache. -1 means no page is
+ * currently cached
+ * @buf_align: Minimum buffer alignment required by a platform
+ * @lock: Lock protecting the suspended field. Also used to serialize accesses
+ * to the NAND device
+ * @suspended: Set to 1 when the device is suspended, 0 when it's not
+ * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
+ * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
+ * NAND Controller drivers should not modify this value, but they're
+ * allowed to read it.
+ * @read_retries: The number of read retry modes supported
+ * @controller: The hardware controller structure which is shared among multiple
+ * independent devices
+ * @ecc: The ECC controller structure
+ * @priv: Chip private data
+ */
struct nand_chip {
struct nand_device base;
-
+ struct nand_id id;
+ struct nand_parameters parameters;
+ struct nand_manufacturer manufacturer;
+ struct nand_chip_ops ops;
struct nand_legacy legacy;
+ unsigned int options;
- int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+ /* Data interface */
+ const struct nand_interface_config *current_interface_config;
+ struct nand_interface_config *best_interface_config;
- unsigned int options;
+ /* Bad block information */
+ unsigned int bbt_erase_shift;
unsigned int bbt_options;
+ unsigned int badblockpos;
+ unsigned int badblockbits;
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+ struct nand_bbt_descr *badblock_pattern;
+ u8 *bbt;
- int page_shift;
- int phys_erase_shift;
- int bbt_erase_shift;
- int chip_shift;
- int pagemask;
- u8 *data_buf;
+ /* Device internal layout */
+ unsigned int page_shift;
+ unsigned int phys_erase_shift;
+ unsigned int chip_shift;
+ unsigned int pagemask;
+ unsigned int subpagesize;
+ /* Buffers */
+ u8 *data_buf;
+ u8 *oob_poi;
struct {
unsigned int bitflips;
int page;
} pagecache;
+ unsigned long buf_align;
- int subpagesize;
- int onfi_timing_mode_default;
- unsigned int badblockpos;
- int badblockbits;
-
- struct nand_id id;
- struct nand_parameters parameters;
-
- struct nand_data_interface data_interface;
-
- int cur_cs;
-
- int read_retries;
-
+ /* Internals */
struct mutex lock;
unsigned int suspended : 1;
- int (*suspend)(struct nand_chip *chip);
- void (*resume)(struct nand_chip *chip);
+ int cur_cs;
+ int read_retries;
- uint8_t *oob_poi;
+ /* Externals */
struct nand_controller *controller;
-
struct nand_ecc_ctrl ecc;
- unsigned long buf_align;
-
- uint8_t *bbt;
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
-
- struct nand_bbt_descr *badblock_pattern;
-
void *priv;
-
- struct {
- const struct nand_manufacturer *desc;
- void *priv;
- } manufacturer;
-
- int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
- int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
};
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
@@ -1209,6 +1206,17 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
return mtd_get_of_node(nand_to_mtd(chip));
}
+/**
+ * nand_get_interface_config - Retrieve the current interface configuration
+ * of a NAND chip
+ * @chip: The NAND chip
+ */
+static inline const struct nand_interface_config *
+nand_get_interface_config(struct nand_chip *chip)
+{
+ return chip->current_interface_config;
+}
+
/*
* A helper for defining older NAND chips where the second ID byte fully
* defined the chip, including the geometry (chip size, eraseblock size, page
@@ -1261,10 +1269,6 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
* @ecc_step_ds in nand_chip{}, also from the datasheet.
* For example, the "4bit ECC for each 512Byte" can be set with
* NAND_ECC_INFO(4, 512).
- * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND
- * reset. Should be deduced from timings described
- * in the datasheet.
- *
*/
struct nand_flash_dev {
char *name;
@@ -1285,7 +1289,6 @@ struct nand_flash_dev {
uint16_t strength_ds;
uint16_t step_ds;
} ecc;
- int onfi_timing_mode_default;
};
int nand_create_bbt(struct nand_chip *chip);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 1077c45721ff..7b78c4ba9b3e 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -309,7 +309,7 @@ struct spinand_info {
struct spinand_devid devid;
u32 flags;
struct nand_memory_organization memorg;
- struct nand_ecc_req eccreq;
+ struct nand_ecc_props eccreq;
struct spinand_ecc_info eccinfo;
struct {
const struct spinand_op_variants *read_cache;
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ae197cc00cc8..dcd185cbfe79 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -65,6 +65,17 @@ struct mutex {
#endif
};
+struct ww_class;
+struct ww_acquire_ctx;
+
+struct ww_mutex {
+ struct mutex base;
+ struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+ struct ww_class *ww_class;
+#endif
+};
+
/*
* This is the control structure for tasks blocked on mutex,
* which resides on the blocked task's kernel stack:
diff --git a/include/linux/net.h b/include/linux/net.h
index 016a9c5faa34..d48ff1180879 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -21,6 +21,7 @@
#include <linux/rcupdate.h>
#include <linux/once.h>
#include <linux/fs.h>
+#include <linux/sockptr.h>
#include <uapi/linux/net.h>
@@ -162,15 +163,10 @@ struct proto_ops {
int (*listen) (struct socket *sock, int len);
int (*shutdown) (struct socket *sock, int flags);
int (*setsockopt)(struct socket *sock, int level,
- int optname, char __user *optval, unsigned int optlen);
+ int optname, sockptr_t optval,
+ unsigned int optlen);
int (*getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
-#ifdef CONFIG_COMPAT
- int (*compat_setsockopt)(struct socket *sock, int level,
- int optname, char __user *optval, unsigned int optlen);
- int (*compat_getsockopt)(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen);
-#endif
void (*show_fdinfo)(struct seq_file *m, struct socket *sock);
int (*sendmsg) (struct socket *sock, struct msghdr *m,
size_t total_len);
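With the proto_ops change above, setsockopt handlers receive a sockptr_t that may wrap either a user or a kernel pointer, and the separate compat entry points disappear. A hedged sketch of a handler under the new prototype; example_setsockopt and its option value are placeholders, copy_from_sockptr() is the accessor from <linux/sockptr.h>:

/* Illustrative protocol setsockopt handler, not from the patch. */
static int example_setsockopt(struct socket *sock, int level, int optname,
			      sockptr_t optval, unsigned int optlen)
{
	int val;

	if (optlen < sizeof(val))
		return -EINVAL;
	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	/* ... apply 'val' to the socket's private state ... */
	return 0;
}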
diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h
new file mode 100644
index 000000000000..f41387a8969f
--- /dev/null
+++ b/include/linux/net/intel/i40e_client.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#define I40E_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40E_CLIENT_VERSION_MAJOR 0
+#define I40E_CLIENT_VERSION_MINOR 01
+#define I40E_CLIENT_VERSION_BUILD 00
+#define I40E_CLIENT_VERSION_STR \
+ __stringify(I40E_CLIENT_VERSION_MAJOR) "." \
+ __stringify(I40E_CLIENT_VERSION_MINOR) "." \
+ __stringify(I40E_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+ u8 major;
+ u8 minor;
+ u8 build;
+ u8 rsvd;
+};
+
+enum i40e_client_state {
+ __I40E_CLIENT_NULL,
+ __I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+ __I40E_CLIENT_INSTANCE_NONE,
+ __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+ u32 num_vectors;
+ struct i40e_qv_info qv_info[1];
+};
+
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+ u16 qs_handle; /* qs handle for prio */
+ u8 tc; /* TC mapped to prio */
+ u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY 8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+ struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+ struct i40e_qos_params qos;
+ u16 mtu;
+};
+
+/* Structure to hold Lan device info for a client device */
+struct i40e_info {
+ struct i40e_client_version version;
+ u8 lanmac[6];
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ u8 __iomem *hw_addr;
+ u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+ u8 ftype; /* function type, PF or VF */
+ void *pf;
+
+ /* All L2 params that could change during the life span of the PF
+ * and need to be communicated to the client when they change
+ */
+ struct i40e_qvlist_info *qvlist_info;
+ struct i40e_params params;
+ struct i40e_ops *ops;
+
+ u16 msix_count; /* number of msix vectors */
+ /* Array down below will be dynamically allocated based on msix_count */
+ struct msix_entry *msix_entries;
+ u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+};
+
+#define I40E_CLIENT_RESET_LEVEL_PF 1
+#define I40E_CLIENT_RESET_LEVEL_CORE 2
+#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
+
+struct i40e_ops {
+ /* setup_qvlist enables queues with a particular vector */
+ int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+ struct i40e_qvlist_info *qv_info);
+
+ int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len);
+
+ /* If the PE Engine is unresponsive, the RDMA driver can request a reset.
+ * The level argument indicates how deep a reset is being requested.
+ */
+ void (*request_reset)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 level);
+
+ /* API for the RDMA driver to set certain VSI flags that control
+ * PE Engine.
+ */
+ int (*update_vsi_ctxt)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag);
+};
+
+struct i40e_client_ops {
+ /* Should be called from register_client() or whenever PF is ready
+ * to create a specific client instance.
+ */
+ int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+ /* Should be called when the netdev is unavailable or when an
+ * unregister call comes in. If the close is happening due to a
+ * reset being triggered, set the reset bit to true.
+ */
+ void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ bool reset);
+
+ /* called when some L2 managed parameter changes, e.g. the MTU */
+ void (*l2_param_change)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params);
+
+ int (*virtchnl_receive)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id,
+ u8 *msg, u16 len);
+
+ /* called when a VF is reset by the PF */
+ void (*vf_reset)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id);
+
+ /* called when the number of VFs changes */
+ void (*vf_enable)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 num_vfs);
+
+ /* returns true if VF is capable of specified offload */
+ int (*vf_capable)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id);
+};
+
+/* Client device */
+struct i40e_client_instance {
+ struct list_head list;
+ struct i40e_info lan_info;
+ struct i40e_client *client;
+ unsigned long state;
+};
+
+struct i40e_client {
+ struct list_head list; /* list of registered clients */
+ char name[I40E_CLIENT_STR_LENGTH];
+ struct i40e_client_version version;
+ unsigned long state; /* client state */
+ atomic_t ref_cnt; /* Count of all the client devices of this kind */
+ u32 flags;
+ u8 type;
+#define I40E_CLIENT_IWARP 0
+ const struct i40e_client_ops *ops; /* client ops provided by the client */
+};
+
+static inline bool i40e_client_is_registered(struct i40e_client *client)
+{
+ return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
+}
+
+/* used by clients */
+int i40e_register_client(struct i40e_client *client);
+int i40e_unregister_client(struct i40e_client *client);
+
+#endif /* _I40E_CLIENT_H_ */
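For reference, a hedged sketch of how an RDMA-style client might register against this interface; the names and callback bodies are hypothetical, and only the structures and i40e_register_client()/i40e_unregister_client() come from this header:

static int my_open(struct i40e_info *ldev, struct i40e_client *client)
{
	/* bring up client resources using ldev->netdev, ldev->ops, ... */
	return 0;
}

static void my_close(struct i40e_info *ldev, struct i40e_client *client,
		     bool reset)
{
	/* tear down client resources; 'reset' indicates a reset-driven close */
}

static const struct i40e_client_ops my_client_ops = {
	.open  = my_open,
	.close = my_close,
};

static struct i40e_client my_client = {
	.name    = "my_rdma",
	.version = {
		.major = I40E_CLIENT_VERSION_MAJOR,
		.minor = I40E_CLIENT_VERSION_MINOR,
		.build = I40E_CLIENT_VERSION_BUILD,
	},
	.type    = I40E_CLIENT_IWARP,
	.ops     = &my_client_ops,
};

/* module init/exit would then call i40e_register_client(&my_client) and
 * i40e_unregister_client(&my_client). */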
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 39e28e11863c..b0e303f6603f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -65,6 +65,8 @@ struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
+struct udp_tunnel_nic_info;
+struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
@@ -874,8 +876,6 @@ enum bpf_netdev_command {
*/
XDP_SETUP_PROG,
XDP_SETUP_PROG_HW,
- XDP_QUERY_PROG,
- XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
@@ -886,6 +886,19 @@ struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
+struct bpf_xdp_link;
+
+enum bpf_xdp_mode {
+ XDP_MODE_SKB = 0,
+ XDP_MODE_DRV = 1,
+ XDP_MODE_HW = 2,
+ __MAX_XDP_MODE
+};
+
+struct bpf_xdp_entity {
+ struct bpf_prog *prog;
+ struct bpf_xdp_link *link;
+};
struct netdev_bpf {
enum bpf_netdev_command command;
@@ -896,12 +909,6 @@ struct netdev_bpf {
struct bpf_prog *prog;
struct netlink_ext_ack *extack;
};
- /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
- struct {
- u32 prog_id;
- /* flags with which program was installed */
- u32 prog_flags;
- };
/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct {
struct bpf_offloaded_map *offmap;
@@ -1742,6 +1749,8 @@ enum netdev_priv_flags {
* @real_num_rx_queues: Number of RX queues currently active in device
* @xdp_prog: XDP sockets filter program pointer
* @gro_flush_timeout: timeout for GRO layer in NAPI
+ * @napi_defer_hard_irqs: If not zero, provides a counter that
+ * allows hard NIC IRQs to be avoided on busy queues.
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
@@ -1836,6 +1845,10 @@ enum netdev_priv_flags {
*
* @macsec_ops: MACsec offloading ops
*
+ * @udp_tunnel_nic_info: static structure describing the UDP tunnel
+ * offload capabilities of the device
+ * @udp_tunnel_nic: UDP tunnel offload state
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -2052,6 +2065,8 @@ struct net_device {
struct timer_list watchdog_timer;
int watchdog_timeo;
+ u32 proto_down_reason;
+
struct list_head todo_list;
int __percpu *pcpu_refcnt;
@@ -2134,6 +2149,11 @@ struct net_device {
/* MACsec management functions */
const struct macsec_ops *macsec_ops;
#endif
+ const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
+ struct udp_tunnel_nic *udp_tunnel_nic;
+
+ /* protected by rtnl_lock */
+ struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -3802,6 +3822,8 @@ int dev_get_port_parent_id(struct net_device *dev,
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
+void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
+ u32 value);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3809,8 +3831,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, int expected_fd, u32 flags);
-u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
- enum bpf_netdev_command cmd);
+int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
+
int xdp_umem_query(struct net_device *dev, u16 queue_id);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
@@ -4819,6 +4842,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev)
return dev->priv_flags & IFF_OVS_DATAPATH;
}
+static inline bool netif_is_any_bridge_port(const struct net_device *dev)
+{
+ return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
+}
+
static inline bool netif_is_team_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM;
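With XDP_QUERY_PROG gone, the attached program is tracked per mode in xdp_state[] and reported by dev_xdp_prog_id(). A hedged sketch of querying it; my_show_xdp() is hypothetical and must run under rtnl_lock, as the xdp_state[] comment requires:

static void my_show_xdp(struct net_device *dev)
{
	ASSERT_RTNL();

	pr_info("%s: XDP prog ids: skb %u drv %u hw %u\n", dev->name,
		dev_xdp_prog_id(dev, XDP_MODE_SKB),
		dev_xdp_prog_id(dev, XDP_MODE_DRV),
		dev_xdp_prog_id(dev, XDP_MODE_HW));
}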
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index eb312e7ca36e..0101747de549 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -13,6 +13,7 @@
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
+#include <linux/sockptr.h>
#include <net/net_namespace.h>
static inline int NF_DROP_GETERR(int verdict)
@@ -163,18 +164,11 @@ struct nf_sockopt_ops {
/* Non-inclusive ranges: use 0/0/NULL to never get called. */
int set_optmin;
int set_optmax;
- int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
-#ifdef CONFIG_COMPAT
- int (*compat_set)(struct sock *sk, int optval,
- void __user *user, unsigned int len);
-#endif
+ int (*set)(struct sock *sk, int optval, sockptr_t arg,
+ unsigned int len);
int get_optmin;
int get_optmax;
int (*get)(struct sock *sk, int optval, void __user *user, int *len);
-#ifdef CONFIG_COMPAT
- int (*compat_get)(struct sock *sk, int optval,
- void __user *user, int *len);
-#endif
/* Use the module struct to lock set/get code in place */
struct module *owner;
};
@@ -346,16 +340,10 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
}
/* Call setsockopt() */
-int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
+int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt,
unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
int *len);
-#ifdef CONFIG_COMPAT
-int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
- char __user *opt, unsigned int len);
-int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
- char __user *opt, int *len);
-#endif
struct flowi;
struct nf_queue_entry;
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 5da88451853b..5deb099d156d 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -301,8 +301,8 @@ int xt_target_to_user(const struct xt_entry_target *t,
int xt_data_to_user(void __user *dst, const void *src,
int usersize, int size, int aligned_size);
-void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
- struct xt_counters_info *info, bool compat);
+void *xt_copy_counters(sockptr_t arg, unsigned int len,
+ struct xt_counters_info *info);
struct xt_counters *xt_counters_alloc(unsigned int counters);
struct xt_table *xt_register_table(struct net *net,
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index aac42c28fe62..9b67394471e1 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -58,7 +58,6 @@ struct nf_ipv6_ops {
int (*output)(struct net *, struct sock *, struct sk_buff *));
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
#if IS_MODULE(CONFIG_IPV6)
- int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
int (*br_fragment)(struct net *net, struct sock *sk,
struct sk_buff *skb,
struct nf_bridge_frag_data *data,
@@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
-static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
- u32 user)
-{
-#if IS_MODULE(CONFIG_IPV6)
- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
-
- if (!v6_ops)
- return 1;
-
- return v6_ops->br_defrag(net, skb, user);
-#elif IS_BUILTIN(CONFIG_IPV6)
- return nf_ct_frag6_gather(net, skb, user);
-#else
- return 1;
-#endif
-}
-
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct nf_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index f47af135bd56..e6a2d72e0dc7 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -102,9 +102,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
static inline void netpoll_poll_unlock(void *have)
{
}
-static inline void netpoll_netdev_init(struct net_device *dev)
-{
-}
static inline bool netpoll_tx_running(struct net_device *dev)
{
return false;
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 4dba3c948932..b8360be141da 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -150,6 +150,12 @@ enum nfs_opnum4 {
OP_WRITE_SAME = 70,
OP_CLONE = 71,
+ /* xattr support (RFC8276) */
+ OP_GETXATTR = 72,
+ OP_SETXATTR = 73,
+ OP_LISTXATTRS = 74,
+ OP_REMOVEXATTR = 75,
+
OP_ILLEGAL = 10044,
};
@@ -159,7 +165,7 @@ Needs to be updated if more operations are defined in future.*/
#define FIRST_NFS4_OP OP_ACCESS
#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
-#define LAST_NFS42_OP OP_CLONE
+#define LAST_NFS42_OP OP_REMOVEXATTR
#define LAST_NFS4_OP LAST_NFS42_OP
enum nfsstat4 {
@@ -280,6 +286,10 @@ enum nfsstat4 {
NFS4ERR_WRONG_LFS = 10092,
NFS4ERR_BADLABEL = 10093,
NFS4ERR_OFFLOAD_NO_REQS = 10094,
+
+ /* xattr (RFC8276) */
+ NFS4ERR_NOXATTR = 10095,
+ NFS4ERR_XATTR2BIG = 10096,
};
static inline bool seqid_mutating_err(u32 err)
@@ -452,6 +462,7 @@ enum change_attr_type4 {
#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
+#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
@@ -542,6 +553,11 @@ enum {
NFSPROC4_CLNT_LAYOUTERROR,
NFSPROC4_CLNT_COPY_NOTIFY,
+
+ NFSPROC4_CLNT_GETXATTR,
+ NFSPROC4_CLNT_SETXATTR,
+ NFSPROC4_CLNT_LISTXATTRS,
+ NFSPROC4_CLNT_REMOVEXATTR,
};
/* nfs41 types */
@@ -700,4 +716,13 @@ struct nl4_server {
struct nfs42_netaddr nl4_addr; /* NL4_NETADDR */
} u;
};
+
+/*
+ * Options for setxattr. These match the flags for setxattr(2).
+ */
+enum nfs4_setxattr_options {
+ SETXATTR4_EITHER = 0,
+ SETXATTR4_CREATE = 1,
+ SETXATTR4_REPLACE = 2,
+};
#endif
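The new nfs4_setxattr_options values mirror the XATTR_CREATE/XATTR_REPLACE flags of setxattr(2); a minimal illustrative mapping (hypothetical helper, assuming the flag definitions from <uapi/linux/xattr.h>):

static u32 my_setxattr_flags_to_nfs4(int flags)
{
	if (flags & XATTR_CREATE)
		return SETXATTR4_CREATE;
	if (flags & XATTR_REPLACE)
		return SETXATTR4_REPLACE;
	return SETXATTR4_EITHER;
}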
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 6ee9119acc5d..a2c6455ea3fa 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -102,6 +102,8 @@ struct nfs_delegation;
struct posix_acl;
+struct nfs4_xattr_cache;
+
/*
* nfs fs inode data in memory
*/
@@ -188,6 +190,10 @@ struct nfs_inode {
struct fscache_cookie *fscache;
#endif
struct inode vfs_inode;
+
+#ifdef CONFIG_NFS_V4_2
+ struct nfs4_xattr_cache *xattr_cache;
+#endif
};
struct nfs4_copy_state {
@@ -212,6 +218,9 @@ struct nfs4_copy_state {
#define NFS_ACCESS_EXTEND 0x0008
#define NFS_ACCESS_DELETE 0x0010
#define NFS_ACCESS_EXECUTE 0x0020
+#define NFS_ACCESS_XAREAD 0x0040
+#define NFS_ACCESS_XAWRITE 0x0080
+#define NFS_ACCESS_XALIST 0x0100
/*
* Cache validity bit flags
@@ -231,6 +240,7 @@ struct nfs4_copy_state {
#define NFS_INO_DATA_INVAL_DEFER \
BIT(13) /* Deferred cache invalidation */
#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */
+#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \
@@ -490,6 +500,8 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
struct nfs_fattr *fattr, struct nfs4_label *label);
extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
+extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res,
+ bool may_block);
/*
* linux/fs/nfs/symlink.c
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 465fa98258a3..7eae72a8762e 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -163,6 +163,11 @@ struct nfs_server {
unsigned int dtsize; /* readdir size */
unsigned short port; /* "port=" setting */
unsigned int bsize; /* server block size */
+#ifdef CONFIG_NFS_V4_2
+ unsigned int gxasize; /* getxattr size */
+ unsigned int sxasize; /* setxattr size */
+ unsigned int lxasize; /* listxattr size */
+#endif
unsigned int acregmin; /* attr cache timeouts */
unsigned int acregmax;
unsigned int acdirmin;
@@ -281,5 +286,6 @@ struct nfs_server {
#define NFS_CAP_OFFLOAD_CANCEL (1U << 25)
#define NFS_CAP_LAYOUTERROR (1U << 26)
#define NFS_CAP_COPY_NOTIFY (1U << 27)
+#define NFS_CAP_XATTR (1U << 28)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 5fd0a9ef425f..9408f3252c8e 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -150,6 +150,7 @@ struct nfs_fsinfo {
__u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */
__u32 blksize; /* preferred pnfs io block size */
__u32 clone_blksize; /* granularity of a CLONE operation */
+ __u32 xattr_support; /* User xattrs supported */
};
struct nfs_fsstat {
@@ -1497,7 +1498,64 @@ struct nfs42_seek_res {
u32 sr_eof;
u64 sr_offset;
};
-#endif
+
+struct nfs42_setxattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ const char *xattr_name;
+ u32 xattr_flags;
+ size_t xattr_len;
+ struct page **xattr_pages;
+};
+
+struct nfs42_setxattrres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs4_change_info cinfo;
+};
+
+struct nfs42_getxattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ const char *xattr_name;
+ size_t xattr_len;
+ struct page **xattr_pages;
+};
+
+struct nfs42_getxattrres {
+ struct nfs4_sequence_res seq_res;
+ size_t xattr_len;
+};
+
+struct nfs42_listxattrsargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ u32 count;
+ u64 cookie;
+ struct page **xattr_pages;
+};
+
+struct nfs42_listxattrsres {
+ struct nfs4_sequence_res seq_res;
+ struct page *scratch;
+ void *xattr_buf;
+ size_t xattr_len;
+ u64 cookie;
+ bool eof;
+ size_t copied;
+};
+
+struct nfs42_removexattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ const char *xattr_name;
+};
+
+struct nfs42_removexattrres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs4_change_info cinfo;
+};
+
+#endif /* CONFIG_NFS_V4_2 */
struct nfs_page;
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index 0c5ef54fd416..c1e79f72cd89 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -5,6 +5,8 @@
#ifndef _LINUX_NOSPEC_H
#define _LINUX_NOSPEC_H
+
+#include <linux/compiler.h>
#include <asm/barrier.h>
struct task_struct;
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 41e7795a3ee4..2a38f2b477a5 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -672,7 +672,7 @@ enum {
* Values set by the LLDD indicating completion status of the FCP operation.
* Must be set prior to calling the done() callback.
* @transferred_length: amount of DATA_OUT payload data received by a
- * a WRITEDATA operation. If not a WRITEDATA operation, value must
+ * WRITEDATA operation. If not a WRITEDATA operation, value must
* be set to 0. Should equal transfer_length on success.
* @fcp_error: status of the FCP operation. Must be 0 on success; on failure
* must be a NVME_SC_FC_xxxx value.
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 5ce51ab4c50e..d92535997687 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -132,6 +132,7 @@ enum {
#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1)
+#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff)
#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
@@ -162,7 +163,6 @@ enum {
enum {
NVME_CC_ENABLE = 1 << 0,
- NVME_CC_CSS_NVM = 0 << 4,
NVME_CC_EN_SHIFT = 0,
NVME_CC_CSS_SHIFT = 4,
NVME_CC_MPS_SHIFT = 7,
@@ -170,6 +170,9 @@ enum {
NVME_CC_SHN_SHIFT = 14,
NVME_CC_IOSQES_SHIFT = 16,
NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT,
+ NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT,
+ NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
@@ -179,6 +182,8 @@ enum {
NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
+ NVME_CAP_CSS_NVM = 1 << 0,
+ NVME_CAP_CSS_CSI = 1 << 6,
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
NVME_CSTS_NSSRO = 1 << 4,
@@ -307,6 +312,7 @@ enum {
NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
NVME_CTRL_ONCS_DSM = 1 << 2,
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
+ NVME_CTRL_ONCS_RESERVATIONS = 1 << 5,
NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
@@ -369,11 +375,37 @@ struct nvme_id_ns {
__u8 vs[3712];
};
+struct nvme_zns_lbafe {
+ __le64 zsze;
+ __u8 zdes;
+ __u8 rsvd9[7];
+};
+
+struct nvme_id_ns_zns {
+ __le16 zoc;
+ __le16 ozcs;
+ __le32 mar;
+ __le32 mor;
+ __le32 rrl;
+ __le32 frl;
+ __u8 rsvd20[2796];
+ struct nvme_zns_lbafe lbafe[16];
+ __u8 rsvd3072[768];
+ __u8 vs[256];
+};
+
+struct nvme_id_ctrl_zns {
+ __u8 zasl;
+ __u8 rsvd1[4095];
+};
+
enum {
NVME_ID_CNS_NS = 0x00,
NVME_ID_CNS_CTRL = 0x01,
NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
NVME_ID_CNS_NS_DESC_LIST = 0x03,
+ NVME_ID_CNS_CS_NS = 0x05,
+ NVME_ID_CNS_CS_CTRL = 0x06,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
@@ -384,6 +416,11 @@ enum {
};
enum {
+ NVME_CSI_NVM = 0,
+ NVME_CSI_ZNS = 2,
+};
+
+enum {
NVME_DIR_IDENTIFY = 0x00,
NVME_DIR_STREAMS = 0x01,
NVME_DIR_SND_ID_OP_ENABLE = 0x01,
@@ -435,11 +472,13 @@ struct nvme_ns_id_desc {
#define NVME_NIDT_EUI64_LEN 8
#define NVME_NIDT_NGUID_LEN 16
#define NVME_NIDT_UUID_LEN 16
+#define NVME_NIDT_CSI_LEN 1
enum {
NVME_NIDT_EUI64 = 0x01,
NVME_NIDT_NGUID = 0x02,
NVME_NIDT_UUID = 0x03,
+ NVME_NIDT_CSI = 0x04,
};
struct nvme_smart_log {
@@ -519,6 +558,27 @@ struct nvme_ana_rsp_hdr {
__le16 rsvd10[3];
};
+struct nvme_zone_descriptor {
+ __u8 zt;
+ __u8 zs;
+ __u8 za;
+ __u8 rsvd3[5];
+ __le64 zcap;
+ __le64 zslba;
+ __le64 wp;
+ __u8 rsvd32[32];
+};
+
+enum {
+ NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2,
+};
+
+struct nvme_zone_report {
+ __le64 nr_zones;
+ __u8 resv8[56];
+ struct nvme_zone_descriptor entries[];
+};
+
enum {
NVME_SMART_CRIT_SPARE = 1 << 0,
NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
@@ -613,6 +673,9 @@ enum nvme_opcode {
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
nvme_cmd_resv_release = 0x15,
+ nvme_cmd_zone_mgmt_send = 0x79,
+ nvme_cmd_zone_mgmt_recv = 0x7a,
+ nvme_cmd_zone_append = 0x7d,
};
#define nvme_opcode_name(opcode) { opcode, #opcode }
@@ -751,6 +814,7 @@ struct nvme_rw_command {
enum {
NVME_RW_LR = 1 << 15,
NVME_RW_FUA = 1 << 14,
+ NVME_RW_APPEND_PIREMAP = 1 << 9,
NVME_RW_DSM_FREQ_UNSPEC = 0,
NVME_RW_DSM_FREQ_TYPICAL = 1,
NVME_RW_DSM_FREQ_RARE = 2,
@@ -816,6 +880,53 @@ struct nvme_write_zeroes_cmd {
__le16 appmask;
};
+enum nvme_zone_mgmt_action {
+ NVME_ZONE_CLOSE = 0x1,
+ NVME_ZONE_FINISH = 0x2,
+ NVME_ZONE_OPEN = 0x3,
+ NVME_ZONE_RESET = 0x4,
+ NVME_ZONE_OFFLINE = 0x5,
+ NVME_ZONE_SET_DESC_EXT = 0x10,
+};
+
+struct nvme_zone_mgmt_send_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le32 cdw2[2];
+ __le64 metadata;
+ union nvme_data_ptr dptr;
+ __le64 slba;
+ __le32 cdw12;
+ __u8 zsa;
+ __u8 select_all;
+ __u8 rsvd13[2];
+ __le32 cdw14[2];
+};
+
+struct nvme_zone_mgmt_recv_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le64 rsvd2[2];
+ union nvme_data_ptr dptr;
+ __le64 slba;
+ __le32 numd;
+ __u8 zra;
+ __u8 zrasf;
+ __u8 pr;
+ __u8 rsvd13;
+ __le32 cdw14[2];
+};
+
+enum {
+ NVME_ZRA_ZONE_REPORT = 0,
+ NVME_ZRASF_ZONE_REPORT_ALL = 0,
+ NVME_REPORT_ZONE_PARTIAL = 1,
+};
+
/* Features */
enum {
@@ -872,6 +983,7 @@ enum nvme_admin_opcode {
nvme_admin_security_recv = 0x82,
nvme_admin_sanitize_nvm = 0x84,
nvme_admin_get_lba_status = 0x86,
+ nvme_admin_vendor_start = 0xC0,
};
#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
@@ -935,6 +1047,8 @@ enum {
NVME_FEAT_RESV_MASK = 0x82,
NVME_FEAT_RESV_PERSIST = 0x83,
NVME_FEAT_WRITE_PROTECT = 0x84,
+ NVME_FEAT_VENDOR_START = 0xC0,
+ NVME_FEAT_VENDOR_END = 0xFF,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
@@ -972,7 +1086,9 @@ struct nvme_identify {
__u8 cns;
__u8 rsvd3;
__le16 ctrlid;
- __u32 rsvd11[5];
+ __u8 rsvd11[3];
+ __u8 csi;
+ __u32 rsvd12[4];
};
#define NVME_IDENTIFY_DATA_SIZE 4096
@@ -1086,7 +1202,9 @@ struct nvme_get_log_page_command {
};
__le64 lpo;
};
- __u32 rsvd14[2];
+ __u8 rsvd14[3];
+ __u8 csi;
+ __u32 rsvd15;
};
struct nvme_directive_cmd {
@@ -1283,6 +1401,8 @@ struct nvme_command {
struct nvme_format_cmd format;
struct nvme_dsm_cmd dsm;
struct nvme_write_zeroes_cmd write_zeroes;
+ struct nvme_zone_mgmt_send_cmd zms;
+ struct nvme_zone_mgmt_recv_cmd zmr;
struct nvme_abort_cmd abort;
struct nvme_get_log_page_command get_log_page;
struct nvmf_common_command fabrics;
@@ -1417,6 +1537,18 @@ enum {
NVME_SC_AUTH_REQUIRED = 0x191,
/*
+ * I/O Command Set Specific - Zoned commands:
+ */
+ NVME_SC_ZONE_BOUNDARY_ERROR = 0x1b8,
+ NVME_SC_ZONE_FULL = 0x1b9,
+ NVME_SC_ZONE_READ_ONLY = 0x1ba,
+ NVME_SC_ZONE_OFFLINE = 0x1bb,
+ NVME_SC_ZONE_INVALID_WRITE = 0x1bc,
+ NVME_SC_ZONE_TOO_MANY_ACTIVE = 0x1bd,
+ NVME_SC_ZONE_TOO_MANY_OPEN = 0x1be,
+ NVME_SC_ZONE_INVALID_TRANSITION = 0x1bf,
+
+ /*
* Media and Data Integrity Errors:
*/
NVME_SC_WRITE_FAULT = 0x280,
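A hedged sketch of filling a Zone Management Receive command for a zone report with the new zoned definitions; my_build_report_zones() is hypothetical, and the 0's based dword count in numd follows the NVMe specification rather than anything declared here:

static void my_build_report_zones(struct nvme_command *c, u32 nsid,
				  u64 slba, size_t buflen)
{
	memset(c, 0, sizeof(*c));
	c->zmr.opcode = nvme_cmd_zone_mgmt_recv;
	c->zmr.nsid   = cpu_to_le32(nsid);
	c->zmr.slba   = cpu_to_le64(slba);
	c->zmr.numd   = cpu_to_le32(buflen / 4 - 1);	/* dwords, 0's based */
	c->zmr.zra    = NVME_ZRA_ZONE_REPORT;
	c->zmr.zrasf  = NVME_ZRASF_ZONE_REPORT_ALL;
	c->zmr.pr     = NVME_REPORT_ZONE_PARTIAL;
}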
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 1b311d27c9b8..052293f4cbdb 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -61,6 +61,7 @@ void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val);
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val);
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val);
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 6d6f8e5d24c9..06409a6c40bc 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -27,6 +27,9 @@ enum nvmem_type {
NVMEM_TYPE_BATTERY_BACKED,
};
+#define NVMEM_DEVID_NONE (-1)
+#define NVMEM_DEVID_AUTO (-2)
+
/**
* struct nvmem_config - NVMEM device configuration
*
diff --git a/include/linux/of.h b/include/linux/of.h
index c669c0a4732f..5cf7ae0465d1 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -554,7 +554,7 @@ bool of_console_check(struct device_node *dn, char *name, int index);
extern int of_cpu_node_to_id(struct device_node *np);
-int of_map_rid(struct device_node *np, u32 rid,
+int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out);
@@ -630,6 +630,11 @@ static inline struct device_node *of_get_parent(const struct device_node *node)
return NULL;
}
+static inline struct device_node *of_get_next_parent(struct device_node *node)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_next_child(
const struct device_node *node, struct device_node *prev)
{
@@ -978,7 +983,7 @@ static inline int of_cpu_node_to_id(struct device_node *np)
return -ENODEV;
}
-static inline int of_map_rid(struct device_node *np, u32 rid,
+static inline int of_map_id(struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
{
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 763022ed3456..88bc943405cd 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -6,8 +6,11 @@
#include <linux/of.h>
#include <linux/io.h>
+struct of_bus;
+
struct of_pci_range_parser {
struct device_node *node;
+ struct of_bus *bus;
const __be32 *range;
const __be32 *end;
int na;
@@ -119,6 +122,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
return NULL;
}
#endif
+#define of_range_parser_init of_pci_range_parser_init
#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8d31e39dd564..07ca187fc5e4 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -55,9 +55,15 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-int of_dma_configure(struct device *dev,
+int of_dma_configure_id(struct device *dev,
struct device_node *np,
- bool force_dma);
+ bool force_dma, const u32 *id);
+static inline int of_dma_configure(struct device *dev,
+ struct device_node *np,
+ bool force_dma)
+{
+ return of_dma_configure_id(dev, np, force_dma, NULL);
+}
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -106,6 +112,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return NULL;
}
+static inline int of_dma_configure_id(struct device *dev,
+ struct device_node *np,
+ bool force_dma)
+{
+ return 0;
+}
static inline int of_dma_configure(struct device *dev,
struct device_node *np,
bool force_dma)
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 01038a6aade0..4d7756087b6b 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -38,6 +38,7 @@ struct of_endpoint {
child = of_graph_get_next_endpoint(parent, child))
#ifdef CONFIG_OF
+bool of_graph_is_present(const struct device_node *node);
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
int of_graph_get_endpoint_count(const struct device_node *np);
@@ -56,6 +57,11 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
u32 port, u32 endpoint);
#else
+static inline bool of_graph_is_present(const struct device_node *node)
+{
+ return false;
+}
+
static inline int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint)
{
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index f3d40dd7bb66..16f4b3e87f20 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -13,7 +13,8 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix,
size_t *size);
extern const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np);
+ struct device_node *master_np,
+ const u32 *id);
#else
@@ -25,7 +26,8 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
}
static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np)
+ struct device_node *master_np,
+ const u32 *id)
{
return NULL;
}
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1214cabb2247..e8b78139f78c 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -52,9 +52,10 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev,
struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
- u32 rid);
+ u32 id,
+ u32 bus_token);
extern void of_msi_configure(struct device *dev, struct device_node *np);
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
+u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
static inline int of_irq_count(struct device_node *dev)
{
@@ -85,17 +86,17 @@ static inline struct irq_domain *of_msi_get_domain(struct device *dev,
return NULL;
}
static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
- u32 rid)
+ u32 id, u32 bus_token)
{
return NULL;
}
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
-static inline u32 of_msi_map_rid(struct device *dev,
- struct device_node *msi_np, u32 rid_in)
+static inline u32 of_msi_map_id(struct device *dev,
+ struct device_node *msi_np, u32 id_in)
{
- return rid_in;
+ return id_in;
}
#endif
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 0f61a4ac6bcf..1efb88d9f892 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -8,31 +8,33 @@
#ifndef __LINUX_OF_MDIO_H
#define __LINUX_OF_MDIO_H
+#include <linux/device.h>
#include <linux/phy.h>
#include <linux/of.h>
#if IS_ENABLED(CONFIG_OF_MDIO)
-extern bool of_mdiobus_child_is_phy(struct device_node *child);
-extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
-extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
-extern struct phy_device *of_phy_connect(struct net_device *dev,
- struct device_node *phy_np,
- void (*hndlr)(struct net_device *),
- u32 flags, phy_interface_t iface);
-extern struct phy_device *
+bool of_mdiobus_child_is_phy(struct device_node *child);
+int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
+int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np);
+struct phy_device *of_phy_find_device(struct device_node *phy_np);
+struct phy_device *
+of_phy_connect(struct net_device *dev, struct device_node *phy_np,
+ void (*hndlr)(struct net_device *), u32 flags,
+ phy_interface_t iface);
+struct phy_device *
of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
void (*hndlr)(struct net_device *));
-struct phy_device *of_phy_attach(struct net_device *dev,
- struct device_node *phy_np, u32 flags,
- phy_interface_t iface);
-
-extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
-extern int of_phy_register_fixed_link(struct device_node *np);
-extern void of_phy_deregister_fixed_link(struct device_node *np);
-extern bool of_phy_is_fixed_link(struct device_node *np);
-extern int of_mdiobus_phy_device_register(struct mii_bus *mdio,
- struct phy_device *phy,
- struct device_node *child, u32 addr);
+struct phy_device *
+of_phy_attach(struct net_device *dev, struct device_node *phy_np,
+ u32 flags, phy_interface_t iface);
+
+struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+int of_phy_register_fixed_link(struct device_node *np);
+void of_phy_deregister_fixed_link(struct device_node *np);
+bool of_phy_is_fixed_link(struct device_node *np);
+int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy,
+ struct device_node *child, u32 addr);
static inline int of_mdio_parse_addr(struct device *dev,
const struct device_node *np)
diff --git a/include/linux/oom.h b/include/linux/oom.h
index c696c265f019..f022f581ac29 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -48,7 +48,7 @@ struct oom_control {
/* Used by oom implementation, do not set */
unsigned long totalpages;
struct task_struct *chosen;
- unsigned long chosen_points;
+ long chosen_points;
/* Used to print the constraint info. */
enum oom_constraint constraint;
@@ -107,7 +107,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
bool __oom_reap_task_mm(struct mm_struct *mm);
-extern unsigned long oom_badness(struct task_struct *p,
+long oom_badness(struct task_struct *p,
unsigned long totalpages);
extern bool out_of_memory(struct oom_control *oc);
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 7302efff5e65..a433f13fc4bf 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -67,17 +67,6 @@ struct padata_serial_queue {
};
/**
- * struct padata_parallel_queue - The percpu padata parallel queue
- *
- * @reorder: List to wait for reordering after parallel processing.
- * @num_obj: Number of objects that are processed by this cpu.
- */
-struct padata_parallel_queue {
- struct padata_list reorder;
- atomic_t num_obj;
-};
-
-/**
* struct padata_cpumask - The cpumasks for the parallel/serial workers
*
* @pcpu: cpumask for the parallel workers.
@@ -93,7 +82,7 @@ struct padata_cpumask {
* that depends on the cpumask in use.
*
* @ps: padata_shell object.
- * @pqueue: percpu padata queues used for parallelization.
+ * @reorder_list: percpu reorder lists.
* @squeue: percpu padata queues used for serialization.
* @refcnt: Number of objects holding a reference on this parallel_data.
* @seq_nr: Sequence number of the parallelized data object.
@@ -105,7 +94,7 @@ struct padata_cpumask {
*/
struct parallel_data {
struct padata_shell *ps;
- struct padata_parallel_queue __percpu *pqueue;
+ struct padata_list __percpu *reorder_list;
struct padata_serial_queue __percpu *squeue;
atomic_t refcnt;
unsigned int seq_nr;
@@ -167,7 +156,6 @@ struct padata_mt_job {
* @serial_wq: The workqueue used for serial work.
* @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User supplied cpumasks for parallel and serial works.
- * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
@@ -179,7 +167,6 @@ struct padata_instance {
struct workqueue_struct *serial_wq;
struct list_head pslist;
struct padata_cpumask cpumask;
- struct padata_cpumask rcpumask;
struct kobject kobj;
struct mutex lock;
u8 flags;
@@ -194,7 +181,7 @@ extern void __init padata_init(void);
static inline void __init padata_init(void) {}
#endif
-extern struct padata_instance *padata_alloc_possible(const char *name);
+extern struct padata_instance *padata_alloc(const char *name);
extern void padata_free(struct padata_instance *pinst);
extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
extern void padata_free_shell(struct padata_shell *ps);
@@ -204,6 +191,4 @@ extern void padata_do_serial(struct padata_priv *padata);
extern void __init padata_do_multithreaded(struct padata_mt_job *job);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
-extern int padata_start(struct padata_instance *pinst);
-extern void padata_stop(struct padata_instance *pinst);
#endif
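With padata_start()/padata_stop() and padata_alloc_possible() removed, an instance is usable as soon as it is allocated; a hedged sketch of the updated setup sequence (names hypothetical, error unwinding trimmed):

static struct padata_instance *my_pinst;
static struct padata_shell *my_ps;

static int __init my_padata_setup(void)
{
	my_pinst = padata_alloc("my_worker");
	if (!my_pinst)
		return -ENOMEM;

	my_ps = padata_alloc_shell(my_pinst);
	if (!my_ps) {
		padata_free(my_pinst);
		return -ENOMEM;
	}
	/* jobs are then submitted with padata_do_parallel(my_ps, ...) */
	return 0;
}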
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 71283739ffd2..e200eef6a7fd 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -98,9 +98,11 @@
/*
* We are going to use the flags for the page to node mapping if its in
* there. This includes the case where there is no node, so it is implicit.
+ * Note that this #define MUST have a value so that it can be tested with
+ * the IS_ENABLED() macro.
*/
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
-#define NODE_NOT_IN_PAGE_FLAGS
+#define NODE_NOT_IN_PAGE_FLAGS 1
#endif
#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index c066fec5b74b..fff52ad370c1 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -56,35 +56,25 @@ struct page;
unsigned long get_pfnblock_flags_mask(struct page *page,
unsigned long pfn,
- unsigned long end_bitidx,
unsigned long mask);
void set_pfnblock_flags_mask(struct page *page,
unsigned long flags,
unsigned long pfn,
- unsigned long end_bitidx,
unsigned long mask);
/* Declarations for getting and setting flags. See mm/page_alloc.c */
-#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- end_bitidx, \
- (1 << (end_bitidx - start_bitidx + 1)) - 1)
-#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
- set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
- end_bitidx, \
- (1 << (end_bitidx - start_bitidx + 1)) - 1)
-
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
- get_pageblock_flags_group(page, PB_migrate_skip, \
- PB_migrate_skip)
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
+ (1 << (PB_migrate_skip)))
#define clear_pageblock_skip(page) \
- set_pageblock_flags_group(page, 0, PB_migrate_skip, \
- PB_migrate_skip)
+ set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
+ (1 << PB_migrate_skip))
#define set_pageblock_skip(page) \
- set_pageblock_flags_group(page, 1, PB_migrate_skip, \
- PB_migrate_skip)
+ set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
+ page_to_pfn(page), \
+ (1 << PB_migrate_skip))
#else
static inline bool get_pageblock_skip(struct page *page)
{
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index cf2468da68e9..7de11dcd534d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -381,7 +381,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
if (PageHuge(head))
return head;
- return head + (index & (hpage_nr_pages(head) - 1));
+ return head + (index & (thp_nr_pages(head) - 1));
}
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
@@ -496,8 +496,35 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return pgoff;
}
+/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
+struct wait_page_key {
+ struct page *page;
+ int bit_nr;
+ int page_match;
+};
+
+struct wait_page_queue {
+ struct page *page;
+ int bit_nr;
+ wait_queue_entry_t wait;
+};
+
+static inline bool wake_page_match(struct wait_page_queue *wait_page,
+ struct wait_page_key *key)
+{
+ if (wait_page->page != key->page)
+ return false;
+ key->page_match = 1;
+
+ if (wait_page->bit_nr != key->bit_nr)
+ return false;
+
+ return true;
+}
+
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
+extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags);
extern void unlock_page(struct page *page);
@@ -535,6 +562,22 @@ static inline int lock_page_killable(struct page *page)
}
/*
+ * lock_page_async - Lock the page, unless this would block. If the page
+ * is already locked, then queue a callback when the page becomes unlocked.
+ * This callback can then retry the operation.
+ *
+ * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
+ * was already locked and the callback defined in 'wait' was queued.
+ */
+static inline int lock_page_async(struct page *page,
+ struct wait_page_queue *wait)
+{
+ if (!trylock_page(page))
+ return __lock_page_async(page, wait);
+ return 0;
+}
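A hedged sketch of a caller that wants a non-blocking page lock and falls back to a wake callback; try_lock_page_nonblock() and my_wake_fn are hypothetical, and the wait entry setup assumes init_waitqueue_func_entry() from <linux/wait.h>:

static int try_lock_page_nonblock(struct page *page,
				  struct wait_page_queue *wpq,
				  wait_queue_func_t my_wake_fn)
{
	init_waitqueue_func_entry(&wpq->wait, my_wake_fn);

	/* 0: page locked now; -EIOCBQUEUED: my_wake_fn runs once it unlocks */
	return lock_page_async(page, wpq);
}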
+
+/*
* lock_page_or_retry - Lock the page, unless this would block and the
* caller indicated that it can handle a retry.
*
@@ -730,7 +773,7 @@ static inline struct page *readahead_page(struct readahead_control *rac)
page = xa_load(&rac->mapping->i_pages, rac->_index);
VM_BUG_ON_PAGE(!PageLocked(page), page);
- rac->_batch_count = hpage_nr_pages(page);
+ rac->_batch_count = thp_nr_pages(page);
return page;
}
@@ -753,7 +796,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageTail(page), page);
array[i++] = page;
- rac->_batch_count += hpage_nr_pages(page);
+ rac->_batch_count += thp_nr_pages(page);
/*
* The page cache isn't using multi-index entries yet,
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index f75c307f346d..df54cd5b15db 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -28,6 +28,10 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
void pci_disable_pri(struct pci_dev *pdev);
int pci_reset_pri(struct pci_dev *pdev);
int pci_prg_resp_pasid_required(struct pci_dev *pdev);
+bool pci_pri_supported(struct pci_dev *pdev);
+#else
+static inline bool pci_pri_supported(struct pci_dev *pdev)
+{ return false; }
#endif /* CONFIG_PCI_PRI */
#ifdef CONFIG_PCI_PASID
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 34c1c4f45288..835530605c0d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -179,7 +179,7 @@ static inline const char *pci_power_name(pci_power_t state)
*/
typedef unsigned int __bitwise pci_channel_state_t;
-enum pci_channel_state {
+enum {
/* I/O channel is in normal state */
pci_channel_io_normal = (__force pci_channel_state_t) 1,
@@ -432,6 +432,12 @@ struct pci_dev {
* mappings to make sure they cannot access arbitrary memory.
*/
unsigned int untrusted:1;
+ /*
+ * Info from the platform, e.g., ACPI or device tree, may mark a
+ * device as "external-facing". An external-facing device is
+ * itself internal but devices downstream from it are external.
+ */
+ unsigned int external_facing:1;
unsigned int broken_intx_masking:1; /* INTx masking can't be used */
unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
unsigned int irq_managed:1;
@@ -486,6 +492,7 @@ struct pci_dev {
#ifdef CONFIG_PCI_P2PDMA
struct pci_p2pdma *p2pdma;
#endif
+ u16 acs_cap; /* ACS Capability offset */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
char *driver_override; /* Driver name to force a match */
@@ -785,7 +792,7 @@ enum pci_ers_result {
struct pci_error_handlers {
/* PCI bus error detected on this device */
pci_ers_result_t (*error_detected)(struct pci_dev *dev,
- enum pci_channel_state error);
+ pci_channel_state_t error);
/* MMIO has been re-enabled, but not DMA */
pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
@@ -1053,13 +1060,6 @@ void pci_sort_breadthfirst(void);
/* Generic PCI functions exported to card drivers */
-enum pci_lost_interrupt_reason {
- PCI_LOST_IRQ_NO_INFORMATION = 0,
- PCI_LOST_IRQ_DISABLE_MSI,
- PCI_LOST_IRQ_DISABLE_MSIX,
- PCI_LOST_IRQ_DISABLE_ACPI,
-};
-enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
int pci_find_capability(struct pci_dev *dev, int cap);
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
int pci_find_ext_capability(struct pci_dev *dev, int cap);
@@ -2302,10 +2302,6 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
-int pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *resources,
- struct list_head *ib_resources,
- struct resource **bus_range);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2313,14 +2309,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
-static inline int
-pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *resources,
- struct list_head *ib_resources,
- struct resource **bus_range)
-{
- return -EINVAL;
-}
#endif /* CONFIG_OF */
static inline struct device_node *
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 0ad57693f392..1ab1e24bcbce 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2585,6 +2585,8 @@
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
+#define PCI_VENDOR_ID_REDHAT 0x1b36
+
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
@@ -2659,6 +2661,8 @@
#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
#define PCI_DEVICE_ID_INTEL_80333_1 0x0372
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC 0x0435
+#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF 0x0443
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
@@ -2708,6 +2712,8 @@
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2
+#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
@@ -2924,6 +2930,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717
#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718
#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
+#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8
+#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 22d9d183950d..87d8a38bdea1 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -155,7 +155,7 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
*
- * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+ * The dependency ordering from the READ_ONCE() pairs
* with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 0a4f54dd4737..01861eebed79 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -44,6 +44,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+void percpu_counter_sync(struct percpu_counter *fbc);
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
@@ -172,6 +173,9 @@ static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
return true;
}
+static inline void percpu_counter_sync(struct percpu_counter *fbc)
+{
+}
#endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b4bb32082342..04a49ccc7beb 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -366,7 +366,7 @@ struct pmu {
* ->stop() with PERF_EF_UPDATE will read the counter and update
* period/count values like ->read() would.
*
- * ->start() with PERF_EF_RELOAD will reprogram the the counter
+ * ->start() with PERF_EF_RELOAD will reprogram the counter
* value, must be preceded by a ->stop() with PERF_EF_UPDATE.
*/
void (*start) (struct perf_event *event, int flags);
@@ -419,10 +419,11 @@ struct pmu {
*/
void (*sched_task) (struct perf_event_context *ctx,
bool sched_in);
+
/*
- * PMU specific data size
+ * Kmem cache of PMU specific data
*/
- size_t task_ctx_size;
+ struct kmem_cache *task_ctx_cache;
/*
* PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
@@ -1232,6 +1233,9 @@ extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
+extern void perf_event_text_poke(const void *addr,
+ const void *old_bytes, size_t old_len,
+ const void *new_bytes, size_t new_len);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
@@ -1244,6 +1248,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
+extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
+extern void put_callchain_entry(int rctx);
extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;
@@ -1479,6 +1485,11 @@ static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
+static inline void perf_event_text_poke(const void *addr,
+ const void *old_bytes,
+ size_t old_len,
+ const void *new_bytes,
+ size_t new_len) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 56c1e8eb7bb0..e8cbc2e795d5 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -117,7 +117,9 @@ static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
*/
+#ifndef pgd_offset_k
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+#endif
/*
* In many cases it is known that a virtual address is mapped at PMD or PTE
@@ -647,40 +649,6 @@ static inline int arch_unmap_one(struct mm_struct *mm,
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif
-#ifndef pgprot_nx
-#define pgprot_nx(prot) (prot)
-#endif
-
-#ifndef pgprot_noncached
-#define pgprot_noncached(prot) (prot)
-#endif
-
-#ifndef pgprot_writecombine
-#define pgprot_writecombine pgprot_noncached
-#endif
-
-#ifndef pgprot_writethrough
-#define pgprot_writethrough pgprot_noncached
-#endif
-
-#ifndef pgprot_device
-#define pgprot_device pgprot_noncached
-#endif
-
-#ifndef pgprot_modify
-#define pgprot_modify pgprot_modify
-static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
-{
- if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
- newprot = pgprot_noncached(newprot);
- if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
- newprot = pgprot_writecombine(newprot);
- if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
- newprot = pgprot_device(newprot);
- return newprot;
-}
-#endif
-
/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
@@ -838,8 +806,45 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
/*
* No-op macros that just return the current protection value. Defined here
- * because these macros can be used used even if CONFIG_MMU is not defined.
+ * because these macros can be used even if CONFIG_MMU is not defined.
*/
+
+#ifndef pgprot_nx
+#define pgprot_nx(prot) (prot)
+#endif
+
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot) (prot)
+#endif
+
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
+#ifdef CONFIG_MMU
+#ifndef pgprot_modify
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
+ newprot = pgprot_noncached(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
+ newprot = pgprot_writecombine(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
+ newprot = pgprot_device(newprot);
+ return newprot;
+}
+#endif
+#endif /* CONFIG_MMU */
+
#ifndef pgprot_encrypted
#define pgprot_encrypted(prot) (prot)
#endif
@@ -1231,7 +1236,7 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
* Technically a PTE can be PROTNONE even when not doing NUMA balancing but
* the only case the kernel cares is for NUMA balancing and is only ever set
* when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
- * _PAGE_PROTNONE so by by default, implement the helper as "always no". It
+ * _PAGE_PROTNONE so by default, implement the helper as "always no". It
* is the responsibility of the caller to distinguish between PROT_NONE
* protections and NUMA hinting fault protections.
*/
@@ -1315,10 +1320,10 @@ static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
/*
* ARCHes with special requirements for evicting THP backing TLB entries can
* implement this. Otherwise also, it can help optimize normal TLB flush in
- * THP regime. stock flush_tlb_range() typically has optimization to nuke the
- * entire TLB TLB if flush span is greater than a threshold, which will
- * likely be true for a single huge page. Thus a single thp flush will
- * invalidate the entire TLB which is not desitable.
+ * THP regime. Stock flush_tlb_range() typically has optimization to nuke the
+ * entire TLB if flush span is greater than a threshold, which will
+ * likely be true for a single huge page. Thus a single THP flush will
+ * invalidate the entire TLB which is not desirable.
* e.g. see arch/arc: flush_pmd_tlb_range
*/
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
diff --git a/include/linux/phy.h b/include/linux/phy.h
index b693b609b2f5..3a09d2bf69ea 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -244,7 +244,8 @@ struct phy_package_shared {
};
/* used as bit number in atomic bitops */
-#define PHY_SHARED_F_INIT_DONE 0
+#define PHY_SHARED_F_INIT_DONE 0
+#define PHY_SHARED_F_PROBE_DONE 1
/*
* The Bus class for PHYs. Devices which provide access to
@@ -260,9 +261,6 @@ struct mii_bus {
int (*reset)(struct mii_bus *bus);
struct mdio_bus_stats stats[PHY_MAX_ADDR];
- unsigned int is_managed:1; /* is device-managed */
- unsigned int is_managed_registered:1;
-
/*
* A lock to ensure that only one thing can read/write
* the MDIO bus at a time
@@ -295,9 +293,19 @@ struct mii_bus {
/* GPIO reset pulse width in microseconds */
int reset_delay_us;
+ /* GPIO reset deassert delay in microseconds */
+ int reset_post_delay_us;
/* RESET GPIO descriptor pointer */
struct gpio_desc *reset_gpiod;
+ /* bus capabilities, used for probing */
+ enum {
+ MDIOBUS_NO_CAP = 0,
+ MDIOBUS_C22,
+ MDIOBUS_C45,
+ MDIOBUS_C22_C45,
+ } probe_capabilities;
+
/* protect access to the shared element */
struct mutex shared_lock;
@@ -313,20 +321,11 @@ static inline struct mii_bus *mdiobus_alloc(void)
}
int __mdiobus_register(struct mii_bus *bus, struct module *owner);
+int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus,
+ struct module *owner);
#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
-static inline int devm_mdiobus_register(struct mii_bus *bus)
-{
- int ret;
-
- if (!bus->is_managed)
- return -EPERM;
-
- ret = mdiobus_register(bus);
- if (!ret)
- bus->is_managed_registered = 1;
-
- return ret;
-}
+#define devm_mdiobus_register(dev, bus) \
+ __devm_mdiobus_register(dev, bus, THIS_MODULE)
void mdiobus_unregister(struct mii_bus *bus);
void mdiobus_free(struct mii_bus *bus);
@@ -337,7 +336,6 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
}
struct mii_bus *mdio_find_bus(const char *mdio_name);
-void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
#define PHY_INTERRUPT_DISABLED false
@@ -388,14 +386,18 @@ enum phy_state {
PHY_CABLETEST,
};
+#define MDIO_MMD_NUM 32
+
/**
* struct phy_c45_device_ids - 802.3-c45 Device Identifiers
- * @devices_in_package: Bit vector of devices present.
+ * @devices_in_package: IEEE 802.3 devices in package register value.
+ * @mmds_present: bit vector of MMDs present.
 * @device_ids: The device identifier for each present device.
*/
struct phy_c45_device_ids {
u32 devices_in_package;
- u32 device_ids[8];
+ u32 mmds_present;
+ u32 device_ids[MDIO_MMD_NUM];
};
struct macsec_context;
@@ -1385,6 +1387,9 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev);
int genphy_c45_read_status(struct phy_device *phydev);
int genphy_c45_config_aneg(struct phy_device *phydev);
+/* Generic C45 PHY driver */
+extern struct phy_driver genphy_c45_driver;
+
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev);
@@ -1431,6 +1436,10 @@ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp);
void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause);
+
+s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
+ const int *delay_values, int size, bool is_rx);
+
void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv,
bool *tx_pause, bool *rx_pause);
@@ -1467,51 +1476,10 @@ int __init mdio_bus_init(void);
void mdio_bus_exit(void);
#endif
-/* Inline function for use within net/core/ethtool.c (built-in) */
-static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
-{
- if (!phydev->drv)
- return -EIO;
-
- mutex_lock(&phydev->lock);
- phydev->drv->get_strings(phydev, data);
- mutex_unlock(&phydev->lock);
-
- return 0;
-}
-
-static inline int phy_ethtool_get_sset_count(struct phy_device *phydev)
-{
- int ret;
-
- if (!phydev->drv)
- return -EIO;
-
- if (phydev->drv->get_sset_count &&
- phydev->drv->get_strings &&
- phydev->drv->get_stats) {
- mutex_lock(&phydev->lock);
- ret = phydev->drv->get_sset_count(phydev);
- mutex_unlock(&phydev->lock);
-
- return ret;
- }
-
- return -EOPNOTSUPP;
-}
-
-static inline int phy_ethtool_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
-{
- if (!phydev->drv)
- return -EIO;
-
- mutex_lock(&phydev->lock);
- phydev->drv->get_stats(phydev, stats, data);
- mutex_unlock(&phydev->lock);
-
- return 0;
-}
+int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
+int phy_ethtool_get_sset_count(struct phy_device *phydev);
+int phy_ethtool_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data);
static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
{
@@ -1555,14 +1523,25 @@ static inline int __phy_package_write(struct phy_device *phydev,
return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
}
-static inline bool phy_package_init_once(struct phy_device *phydev)
+static inline bool __phy_package_set_once(struct phy_device *phydev,
+ unsigned int b)
{
struct phy_package_shared *shared = phydev->shared;
if (!shared)
return false;
- return !test_and_set_bit(PHY_SHARED_F_INIT_DONE, &shared->flags);
+ return !test_and_set_bit(b, &shared->flags);
+}
+
+static inline bool phy_package_init_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE);
+}
+
+static inline bool phy_package_probe_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE);
}
extern struct bus_type mdio_bus_type;
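As a minimal sketch (not part of the patch), a hypothetical PHY driver hook shows how the test_and_set_bit() semantics behind __phy_package_set_once() guarantee that package-wide setup runs exactly once, no matter which PHY of the package gets there first:

static int example_phy_config_init(struct phy_device *phydev)
{
	if (phy_package_init_once(phydev)) {
		/* Only the first PHY of the package enters this branch;
		 * package-global registers would be programmed here. */
	}
	return 0;
}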
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index cc5b452a184e..c36fb41a7d90 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -62,6 +62,10 @@ enum phylink_op_type {
* @dev: a pointer to a struct device associated with the MAC
* @type: operation type of PHYLINK instance
* @pcs_poll: MAC PCS cannot provide link change interrupt
+ * @poll_fixed_state: if true, start link_poll to track the fixed link state
+ *	when the MAC is operating in %MLO_AN_FIXED mode.
+ * @get_fixed_state: callback used to determine the fixed link state when
+ *	the MAC is operating in %MLO_AN_FIXED mode.
*/
struct phylink_config {
struct device *dev;
@@ -76,7 +80,9 @@ struct phylink_config {
* struct phylink_mac_ops - MAC operations structure.
* @validate: Validate and update the link configuration.
* @mac_pcs_get_state: Read the current link state from the hardware.
+ * @mac_prepare: prepare for a major reconfiguration of the interface.
* @mac_config: configure the MAC for the selected mode and state.
+ * @mac_finish: finish a major reconfiguration of the interface.
* @mac_an_restart: restart 802.3z BaseX autonegotiation.
* @mac_link_down: take the link down.
* @mac_link_up: allow the link to come up.
@@ -89,8 +95,12 @@ struct phylink_mac_ops {
struct phylink_link_state *state);
void (*mac_pcs_get_state)(struct phylink_config *config,
struct phylink_link_state *state);
+ int (*mac_prepare)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
void (*mac_config)(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
+ int (*mac_finish)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
void (*mac_an_restart)(struct phylink_config *config);
void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
@@ -146,6 +156,31 @@ void mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state);
/**
+ * mac_prepare() - prepare to change the PHY interface mode
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @iface: interface mode to switch to
+ *
+ * phylink will call this method at the beginning of a full initialisation
+ * of the link, which includes changing the interface mode or at initial
+ * startup time. It may be called for the current mode. The MAC driver
+ * should perform whatever actions are required, e.g. disabling the
+ * Serdes PHY.
+ *
+ * This will be the first call in the sequence:
+ * - mac_prepare()
+ * - mac_config()
+ * - pcs_config()
+ * - possible pcs_an_restart()
+ * - mac_finish()
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
+
+/**
* mac_config() - configure the MAC for the selected mode and state
* @config: a pointer to a &struct phylink_config.
* @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
@@ -221,6 +256,23 @@ void mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
/**
+ * mac_finish() - finish a change of the PHY interface mode
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @iface: interface mode to switch to
+ *
+ * phylink will call this if it called mac_prepare() to allow the MAC to
+ * complete any necessary steps after the MAC and PCS have been configured
+ * for the @mode and @iface. E.g. a MAC driver may wish to re-enable the
+ * Serdes PHY here if it was previously disabled by mac_prepare().
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface);
+
+/**
* mac_an_restart() - restart 802.3z BaseX autonegotiation
* @config: a pointer to a &struct phylink_config.
*/
@@ -273,6 +325,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy,
int speed, int duplex, bool tx_pause, bool rx_pause);
#endif
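As a minimal sketch (not part of the patch), a hypothetical MAC driver could pair the two new callbacks around the mac_prepare()/mac_config()/pcs_config()/mac_finish() sequence documented above, for example to quiesce and later re-enable a SerDes lane:

static int example_mac_prepare(struct phylink_config *config, unsigned int mode,
			       phy_interface_t iface)
{
	/* quiesce the SerDes/datapath before mac_config() and pcs_config() */
	return 0;
}

static int example_mac_finish(struct phylink_config *config, unsigned int mode,
			      phy_interface_t iface)
{
	/* re-enable the SerDes once MAC and PCS are set up for @iface */
	return 0;
}

static const struct phylink_mac_ops example_mac_ops = {
	/* .validate, .mac_config, .mac_link_up, .mac_link_down also required */
	.mac_prepare = example_mac_prepare,
	.mac_finish  = example_mac_finish,
};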
+struct phylink_pcs_ops;
+
+/**
+ * struct phylink_pcs - PHYLINK PCS instance
+ * @ops: a pointer to the &struct phylink_pcs_ops structure
+ * @poll: poll the PCS for link changes
+ *
+ * This structure is designed to be embedded within the PCS private data,
+ * and will be passed between phylink and the PCS.
+ */
+struct phylink_pcs {
+ const struct phylink_pcs_ops *ops;
+ bool poll;
+};
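As a minimal sketch (not part of the patch), a PCS driver would typically embed the new struct phylink_pcs in its private data and recover that data with container_of() inside the pcs-centric callbacks; the structure name, register window and link-status bit are hypothetical:

struct example_pcs {
	struct phylink_pcs pcs;
	void __iomem *regs;		/* hypothetical register window */
};

static void example_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	struct example_pcs *ep = container_of(pcs, struct example_pcs, pcs);

	/* read the hardware and fill in the link state (sketch only) */
	state->link = !!(readl(ep->regs) & BIT(0));	/* hypothetical link bit */
}

static const struct phylink_pcs_ops example_pcs_ops = {
	.pcs_get_state = example_pcs_get_state,
	/* .pcs_config, .pcs_an_restart and .pcs_link_up omitted here */
};

The driver would then set example_pcs.pcs.ops to &example_pcs_ops and hand &example_pcs.pcs to phylink_set_pcs().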
+
/**
* struct phylink_pcs_ops - MAC PCS operations structure.
* @pcs_get_state: read the current MAC PCS link state from the hardware.
@@ -282,20 +349,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy,
* (where necessary).
*/
struct phylink_pcs_ops {
- void (*pcs_get_state)(struct phylink_config *config,
+ void (*pcs_get_state)(struct phylink_pcs *pcs,
struct phylink_link_state *state);
- int (*pcs_config)(struct phylink_config *config, unsigned int mode,
+ int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface,
- const unsigned long *advertising);
- void (*pcs_an_restart)(struct phylink_config *config);
- void (*pcs_link_up)(struct phylink_config *config, unsigned int mode,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac);
+ void (*pcs_an_restart)(struct phylink_pcs *pcs);
+ void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed, int duplex);
};
#if 0 /* For kernel-doc purposes only. */
/**
* pcs_get_state() - Read the current inband link state from the hardware
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
* @state: a pointer to a &struct phylink_link_state.
*
* Read the current inband link state from the MAC PCS, reporting the
@@ -308,18 +376,20 @@ struct phylink_pcs_ops {
* When present, this overrides mac_pcs_get_state() in &struct
* phylink_mac_ops.
*/
-void pcs_get_state(struct phylink_config *config,
+void pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state);
/**
* pcs_config() - Configure the PCS mode and advertisement
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
* @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
* @interface: interface mode to be used
 * @advertising: advertisement ethtool link mode mask
+ * @permit_pause_to_mac: permit forwarding pause resolution to MAC
*
* Configure the PCS for the operating mode, the interface mode, and set
- * the advertisement mask.
+ * the advertisement mask. @permit_pause_to_mac indicates whether the
+ * hardware may forward the pause mode resolution to the MAC.
*
* When operating in %MLO_AN_INBAND, inband should always be enabled,
* otherwise inband should be disabled.
@@ -331,21 +401,22 @@ void pcs_get_state(struct phylink_config *config,
*
* For most 10GBASE-R, there is no advertisement.
*/
-int (*pcs_config)(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, const unsigned long *advertising);
+int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, const unsigned long *advertising,
+ bool permit_pause_to_mac);
/**
* pcs_an_restart() - restart 802.3z BaseX autonegotiation
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
*
* When PCS ops are present, this overrides mac_an_restart() in &struct
* phylink_mac_ops.
*/
-void (*pcs_an_restart)(struct phylink_config *config);
+void pcs_an_restart(struct phylink_pcs *pcs);
/**
* pcs_link_up() - program the PCS for the resolved link configuration
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
* @speed: link speed
@@ -356,14 +427,14 @@ void (*pcs_an_restart)(struct phylink_config *config);
* mode without in-band AN needs to be manually configured for the link
* and duplex setting. Otherwise, this should be a no-op.
*/
-void (*pcs_link_up)(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, int speed, int duplex);
+void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex);
#endif
struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
phy_interface_t iface,
const struct phylink_mac_ops *mac_ops);
-void phylink_add_pcs(struct phylink *, const struct phylink_pcs_ops *ops);
+void phylink_set_pcs(struct phylink *, struct phylink_pcs *pcs);
void phylink_destroy(struct phylink *);
int phylink_connect_phy(struct phylink *, struct phy_device *);
@@ -392,6 +463,8 @@ int phylink_init_eee(struct phylink *, bool);
int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
+int phylink_speed_down(struct phylink *pl, bool sync);
+int phylink_speed_up(struct phylink *pl);
#define phylink_zero(bm) \
bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS)
@@ -410,6 +483,9 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
phy_interface_t interface,
const unsigned long *advertising);
+int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising);
void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
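As a minimal sketch (not part of the patch), a clause-22 MDIO-attached PCS can implement pcs_config() by delegating to the new phylink_mii_c22_pcs_config() helper; the embedding structure and its mdio_device pointer are assumptions for illustration:

struct example_c22_pcs {
	struct phylink_pcs pcs;
	struct mdio_device *mdio;	/* hypothetical backing MDIO device */
};

static int example_c22_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
				  phy_interface_t interface,
				  const unsigned long *advertising,
				  bool permit_pause_to_mac)
{
	struct example_c22_pcs *ep = container_of(pcs, struct example_c22_pcs, pcs);

	return phylink_mii_c22_pcs_config(ep->mdio, mode, interface,
					  advertising);
}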
void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
diff --git a/include/linux/platform_data/clk-st.h b/include/linux/platform_data/clk-fch.h
index 7cdb6a402b35..b9f682459f08 100644
--- a/include/linux/platform_data/clk-st.h
+++ b/include/linux/platform_data/clk-fch.h
@@ -1,17 +1,18 @@
/* SPDX-License-Identifier: MIT */
/*
- * clock framework for AMD Stoney based clock
+ * clock framework for AMD misc clocks
*
* Copyright 2018 Advanced Micro Devices, Inc.
*/
-#ifndef __CLK_ST_H
-#define __CLK_ST_H
+#ifndef __CLK_FCH_H
+#define __CLK_FCH_H
#include <linux/compiler.h>
-struct st_clk_data {
+struct fch_clk_data {
void __iomem *base;
+ u32 is_rv;
};
-#endif /* __CLK_ST_H */
+#endif /* __CLK_FCH_H */
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 69210881ebac..1fcfe9e63cb9 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -4917,15 +4917,26 @@ struct ec_response_usb_pd_control_v1 {
#define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */
#define USBC_PD_CC_DFP_ATTACHED  5 /* DFP attached to usbc */
+/* Active/Passive Cable */
+#define USB_PD_CTRL_ACTIVE_CABLE BIT(0)
+/* Optical/Non-optical cable */
+#define USB_PD_CTRL_OPTICAL_CABLE BIT(1)
+/* 3rd gen TBT device (or AMA) / 2nd gen TBT adapter */
+#define USB_PD_CTRL_TBT_LEGACY_ADAPTER BIT(2)
+/* Active Link Uni-Direction */
+#define USB_PD_CTRL_ACTIVE_LINK_UNIDIR BIT(3)
+
struct ec_response_usb_pd_control_v2 {
uint8_t enabled;
uint8_t role;
uint8_t polarity;
char state[32];
- uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */
- uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */
- /* CL:1500994 Current cable type */
- uint8_t reserved_cable_type;
+ uint8_t cc_state; /* enum pd_cc_states representing cc state */
+ uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */
+ uint8_t reserved; /* Reserved for future use */
+	uint8_t control_flags;		/* USB_PD_CTRL_* flags */
+ uint8_t cable_speed; /* TBT_SS_* cable speed */
+ uint8_t cable_gen; /* TBT_GEN3_* cable rounded support */
} __ec_align1;
#define EC_CMD_USB_PD_PORTS 0x0102
@@ -5207,11 +5218,15 @@ struct ec_params_usb_pd_mux_info {
} __ec_align1;
/* Flags representing mux state */
-#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */
-#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */
-#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */
-#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */
-#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */
+#define USB_PD_MUX_NONE 0 /* Open switch */
+#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */
+#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */
+#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */
+#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */
+#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */
+#define USB_PD_MUX_SAFE_MODE BIT(5) /* DP is in safe mode */
+#define USB_PD_MUX_TBT_COMPAT_ENABLED BIT(6) /* TBT compat enabled */
+#define USB_PD_MUX_USB4_ENABLED BIT(7) /* USB4 enabled */
struct ec_response_usb_pd_mux_info {
uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */
@@ -5431,6 +5446,89 @@ struct ec_response_rollback_info {
#define EC_CMD_AP_RESET 0x0125
/*****************************************************************************/
+/* Voltage regulator controls */
+
+/*
+ * Get basic info of voltage regulator for given index.
+ *
+ * Returns the regulator name and supported voltage list in mV.
+ */
+#define EC_CMD_REGULATOR_GET_INFO 0x012C
+
+/* Maximum length of regulator name */
+#define EC_REGULATOR_NAME_MAX_LEN 16
+
+/* Maximum length of the supported voltage list. */
+#define EC_REGULATOR_VOLTAGE_MAX_COUNT 16
+
+struct ec_params_regulator_get_info {
+ uint32_t index;
+} __ec_align4;
+
+struct ec_response_regulator_get_info {
+ char name[EC_REGULATOR_NAME_MAX_LEN];
+ uint16_t num_voltages;
+ uint16_t voltages_mv[EC_REGULATOR_VOLTAGE_MAX_COUNT];
+} __ec_align2;
+
+/*
+ * Configure the regulator as enabled / disabled.
+ */
+#define EC_CMD_REGULATOR_ENABLE 0x012D
+
+struct ec_params_regulator_enable {
+ uint32_t index;
+ uint8_t enable;
+} __ec_align4;
+
+/*
+ * Query if the regulator is enabled.
+ *
+ * Returns 1 if the regulator is enabled, 0 if not.
+ */
+#define EC_CMD_REGULATOR_IS_ENABLED 0x012E
+
+struct ec_params_regulator_is_enabled {
+ uint32_t index;
+} __ec_align4;
+
+struct ec_response_regulator_is_enabled {
+ uint8_t enabled;
+} __ec_align1;
+
+/*
+ * Set voltage for the voltage regulator within the range specified.
+ *
+ * The driver should select the voltage in range closest to min_mv.
+ *
+ * Also note that this might be called before the regulator is enabled, and the
+ * setting should be in effect after the regulator is enabled.
+ */
+#define EC_CMD_REGULATOR_SET_VOLTAGE 0x012F
+
+struct ec_params_regulator_set_voltage {
+ uint32_t index;
+ uint32_t min_mv;
+ uint32_t max_mv;
+} __ec_align4;
+
+/*
+ * Get the currently configured voltage for the voltage regulator.
+ *
+ * Note that this might be called before the regulator is enabled, and this
+ * should return the configured output voltage if the regulator is enabled.
+ */
+#define EC_CMD_REGULATOR_GET_VOLTAGE 0x0130
+
+struct ec_params_regulator_get_voltage {
+ uint32_t index;
+} __ec_align4;
+
+struct ec_response_regulator_get_voltage {
+ uint32_t voltage_mv;
+} __ec_align4;
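As a minimal sketch (not part of the patch), this is roughly how a host-side request for the new EC_CMD_REGULATOR_SET_VOLTAGE command could be shaped; the regulator index and target voltage are arbitrary, and the cros_ec_cmd_xfer_status() transport plumbing around it is elided:

struct ec_params_regulator_set_voltage req = {
	.index  = 0,		/* hypothetical regulator index */
	.min_mv = 3300,
	.max_mv = 3300,		/* request exactly 3.3 V */
};
/* Per the comment above, the EC picks the supported voltage within
 * [min_mv, max_mv] closest to min_mv, even before the rail is enabled. */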
+
+/*****************************************************************************/
/* The command range 0x200-0x2FF is reserved for Rotor. */
/*****************************************************************************/
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 383243326676..4a415ae851ef 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -216,9 +216,6 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
-int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
- struct cros_ec_command *msg);
-
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h
index 3fbf9f2793b5..bc208c64e3d7 100644
--- a/include/linux/platform_data/davinci-cpufreq.h
+++ b/include/linux/platform_data/davinci-cpufreq.h
@@ -2,7 +2,7 @@
/*
* TI DaVinci CPUFreq platform support.
*
- * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
+ * Copyright (C) 2009 Texas Instruments, Inc. https://www.ti.com/
*/
#ifndef _MACH_DAVINCI_CPUFREQ_H
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 7fe80f1c7e08..5d1fb0d78a22 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -1,7 +1,7 @@
/*
* TI DaVinci Audio Serial Port support
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index f3eaf9ec00a1..fbbeb2f6189b 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -8,10 +8,15 @@
#ifndef _PLATFORM_DATA_DMA_DW_H
#define _PLATFORM_DATA_DMA_DW_H
-#include <linux/device.h>
+#include <linux/bits.h>
+#include <linux/types.h>
#define DW_DMA_MAX_NR_MASTERS 4
#define DW_DMA_MAX_NR_CHANNELS 8
+#define DW_DMA_MIN_BURST 1
+#define DW_DMA_MAX_BURST 256
+
+struct device;
/**
* struct dw_dma_slave - Controller-specific information about a slave
@@ -42,6 +47,8 @@ struct dw_dma_slave {
* @data_width: Maximum data width supported by hardware per AHB master
* (in bytes, power of 2)
* @multi_block: Multi block transfers supported by hardware per channel.
+ * @max_burst: Maximum value of burst transaction size supported by hardware
+ * per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH).
* @protctl: Protection control signals setting per channel.
*/
struct dw_dma_platform_data {
@@ -56,6 +63,7 @@ struct dw_dma_platform_data {
unsigned char nr_masters;
unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
+ u32 max_burst[DW_DMA_MAX_NR_CHANNELS];
#define CHAN_PROTCTL_PRIVILEGED BIT(0)
#define CHAN_PROTCTL_BUFFERABLE BIT(1)
#define CHAN_PROTCTL_CACHEABLE BIT(2)
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
index 0f491d8abfdd..3cc78f0447b1 100644
--- a/include/linux/platform_data/elm.h
+++ b/include/linux/platform_data/elm.h
@@ -2,7 +2,7 @@
/*
* BCH Error Location Module
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef __ELM_H
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index a93841bfb9f7..e182a46e609f 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -1,7 +1,7 @@
/*
* DaVinci GPIO Platform Related Defines
*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h
index ef663e570552..c9cc4e32435d 100644
--- a/include/linux/platform_data/gpmc-omap.h
+++ b/include/linux/platform_data/gpmc-omap.h
@@ -2,7 +2,7 @@
/*
* OMAP GPMC Platform data
*
- * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments, Inc. - https://www.ti.com
* Roger Quadros <rogerq@ti.com>
*/
diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h
index ec1611aff863..37a8f554da00 100644
--- a/include/linux/platform_data/gsc_hwmon.h
+++ b/include/linux/platform_data/gsc_hwmon.h
@@ -4,8 +4,9 @@
enum gsc_hwmon_mode {
mode_temperature,
- mode_voltage,
+ mode_voltage_24bit,
mode_voltage_raw,
+ mode_voltage_16bit,
mode_max,
};
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 96a787100fda..3441064713a3 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -12,17 +12,26 @@
#ifndef _LEDS_LP55XX_H
#define _LEDS_LP55XX_H
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-multicolor.h>
+
/* Clock configuration */
#define LP55XX_CLOCK_AUTO 0
#define LP55XX_CLOCK_INT 1
#define LP55XX_CLOCK_EXT 2
+#define LP55XX_MAX_GROUPED_CHAN 4
+
struct lp55xx_led_config {
const char *name;
const char *default_trigger;
u8 chan_nr;
u8 led_current; /* mA x10, 0 if led is not connected */
u8 max_current;
+ int num_colors;
+ unsigned int max_channel;
+ int color_id[LED_COLOR_ID_MAX];
+ int output_num[LED_COLOR_ID_MAX];
};
struct lp55xx_predef_pattern {
@@ -49,7 +58,7 @@ enum lp8501_pwr_sel {
* @clock_mode : Input clock mode. LP55XX_CLOCK_AUTO or _INT or _EXT
* @setup_resources : Platform specific function before enabling the chip
* @release_resources : Platform specific function after disabling the chip
- * @enable : EN pin control by platform side
+ * @enable_gpiod : enable GPIO descriptor
* @patterns : Predefined pattern data for RGB channels
* @num_patterns : Number of patterns
* @update_config : Value of CONFIG register
@@ -65,7 +74,7 @@ struct lp55xx_platform_data {
u8 clock_mode;
/* optional enable GPIO */
- int enable_gpio;
+ struct gpio_desc *enable_gpiod;
/* Predefined pattern data */
struct lp55xx_predef_pattern *patterns;
diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h
index 5bbae85811e2..64f8d14876e0 100644
--- a/include/linux/platform_data/leds-s3c24xx.h
+++ b/include/linux/platform_data/leds-s3c24xx.h
@@ -10,13 +10,7 @@
#ifndef __LEDS_S3C24XX_H
#define __LEDS_S3C24XX_H
-#define S3C24XX_LEDF_ACTLOW (1<<0) /* LED is on when GPIO low */
-#define S3C24XX_LEDF_TRISTATE (1<<1) /* tristate to turn off */
-
struct s3c24xx_led_platdata {
- unsigned int gpio;
- unsigned int flags;
-
char *name;
char *def_trigger;
};
diff --git a/include/linux/platform_data/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h
deleted file mode 100644
index 386439db68de..000000000000
--- a/include/linux/platform_data/media/omap1_camera.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface
- *
- * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
- */
-
-#ifndef __MEDIA_OMAP1_CAMERA_H_
-#define __MEDIA_OMAP1_CAMERA_H_
-
-#include <linux/bitops.h>
-
-#define OMAP1_CAMERA_IOSIZE 0x1c
-
-enum omap1_cam_vb_mode {
- OMAP1_CAM_DMA_CONTIG = 0,
- OMAP1_CAM_DMA_SG,
-};
-
-#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 3 : 2)
-
-struct omap1_cam_platform_data {
- unsigned long camexclk_khz;
- unsigned long lclk_khz_max;
- unsigned long flags;
-};
-
-#define OMAP1_CAMERA_LCLK_RISING BIT(0)
-#define OMAP1_CAMERA_RST_LOW BIT(1)
-#define OMAP1_CAMERA_RST_HIGH BIT(2)
-
-#endif /* __MEDIA_OMAP1_CAMERA_H_ */
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index b8da8aef2446..1af9c01563f9 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -43,10 +43,13 @@
*
* TYPE1 HW watchdog implementation exist in old systems.
* All new systems have TYPE2 HW watchdog.
+ * TYPE3 HW watchdog can exist on all systems with new CPLD.
+ * TYPE3 is selected by WD capability bit.
*/
enum mlxreg_wdt_type {
MLX_WDT_TYPE1,
MLX_WDT_TYPE2,
+ MLX_WDT_TYPE3,
};
/**
@@ -75,11 +78,13 @@ struct mlxreg_hotplug_device {
* @mask: attribute access mask;
* @bit: attribute effective bit;
* @capability: attribute capability register;
+ * @reg_prsnt: attribute presence register;
* @mode: access mode;
 * @np - pointer to platform node associated with attribute;
* @hpdev - hotplug device data;
* @health_cntr: dynamic device health indication counter;
* @attached: true if device has been attached after good health indication;
+ * @regnum: number of registers occupied by multi-register attribute;
*/
struct mlxreg_core_data {
char label[MLXREG_CORE_LABEL_MAX_SIZE];
@@ -87,11 +92,13 @@ struct mlxreg_core_data {
u32 mask;
u32 bit;
u32 capability;
+ u32 reg_prsnt;
umode_t mode;
struct device_node *np;
struct mlxreg_hotplug_device hpdev;
- u8 health_cntr;
+ u32 health_cntr;
bool attached;
+ u8 regnum;
};
/**
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 9acf0e87aa9b..f0b8947e6b07 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -116,3 +116,6 @@ struct omap_mmc_platform_data {
} slots[OMAP_MMC_MAX_SLOTS];
};
+
+extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
+ int is_closed);
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
index a403dd51dacc..a49826214a39 100644
--- a/include/linux/platform_data/mtd-davinci-aemif.h
+++ b/include/linux/platform_data/mtd-davinci-aemif.h
@@ -1,7 +1,7 @@
/*
* TI DaVinci AEMIF support
*
- * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/
+ * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h
index 8419c8caf54e..0dd851ea1c72 100644
--- a/include/linux/platform_data/omap-twl4030.h
+++ b/include/linux/platform_data/omap-twl4030.h
@@ -3,7 +3,7 @@
* omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030
* codec, header.
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
* All rights reserved.
*
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h
deleted file mode 100644
index 02653d92d84f..000000000000
--- a/include/linux/platform_data/sky81452-backlight.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sky81452.h SKY81452 backlight driver
- *
- * Copyright 2014 Skyworks Solutions Inc.
- * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
- */
-
-#ifndef _SKY81452_BACKLIGHT_H
-#define _SKY81452_BACKLIGHT_H
-
-/**
- * struct sky81452_platform_data
- * @name: backlight driver name.
- If it is not defined, default name is lcd-backlight.
- * @gpio_enable:GPIO number which control EN pin
- * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6.
- * @ignore_pwm: true if DPWMI should be ignored.
- * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode.
- * @phase_shift:true is phase shift mode.
- * @short_detecion_threshold: It should be one of 4, 5, 6 and 7V.
- * @boost_current_limit: It should be one of 2300, 2750mA.
- */
-struct sky81452_bl_platform_data {
- const char *name;
- int gpio_enable;
- unsigned int enable;
- bool ignore_pwm;
- bool dpwm_mode;
- bool phase_shift;
- unsigned int short_detection_threshold;
- unsigned int boost_current_limit;
-};
-
-#endif
diff --git a/include/linux/platform_data/spi-imx.h b/include/linux/platform_data/spi-imx.h
deleted file mode 100644
index 328f670d10bd..000000000000
--- a/include/linux/platform_data/spi-imx.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __MACH_SPI_H_
-#define __MACH_SPI_H_
-
-/*
- * struct spi_imx_master - device.platform_data for SPI controller devices.
- * @chipselect: Array of chipselects for this master or NULL. Numbers >= 0
- * mean GPIO pins, -ENOENT means internal CSPI chipselect
- * matching the position in the array. E.g., if chipselect[1] =
- * -ENOENT then a SPI slave using chip select 1 will use the
- * native SS1 line of the CSPI. Omitting the array will use
- * all native chip selects.
-
- * Normally you want to use gpio based chip selects as the CSPI
- * module tries to be intelligent about when to assert the
- * chipselect: The CSPI module deasserts the chipselect once it
- * runs out of input data. The other problem is that it is not
- * possible to mix between high active and low active chipselects
- * on one single bus using the internal chipselects.
- * Unfortunately, on some SoCs, Freescale decided to put some
- * chipselects on dedicated pins which are not usable as gpios,
- * so we have to support the internal chipselects.
- *
- * @num_chipselect: If @chipselect is specified, ARRAY_SIZE(chipselect),
- * otherwise the number of native chip selects.
- */
-struct spi_imx_master {
- int *chipselect;
- int num_chipselect;
-};
-
-#endif /* __MACH_SPI_H_*/
diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h
index 3d47d219827f..31f2e22661bc 100644
--- a/include/linux/platform_data/uio_pruss.h
+++ b/include/linux/platform_data/uio_pruss.h
@@ -3,7 +3,7 @@
*
* Platform data for uio_pruss driver
*
- * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h
index fa579b4c666b..5e70d667031c 100644
--- a/include/linux/platform_data/usb-omap.h
+++ b/include/linux/platform_data/usb-omap.h
@@ -1,7 +1,7 @@
/*
* usb-omap.h - Platform data for the various OMAP USB IPs
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
*
* This software is distributed under the terms of the GNU General Public
* License ("GPL") version 2, as published by the Free Software Foundation.
diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h
new file mode 100644
index 000000000000..0fc831338226
--- /dev/null
+++ b/include/linux/pldmfw.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#ifndef _PLDMFW_H_
+#define _PLDMFW_H_
+
+#include <linux/list.h>
+#include <linux/firmware.h>
+
+#define PLDM_DEVICE_UPDATE_CONTINUE_AFTER_FAIL BIT(0)
+
+#define PLDM_STRING_TYPE_UNKNOWN 0
+#define PLDM_STRING_TYPE_ASCII 1
+#define PLDM_STRING_TYPE_UTF8 2
+#define PLDM_STRING_TYPE_UTF16 3
+#define PLDM_STRING_TYPE_UTF16LE 4
+#define PLDM_STRING_TYPE_UTF16BE 5
+
+struct pldmfw_record {
+ struct list_head entry;
+
+ /* List of descriptor TLVs */
+ struct list_head descs;
+
+	/* Component Set version string */
+ const u8 *version_string;
+ u8 version_type;
+ u8 version_len;
+
+ /* Package Data length */
+ u16 package_data_len;
+
+ /* Bitfield of Device Update Flags */
+ u32 device_update_flags;
+
+ /* Package Data block */
+ const u8 *package_data;
+
+ /* Bitmap of components applicable to this record */
+ unsigned long *component_bitmap;
+ u16 component_bitmap_len;
+};
+
+/* Standard descriptor TLV identifiers */
+#define PLDM_DESC_ID_PCI_VENDOR_ID 0x0000
+#define PLDM_DESC_ID_IANA_ENTERPRISE_ID 0x0001
+#define PLDM_DESC_ID_UUID 0x0002
+#define PLDM_DESC_ID_PNP_VENDOR_ID 0x0003
+#define PLDM_DESC_ID_ACPI_VENDOR_ID 0x0004
+#define PLDM_DESC_ID_PCI_DEVICE_ID 0x0100
+#define PLDM_DESC_ID_PCI_SUBVENDOR_ID 0x0101
+#define PLDM_DESC_ID_PCI_SUBDEV_ID 0x0102
+#define PLDM_DESC_ID_PCI_REVISION_ID 0x0103
+#define PLDM_DESC_ID_PNP_PRODUCT_ID 0x0104
+#define PLDM_DESC_ID_ACPI_PRODUCT_ID 0x0105
+#define PLDM_DESC_ID_VENDOR_DEFINED 0xFFFF
+
+struct pldmfw_desc_tlv {
+ struct list_head entry;
+
+ const u8 *data;
+ u16 type;
+ u16 size;
+};
+
+#define PLDM_CLASSIFICATION_UNKNOWN 0x0000
+#define PLDM_CLASSIFICATION_OTHER 0x0001
+#define PLDM_CLASSIFICATION_DRIVER 0x0002
+#define PLDM_CLASSIFICATION_CONFIG_SW 0x0003
+#define PLDM_CLASSIFICATION_APP_SW 0x0004
+#define PLDM_CLASSIFICATION_INSTRUMENTATION 0x0005
+#define PLDM_CLASSIFICATION_BIOS 0x0006
+#define PLDM_CLASSIFICATION_DIAGNOSTIC_SW 0x0007
+#define PLDM_CLASSIFICATION_OS 0x0008
+#define PLDM_CLASSIFICATION_MIDDLEWARE 0x0009
+#define PLDM_CLASSIFICATION_FIRMWARE 0x000A
+#define PLDM_CLASSIFICATION_CODE 0x000B
+#define PLDM_CLASSIFICATION_SERVICE_PACK 0x000C
+#define PLDM_CLASSIFICATION_SOFTWARE_BUNDLE 0x000D
+
+#define PLDM_ACTIVATION_METHOD_AUTO BIT(0)
+#define PLDM_ACTIVATION_METHOD_SELF_CONTAINED BIT(1)
+#define PLDM_ACTIVATION_METHOD_MEDIUM_SPECIFIC BIT(2)
+#define PLDM_ACTIVATION_METHOD_REBOOT BIT(3)
+#define PLDM_ACTIVATION_METHOD_DC_CYCLE BIT(4)
+#define PLDM_ACTIVATION_METHOD_AC_CYCLE BIT(5)
+
+#define PLDMFW_COMPONENT_OPTION_FORCE_UPDATE BIT(0)
+#define PLDMFW_COMPONENT_OPTION_USE_COMPARISON_STAMP BIT(1)
+
+struct pldmfw_component {
+ struct list_head entry;
+
+ /* component identifier */
+ u16 classification;
+ u16 identifier;
+
+ u16 options;
+ u16 activation_method;
+
+ u32 comparison_stamp;
+
+ u32 component_size;
+ const u8 *component_data;
+
+ /* Component version string */
+ const u8 *version_string;
+ u8 version_type;
+ u8 version_len;
+
+ /* component index */
+ u8 index;
+
+};
+
+/* Transfer flag used for sending components to the firmware */
+#define PLDM_TRANSFER_FLAG_START BIT(0)
+#define PLDM_TRANSFER_FLAG_MIDDLE BIT(1)
+#define PLDM_TRANSFER_FLAG_END BIT(2)
+
+struct pldmfw_ops;
+
+/* Main entry point to the PLDM firmware update engine. Device drivers
+ * should embed this in a private structure and use container_of to obtain
+ * a pointer to their own data, used to implement the device specific
+ * operations.
+ */
+struct pldmfw {
+ const struct pldmfw_ops *ops;
+ struct device *dev;
+};
+
+bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record);
+
+/* Operations invoked by the generic PLDM firmware update engine. Used to
+ * implement device specific logic.
+ *
+ * @match_record: check if the device matches the given record. For
+ * convenience, a standard implementation is provided for PCI devices.
+ *
+ * @send_package_data: send the package data associated with the matching
+ * record to firmware.
+ *
+ * @send_component_table: send the component data associated with a given
+ * component to firmware. Called once for each applicable component.
+ *
+ * @flash_component: Flash the data for a given component to the device.
+ * Called once for each applicable component, after all component tables have
+ * been sent.
+ *
+ * @finalize_update: (optional) Finish the update. Called after all components
+ * have been flashed.
+ */
+struct pldmfw_ops {
+ bool (*match_record)(struct pldmfw *context, struct pldmfw_record *record);
+ int (*send_package_data)(struct pldmfw *context, const u8 *data, u16 length);
+ int (*send_component_table)(struct pldmfw *context, struct pldmfw_component *component,
+ u8 transfer_flag);
+ int (*flash_component)(struct pldmfw *context, struct pldmfw_component *component);
+ int (*finalize_update)(struct pldmfw *context);
+};
+
+int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw);
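As a minimal sketch (not part of the patch), a hypothetical driver would embed struct pldmfw, reuse the provided PCI record matcher, and hand the firmware image to the engine; the remaining ops are stubs standing in for device-specific mailbox code:

struct example_fw {
	struct pldmfw context;
	/* device handles would live here */
};

static int example_send_package_data(struct pldmfw *ctx, const u8 *data, u16 length)
{
	/* container_of(ctx, struct example_fw, context) to reach driver data */
	return 0;
}

static const struct pldmfw_ops example_pldmfw_ops = {
	.match_record      = pldmfw_op_pci_match_record,
	.send_package_data = example_send_package_data,
	/* .send_component_table, .flash_component, .finalize_update elided */
};

static int example_flash(struct example_fw *fw, struct device *dev,
			 const struct firmware *firmware)
{
	fw->context.ops = &example_pldmfw_ops;
	fw->context.dev = dev;

	return pldmfw_flash_image(&fw->context, firmware);
}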
+
+#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 121c104a4090..a30a4b54df52 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -351,7 +351,7 @@ struct dev_pm_ops {
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
@@ -369,11 +369,17 @@ const struct dev_pm_ops name = { \
* .runtime_resume(), respectively (and analogously for hibernation).
*/
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
-const struct dev_pm_ops name = { \
+const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
+#ifdef CONFIG_PM
+#define pm_ptr(_ptr) (_ptr)
+#else
+#define pm_ptr(_ptr) NULL
+#endif
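As a minimal sketch (not part of the patch), pm_ptr() combines with the now __maybe_unused dev_pm_ops so the whole PM structure can be compiled out when CONFIG_PM is disabled; the driver name and callbacks are hypothetical:

static int __maybe_unused example_suspend(struct device *dev) { return 0; }
static int __maybe_unused example_resume(struct device *dev)  { return 0; }

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.pm   = pm_ptr(&example_pm_ops),	/* NULL when !CONFIG_PM */
	},
};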
+
/*
* PM_EVENT_ messages
*
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 9ec78ee53652..ee11502a575b 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -95,8 +95,8 @@ struct generic_pm_domain {
struct device dev;
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
- struct list_head master_links; /* Links with PM domain as a master */
- struct list_head slave_links; /* Links with PM domain as a slave */
+ struct list_head parent_links; /* Links with PM domain as a parent */
+ struct list_head child_links; /* Links with PM domain as a child */
struct list_head dev_list; /* List of devices */
struct dev_power_governor *gov;
struct work_struct power_off_work;
@@ -151,10 +151,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
}
struct gpd_link {
- struct generic_pm_domain *master;
- struct list_head master_node;
- struct generic_pm_domain *slave;
- struct list_head slave_node;
+ struct generic_pm_domain *parent;
+ struct list_head parent_node;
+ struct generic_pm_domain *child;
+ struct list_head child_node;
/* Sub-domain's per-master domain performance state */
unsigned int performance_state;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index d5c4a329321d..dbb484524f82 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_OPP_H__
#define __LINUX_OPP_H__
+#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/notifier.h>
@@ -151,6 +152,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names
void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
+int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
void dev_pm_opp_remove_table(struct device *dev);
@@ -342,6 +344,11 @@ static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_f
return -ENOTSUPP;
}
+static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
{
return -ENOTSUPP;
@@ -373,7 +380,11 @@ struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
-void dev_pm_opp_of_register_em(struct cpumask *cpus);
+int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
+{
+ em_dev_unregister_perf_domain(dev);
+}
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
@@ -413,7 +424,13 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
return NULL;
}
-static inline void dev_pm_opp_of_register_em(struct cpumask *cpus)
+static inline int dev_pm_opp_of_register_em(struct device *dev,
+ struct cpumask *cpus)
+{
+ return -ENOTSUPP;
+}
+
+static inline void dev_pm_opp_of_unregister_em(struct device *dev)
{
}
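As a minimal sketch (not part of the patch), a hypothetical cpufreq driver might use the int-returning registration helper, treat a failure as non-fatal, and call the matching unregister helper on teardown:

static void example_register_em(struct device *cpu_dev, struct cpufreq_policy *policy)
{
	int ret = dev_pm_opp_of_register_em(cpu_dev, policy->cpus);

	if (ret)
		dev_dbg(cpu_dev, "no energy model registered: %d\n", ret);
}

static void example_exit(struct device *cpu_dev)
{
	dev_pm_opp_of_unregister_em(cpu_dev);
}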
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 3dbc207bff53..6245caa18034 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -60,58 +60,151 @@ extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device *dev);
+/**
+ * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
+ * @dev: Target device.
+ *
+ * Increment the runtime PM usage counter of @dev if its runtime PM status is
+ * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
+ */
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return pm_runtime_get_if_active(dev, false);
}
+/**
+ * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
+ * @dev: Target device.
+ * @enable: Whether or not to ignore possible dependencies on children.
+ *
+ * The dependencies of @dev on its children will not be taken into account by
+ * the runtime PM framework going forward if @enable is %true, or they will
+ * be taken into account otherwise.
+ */
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
dev->power.ignore_children = enable;
}
+/**
+ * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
+ * @dev: Target device.
+ */
static inline void pm_runtime_get_noresume(struct device *dev)
{
atomic_inc(&dev->power.usage_count);
}
+/**
+ * pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev unless it is 0 already.
+ */
static inline void pm_runtime_put_noidle(struct device *dev)
{
atomic_add_unless(&dev->power.usage_count, -1, 0);
}
+/**
+ * pm_runtime_suspended - Check whether or not a device is runtime-suspended.
+ * @dev: Target device.
+ *
+ * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * %RPM_SUSPENDED, or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which
+ * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
+ * status cannot change.
+ */
static inline bool pm_runtime_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED
&& !dev->power.disable_depth;
}
+/**
+ * pm_runtime_active - Check whether or not a device is runtime-active.
+ * @dev: Target device.
+ *
+ * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * %RPM_ACTIVE, or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which
+ * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
+ * status cannot change.
+ */
static inline bool pm_runtime_active(struct device *dev)
{
return dev->power.runtime_status == RPM_ACTIVE
|| dev->power.disable_depth;
}
+/**
+ * pm_runtime_status_suspended - Check if runtime PM status is "suspended".
+ * @dev: Target device.
+ *
+ * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
+ * otherwise, regardless of whether or not runtime PM has been enabled for @dev.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which the
+ * runtime PM status of @dev cannot change.
+ */
static inline bool pm_runtime_status_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED;
}
+/**
+ * pm_runtime_enabled - Check if runtime PM is enabled.
+ * @dev: Target device.
+ *
+ * Return %true if runtime PM is enabled for @dev or %false otherwise.
+ *
+ * Note that the return value of this function can only be trusted if it is
+ * called under the runtime PM lock of @dev or under conditions in which
+ * runtime PM cannot be either disabled or enabled for @dev.
+ */
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
}
+/**
+ * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
+ * @dev: Target device.
+ *
+ * Return %true if @dev is a special device without runtime PM callbacks or
+ * %false otherwise.
+ */
static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
return dev->power.no_callbacks;
}
+/**
+ * pm_runtime_mark_last_busy - Update the last access time of a device.
+ * @dev: Target device.
+ *
+ * Update the last access time of @dev used by the runtime PM autosuspend
+ * mechanism to the current time as returned by ktime_get_mono_fast_ns().
+ */
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}
+/**
+ * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
+ * @dev: Target device.
+ *
+ * Return %true if @dev has been marked as an "IRQ-safe" device (with respect
+ * to runtime PM), in which case its runtime PM callbacks can be expected to
+ * work correctly when invoked from interrupt handlers.
+ */
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
return dev->power.irq_safe;
@@ -191,97 +284,250 @@ static inline void pm_runtime_drop_link(struct device *dev) {}
#endif /* !CONFIG_PM */
+/**
+ * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
+ * @dev: Target device.
+ *
+ * Invoke the "idle check" callback of @dev and, depending on its return value,
+ * set up autosuspend of @dev or suspend it (depending on whether or not
+ * autosuspend has been enabled for it).
+ */
static inline int pm_runtime_idle(struct device *dev)
{
return __pm_runtime_idle(dev, 0);
}
+/**
+ * pm_runtime_suspend - Suspend a device synchronously.
+ * @dev: Target device.
+ */
static inline int pm_runtime_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, 0);
}
+/**
+ * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
+ * @dev: Target device.
+ *
+ * Set up autosuspend of @dev or suspend it (depending on whether or not
+ * autosuspend is enabled for it) without engaging its "idle check" callback.
+ */
static inline int pm_runtime_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_AUTO);
}
+/**
+ * pm_runtime_resume - Resume a device synchronously.
+ * @dev: Target device.
+ */
static inline int pm_runtime_resume(struct device *dev)
{
return __pm_runtime_resume(dev, 0);
}
+/**
+ * pm_request_idle - Queue up "idle check" execution for a device.
+ * @dev: Target device.
+ *
+ * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
+ * asynchronously.
+ */
static inline int pm_request_idle(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_ASYNC);
}
+/**
+ * pm_request_resume - Queue up runtime-resume of a device.
+ * @dev: Target device.
+ */
static inline int pm_request_resume(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_ASYNC);
}
+/**
+ * pm_request_autosuspend - Queue up autosuspend of a device.
+ * @dev: Target device.
+ *
+ * Queue up a work item to run an equivalent pm_runtime_autosuspend() for @dev
+ * asynchronously.
+ */
static inline int pm_request_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}
+/**
+ * pm_runtime_get - Bump up usage counter and queue up resume of a device.
+ * @dev: Target device.
+ *
+ * Bump up the runtime PM usage counter of @dev and queue up a work item to
+ * carry out runtime-resume of it.
+ */
static inline int pm_runtime_get(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}
+/**
+ * pm_runtime_get_sync - Bump up usage counter of a device and resume it.
+ * @dev: Target device.
+ *
+ * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
+ * it synchronously.
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_resume() and the runtime PM usage counter of @dev remains
+ * incremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_get_sync(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT);
}
+/**
+ * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, queue up a work item for @dev like in pm_request_idle().
+ */
static inline int pm_runtime_put(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}
+/**
+ * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
+ */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev,
RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
+/**
+ * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, invoke the "idle check" callback of @dev and, depending on its
+ * return value, set up autosuspend of @dev or suspend it (depending on whether
+ * or not autosuspend has been enabled for it).
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_idle() and the runtime PM usage counter of @dev remains
+ * decremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_put_sync(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT);
}
+/**
+ * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, carry out runtime-suspend of @dev synchronously.
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
+ * decremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT);
}
+/**
+ * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
+ * @dev: Target device.
+ *
+ * Decrement the runtime PM usage counter of @dev and if it turns out to be
+ * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
+ * on whether or not autosuspend has been enabled for it).
+ *
+ * The possible return values of this function are the same as for
+ * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
+ * decremented in all cases, even if it returns an error code.
+ */
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}
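As a minimal sketch (not part of the patch), the usual pairing of the helpers documented above in a hypothetical driver I/O path using autosuspend looks like this:

static int example_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* usage counter stays bumped even on error */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the counter, skip the idle check */
		return ret;
	}

	/* ... access the hardware while the device is RPM_ACTIVE ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* let autosuspend kick in when idle */
	return 0;
}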
+/**
+ * pm_runtime_set_active - Set runtime PM status to "active".
+ * @dev: Target device.
+ *
+ * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies
+ * of it will be taken into account.
+ *
+ * It is not valid to call this function for devices with runtime PM enabled.
+ */
static inline int pm_runtime_set_active(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_ACTIVE);
}
+/**
+ * pm_runtime_set_suspended - Set runtime PM status to "suspended".
+ * @dev: Target device.
+ *
+ * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
+ * dependencies of it will be taken into account.
+ *
+ * It is not valid to call this function for devices with runtime PM enabled.
+ */
static inline int pm_runtime_set_suspended(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}
+/**
+ * pm_runtime_disable - Disable runtime PM for a device.
+ * @dev: Target device.
+ *
+ * Prevent the runtime PM framework from working with @dev (by incrementing its
+ * "blocking" counter).
+ *
+ * For each invocation of this function for @dev there must be a matching
+ * pm_runtime_enable() call in order for runtime PM to be enabled for it.
+ */
static inline void pm_runtime_disable(struct device *dev)
{
__pm_runtime_disable(dev, true);
}
+/**
+ * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
+ * @dev: Target device.
+ *
+ * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
+ * requested (otherwise "autosuspend" requests will be handled as direct
+ * runtime-suspend for it).
+ */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, true);
}
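
A companion sketch of the probe-time setup for the autosuspend helpers; the 100 ms delay is an arbitrary example value and the device is assumed to be already powered up:

	pm_runtime_set_active(dev);			/* status matches the hardware state */
	pm_runtime_set_autosuspend_delay(dev, 100);	/* 100 ms of inactivity before suspend */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);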
+/**
+ * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
+ * @dev: Target device.
+ *
+ * Prevent the runtime PM autosuspend mechanism from being used for @dev which
+ * means that "autosuspend" will be handled as direct runtime-suspend for it
+ * going forward.
+ */
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, false);
diff --git a/include/linux/poison.h b/include/linux/poison.h
index df34330b4e34..dc8ae5d8db03 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -24,10 +24,6 @@
#define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
-/*
- * Magic number "tsta" to indicate a static timer initializer
- * for the object debugging code.
- */
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
/********** mm/page_poison.c **********/
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index e3f0f8585da4..896c16d2c5fb 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <linux/alarmtimer.h>
#include <linux/timerqueue.h>
+#include <linux/task_work.h>
struct kernel_siginfo;
struct task_struct;
@@ -125,6 +126,16 @@ struct posix_cputimers {
unsigned int expiry_active;
};
+/**
+ * posix_cputimers_work - Container for task work based posix CPU timer expiry
+ * @work: The task work to be scheduled
+ * @scheduled: @work has been scheduled already, no further processing
+ */
+struct posix_cputimers_work {
+ struct callback_head work;
+ unsigned int scheduled;
+};
+
static inline void posix_cputimers_init(struct posix_cputimers *pct)
{
memset(pct, 0, sizeof(*pct));
@@ -165,6 +176,12 @@ static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
u64 cpu_limit) { }
#endif
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+void posix_cputimers_init_work(void);
+#else
+static inline void posix_cputimers_init_work(void) { }
+#endif
+
#define REQUEUE_PENDING 1
/**
diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h
index 4ca08321e251..f3c267f2a467 100644
--- a/include/linux/power/bq2415x_charger.h
+++ b/include/linux/power/bq2415x_charger.h
@@ -14,8 +14,8 @@
* value is -1 then default chip value (specified in datasheet) will be
* used.
*
- * Value resistor_sense is needed for for configuring charge and
- * termination current. It it is less or equal to zero, configuring charge
+ * Value resistor_sense is needed for configuring charge and
+ * termination current. If it is less or equal to zero, configuring charge
* and termination current will not be possible.
*
 * For automode support it is needed to provide the name of the power supply device
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 507c5e214c42..987d9652aa4e 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -30,6 +30,8 @@ enum bq27xxx_chip {
BQ27426,
BQ27441,
BQ27621,
+ BQ27Z561,
+ BQ28Z610,
};
struct bq27xxx_device_info;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index ac1345a48ad0..97cc4b85bf61 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -48,6 +48,7 @@ enum {
POWER_SUPPLY_CHARGE_TYPE_STANDARD, /* normal speed */
POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */
POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */
+ POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */
};
enum {
@@ -62,6 +63,9 @@ enum {
POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_OVERCURRENT,
POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED,
+ POWER_SUPPLY_HEALTH_WARM,
+ POWER_SUPPLY_HEALTH_COOL,
+ POWER_SUPPLY_HEALTH_HOT,
};
enum {
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
new file mode 100644
index 000000000000..aa16e6468f91
--- /dev/null
+++ b/include/linux/prandom.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/prandom.h
+ *
+ * Include file for the fast pseudo-random 32-bit
+ * number generator.
+ */
+#ifndef _LINUX_PRANDOM_H
+#define _LINUX_PRANDOM_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+u32 prandom_u32(void);
+void prandom_bytes(void *buf, size_t nbytes);
+void prandom_seed(u32 seed);
+void prandom_reseed_late(void);
+
+struct rnd_state {
+ __u32 s1, s2, s3, s4;
+};
+
+DECLARE_PER_CPU(struct rnd_state, net_rand_state);
+
+u32 prandom_u32_state(struct rnd_state *state);
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+
+#define prandom_init_once(pcpu_state) \
+ DO_ONCE(prandom_seed_full_state, (pcpu_state))
+
+/**
+ * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
+ * @ep_ro: right open interval endpoint
+ *
+ * Returns a pseudo-random number that is in interval [0, ep_ro). Note
+ * that the result depends on the PRNG being well distributed in [0, ~0U]
+ * u32 space. Here we use a maximally equidistributed combined Tausworthe
+ * generator, that is, prandom_u32(). This is useful when requesting a
+ * random index of an array containing ep_ro elements, for example.
+ *
+ * Returns: pseudo-random number in interval [0, ep_ro)
+ */
+static inline u32 prandom_u32_max(u32 ep_ro)
+{
+ return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
+}
+
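
For illustration, picking a uniformly distributed array index with the helper above; the table[] array and its consumer are hypothetical:

	u32 idx = prandom_u32_max(ARRAY_SIZE(table));	/* idx in [0, ARRAY_SIZE(table)) */

	use_entry(&table[idx]);				/* hypothetical consumer */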
+/*
+ * Handle minimum values for seeds
+ */
+static inline u32 __seed(u32 x, u32 m)
+{
+ return (x < m) ? x + m : x;
+}
+
+/**
+ * prandom_seed_state - set seed for prandom_u32_state().
+ * @state: pointer to state structure to receive the seed.
+ * @seed: arbitrary 64-bit value to use as a seed.
+ */
+static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+{
+ u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+
+ state->s1 = __seed(i, 2U);
+ state->s2 = __seed(i, 8U);
+ state->s3 = __seed(i, 16U);
+ state->s4 = __seed(i, 128U);
+}
+
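
A short sketch of the per-state interface, which produces a reproducible sequence for a given seed (the seed value below is arbitrary):

	struct rnd_state state;
	u32 a, b;

	prandom_seed_state(&state, 0x123456789abcdefULL);
	a = prandom_u32_state(&state);
	b = prandom_u32_state(&state);	/* deterministic continuation of the same sequence */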
+/* Pseudo random number generator from numerical recipes. */
+static inline u32 next_pseudo_random32(u32 seed)
+{
+ return seed * 1664525 + 1013904223;
+}
+
+#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index fc8f03c54543..34c1a7be3e01 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -7,6 +7,7 @@
#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <linux/cache.h>
+#include <linux/ratelimit_types.h>
extern const char linux_banner[];
extern const char linux_proc_banner[];
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index d1eed1b43651..2df965cd0974 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -133,7 +133,8 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
void *data);
extern struct pid *tgid_pidfd_to_pid(const struct file *file);
-extern int bpf_iter_init_seq_net(void *priv_data);
+struct bpf_iter_aux_info;
+extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux);
extern void bpf_iter_fini_seq_net(void *priv_data);
#ifdef CONFIG_PROC_PID_ARCH_STATUS
diff --git a/include/linux/property.h b/include/linux/property.h
index 10d03572f52e..9f805c442819 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -389,6 +389,11 @@ struct fwnode_handle *
fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
u32 endpoint);
+static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode)
+{
+ return fwnode_property_present(fwnode, "remote-endpoint");
+}
+
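A hedged sketch of the intended use: code handed an fwnode that may be either a device node or a graph endpoint can branch on the new helper (the parse_*() callees are hypothetical):

	if (fwnode_graph_is_endpoint(fwnode))
		ret = parse_endpoint_props(fwnode);	/* hypothetical */
	else
		ret = parse_device_props(fwnode);	/* hypothetical */
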
/*
* Fwnode lookup flags
*
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 4b7258495a04..b95f3211566a 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -153,9 +153,10 @@ struct psi_group {
unsigned long avg[NR_PSI_STATES - 1][3];
/* Monitor work control */
- atomic_t poll_scheduled;
- struct kthread_worker __rcu *poll_kworker;
- struct kthread_delayed_work poll_work;
+ struct task_struct __rcu *poll_task;
+ struct timer_list poll_timer;
+ wait_queue_head_t poll_wait;
+ atomic_t poll_wakeup;
/* Protects data used by the monitor */
struct mutex trigger_lock;
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 417db0a79a62..808f9d3ee546 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -107,7 +107,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
return -ENOSPC;
/* Make sure the pointer we are storing points to a valid data. */
- /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
+ /* Pairs with the dependency ordering in __ptr_ring_consume. */
smp_wmb();
WRITE_ONCE(r->queue[r->producer++], ptr);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 2635b2a55090..a13ff383fa1d 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -39,7 +39,7 @@ enum pwm_polarity {
* current PWM hardware state.
*/
struct pwm_args {
- unsigned int period;
+ u64 period;
enum pwm_polarity polarity;
};
@@ -56,8 +56,8 @@ enum {
* @enabled: PWM enabled status
*/
struct pwm_state {
- unsigned int period;
- unsigned int duty_cycle;
+ u64 period;
+ u64 duty_cycle;
enum pwm_polarity polarity;
bool enabled;
};
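
One practical consequence of the widened fields, sketched under the assumption that pwm is a valid struct pwm_device pointer: periods longer than UINT_MAX nanoseconds (about 4.29 s) can now be expressed.

	pwm_set_period(pwm, 10ULL * NSEC_PER_SEC);	/* a 10 s period now fits */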
@@ -107,13 +107,13 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm)
return state.enabled;
}
-static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
+static inline void pwm_set_period(struct pwm_device *pwm, u64 period)
{
if (pwm)
pwm->state.period = period;
}
-static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
+static inline u64 pwm_get_period(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -128,7 +128,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
pwm->state.duty_cycle = duty;
}
-static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
+static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm)
{
struct pwm_state state;
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index dd464943f717..8f385fbe5a0e 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_QCOM_GENI_SE
#define _LINUX_QCOM_GENI_SE
+#include <linux/interconnect.h>
+
/* Transfer mode supported by GENI Serial Engines */
enum geni_se_xfer_mode {
GENI_SE_INVALID,
@@ -25,6 +27,17 @@ enum geni_se_protocol_type {
struct geni_wrapper;
struct clk;
+enum geni_icc_path_index {
+ GENI_TO_CORE,
+ CPU_TO_GENI,
+ GENI_TO_DDR
+};
+
+struct geni_icc_path {
+ struct icc_path *path;
+ unsigned int avg_bw;
+};
+
/**
* struct geni_se - GENI Serial Engine
* @base: Base Address of the Serial Engine's register block
@@ -33,6 +46,9 @@ struct clk;
* @clk: Handle to the core serial engine clock
* @num_clk_levels: Number of valid clock levels in clk_perf_tbl
* @clk_perf_tbl: Table of clock frequency input to serial engine clock
+ * @icc_paths: Array of ICC paths for SE
+ * @opp_table: Pointer to the OPP table
+ * @has_opp_table: Specifies if the SE has an OPP table
*/
struct geni_se {
void __iomem *base;
@@ -41,6 +57,9 @@ struct geni_se {
struct clk *clk;
unsigned int num_clk_levels;
unsigned long *clk_perf_tbl;
+ struct geni_icc_path icc_paths[3];
+ struct opp_table *opp_table;
+ bool has_opp_table;
};
/* Common SE registers */
@@ -229,6 +248,21 @@ struct geni_se {
#define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
#define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK)
+/*
+ * Define bandwidth thresholds that cause the underlying Core 2X interconnect
+ * clock to run at the named frequency. These baseline values are recommended
+ * by the hardware team, and are not dynamically scaled with GENI bandwidth
+ * beyond basic on/off.
+ */
+#define CORE_2X_19_2_MHZ 960
+#define CORE_2X_50_MHZ 2500
+#define CORE_2X_100_MHZ 5000
+#define CORE_2X_150_MHZ 7500
+#define CORE_2X_200_MHZ 10000
+#define CORE_2X_236_MHZ 16383
+
+#define GENI_DEFAULT_BW Bps_to_icc(1000)
+
#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
u32 geni_se_get_qup_hw_version(struct geni_se *se);
@@ -416,5 +450,16 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
+
+int geni_icc_get(struct geni_se *se, const char *icc_ddr);
+
+int geni_icc_set_bw(struct geni_se *se);
+void geni_icc_set_tag(struct geni_se *se, u32 tag);
+
+int geni_icc_enable(struct geni_se *se);
+
+int geni_icc_disable(struct geni_se *se);
+
+void geni_remove_earlycon_icc_vote(void);
#endif
#endif
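
A sketch, under assumptions, of how a GENI SE driver could use the new interconnect helpers during probe; the "qup-memory" DDR path name and the chosen bandwidth votes are assumptions:

static int example_geni_icc_init(struct geni_se *se)
{
	int ret;

	ret = geni_icc_get(se, "qup-memory");		/* assumed DDR path name */
	if (ret)
		return ret;

	se->icc_paths[GENI_TO_CORE].avg_bw = CORE_2X_50_MHZ;
	se->icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;

	return geni_icc_set_bw(se);			/* apply the votes */
}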
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 3d6a24697761..2e1193a3fb5f 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -44,6 +44,13 @@ enum qcom_scm_sec_dev_id {
QCOM_SCM_ICE_DEV_ID = 20,
};
+enum qcom_scm_ice_cipher {
+ QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0,
+ QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3,
+ QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
+};
+
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_MSS_MSA 0xF
#define QCOM_SCM_VMID_WLAN 0x18
@@ -88,6 +95,12 @@ extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
u32 size);
+extern bool qcom_scm_ice_available(void);
+extern int qcom_scm_ice_invalidate_key(u32 index);
+extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher,
+ u32 data_unit_size);
+
extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
@@ -138,6 +151,12 @@ static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
u32 offset, u32 size) { return -ENODEV; }
+static inline bool qcom_scm_ice_available(void) { return false; }
+static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; }
+static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher,
+ u32 data_unit_size) { return -ENODEV; }
+
static inline bool qcom_scm_hdcp_available(void) { return false; }
static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp) { return -ENODEV; }
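
An illustrative sketch of programming an inline-crypto (ICE) keyslot with the new calls; the slot index, key buffer, and the 512-byte-unit interpretation of data_unit_size are assumptions:

static int example_program_ice_key(u32 slot, const u8 *key, u32 key_size)
{
	if (!qcom_scm_ice_available())
		return -ENODEV;

	/* 8 x 512-byte units = 4096-byte crypto data units (assumed encoding) */
	return qcom_scm_ice_set_key(slot, key, key_size,
				    QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
}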
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 2c4737e6694a..977807e1be53 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2016 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _COMMON_HSI_H
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 95f5fd615852..cd1207ad4ada 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ETH_COMMON__
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 98cfc195abe2..68eda1c21cde 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -1,6 +1,7 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __FCOE_COMMON__
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 2f0a771a9176..157019f716f1 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ISCSI_COMMON__
diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h
index c6cfd39cd910..14f9e4c0ddd9 100644
--- a/include/linux/qed/iwarp_common.h
+++ b/include/linux/qed/iwarp_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __IWARP_COMMON__
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 6d15040c642c..4d58dc8943f0 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_CHAIN_H
@@ -37,6 +11,7 @@
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
@@ -52,9 +27,9 @@ enum qed_chain_mode {
};
enum qed_chain_use_mode {
- QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
- QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
- QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
+ QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
+ QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
};
enum qed_chain_cnt_type {
@@ -66,197 +41,230 @@ enum qed_chain_cnt_type {
};
struct qed_chain_next {
- struct regpair next_phys;
- void *next_virt;
+ struct regpair next_phys;
+ void *next_virt;
};
struct qed_chain_pbl_u16 {
- u16 prod_page_idx;
- u16 cons_page_idx;
+ u16 prod_page_idx;
+ u16 cons_page_idx;
};
struct qed_chain_pbl_u32 {
- u32 prod_page_idx;
- u32 cons_page_idx;
-};
-
-struct qed_chain_ext_pbl {
- dma_addr_t p_pbl_phys;
- void *p_pbl_virt;
+ u32 prod_page_idx;
+ u32 cons_page_idx;
};
struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
- u16 prod_idx;
- u16 cons_idx;
+ u16 prod_idx;
+ u16 cons_idx;
};
struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
- u32 prod_idx;
- u32 cons_idx;
+ u32 prod_idx;
+ u32 cons_idx;
};
struct addr_tbl_entry {
- void *virt_addr;
- dma_addr_t dma_map;
+ void *virt_addr;
+ dma_addr_t dma_map;
};
struct qed_chain {
- /* fastpath portion of the chain - required for commands such
+ /* Fastpath portion of the chain - required for commands such
* as produce / consume.
*/
+
/* Point to next element to produce/consume */
- void *p_prod_elem;
- void *p_cons_elem;
+ void *p_prod_elem;
+ void *p_cons_elem;
/* Fastpath portions of the PBL [if exists] */
+
struct {
/* Table for keeping the virtual and physical addresses of the
* chain pages, respectively to the physical addresses
* in the pbl table.
*/
- struct addr_tbl_entry *pp_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl;
union {
- struct qed_chain_pbl_u16 u16;
- struct qed_chain_pbl_u32 u32;
- } c;
- } pbl;
+ struct qed_chain_pbl_u16 u16;
+ struct qed_chain_pbl_u32 u32;
+ } c;
+ } pbl;
union {
- struct qed_chain_u16 chain16;
- struct qed_chain_u32 chain32;
- } u;
+ struct qed_chain_u16 chain16;
+ struct qed_chain_u32 chain32;
+ } u;
/* Capacity counts only usable elements */
- u32 capacity;
- u32 page_cnt;
+ u32 capacity;
+ u32 page_cnt;
- enum qed_chain_mode mode;
+ enum qed_chain_mode mode;
/* Elements information for fast calculations */
- u16 elem_per_page;
- u16 elem_per_page_mask;
- u16 elem_size;
- u16 next_page_mask;
- u16 usable_per_page;
- u8 elem_unusable;
+ u16 elem_per_page;
+ u16 elem_per_page_mask;
+ u16 elem_size;
+ u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
- u8 cnt_type;
+ enum qed_chain_cnt_type cnt_type;
/* Slowpath of the chain - required for initialization and destruction,
* but isn't involved in regular functionality.
*/
+ u32 page_size;
+
/* Base address of a pre-allocated buffer for pbl */
struct {
- dma_addr_t p_phys_table;
- void *p_virt_table;
- } pbl_sp;
+ __le64 *table_virt;
+ dma_addr_t table_phys;
+ size_t table_size;
+ } pbl_sp;
/* Address of first page of the chain - the address is required
- * for fastpath operation [consume/produce] but only for the the SINGLE
+ * for fastpath operation [consume/produce] but only for the SINGLE
* flavour which isn't considered fastpath [== SPQ].
*/
- void *p_virt_addr;
- dma_addr_t p_phys_addr;
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
/* Total number of elements [for entire chain] */
- u32 size;
+ u32 size;
+
+ enum qed_chain_use_mode intended_use;
+
+ bool b_external_pbl;
+};
+
+struct qed_chain_init_params {
+ enum qed_chain_mode mode;
+ enum qed_chain_use_mode intended_use;
+ enum qed_chain_cnt_type cnt_type;
- u8 intended_use;
+ u32 page_size;
+ u32 num_elems;
+ size_t elem_size;
- bool b_external_pbl;
+ void *ext_pbl_virt;
+ dma_addr_t ext_pbl_phys;
};
-#define QED_CHAIN_PBL_ENTRY_SIZE (8)
-#define QED_CHAIN_PAGE_SIZE (0x1000)
-#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
+#define QED_CHAIN_PAGE_SIZE SZ_4K
-#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
- (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
- (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
- (elem_size))) : 0)
+#define ELEMS_PER_PAGE(elem_size, page_size) \
+ ((page_size) / (elem_size))
-#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
- ((u32)(ELEMS_PER_PAGE(elem_size) - \
- UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
+ (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
+ (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \
+ 0)
-#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
- DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \
+ ((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \
+ UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))
-#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
-#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \
+ DIV_ROUND_UP((elem_cnt), \
+ USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
+
+#define is_chain_u16(p) \
+ ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p) \
+ ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
/* Accessors */
-static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+
+static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
{
- return p_chain->u.chain16.prod_idx;
+ return chain->u.chain16.prod_idx;
}
-static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
{
- return p_chain->u.chain16.cons_idx;
+ return chain->u.chain16.cons_idx;
}
-static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
{
- return p_chain->u.chain32.cons_idx;
+ return chain->u.chain32.prod_idx;
}
-static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
{
- u16 elem_per_page = p_chain->elem_per_page;
- u32 prod = p_chain->u.chain16.prod_idx;
- u32 cons = p_chain->u.chain16.cons_idx;
+ return chain->u.chain32.cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain)
+{
+ u32 prod = qed_chain_get_prod_idx(chain);
+ u32 cons = qed_chain_get_cons_idx(chain);
+ u16 elem_per_page = chain->elem_per_page;
u16 used;
if (prod < cons)
prod += (u32)U16_MAX + 1;
used = (u16)(prod - cons);
- if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
- used -= prod / elem_per_page - cons / elem_per_page;
+ if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ used -= (u16)(prod / elem_per_page - cons / elem_per_page);
+
+ return used;
+}
- return (u16)(p_chain->capacity - used);
+static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
+{
+ return (u16)(chain->capacity - qed_chain_get_elem_used(chain));
}
-static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain)
{
- u16 elem_per_page = p_chain->elem_per_page;
- u64 prod = p_chain->u.chain32.prod_idx;
- u64 cons = p_chain->u.chain32.cons_idx;
+ u64 prod = qed_chain_get_prod_idx_u32(chain);
+ u64 cons = qed_chain_get_cons_idx_u32(chain);
+ u16 elem_per_page = chain->elem_per_page;
u32 used;
if (prod < cons)
prod += (u64)U32_MAX + 1;
used = (u32)(prod - cons);
- if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+ if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
used -= (u32)(prod / elem_per_page - cons / elem_per_page);
- return p_chain->capacity - used;
+ return used;
+}
+
+static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
+{
+ return chain->capacity - qed_chain_get_elem_used_u32(chain);
}
-static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
{
- return p_chain->usable_per_page;
+ return chain->usable_per_page;
}
-static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
{
- return p_chain->elem_unusable;
+ return chain->elem_unusable;
}
-static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
{
- return p_chain->page_cnt;
+ return chain->page_cnt;
}
-static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
{
- return p_chain->pbl_sp.p_phys_table;
+ return chain->pbl_sp.table_phys;
}
/**
@@ -511,118 +519,6 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_init - Initalizes a basic chain struct
- *
- * @param p_chain
- * @param p_virt_addr
- * @param p_phys_addr physical address of allocated buffer's beginning
- * @param page_cnt number of pages in the allocated buffer
- * @param elem_size size of each element in the chain
- * @param intended_use
- * @param mode
- */
-static inline void qed_chain_init_params(struct qed_chain *p_chain,
- u32 page_cnt,
- u8 elem_size,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type)
-{
- /* chain fixed parameters */
- p_chain->p_virt_addr = NULL;
- p_chain->p_phys_addr = 0;
- p_chain->elem_size = elem_size;
- p_chain->intended_use = (u8)intended_use;
- p_chain->mode = mode;
- p_chain->cnt_type = (u8)cnt_type;
-
- p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
- p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
- p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
- p_chain->next_page_mask = (p_chain->usable_per_page &
- p_chain->elem_per_page_mask);
-
- p_chain->page_cnt = page_cnt;
- p_chain->capacity = p_chain->usable_per_page * page_cnt;
- p_chain->size = p_chain->elem_per_page * page_cnt;
-
- p_chain->pbl_sp.p_phys_table = 0;
- p_chain->pbl_sp.p_virt_table = NULL;
- p_chain->pbl.pp_addr_tbl = NULL;
-}
-
-/**
- * @brief qed_chain_init_mem -
- *
- * Initalizes a basic chain struct with its chain buffers
- *
- * @param p_chain
- * @param p_virt_addr virtual address of allocated buffer's beginning
- * @param p_phys_addr physical address of allocated buffer's beginning
- *
- */
-static inline void qed_chain_init_mem(struct qed_chain *p_chain,
- void *p_virt_addr, dma_addr_t p_phys_addr)
-{
- p_chain->p_virt_addr = p_virt_addr;
- p_chain->p_phys_addr = p_phys_addr;
-}
-
-/**
- * @brief qed_chain_init_pbl_mem -
- *
- * Initalizes a basic chain struct with its pbl buffers
- *
- * @param p_chain
- * @param p_virt_pbl pointer to a pre allocated side table which will hold
- * virtual page addresses.
- * @param p_phys_pbl pointer to a pre-allocated side table which will hold
- * physical page addresses.
- * @param pp_virt_addr_tbl
- * pointer to a pre-allocated side table which will hold
- * the virtual addresses of the chain pages.
- *
- */
-static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
- void *p_virt_pbl,
- dma_addr_t p_phys_pbl,
- struct addr_tbl_entry *pp_addr_tbl)
-{
- p_chain->pbl_sp.p_phys_table = p_phys_pbl;
- p_chain->pbl_sp.p_virt_table = p_virt_pbl;
- p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
-}
-
-/**
- * @brief qed_chain_init_next_ptr_elem -
- *
- * Initalizes a next pointer element
- *
- * @param p_chain
- * @param p_virt_curr virtual address of a chain page of which the next
- * pointer element is initialized
- * @param p_virt_next virtual address of the next chain page
- * @param p_phys_next physical address of the next chain page
- *
- */
-static inline void
-qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
- void *p_virt_curr,
- void *p_virt_next, dma_addr_t p_phys_next)
-{
- struct qed_chain_next *p_next;
- u32 size;
-
- size = p_chain->elem_size * p_chain->usable_per_page;
- p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
-
- DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
-
- p_next->next_virt = p_virt_next;
-}
-
-/**
* @brief qed_chain_get_last_elem -
*
* Returns a pointer to the last element of the chain
@@ -729,7 +625,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
for (i = 0; i < page_cnt; i++)
memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
- QED_CHAIN_PAGE_SIZE);
+ p_chain->page_size);
}
#endif
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index a1310482c4ed..812a4d751163 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_ETH_IF_H
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
index 46082480a2c3..16752eca5cbd 100644
--- a/include/linux/qed/qed_fcoe_if.h
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -1,4 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (c) 2019-2020 Marvell International Ltd. */
+
#ifndef _QED_FCOE_IF_H
#define _QED_FCOE_IF_H
#include <linux/types.h>
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 8cb76405cbce..cd6a5c7e56eb 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_IF_H
@@ -524,7 +498,7 @@ struct qed_fcoe_pf_params {
u8 bdq_pbl_num_entries[2];
};
-/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
u64 glbl_q_params_addr;
u64 bdq_pbl_base_addr[3];
@@ -620,6 +594,7 @@ enum qed_hw_err_type {
enum qed_dev_type {
QED_DEV_TYPE_BB,
QED_DEV_TYPE_AH,
+ QED_DEV_TYPE_E5,
};
struct qed_dev_info {
@@ -687,87 +662,72 @@ enum qed_protocol {
QED_PROTOCOL_FCOE,
};
-enum qed_link_mode_bits {
- QED_LM_FIBRE_BIT = BIT(0),
- QED_LM_Autoneg_BIT = BIT(1),
- QED_LM_Asym_Pause_BIT = BIT(2),
- QED_LM_Pause_BIT = BIT(3),
- QED_LM_1000baseT_Full_BIT = BIT(4),
- QED_LM_10000baseT_Full_BIT = BIT(5),
- QED_LM_10000baseKR_Full_BIT = BIT(6),
- QED_LM_20000baseKR2_Full_BIT = BIT(7),
- QED_LM_25000baseKR_Full_BIT = BIT(8),
- QED_LM_40000baseLR4_Full_BIT = BIT(9),
- QED_LM_50000baseKR2_Full_BIT = BIT(10),
- QED_LM_100000baseKR4_Full_BIT = BIT(11),
- QED_LM_TP_BIT = BIT(12),
- QED_LM_Backplane_BIT = BIT(13),
- QED_LM_1000baseKX_Full_BIT = BIT(14),
- QED_LM_10000baseKX4_Full_BIT = BIT(15),
- QED_LM_10000baseR_FEC_BIT = BIT(16),
- QED_LM_40000baseKR4_Full_BIT = BIT(17),
- QED_LM_40000baseCR4_Full_BIT = BIT(18),
- QED_LM_40000baseSR4_Full_BIT = BIT(19),
- QED_LM_25000baseCR_Full_BIT = BIT(20),
- QED_LM_25000baseSR_Full_BIT = BIT(21),
- QED_LM_50000baseCR2_Full_BIT = BIT(22),
- QED_LM_100000baseSR4_Full_BIT = BIT(23),
- QED_LM_100000baseCR4_Full_BIT = BIT(24),
- QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25),
- QED_LM_50000baseSR2_Full_BIT = BIT(26),
- QED_LM_1000baseX_Full_BIT = BIT(27),
- QED_LM_10000baseCR_Full_BIT = BIT(28),
- QED_LM_10000baseSR_Full_BIT = BIT(29),
- QED_LM_10000baseLR_Full_BIT = BIT(30),
- QED_LM_10000baseLRM_Full_BIT = BIT(31),
- QED_LM_COUNT = 32
+enum qed_fec_mode {
+ QED_FEC_MODE_NONE = BIT(0),
+ QED_FEC_MODE_FIRECODE = BIT(1),
+ QED_FEC_MODE_RS = BIT(2),
+ QED_FEC_MODE_AUTO = BIT(3),
+ QED_FEC_MODE_UNSUPPORTED = BIT(4),
};
struct qed_link_params {
- bool link_up;
-
-#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
-#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
-#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
-#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
-#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
-#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
- u32 override_flags;
- bool autoneg;
- u32 adv_speeds;
- u32 forced_speed;
-#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
-#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
-#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
- u32 pause_config;
-#define QED_LINK_LOOPBACK_NONE BIT(0)
-#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
-#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
-#define QED_LINK_LOOPBACK_EXT BIT(3)
-#define QED_LINK_LOOPBACK_MAC BIT(4)
- u32 loopback_mode;
- struct qed_link_eee_params eee;
+ bool link_up;
+
+ u32 override_flags;
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
+#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
+#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
+#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6)
+
+ bool autoneg;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
+ u32 forced_speed;
+
+ u32 pause_config;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
+#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
+#define QED_LINK_PAUSE_TX_ENABLE BIT(2)
+
+ u32 loopback_mode;
+#define QED_LINK_LOOPBACK_NONE BIT(0)
+#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
+#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
+#define QED_LINK_LOOPBACK_EXT BIT(3)
+#define QED_LINK_LOOPBACK_MAC BIT(4)
+#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5)
+#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6)
+#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7)
+#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8)
+#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9)
+
+ struct qed_link_eee_params eee;
+ u32 fec;
};
struct qed_link_output {
- bool link_up;
+ bool link_up;
- /* In QED_LM_* defs */
- u32 supported_caps;
- u32 advertised_caps;
- u32 lp_caps;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);
- u32 speed; /* In Mb/s */
- u8 duplex; /* In DUPLEX defs */
- u8 port; /* In PORT defs */
- bool autoneg;
- u32 pause_config;
+ u32 speed; /* In Mb/s */
+ u8 duplex; /* In DUPLEX defs */
+ u8 port; /* In PORT defs */
+ bool autoneg;
+ u32 pause_config;
/* EEE - capability & param */
- bool eee_supported;
- bool eee_active;
- u8 sup_caps;
- struct qed_link_eee_params eee;
+ bool eee_supported;
+ bool eee_active;
+ u8 sup_caps;
+ struct qed_link_eee_params eee;
+
+ u32 sup_fec;
+ u32 active_fec;
};
struct qed_probe_params {
@@ -988,13 +948,8 @@ struct qed_common_ops {
u8 dp_level);
int (*chain_alloc)(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- enum qed_chain_cnt_type cnt_type,
- u32 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain,
- struct qed_chain_ext_pbl *ext_pbl);
+ struct qed_chain *chain,
+ struct qed_chain_init_params *params);
void (*chain_free)(struct qed_dev *cdev,
struct qed_chain *p_chain);
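
A hedged sketch of the new allocation flow that replaces the long argument list: the caller fills a struct qed_chain_init_params and hands it to chain_alloc(). The ops/cdev pointers, ring size and element type below are hypothetical:

	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= 1024,				/* hypothetical ring size */
		.elem_size	= sizeof(struct example_elem),	/* hypothetical element */
	};
	int rc;

	rc = ops->common->chain_alloc(cdev, &chain, &params);
	if (rc)
		return rc;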
@@ -1429,16 +1384,15 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info,
enum igu_int_cmd int_cmd,
u8 upd_flg)
{
- struct igu_prod_cons_update igu_ack = { 0 };
+ u32 igu_ack;
- igu_ack.sb_id_and_flags =
- ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
- (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
- (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
- (IGU_SEG_ACCESS_REG <<
- IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+ igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_REG <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
- DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
+ DIRECT_REG_WR(sb_info->igu_addr, igu_ack);
	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee that all commands will be received (in-order) by the HW.
diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h
index ac2e6a3199a3..8e31a28e51b9 100644
--- a/include/linux/qed/qed_iov_if.h
+++ b/include/linux/qed/qed_iov_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_IOV_IF_H
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index d0df1bec5357..04180d9af560 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_ISCSI_IF_H
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 1313c34d9a68..2f64ed79cee9 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef _QED_LL2_IF_H
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 2d3ddd2b85e0..f464d85e88a4 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#ifndef _QED_RDMA_IF_H
#define _QED_RDMA_IF_H
#include <linux/types.h>
diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h
index 5a00c7a473bf..072da2f6da37 100644
--- a/include/linux/qed/qede_rdma.h
+++ b/include/linux/qed/qede_rdma.h
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qedr NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#ifndef QEDE_ROCE_H
#define QEDE_ROCE_H
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 480a57eb36cc..bab078b25834 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __RDMA_COMMON__
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index 473fba76aa77..ccddd7a96b67 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __ROCE_COMMON__
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 9a973ffbbff5..91896e8793bf 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __STORAGE_COMMON__
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index 4a4845193539..2b2c87d10e0a 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#ifndef __TCP_COMMON__
diff --git a/include/linux/raid/detect.h b/include/linux/raid/detect.h
index 37dd3f40cd31..1f029a71c3ef 100644
--- a/include/linux/raid/detect.h
+++ b/include/linux/raid/detect.h
@@ -1,3 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
void md_autodetect_dev(dev_t dev);
+
+#ifdef CONFIG_BLK_DEV_MD
+void md_run_setup(void);
+#else
+static inline void md_run_setup(void)
+{
+}
+#endif
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
deleted file mode 100644
index 8dfec085a20e..000000000000
--- a/include/linux/raid/md_u.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
- Copyright (C) 1998 Ingo Molnar
-
-*/
-#ifndef _MD_U_H
-#define _MD_U_H
-
-#include <uapi/linux/raid/md_u.h>
-
-extern int mdp_major;
-#endif
diff --git a/include/linux/random.h b/include/linux/random.h
index 45e1f8fa742b..f45b8be3e3c4 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -110,61 +110,12 @@ declare_get_random_var_wait(long)
unsigned long randomize_page(unsigned long start, unsigned long range);
-u32 prandom_u32(void);
-void prandom_bytes(void *buf, size_t nbytes);
-void prandom_seed(u32 seed);
-void prandom_reseed_late(void);
-
-struct rnd_state {
- __u32 s1, s2, s3, s4;
-};
-
-u32 prandom_u32_state(struct rnd_state *state);
-void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
-void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
-
-#define prandom_init_once(pcpu_state) \
- DO_ONCE(prandom_seed_full_state, (pcpu_state))
-
-/**
- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
- * @ep_ro: right open interval endpoint
- *
- * Returns a pseudo-random number that is in interval [0, ep_ro). Note
- * that the result depends on PRNG being well distributed in [0, ~0U]
- * u32 space. Here we use maximally equidistributed combined Tausworthe
- * generator, that is, prandom_u32(). This is useful when requesting a
- * random index of an array containing ep_ro elements, for example.
- *
- * Returns: pseudo-random number in interval [0, ep_ro)
- */
-static inline u32 prandom_u32_max(u32 ep_ro)
-{
- return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
-}
-
/*
- * Handle minimum values for seeds
+ * This is designed to be standalone for just prandom
+ * users, but for now we include it from <linux/random.h>
+ * for legacy reasons.
*/
-static inline u32 __seed(u32 x, u32 m)
-{
- return (x < m) ? x + m : x;
-}
-
-/**
- * prandom_seed_state - set seed for prandom_u32_state().
- * @state: pointer to state structure to receive the seed.
- * @seed: arbitrary 64-bit value to use as a seed.
- */
-static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
-{
- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
-
- state->s1 = __seed(i, 2U);
- state->s2 = __seed(i, 8U);
- state->s3 = __seed(i, 16U);
- state->s4 = __seed(i, 128U);
-}
+#include <linux/prandom.h>
#ifdef CONFIG_ARCH_RANDOM
# include <asm/archrandom.h>
@@ -207,10 +158,4 @@ static inline bool __init arch_get_random_long_early(unsigned long *v)
}
#endif
-/* Pseudo random number generator from numerical recipes. */
-static inline u32 next_pseudo_random32(u32 seed)
-{
- return seed * 1664525 + 1013904223;
-}
-
#endif /* _LINUX_RANDOM_H */
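Editorially hedged illustration of the relocation above: the prandom declarations themselves are unchanged, only their home header moved, so new users can include <linux/prandom.h> directly instead of relying on the legacy include from <linux/random.h>. "pick_random_slot" is a hypothetical helper, not part of this patch.

#include <linux/prandom.h>

/* Return a pseudo-random slot index in [0, nr_slots). */
static u32 pick_random_slot(u32 nr_slots)
{
	return prandom_u32_max(nr_slots);
}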
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 8ddf79e9207a..b17e0cd0a30c 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -2,41 +2,10 @@
#ifndef _LINUX_RATELIMIT_H
#define _LINUX_RATELIMIT_H
-#include <linux/param.h>
+#include <linux/ratelimit_types.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
-#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
-#define DEFAULT_RATELIMIT_BURST 10
-
-/* issue num suppressed message on exit */
-#define RATELIMIT_MSG_ON_RELEASE BIT(0)
-
-struct ratelimit_state {
- raw_spinlock_t lock; /* protect the state */
-
- int interval;
- int burst;
- int printed;
- int missed;
- unsigned long begin;
- unsigned long flags;
-};
-
-#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .interval = interval_init, \
- .burst = burst_init, \
- }
-
-#define RATELIMIT_STATE_INIT_DISABLED \
- RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
-
-#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
- \
- struct ratelimit_state name = \
- RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
-
static inline void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
@@ -73,9 +42,6 @@ ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags)
extern struct ratelimit_state printk_ratelimit_state;
-extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
-#define __ratelimit(state) ___ratelimit(state, __func__)
-
#ifdef CONFIG_PRINTK
#define WARN_ON_RATELIMIT(condition, state) ({ \
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
new file mode 100644
index 000000000000..b676aa419eef
--- /dev/null
+++ b/include/linux/ratelimit_types.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RATELIMIT_TYPES_H
+#define _LINUX_RATELIMIT_TYPES_H
+
+#include <linux/bits.h>
+#include <linux/param.h>
+#include <linux/spinlock_types.h>
+
+#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
+#define DEFAULT_RATELIMIT_BURST 10
+
+/* issue num suppressed message on exit */
+#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+
+struct ratelimit_state {
+ raw_spinlock_t lock; /* protect the state */
+
+ int interval;
+ int burst;
+ int printed;
+ int missed;
+ unsigned long begin;
+ unsigned long flags;
+};
+
+#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .interval = interval_init, \
+ .burst = burst_init, \
+ }
+
+#define RATELIMIT_STATE_INIT_DISABLED \
+ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
+
+#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
+ \
+ struct ratelimit_state name = \
+ RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+
+extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
+#define __ratelimit(state) ___ratelimit(state, __func__)
+
+#endif /* _LINUX_RATELIMIT_TYPES_H */
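The split above only moves the types and macros into <linux/ratelimit_types.h>; behaviour is unchanged. A minimal usage sketch, with "my_rs" and the pr_warn() message as placeholders:

#include <linux/printk.h>
#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(my_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void report_error(void)
{
	/* Emits at most DEFAULT_RATELIMIT_BURST messages per interval. */
	if (__ratelimit(&my_rs))
		pr_warn("something went wrong\n");
}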
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index df587d181844..7a6fc9956510 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -248,6 +248,8 @@ static inline void __list_splice_init_rcu(struct list_head *list,
*/
sync();
+ ASSERT_EXCLUSIVE_ACCESS(*first);
+ ASSERT_EXCLUSIVE_ACCESS(*last);
/*
* Readers are finished with the source list, so perform splice.
@@ -512,7 +514,7 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
* @right: The hlist head on the right
*
* The lists start out as [@left ][node1 ... ] and
- [@right ][node2 ... ]
+ * [@right ][node2 ... ]
* The lists end up as [@left ][node2 ... ]
* [@right ][node1 ... ]
*/
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 9670b54b484a..ff3e94779e73 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -162,7 +162,7 @@ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
* The barrier() is needed to make sure compiler doesn't cache first element [1],
* as this loop can be restarted [2]
* [1] Documentation/core-api/atomic_ops.rst around line 114
- * [2] Documentation/RCU/rculist_nulls.txt around line 146
+ * [2] Documentation/RCU/rculist_nulls.rst around line 146
*/
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
for (({barrier();}), \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 659cbfa7581a..d15d46db61f7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -828,17 +828,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/*
* Does the specified offset indicate that the corresponding rcu_head
- * structure can be handled by kfree_rcu()?
+ * structure can be handled by kvfree_rcu()?
*/
-#define __is_kfree_rcu_offset(offset) ((offset) < 4096)
+#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
/*
* Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
*/
-#define __kfree_rcu(head, offset) \
+#define __kvfree_rcu(head, offset) \
do { \
- BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
- kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
+ BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \
+ kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
} while (0)
/**
@@ -857,7 +857,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* Because the functions are not allowed in the low-order 4096 bytes of
* kernel virtual memory, offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
- * be generated in __kfree_rcu(). If this error is triggered, you can
+ * be generated in __kvfree_rcu(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
@@ -872,7 +872,46 @@ do { \
typeof (ptr) ___p = (ptr); \
\
if (___p) \
- __kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
+ __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
+} while (0)
+
+/**
+ * kvfree_rcu() - kvfree an object after a grace period.
+ *
+ * This macro takes one or two arguments, depending on whether
+ * the object being freed is head-less or embeds an rcu_head.
+ * If it embeds an rcu_head, the semantics are the same as they
+ * were before:
+ *
+ *     kvfree_rcu(ptr, rhf);
+ *
+ * where @ptr is the pointer to be kvfree()'d and @rhf is the
+ * name of the rcu_head structure within the type of @ptr.
+ *
+ * In the head-less variant only one argument is passed, namely
+ * the pointer that has to be freed after a grace period. The
+ * semantics are therefore
+ *
+ *     kvfree_rcu(ptr);
+ *
+ * where @ptr is the pointer to be kvfree()'d.
+ *
+ * Please note that the head-less way of freeing may only be
+ * used from a context in which might_sleep() is allowed.
+ * Otherwise, please switch to embedding an rcu_head structure
+ * within the type of @ptr.
+ */
+#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
+ kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
+
+#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
+#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf)
+#define kvfree_rcu_arg_1(ptr) \
+do { \
+ typeof(ptr) ___p = (ptr); \
+ \
+ if (___p) \
+ kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
} while (0)
/*
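A minimal sketch of the two calling conventions documented above; "struct foo", drop_foo() and drop_blob() are made up for illustration and are not part of this patch.

struct foo {
	int data;
	struct rcu_head rcu;
};

static void drop_foo(struct foo *p)
{
	/* Two-argument form: the rcu_head "rcu" is embedded in the object. */
	kvfree_rcu(p, rcu);
}

static void drop_blob(void *blob)
{
	/* Head-less form: may sleep, so only valid in sleepable context. */
	kvfree_rcu(blob);
}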
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index 4c25a41f8b27..d9015aac78c6 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -36,8 +36,8 @@ void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
/**
* rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
*
- * When synchronize_rcu_trace() is invoked by one task, then that task
- * is guaranteed to block until all other tasks exit their read-side
+ * When synchronize_rcu_tasks_trace() is invoked by one task, then that
+ * task is guaranteed to block until all other tasks exit their read-side
* critical sections. Similarly, if call_rcu_trace() is invoked on one
* task while other tasks are within RCU read-side critical sections,
* invocation of the corresponding RCU callback is deferred until after
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 8512caeb7682..5cc9637cac16 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -34,9 +34,25 @@ static inline void synchronize_rcu_expedited(void)
synchronize_rcu();
}
-static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+/*
+ * Declare kvfree() here rather than including <linux/mm.h>,
+ * where it is defined: pulling that header into this context
+ * causes a cascade of compile errors, so a plain declaration
+ * is the simpler option.
+ */
+extern void kvfree(const void *addr);
+
+static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
- call_rcu(head, func);
+ if (head) {
+ call_rcu(head, func);
+ return;
+ }
+
+ // kvfree_rcu(one_arg) call.
+ might_sleep();
+ synchronize_rcu();
+ kvfree((void *) func);
}
void rcu_qs(void);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d5cc9d675987..d2f4064ebd1d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -33,7 +33,7 @@ static inline void rcu_virt_note_context_switch(int cpu)
}
void synchronize_rcu_expedited(void);
-void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index cb666b9c6b6a..1970ed59d49f 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -18,6 +18,7 @@
#include <linux/bug.h>
#include <linux/lockdep.h>
#include <linux/iopoll.h>
+#include <linux/fwnode.h>
struct module;
struct clk;
@@ -80,36 +81,6 @@ struct reg_sequence {
}
#define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0)
-#define regmap_update_bits(map, reg, mask, val) \
- regmap_update_bits_base(map, reg, mask, val, NULL, false, false)
-#define regmap_update_bits_async(map, reg, mask, val)\
- regmap_update_bits_base(map, reg, mask, val, NULL, true, false)
-#define regmap_update_bits_check(map, reg, mask, val, change)\
- regmap_update_bits_base(map, reg, mask, val, change, false, false)
-#define regmap_update_bits_check_async(map, reg, mask, val, change)\
- regmap_update_bits_base(map, reg, mask, val, change, true, false)
-
-#define regmap_write_bits(map, reg, mask, val) \
- regmap_update_bits_base(map, reg, mask, val, NULL, false, true)
-
-#define regmap_field_write(field, val) \
- regmap_field_update_bits_base(field, ~0, val, NULL, false, false)
-#define regmap_field_force_write(field, val) \
- regmap_field_update_bits_base(field, ~0, val, NULL, false, true)
-#define regmap_field_update_bits(field, mask, val)\
- regmap_field_update_bits_base(field, mask, val, NULL, false, false)
-#define regmap_field_force_update_bits(field, mask, val) \
- regmap_field_update_bits_base(field, mask, val, NULL, false, true)
-
-#define regmap_fields_write(field, id, val) \
- regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false)
-#define regmap_fields_force_write(field, id, val) \
- regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true)
-#define regmap_fields_update_bits(field, id, mask, val)\
- regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false)
-#define regmap_fields_force_update_bits(field, id, mask, val) \
- regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true)
-
/**
* regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs
*
@@ -304,7 +275,7 @@ typedef void (*regmap_unlock)(void *);
* readable if it belongs to one of the ranges specified
* by rd_noinc_table).
* @disable_locking: This regmap is either protected by external means or
- * is guaranteed not be be accessed from multiple threads.
+ * is guaranteed not to be accessed from multiple threads.
* Don't use any locking mechanisms.
* @lock: Optional lock callback (overrides regmap's default lock
* function, based on spinlock or mutex).
@@ -1054,6 +1025,42 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
+
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, false);
+}
+
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, true, false);
+}
+
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, false, false);
+}
+
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return regmap_update_bits_base(map, reg, mask, val,
+ change, true, false);
+}
+
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits_base(map, reg, mask, val, NULL, false, true);
+}
+
int regmap_get_val_bytes(struct regmap *map);
int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
@@ -1152,6 +1159,65 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
+
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val,
+ NULL, false, false);
+}
+
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ return regmap_field_update_bits_base(field, ~0, val, NULL, false, true);
+}
+
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, false);
+}
+
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_field_update_bits_base(field, mask, val,
+ NULL, false, true);
+}
+
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, false);
+}
+
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, ~0, val,
+ NULL, false, true);
+}
+
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, false);
+}
+
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_fields_update_bits_base(field, id, mask, val,
+ NULL, false, true);
+}
+
/**
* struct regmap_irq_type - IRQ type definitions.
*
@@ -1311,21 +1377,23 @@ struct regmap_irq_chip_data;
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data);
-int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq,
- int irq_flags, int irq_base,
- const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data);
+int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
int irq_flags, int irq_base,
const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data);
-int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np,
- struct regmap *map, int irq, int irq_flags,
- int irq_base,
- const struct regmap_irq_chip *chip,
- struct regmap_irq_chip_data **data);
+int devm_regmap_add_irq_chip_fwnode(struct device *dev,
+ struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
void devm_regmap_del_irq_chip(struct device *dev, int irq,
struct regmap_irq_chip_data *data);
@@ -1458,6 +1526,103 @@ static inline int regmap_fields_update_bits_base(struct regmap_field *field,
return -EINVAL;
}
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_force_write(struct regmap_field *field,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_field_force_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_fields_force_write(struct regmap_field *field,
+ unsigned int id, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int
+regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_get_val_bytes(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
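The conversion above turns the regmap_update_bits() family from macros into static inline wrappers around regmap_update_bits_base(), so callers gain type checking but otherwise behave exactly as before. A minimal sketch, with MY_REG, MY_MASK and MY_VAL as placeholder constants:

#include <linux/bits.h>
#include <linux/regmap.h>

#define MY_REG	0x10		/* placeholder register offset */
#define MY_MASK	BIT(0)		/* placeholder field mask */
#define MY_VAL	BIT(0)		/* placeholder field value */

static int enable_feature(struct regmap *map)
{
	/* Synchronous read-modify-write of the masked bits. */
	return regmap_update_bits(map, MY_REG, MY_MASK, MY_VAL);
}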
diff --git a/include/linux/regset.h b/include/linux/regset.h
index 46d6ae68c455..c3403f328257 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -17,6 +17,52 @@
struct task_struct;
struct user_regset;
+struct membuf {
+ void *p;
+ size_t left;
+};
+
+static inline int membuf_zero(struct membuf *s, size_t size)
+{
+ if (s->left) {
+ if (size > s->left)
+ size = s->left;
+ memset(s->p, 0, size);
+ s->p += size;
+ s->left -= size;
+ }
+ return s->left;
+}
+
+static inline int membuf_write(struct membuf *s, const void *v, size_t size)
+{
+ if (s->left) {
+ if (size > s->left)
+ size = s->left;
+ memcpy(s->p, v, size);
+ s->p += size;
+ s->left -= size;
+ }
+ return s->left;
+}
+
+/* current s->p must be aligned for v; v must be a scalar */
+#define membuf_store(s, v) \
+({ \
+ struct membuf *__s = (s); \
+ if (__s->left) { \
+ typeof(v) __v = (v); \
+ size_t __size = sizeof(__v); \
+ if (unlikely(__size > __s->left)) { \
+ __size = __s->left; \
+ memcpy(__s->p, &__v, __size); \
+ } else { \
+ *(typeof(__v + 0) *)__s->p = __v; \
+ } \
+ __s->p += __size; \
+ __s->left -= __size; \
+ } \
+ __s->left;})
/**
* user_regset_active_fn - type of @active function in &struct user_regset
@@ -36,26 +82,9 @@ struct user_regset;
typedef int user_regset_active_fn(struct task_struct *target,
const struct user_regset *regset);
-/**
- * user_regset_get_fn - type of @get function in &struct user_regset
- * @target: thread being examined
- * @regset: regset being examined
- * @pos: offset into the regset data to access, in bytes
- * @count: amount of data to copy, in bytes
- * @kbuf: if not %NULL, a kernel-space pointer to copy into
- * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into
- *
- * Fetch register values. Return %0 on success; -%EIO or -%ENODEV
- * are usual failure returns. The @pos and @count values are in
- * bytes, but must be properly aligned. If @kbuf is non-null, that
- * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then
- * ubuf gives a userland pointer to access directly, and an -%EFAULT
- * return value is possible.
- */
-typedef int user_regset_get_fn(struct task_struct *target,
+typedef int user_regset_get2_fn(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf);
+ struct membuf to);
/**
* user_regset_set_fn - type of @set function in &struct user_regset
@@ -104,28 +133,6 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
int immediate);
/**
- * user_regset_get_size_fn - type of @get_size function in &struct user_regset
- * @target: thread being examined
- * @regset: regset being examined
- *
- * This call is optional; usually the pointer is %NULL.
- *
- * When provided, this function must return the current size of regset
- * data, as observed by the @get function in &struct user_regset. The
- * value returned must be a multiple of @size. The returned size is
- * required to be valid only until the next time (if any) @regset is
- * modified for @target.
- *
- * This function is intended for dynamically sized regsets. A regset
- * that is statically sized does not need to implement it.
- *
- * This function should not be called directly: instead, callers should
- * call regset_size() to determine the current size of a regset.
- */
-typedef unsigned int user_regset_get_size_fn(struct task_struct *target,
- const struct user_regset *regset);
-
-/**
* struct user_regset - accessible thread CPU state
* @n: Number of slots (registers).
* @size: Size in bytes of a slot (register).
@@ -136,7 +143,6 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target,
* @set: Function to store values.
* @active: Function to report if regset is active, or %NULL.
* @writeback: Function to write data back to user memory, or %NULL.
- * @get_size: Function to return the regset's size, or %NULL.
*
* This data structure describes a machine resource we call a register set.
* This is part of the state of an individual thread, not necessarily
@@ -144,12 +150,7 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target,
* similar slots, given by @n. Each slot is @size bytes, and aligned to
* @align bytes (which is at least @size). For dynamically-sized
* regsets, @n must contain the maximum possible number of slots for the
- * regset, and @get_size must point to a function that returns the
- * current regset size.
- *
- * Callers that need to know only the current size of the regset and do
- * not care about its internal structure should call regset_size()
- * instead of inspecting @n or calling @get_size.
+ * regset.
*
* For backward compatibility, the @get and @set methods must pad to, or
* accept, @n * @size bytes, even if the current regset size is smaller.
@@ -185,11 +186,10 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target,
* omitted when there is an @active function and it returns zero.
*/
struct user_regset {
- user_regset_get_fn *get;
+ user_regset_get2_fn *regset_get;
user_regset_set_fn *set;
user_regset_active_fn *active;
user_regset_writeback_fn *writeback;
- user_regset_get_size_fn *get_size;
unsigned int n;
unsigned int size;
unsigned int align;
@@ -238,44 +238,6 @@ struct user_regset_view {
*/
const struct user_regset_view *task_user_regset_view(struct task_struct *tsk);
-
-/*
- * These are helpers for writing regset get/set functions in arch code.
- * Because @start_pos and @end_pos are always compile-time constants,
- * these are inlined into very little code though they look large.
- *
- * Use one or more calls sequentially for each chunk of regset data stored
- * contiguously in memory. Call with constants for @start_pos and @end_pos,
- * giving the range of byte positions in the regset that data corresponds
- * to; @end_pos can be -1 if this chunk is at the end of the regset layout.
- * Each call updates the arguments to point past its chunk.
- */
-
-static inline int user_regset_copyout(unsigned int *pos, unsigned int *count,
- void **kbuf,
- void __user **ubuf, const void *data,
- const int start_pos, const int end_pos)
-{
- if (*count == 0)
- return 0;
- BUG_ON(*pos < start_pos);
- if (end_pos < 0 || *pos < end_pos) {
- unsigned int copy = (end_pos < 0 ? *count
- : min(*count, end_pos - *pos));
- data += *pos - start_pos;
- if (*kbuf) {
- memcpy(*kbuf, data, copy);
- *kbuf += copy;
- } else if (__copy_to_user(*ubuf, data, copy))
- return -EFAULT;
- else
- *ubuf += copy;
- *pos += copy;
- *count -= copy;
- }
- return 0;
-}
-
static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
const void **kbuf,
const void __user **ubuf, void *data,
@@ -301,35 +263,6 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
return 0;
}
-/*
- * These two parallel the two above, but for portions of a regset layout
- * that always read as all-zero or for which writes are ignored.
- */
-static inline int user_regset_copyout_zero(unsigned int *pos,
- unsigned int *count,
- void **kbuf, void __user **ubuf,
- const int start_pos,
- const int end_pos)
-{
- if (*count == 0)
- return 0;
- BUG_ON(*pos < start_pos);
- if (end_pos < 0 || *pos < end_pos) {
- unsigned int copy = (end_pos < 0 ? *count
- : min(*count, end_pos - *pos));
- if (*kbuf) {
- memset(*kbuf, 0, copy);
- *kbuf += copy;
- } else if (clear_user(*ubuf, copy))
- return -EFAULT;
- else
- *ubuf += copy;
- *pos += copy;
- *count -= copy;
- }
- return 0;
-}
-
static inline int user_regset_copyin_ignore(unsigned int *pos,
unsigned int *count,
const void **kbuf,
@@ -353,31 +286,19 @@ static inline int user_regset_copyin_ignore(unsigned int *pos,
return 0;
}
-/**
- * copy_regset_to_user - fetch a thread's user_regset data into user memory
- * @target: thread to be examined
- * @view: &struct user_regset_view describing user thread machine state
- * @setno: index in @view->regsets
- * @offset: offset into the regset data, in bytes
- * @size: amount of data to copy, in bytes
- * @data: user-mode pointer to copy into
- */
-static inline int copy_regset_to_user(struct task_struct *target,
- const struct user_regset_view *view,
- unsigned int setno,
- unsigned int offset, unsigned int size,
- void __user *data)
-{
- const struct user_regset *regset = &view->regsets[setno];
-
- if (!regset->get)
- return -EOPNOTSUPP;
+extern int regset_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int size, void *data);
- if (!access_ok(data, size))
- return -EFAULT;
+extern int regset_get_alloc(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int size,
+ void **data);
- return regset->get(target, regset, offset, size, NULL, data);
-}
+extern int copy_regset_to_user(struct task_struct *target,
+ const struct user_regset_view *view,
+ unsigned int setno, unsigned int offset,
+ unsigned int size, void __user *data);
/**
* copy_regset_from_user - store into thread's user_regset data from user memory
@@ -405,21 +326,4 @@ static inline int copy_regset_from_user(struct task_struct *target,
return regset->set(target, regset, offset, size, NULL, data);
}
-/**
- * regset_size - determine the current size of a regset
- * @target: thread to be examined
- * @regset: regset to be examined
- *
- * Note that the returned size is valid only until the next time
- * (if any) @regset is modified for @target.
- */
-static inline unsigned int regset_size(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!regset->get_size)
- return regset->n * regset->size;
- else
- return regset->get_size(target, regset);
-}
-
#endif /* <linux/regset.h> */
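With the membuf-based ->regset_get() hook above, an architecture's getter no longer juggles pos/count/kbuf/ubuf by hand. A hedged sketch of what such a getter could look like; "struct my_regs" and my_task_regs() are hypothetical stand-ins for arch-specific state.

static int my_regset_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	const struct my_regs *regs = my_task_regs(target);

	/* Copy the live register block... */
	membuf_write(&to, regs, sizeof(*regs));
	/* ...and zero-fill whatever space the caller still expects. */
	return membuf_zero(&to, to.left);
}

/* Wired up via .regset_get = my_regset_get in the struct user_regset entry. */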
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 6a92fd3105a3..2024944fd2f7 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -32,10 +32,12 @@
#define __LINUX_REGULATOR_CONSUMER_H_
#include <linux/err.h>
+#include <linux/suspend.h>
struct device;
struct notifier_block;
struct regmap;
+struct regulator_dev;
/*
* Regulator operating modes.
@@ -277,6 +279,14 @@ int regulator_unregister_notifier(struct regulator *regulator,
void devm_regulator_unregister_notifier(struct regulator *regulator,
struct notifier_block *nb);
+/* regulator suspend */
+int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state);
+int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state);
+int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV,
+ int max_uV, suspend_state_t state);
+
/* driver data - core doesn't touch */
void *regulator_get_drvdata(struct regulator *regulator);
void regulator_set_drvdata(struct regulator *regulator, void *data);
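A minimal sketch of the newly exported suspend-state consumer call; the 1.8 V target, the suspend-to-RAM state and the struct regulator handle are placeholders, and error handling is elided.

#include <linux/regulator/consumer.h>
#include <linux/suspend.h>

static int prepare_for_suspend(struct regulator *reg)
{
	/* Request 1.8 V on this supply while the system is in S2RAM. */
	return regulator_set_suspend_voltage(reg, 1800000, 1800000,
					     PM_SUSPEND_MEM);
}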
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 7eb9fea8e482..8539f34ae42b 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -117,7 +117,7 @@ enum regulator_status {
* suspended.
* @set_suspend_mode: Set the operating mode for the regulator when the
* system is suspended.
- *
+ * @resume: Resume operation of suspended regulator.
* @set_pull_down: Configure the regulator to pull down when the regulator
* is disabled.
*
@@ -305,6 +305,9 @@ enum regulator_type {
* @enable_time: Time taken for initial enable of regulator (in uS).
* @off_on_delay: guard time (in uS), before re-enabling a regulator
*
+ * @poll_enabled_time: The polling interval (in uS) to use while checking that
+ * the regulator was actually enabled, capped at @enable_time.
+ *
* @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode
*/
struct regulator_desc {
@@ -372,6 +375,8 @@ struct regulator_desc {
unsigned int off_on_delay;
+ unsigned int poll_enabled_time;
+
unsigned int (*of_map_mode)(unsigned int mode);
};
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index a84cc8879c3e..8a56f033b6cd 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -101,6 +101,7 @@ struct regulator_state {
* @system_load: Load that isn't captured by any consumer requests.
*
* @max_spread: Max possible spread between coupled regulators
+ * @max_uV_step: Max possible step change in voltage
* @valid_modes_mask: Mask of modes which may be configured by consumers.
* @valid_ops_mask: Operations which may be performed by consumers.
*
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
new file mode 100644
index 000000000000..1bbd3014f906
--- /dev/null
+++ b/include/linux/regulator/pca9450.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2020 NXP. */
+
+#ifndef __LINUX_REG_PCA9450_H__
+#define __LINUX_REG_PCA9450_H__
+
+#include <linux/regmap.h>
+
+enum pca9450_chip_type {
+ PCA9450_TYPE_PCA9450A = 0,
+ PCA9450_TYPE_PCA9450BC,
+ PCA9450_TYPE_AMOUNT,
+};
+
+enum {
+ PCA9450_BUCK1 = 0,
+ PCA9450_BUCK2,
+ PCA9450_BUCK3,
+ PCA9450_BUCK4,
+ PCA9450_BUCK5,
+ PCA9450_BUCK6,
+ PCA9450_LDO1,
+ PCA9450_LDO2,
+ PCA9450_LDO3,
+ PCA9450_LDO4,
+ PCA9450_LDO5,
+ PCA9450_REGULATOR_CNT,
+};
+
+enum {
+ PCA9450_DVS_LEVEL_RUN = 0,
+ PCA9450_DVS_LEVEL_STANDBY,
+ PCA9450_DVS_LEVEL_MAX,
+};
+
+#define PCA9450_BUCK1_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK2_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK3_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK4_VOLTAGE_NUM 0x80
+
+#define PCA9450_BUCK5_VOLTAGE_NUM 0x80
+#define PCA9450_BUCK6_VOLTAGE_NUM 0x80
+
+#define PCA9450_LDO1_VOLTAGE_NUM 0x08
+#define PCA9450_LDO2_VOLTAGE_NUM 0x08
+#define PCA9450_LDO3_VOLTAGE_NUM 0x20
+#define PCA9450_LDO4_VOLTAGE_NUM 0x20
+#define PCA9450_LDO5_VOLTAGE_NUM 0x10
+
+enum {
+ PCA9450_REG_DEV_ID = 0x00,
+ PCA9450_REG_INT1 = 0x01,
+ PCA9450_REG_INT1_MSK = 0x02,
+ PCA9450_REG_STATUS1 = 0x03,
+ PCA9450_REG_STATUS2 = 0x04,
+ PCA9450_REG_PWRON_STAT = 0x05,
+ PCA9450_REG_SWRST = 0x06,
+ PCA9450_REG_PWRCTRL = 0x07,
+ PCA9450_REG_RESET_CTRL = 0x08,
+ PCA9450_REG_CONFIG1 = 0x09,
+ PCA9450_REG_CONFIG2 = 0x0A,
+ PCA9450_REG_BUCK123_DVS = 0x0C,
+ PCA9450_REG_BUCK1OUT_LIMIT = 0x0D,
+ PCA9450_REG_BUCK2OUT_LIMIT = 0x0E,
+ PCA9450_REG_BUCK3OUT_LIMIT = 0x0F,
+ PCA9450_REG_BUCK1CTRL = 0x10,
+ PCA9450_REG_BUCK1OUT_DVS0 = 0x11,
+ PCA9450_REG_BUCK1OUT_DVS1 = 0x12,
+ PCA9450_REG_BUCK2CTRL = 0x13,
+ PCA9450_REG_BUCK2OUT_DVS0 = 0x14,
+ PCA9450_REG_BUCK2OUT_DVS1 = 0x15,
+ PCA9450_REG_BUCK3CTRL = 0x16,
+ PCA9450_REG_BUCK3OUT_DVS0 = 0x17,
+ PCA9450_REG_BUCK3OUT_DVS1 = 0x18,
+ PCA9450_REG_BUCK4CTRL = 0x19,
+ PCA9450_REG_BUCK4OUT = 0x1A,
+ PCA9450_REG_BUCK5CTRL = 0x1B,
+ PCA9450_REG_BUCK5OUT = 0x1C,
+ PCA9450_REG_BUCK6CTRL = 0x1D,
+ PCA9450_REG_BUCK6OUT = 0x1E,
+ PCA9450_REG_LDO_AD_CTRL = 0x20,
+ PCA9450_REG_LDO1CTRL = 0x21,
+ PCA9450_REG_LDO2CTRL = 0x22,
+ PCA9450_REG_LDO3CTRL = 0x23,
+ PCA9450_REG_LDO4CTRL = 0x24,
+ PCA9450_REG_LDO5CTRL_L = 0x25,
+ PCA9450_REG_LDO5CTRL_H = 0x26,
+ PCA9450_REG_LOADSW_CTRL = 0x2A,
+ PCA9450_REG_VRFLT1_STS = 0x2B,
+ PCA9450_REG_VRFLT2_STS = 0x2C,
+ PCA9450_REG_VRFLT1_MASK = 0x2D,
+ PCA9450_REG_VRFLT2_MASK = 0x2E,
+ PCA9450_MAX_REGISTER = 0x2F,
+};
+
+/* PCA9450 BUCK ENMODE bits */
+#define BUCK_ENMODE_OFF 0x00
+#define BUCK_ENMODE_ONREQ 0x01
+#define BUCK_ENMODE_ONREQ_STBYREQ 0x02
+#define BUCK_ENMODE_ON 0x03
+
+/* PCA9450_REG_BUCK1_CTRL bits */
+#define BUCK1_RAMP_MASK 0xC0
+#define BUCK1_RAMP_25MV 0x0
+#define BUCK1_RAMP_12P5MV 0x1
+#define BUCK1_RAMP_6P25MV 0x2
+#define BUCK1_RAMP_3P125MV 0x3
+#define BUCK1_DVS_CTRL 0x10
+#define BUCK1_AD 0x08
+#define BUCK1_FPWM 0x04
+#define BUCK1_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK2_CTRL bits */
+#define BUCK2_RAMP_MASK 0xC0
+#define BUCK2_RAMP_25MV 0x0
+#define BUCK2_RAMP_12P5MV 0x1
+#define BUCK2_RAMP_6P25MV 0x2
+#define BUCK2_RAMP_3P125MV 0x3
+#define BUCK2_DVS_CTRL 0x10
+#define BUCK2_AD 0x08
+#define BUCK2_FPWM 0x04
+#define BUCK2_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK3_CTRL bits */
+#define BUCK3_RAMP_MASK 0xC0
+#define BUCK3_RAMP_25MV 0x0
+#define BUCK3_RAMP_12P5MV 0x1
+#define BUCK3_RAMP_6P25MV 0x2
+#define BUCK3_RAMP_3P125MV 0x3
+#define BUCK3_DVS_CTRL 0x10
+#define BUCK3_AD 0x08
+#define BUCK3_FPWM 0x04
+#define BUCK3_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK4_CTRL bits */
+#define BUCK4_AD 0x08
+#define BUCK4_FPWM 0x04
+#define BUCK4_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK5_CTRL bits */
+#define BUCK5_AD 0x08
+#define BUCK5_FPWM 0x04
+#define BUCK5_ENMODE_MASK 0x03
+
+/* PCA9450_REG_BUCK6_CTRL bits */
+#define BUCK6_AD 0x08
+#define BUCK6_FPWM 0x04
+#define BUCK6_ENMODE_MASK 0x03
+
+/* PCA9450_BUCK1OUT_DVS0 bits */
+#define BUCK1OUT_DVS0_MASK 0x7F
+#define BUCK1OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK1OUT_DVS1 bits */
+#define BUCK1OUT_DVS1_MASK 0x7F
+#define BUCK1OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_BUCK2OUT_DVS0 bits */
+#define BUCK2OUT_DVS0_MASK 0x7F
+#define BUCK2OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK2OUT_DVS1 bits */
+#define BUCK2OUT_DVS1_MASK 0x7F
+#define BUCK2OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_BUCK3OUT_DVS0 bits */
+#define BUCK3OUT_DVS0_MASK 0x7F
+#define BUCK3OUT_DVS0_DEFAULT 0x14
+
+/* PCA9450_BUCK3OUT_DVS1 bits */
+#define BUCK3OUT_DVS1_MASK 0x7F
+#define BUCK3OUT_DVS1_DEFAULT 0x14
+
+/* PCA9450_REG_BUCK4OUT bits */
+#define BUCK4OUT_MASK 0x7F
+#define BUCK4OUT_DEFAULT 0x6C
+
+/* PCA9450_REG_BUCK5OUT bits */
+#define BUCK5OUT_MASK 0x7F
+#define BUCK5OUT_DEFAULT 0x30
+
+/* PCA9450_REG_BUCK6OUT bits */
+#define BUCK6OUT_MASK 0x7F
+#define BUCK6OUT_DEFAULT 0x14
+
+/* PCA9450_REG_LDO1_VOLT bits */
+#define LDO1_EN_MASK 0xC0
+#define LDO1OUT_MASK 0x07
+
+/* PCA9450_REG_LDO2_VOLT bits */
+#define LDO2_EN_MASK 0xC0
+#define LDO2OUT_MASK 0x07
+
+/* PCA9450_REG_LDO3_VOLT bits */
+#define LDO3_EN_MASK 0xC0
+#define LDO3OUT_MASK 0x0F
+
+/* PCA9450_REG_LDO4_VOLT bits */
+#define LDO4_EN_MASK 0xC0
+#define LDO4OUT_MASK 0x0F
+
+/* PCA9450_REG_LDO5_VOLT bits */
+#define LDO5L_EN_MASK 0xC0
+#define LDO5LOUT_MASK 0x0F
+
+#define LDO5H_EN_MASK 0xC0
+#define LDO5HOUT_MASK 0x0F
+
+/* PCA9450_REG_IRQ bits */
+#define IRQ_PWRON 0x80
+#define IRQ_WDOGB 0x40
+#define IRQ_RSVD 0x20
+#define IRQ_VR_FLT1 0x10
+#define IRQ_VR_FLT2 0x08
+#define IRQ_LOWVSYS 0x04
+#define IRQ_THERM_105 0x02
+#define IRQ_THERM_125 0x01
+
+#endif /* __LINUX_REG_PCA9450_H__ */
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index e7b7bab8b235..2fa68bf5aa4f 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -38,6 +38,7 @@
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
+#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/idr.h>
#include <linux/of.h>
@@ -359,6 +360,7 @@ enum rsc_handling_status {
* @unprepare: unprepare device after stop
* @start: power on the device and boot it
* @stop: power off the device
+ * @attach: attach to a device that is already powered up
* @kick: kick a virtqueue (virtqueue id given as a parameter)
* @da_to_va: optional platform hook to perform address translations
* @parse_fw: parse firmware to extract information (e.g. resource table)
@@ -379,6 +381,7 @@ struct rproc_ops {
int (*unprepare)(struct rproc *rproc);
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
+ int (*attach)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len);
int (*parse_fw)(struct rproc *rproc, const struct firmware *fw);
@@ -400,6 +403,8 @@ struct rproc_ops {
* @RPROC_RUNNING: device is up and running
* @RPROC_CRASHED: device has crashed; need to start recovery
* @RPROC_DELETED: device is deleted
+ * @RPROC_DETACHED: device has been booted by another entity and is waiting
+ * for the core to attach to it
* @RPROC_LAST: just keep this one at the end
*
* Please note that the values of these states are used as indices
@@ -414,7 +419,8 @@ enum rproc_state {
RPROC_RUNNING = 2,
RPROC_CRASHED = 3,
RPROC_DELETED = 4,
- RPROC_LAST = 5,
+ RPROC_DETACHED = 5,
+ RPROC_LAST = 6,
};
/**
@@ -435,6 +441,20 @@ enum rproc_crash_type {
};
/**
+ * enum rproc_dump_mechanism - Coredump options for core
+ * @RPROC_COREDUMP_DEFAULT: Copy dump to separate buffer and carry on with
+ *                          recovery
+ * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall
+ *                         recovery until all segments are read
+ * @RPROC_COREDUMP_DISABLED: Don't perform any dump
+ */
+enum rproc_dump_mechanism {
+ RPROC_COREDUMP_DEFAULT,
+ RPROC_COREDUMP_INLINE,
+ RPROC_COREDUMP_DISABLED,
+};
+
+/**
* struct rproc_dump_segment - segment info from ELF header
* @node: list node related to the rproc segment list
* @da: device address of the segment
@@ -451,7 +471,7 @@ struct rproc_dump_segment {
void *priv;
void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment,
- void *dest);
+ void *dest, size_t offset, size_t size);
loff_t offset;
};
@@ -466,6 +486,7 @@ struct rproc_dump_segment {
* @dev: virtual device for refcounting and common remoteproc behavior
* @power: refcount of users who need this rproc powered up
* @state: state of the device
+ * @dump_conf: Currently selected coredump configuration
* @lock: lock which protects concurrent manipulations of the rproc
* @dbg_dir: debugfs directory of this rproc device
* @traces: list of trace buffers
@@ -486,8 +507,11 @@ struct rproc_dump_segment {
* @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
* @auto_boot: flag to indicate if remote processor should be auto-started
+ * @autonomous: true if an external entity has booted the remote processor
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdev currently handled by rproc
+ * @cdev: character device of the rproc
+ * @cdev_put_on_release: flag to indicate if remoteproc should be shut down on @cdev release
*/
struct rproc {
struct list_head node;
@@ -499,6 +523,7 @@ struct rproc {
struct device dev;
atomic_t power;
unsigned int state;
+ enum rproc_dump_mechanism dump_conf;
struct mutex lock;
struct dentry *dbg_dir;
struct list_head traces;
@@ -519,10 +544,13 @@ struct rproc {
size_t table_sz;
bool has_iommu;
bool auto_boot;
+ bool autonomous;
struct list_head dump_segments;
int nb_vdev;
u8 elf_class;
u16 elf_machine;
+ struct cdev cdev;
+ bool cdev_put_on_release;
};
/**
@@ -603,6 +631,7 @@ void rproc_put(struct rproc *rproc);
int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
void rproc_free(struct rproc *rproc);
+void rproc_resource_cleanup(struct rproc *rproc);
struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
@@ -630,7 +659,8 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc,
dma_addr_t da, size_t size,
void (*dumpfn)(struct rproc *rproc,
struct rproc_dump_segment *segment,
- void *dest),
+ void *dest, size_t offset,
+ size_t size),
void *priv);
int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine);
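Because the custom dump callback above now receives an offset and a size, segment dumpers are expected to copy only the requested window rather than the whole segment. A hedged sketch; my_segment_va() is a hypothetical device-address-to-kernel-VA translation helper, not part of this patch.

static void my_segment_dump(struct rproc *rproc,
			    struct rproc_dump_segment *segment,
			    void *dest, size_t offset, size_t size)
{
	/* Copy just the [offset, offset + size) window of the segment. */
	memcpy(dest, my_segment_va(rproc, segment->da) + offset, size);
}

/* Registered with rproc_coredump_add_custom_segment(rproc, da, size,
 * my_segment_dump, priv) as before; only the callback signature changed.
 */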
diff --git a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h
deleted file mode 100644
index 0820edc0ab7d..000000000000
--- a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* Copyright (C) 2019 Linaro Ltd. */
-
-#ifndef __QCOM_Q6V5_IPA_NOTIFY_H__
-#define __QCOM_Q6V5_IPA_NOTIFY_H__
-
-#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY)
-
-#include <linux/remoteproc.h>
-
-enum qcom_rproc_event {
- MODEM_STARTING = 0, /* Modem is about to be started */
- MODEM_RUNNING = 1, /* Startup complete; modem is operational */
- MODEM_STOPPING = 2, /* Modem is about to shut down */
- MODEM_CRASHED = 3, /* Modem has crashed (implies stopping) */
- MODEM_OFFLINE = 4, /* Modem is now offline */
- MODEM_REMOVING = 5, /* Modem is about to be removed */
-};
-
-typedef void (*qcom_ipa_notify_t)(void *data, enum qcom_rproc_event event);
-
-struct qcom_rproc_ipa_notify {
- struct rproc_subdev subdev;
-
- qcom_ipa_notify_t notify;
- void *data;
-};
-
-/**
- * qcom_add_ipa_notify_subdev() - Register IPA notification subdevice
- * @rproc: rproc handle
- * @ipa_notify: IPA notification subdevice handle
- *
- * Register the @ipa_notify subdevice with the @rproc so modem events
- * can be sent to IPA when they occur.
- *
- * This is defined in "qcom_q6v5_ipa_notify.c".
- */
-void qcom_add_ipa_notify_subdev(struct rproc *rproc,
- struct qcom_rproc_ipa_notify *ipa_notify);
-
-/**
- * qcom_remove_ipa_notify_subdev() - Remove IPA SSR subdevice
- * @rproc: rproc handle
- * @ipa_notify: IPA notification subdevice handle
- *
- * This is defined in "qcom_q6v5_ipa_notify.c".
- */
-void qcom_remove_ipa_notify_subdev(struct rproc *rproc,
- struct qcom_rproc_ipa_notify *ipa_notify);
-
-/**
- * qcom_register_ipa_notify() - Register IPA notification function
- * @rproc: Remote processor handle
- * @notify: Non-null IPA notification callback function pointer
- * @data: Data supplied to IPA notification callback function
- *
- * @Return: 0 if successful, or a negative error code otherwise
- *
- * This is defined in "qcom_q6v5_mss.c".
- */
-int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify,
- void *data);
-/**
- * qcom_deregister_ipa_notify() - Deregister IPA notification function
- * @rproc: Remote processor handle
- *
- * This is defined in "qcom_q6v5_mss.c".
- */
-void qcom_deregister_ipa_notify(struct rproc *rproc);
-
-#else /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
-
-struct qcom_rproc_ipa_notify { /* empty */ };
-
-#define qcom_add_ipa_notify_subdev(rproc, ipa_notify) /* no-op */
-#define qcom_remove_ipa_notify_subdev(rproc, ipa_notify) /* no-op */
-
-#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */
-
-#endif /* !__QCOM_Q6V5_IPA_NOTIFY_H__ */
diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h
index fa8e38681b4b..647051662174 100644
--- a/include/linux/remoteproc/qcom_rproc.h
+++ b/include/linux/remoteproc/qcom_rproc.h
@@ -5,17 +5,43 @@ struct notifier_block;
#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
-int qcom_register_ssr_notifier(struct notifier_block *nb);
-void qcom_unregister_ssr_notifier(struct notifier_block *nb);
+/**
+ * enum qcom_ssr_notify_type - Startup/Shutdown events related to a remote
+ * processor.
+ *
+ * @QCOM_SSR_BEFORE_POWERUP: Remoteproc about to start (prepare stage)
+ * @QCOM_SSR_AFTER_POWERUP: Remoteproc is running (start stage)
+ * @QCOM_SSR_BEFORE_SHUTDOWN: Remoteproc crashed or shutting down (stop stage)
+ * @QCOM_SSR_AFTER_SHUTDOWN: Remoteproc is down (unprepare stage)
+ */
+enum qcom_ssr_notify_type {
+ QCOM_SSR_BEFORE_POWERUP,
+ QCOM_SSR_AFTER_POWERUP,
+ QCOM_SSR_BEFORE_SHUTDOWN,
+ QCOM_SSR_AFTER_SHUTDOWN,
+};
+
+struct qcom_ssr_notify_data {
+ const char *name;
+ bool crashed;
+};
+
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb);
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb);
#else
-static inline int qcom_register_ssr_notifier(struct notifier_block *nb)
+static inline void *qcom_register_ssr_notifier(const char *name,
+ struct notifier_block *nb)
{
- return 0;
+ return NULL;
}
-static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {}
+static inline int qcom_unregister_ssr_notifier(void *notify,
+ struct notifier_block *nb)
+{
+ return 0;
+}
#endif
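With the interface above, SSR notifiers are now registered per remote processor by name, and the returned cookie must be kept for unregistration. A hedged sketch; the "mpss" name and the callback body are illustrative only.

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/remoteproc/qcom_rproc.h>

static int my_ssr_cb(struct notifier_block *nb, unsigned long event,
		     void *data)
{
	struct qcom_ssr_notify_data *info = data;

	if (event == QCOM_SSR_AFTER_SHUTDOWN && info->crashed)
		pr_info("%s went down after a crash\n", info->name);
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_ssr_cb };
static void *ssr_cookie;

static int my_register(void)
{
	ssr_cookie = qcom_register_ssr_notifier("mpss", &my_nb);
	return PTR_ERR_OR_ZERO(ssr_cookie);
}

static void my_unregister(void)
{
	qcom_unregister_ssr_notifier(ssr_cookie, &my_nb);
}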
diff --git a/include/linux/reset/reset-simple.h b/include/linux/reset/reset-simple.h
new file mode 100644
index 000000000000..c3e44f45b0f7
--- /dev/null
+++ b/include/linux/reset/reset-simple.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Simple Reset Controller ops
+ *
+ * Based on Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#ifndef __RESET_SIMPLE_H__
+#define __RESET_SIMPLE_H__
+
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct reset_simple_data - driver data for simple reset controllers
+ * @lock: spinlock to protect registers during read-modify-write cycles
+ * @membase: memory mapped I/O register range
+ * @rcdev: reset controller device base structure
+ * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits
+ * are set to assert the reset. Note that this says nothing about
+ * the voltage level of the actual reset line.
+ * @status_active_low: if true, bits read back as cleared while the reset is
+ * asserted. Otherwise, bits read back as set while the
+ * reset is asserted.
+ * @reset_us: Minimum delay in microseconds that needs to be
+ * waited for between an assert and a deassert to reset the
+ * device. If multiple consumers with different delay
+ * requirements are connected to this controller, it must
+ * be the largest minimum delay. 0 means that such a delay is
+ * unknown and the reset operation is unsupported.
+ */
+struct reset_simple_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+ bool active_low;
+ bool status_active_low;
+ unsigned int reset_us;
+};
+
+extern const struct reset_control_ops reset_simple_ops;
+
+#endif /* __RESET_SIMPLE_H__ */
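A minimal sketch of a platform driver reusing reset_simple_ops with the structure above; the register layout, line count and my_* names are illustrative assumptions:

/* Hedged sketch: embedding struct reset_simple_data in a driver. */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset/reset-simple.h>
#include <linux/slab.h>

static int my_reset_probe(struct platform_device *pdev)
{
	struct reset_simple_data *data;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(data->membase))
		return PTR_ERR(data->membase);

	spin_lock_init(&data->lock);
	data->rcdev.owner = THIS_MODULE;
	data->rcdev.nr_resets = 32;		/* illustrative */
	data->rcdev.ops = &reset_simple_ops;
	data->rcdev.of_node = pdev->dev.of_node;
	data->active_low = true;		/* assert by clearing the bit */

	return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}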
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index d3432ee65de7..68dab3e08aad 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -84,7 +84,7 @@ struct bucket_table {
struct lockdep_map dep_map;
- struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
+ struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/*
@@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void *arg);
void rhashtable_destroy(struct rhashtable *ht);
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash);
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash);
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
- struct bucket_table *tbl,
- unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+ struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
-static inline struct rhash_lock_head *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
const struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_lock_head **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_lock_head **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -325,7 +324,7 @@ static inline struct rhash_lock_head **rht_bucket_insert(
*/
static inline void rht_lock(struct bucket_table *tbl,
- struct rhash_lock_head **bkt)
+ struct rhash_lock_head __rcu **bkt)
{
local_bh_disable();
bit_spin_lock(0, (unsigned long *)bkt);
@@ -333,7 +332,7 @@ static inline void rht_lock(struct bucket_table *tbl,
}
static inline void rht_lock_nested(struct bucket_table *tbl,
- struct rhash_lock_head **bucket,
+ struct rhash_lock_head __rcu **bucket,
unsigned int subclass)
{
local_bh_disable();
@@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl,
}
static inline void rht_unlock(struct bucket_table *tbl,
- struct rhash_lock_head **bkt)
+ struct rhash_lock_head __rcu **bkt)
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
local_bh_enable();
}
-static inline struct rhash_head __rcu *__rht_ptr(
- struct rhash_lock_head *const *bkt)
+static inline struct rhash_head *__rht_ptr(
+ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
{
- return (struct rhash_head __rcu *)
- ((unsigned long)*bkt & ~BIT(0) ?:
+ return (struct rhash_head *)
+ ((unsigned long)p & ~BIT(0) ?:
(unsigned long)RHT_NULLS_MARKER(bkt));
}
@@ -365,47 +364,41 @@ static inline struct rhash_head __rcu *__rht_ptr(
* access is guaranteed, such as when destroying the table.
*/
static inline struct rhash_head *rht_ptr_rcu(
- struct rhash_lock_head *const *bkt)
+ struct rhash_lock_head __rcu *const *bkt)
{
- struct rhash_head __rcu *p = __rht_ptr(bkt);
-
- return rcu_dereference(p);
+ return __rht_ptr(rcu_dereference(*bkt), bkt);
}
static inline struct rhash_head *rht_ptr(
- struct rhash_lock_head *const *bkt,
+ struct rhash_lock_head __rcu *const *bkt,
struct bucket_table *tbl,
unsigned int hash)
{
- return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
+ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
}
static inline struct rhash_head *rht_ptr_exclusive(
- struct rhash_lock_head *const *bkt)
+ struct rhash_lock_head __rcu *const *bkt)
{
- return rcu_dereference_protected(__rht_ptr(bkt), 1);
+ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
}
-static inline void rht_assign_locked(struct rhash_lock_head **bkt,
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj)
{
- struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
if (rht_is_a_nulls(obj))
obj = NULL;
- rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
+ rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
}
static inline void rht_assign_unlock(struct bucket_table *tbl,
- struct rhash_lock_head **bkt,
+ struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj)
{
- struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
if (rht_is_a_nulls(obj))
obj = NULL;
lock_map_release(&tbl->dep_map);
- rcu_assign_pointer(*p, obj);
+ rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
__release(bitlock);
local_bh_enable();
@@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
- struct rhash_lock_head *const *bkt;
+ struct rhash_lock_head __rcu *const *bkt;
struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast(
.ht = ht,
.key = key,
};
- struct rhash_lock_head **bkt;
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
@@ -995,7 +988,7 @@ static inline int __rhashtable_remove_fast_one(
struct rhash_head *obj, const struct rhashtable_params params,
bool rhlist)
{
- struct rhash_lock_head **bkt;
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
unsigned int hash;
@@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast(
struct rhash_head *obj_old, struct rhash_head *obj_new,
const struct rhashtable_params params)
{
- struct rhash_lock_head **bkt;
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
unsigned int hash;
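The __rcu annotations above only touch the internal bucket helpers; ordinary rhashtable users are unaffected. A hedged sketch of a typical consumer, with illustrative my_* names and parameters:

/* Hedged sketch of a typical rhashtable consumer. */
#include <linux/rhashtable.h>

struct my_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params my_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_obj, key),
	.head_offset	= offsetof(struct my_obj, node),
	.automatic_shrinking = true,
};

static bool my_contains(struct rhashtable *ht, u32 key)
{
	bool found;

	rcu_read_lock();
	found = rhashtable_lookup_fast(ht, &key, my_params) != NULL;
	rcu_read_unlock();

	return found;
}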
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index c76b2f3b3ac4..136ea0997e6d 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -143,6 +143,7 @@ bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index 7b22366d0065..8ed37f93f3c8 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -206,7 +206,7 @@ struct rmi_device_platform_data_spi {
*
* @reset_delay_ms - after issuing a reset command to the touch sensor, the
* driver waits a few milliseconds to give the firmware a chance to
- * to re-initialize. You can override the default wait period here.
+ * re-initialize. You can override the default wait period here.
* @irq: irq associated with the attn gpio line, or negative
*/
struct rmi_device_platform_data {
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index bba3db3f7efa..22d1575e4991 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -55,10 +55,6 @@ extern struct class *rtc_class;
*
* The (current) exceptions are mostly filesystem hooks:
* - the proc() hook for procfs
- * - non-ioctl() chardev hooks: open(), release()
- *
- * REVISIT those periodic irq calls *do* have ops_lock when they're
- * issued through ioctl() ...
*/
struct rtc_class_ops {
int (*ioctl)(struct device *, unsigned int, unsigned long);
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index e8780d4e4636..745f5e73f99a 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -99,18 +99,6 @@
#define rtsx_pci_readb(pcr, reg) \
ioread8((pcr)->remap_addr + reg)
-#define rtsx_pci_read_config_byte(pcr, where, val) \
- pci_read_config_byte((pcr)->pci, where, val)
-
-#define rtsx_pci_write_config_byte(pcr, where, val) \
- pci_write_config_byte((pcr)->pci, where, val)
-
-#define rtsx_pci_read_config_dword(pcr, where, val) \
- pci_read_config_dword((pcr)->pci, where, val)
-
-#define rtsx_pci_write_config_dword(pcr, where, val) \
- pci_write_config_dword((pcr)->pci, where, val)
-
#define STATE_TRANS_NONE 0
#define STATE_TRANS_CMD 1
#define STATE_TRANS_BUF 2
@@ -305,6 +293,8 @@
#define SD30_CLK_STOP_CFG0 0x01
#define REG_PRE_RW_MODE 0xFD70
#define EN_INFINITE_MODE 0x01
+#define REG_CRC_DUMMY_0 0xFD71
+#define CFG_SD_POW_AUTO_PD (1<<0)
#define SRCTL 0xFC13
@@ -599,6 +589,7 @@
#define ASPM_FORCE_CTL 0xFE57
#define FORCE_ASPM_CTL0 0x10
+#define FORCE_ASPM_CTL1 0x20
#define FORCE_ASPM_VAL_MASK 0x03
#define FORCE_ASPM_L1_EN 0x02
#define FORCE_ASPM_L0_EN 0x01
@@ -667,6 +658,11 @@
#define PM_WAKE_EN 0x01
#define PM_CTRL4 0xFF47
+#define REG_CFG_OOBS_OFF_TIMER 0xFEA6
+#define REG_CFG_OOBS_ON_TIMER 0xFEA7
+#define REG_CFG_VCM_ON_TIMER 0xFEA8
+#define REG_CFG_OOBS_POLLING 0xFEA9
+
/* Memory mapping */
#define SRAM_BASE 0xE600
#define RBUF_BASE 0xF400
@@ -1041,10 +1037,6 @@
#define PHY_DIG1E_RX_EN_KEEP 0x0001
#define PHY_DUM_REG 0x1F
-#define PCR_ASPM_SETTING_REG1 0x160
-#define PCR_ASPM_SETTING_REG2 0x168
-#define PCR_ASPM_SETTING_5260 0x178
-
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
@@ -1091,11 +1083,6 @@ struct pcr_ops {
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
-#define ASPM_L1_1_EN_MASK BIT(3)
-#define ASPM_L1_2_EN_MASK BIT(2)
-#define PM_L1_1_EN_MASK BIT(1)
-#define PM_L1_2_EN_MASK BIT(0)
-
#define ASPM_L1_1_EN BIT(0)
#define ASPM_L1_2_EN BIT(1)
#define PM_L1_1_EN BIT(2)
@@ -1158,7 +1145,6 @@ struct rtsx_hw_param {
struct rtsx_pcr {
struct pci_dev *pci;
unsigned int id;
- int pcie_cap;
struct rtsx_cr_option option;
struct rtsx_hw_param hw_param;
@@ -1204,6 +1190,7 @@ struct rtsx_pcr {
#define EXTRA_CAPS_MMC_HSDDR (1 << 3)
#define EXTRA_CAPS_MMC_HS200 (1 << 4)
#define EXTRA_CAPS_MMC_8BIT (1 << 5)
+#define EXTRA_CAPS_NO_MMC (1 << 7)
u32 extra_caps;
#define IC_VER_A 0
@@ -1242,6 +1229,7 @@ struct rtsx_pcr {
u8 dma_error_count;
u8 ocp_stat;
u8 ocp_stat2;
+ u8 rtd3_en;
};
#define PID_524A 0x524A
@@ -1250,6 +1238,7 @@ struct rtsx_pcr {
#define PID_525A 0x525A
#define PID_5260 0x5260
#define PID_5261 0x5261
+#define PID_5228 0x5228
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 7e5b2a4eb560..25e3fde85617 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -60,39 +60,39 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
}
#define RWSEM_UNLOCKED_VALUE 0L
-#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
/* Common initializer macros and functions */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) \
- , .dep_map = { \
+ .dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SLEEP, \
- }
+ },
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
#ifdef CONFIG_DEBUG_RWSEMS
-# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
+# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
-# define __DEBUG_RWSEM_INITIALIZER(lockname)
+# define __RWSEM_DEBUG_INIT(lockname)
#endif
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
+#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif
#define __RWSEM_INITIALIZER(name) \
- { __RWSEM_INIT_COUNT(name), \
+ { __RWSEM_COUNT_INIT(name), \
.owner = ATOMIC_LONG_INIT(0), \
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
__RWSEM_OPT_INIT(name) \
- __DEBUG_RWSEM_INITIALIZER(name) \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ __RWSEM_DEBUG_INIT(name) \
__RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
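The initializer reshuffle above (trailing commas, reordered fields) is transparent to users because the fields are designated; a short sketch of the unchanged usage, with illustrative my_* names:

/* Hedged sketch: declaring and using a statically initialized rwsem. */
#include <linux/rwsem.h>

static DECLARE_RWSEM(my_config_rwsem);	/* expands to __RWSEM_INITIALIZER() */

static void my_read_config(void)
{
	down_read(&my_config_rwsem);
	/* ... read shared configuration ... */
	up_read(&my_config_rwsem);
}

static void my_update_config(void)
{
	down_write(&my_config_rwsem);
	/* ... modify shared configuration ... */
	up_write(&my_config_rwsem);
}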
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 683372943093..93ecd930efd3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -18,6 +18,7 @@
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
+#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
@@ -31,6 +32,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
+#include <linux/seqlock.h>
#include <linux/kcsan.h>
/* task_struct member predeclarations (sorted alphabetically): */
@@ -154,24 +156,24 @@ struct task_group;
*
* for (;;) {
* set_current_state(TASK_UNINTERRUPTIBLE);
- * if (!need_sleep)
- * break;
+ * if (CONDITION)
+ * break;
*
* schedule();
* }
* __set_current_state(TASK_RUNNING);
*
* If the caller does not need such serialisation (because, for instance, the
- * condition test and condition change and wakeup are under the same lock) then
+ * CONDITION test and condition change and wakeup are under the same lock) then
* use __set_current_state().
*
* The above is typically ordered against the wakeup, which does:
*
- * need_sleep = false;
+ * CONDITION = 1;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
- * where wake_up_state() executes a full memory barrier before accessing the
- * task state.
+ * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
+ * accessing p->state.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
@@ -374,7 +376,7 @@ struct util_est {
* For cfs_rq, they are the aggregated values of all runnable and blocked
* sched_entities.
*
- * The load/runnable/util_avg doesn't direcly factor frequency scaling and CPU
+ * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
* capacity scaling. The scaling is done through the rq_clock_pelt that is used
* for computing those signals (see update_rq_clock_pelt())
*
@@ -686,9 +688,15 @@ struct task_struct {
struct sched_dl_entity dl;
#ifdef CONFIG_UCLAMP_TASK
- /* Clamp values requested for a scheduling entity */
+ /*
+ * Clamp values requested for a scheduling entity.
+ * Must be updated with task_rq_lock() held.
+ */
struct uclamp_se uclamp_req[UCLAMP_CNT];
- /* Effective clamp values used for a scheduling entity */
+ /*
+ * Effective clamp values used for a scheduling entity.
+ * Must be updated with task_rq_lock() held.
+ */
struct uclamp_se uclamp[UCLAMP_CNT];
#endif
@@ -882,6 +890,10 @@ struct task_struct {
/* Empty if CONFIG_POSIX_CPUTIMERS=n */
struct posix_cputimers posix_cputimers;
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+ struct posix_cputimers_work posix_cputimers_work;
+#endif
+
/* Process credentials: */
/* Tracer's credentials at attach: */
@@ -980,19 +992,9 @@ struct task_struct {
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
- unsigned int irq_events;
+ struct irqtrace_events irqtrace;
unsigned int hardirq_threaded;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
- int hardirqs_enabled;
- int hardirq_context;
u64 hardirq_chain_key;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
int softirqs_enabled;
int softirq_context;
int irq_config;
@@ -1052,7 +1054,7 @@ struct task_struct {
/* Protected by ->alloc_lock: */
nodemask_t mems_allowed;
 	/* Sequence number to catch updates: */
- seqcount_t mems_allowed_seq;
+ seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
@@ -1193,8 +1195,12 @@ struct task_struct {
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
#endif
+
#ifdef CONFIG_KCSAN
struct kcsan_ctx kcsan_ctx;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ struct irqtrace_events kcsan_save_irqtrace;
+#endif
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -1506,7 +1512,6 @@ extern struct pid *cad_pid;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */
@@ -1648,6 +1653,9 @@ extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
+extern void sched_set_fifo(struct task_struct *p);
+extern void sched_set_fifo_low(struct task_struct *p);
+extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
@@ -2015,14 +2023,6 @@ static inline void rseq_execve(struct task_struct *t)
#endif
-void __exit_umh(struct task_struct *tsk);
-
-static inline void exit_umh(struct task_struct *tsk)
-{
- if (unlikely(tsk->flags & PF_UMH))
- __exit_umh(tsk);
-}
-
#ifdef CONFIG_DEBUG_RSEQ
void rseq_syscall(struct pt_regs *regs);
@@ -2044,6 +2044,7 @@ const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
int sched_trace_rq_cpu(struct rq *rq);
+int sched_trace_rq_nr_running(struct rq *rq);
const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
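A minimal sketch of how a kernel thread might use the sched_set_fifo()/sched_set_normal() helpers added above, letting the core choose the RT priority instead of hardcoding one; my_worker and the sleep interval are illustrative:

/* Hedged sketch: a kthread opting into FIFO scheduling. */
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int my_worker(void *arg)
{
	sched_set_fifo(current);	/* core picks a sane RT priority */

	while (!kthread_should_stop())
		msleep(1000);		/* illustrative periodic work */

	sched_set_normal(current, 0);	/* back to SCHED_NORMAL, nice 0 */
	return 0;
}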
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index 0fbcbacd1b29..cc9f393e2a70 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -14,6 +14,7 @@ enum hk_flags {
HK_FLAG_DOMAIN = (1 << 5),
HK_FLAG_WQ = (1 << 6),
HK_FLAG_MANAGED_IRQ = (1 << 7),
+ HK_FLAG_KTHREAD = (1 << 8),
};
#ifdef CONFIG_CPU_ISOLATION
diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
index 4859bea47a7b..83ec54b65e79 100644
--- a/include/linux/sched/loadavg.h
+++ b/include/linux/sched/loadavg.h
@@ -43,6 +43,6 @@ extern unsigned long calc_load_n(unsigned long load, unsigned long exp,
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
-extern void calc_global_load(unsigned long ticks);
+extern void calc_global_load(void);
#endif /* _LINUX_SCHED_LOADAVG_H */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 480a4d1b7dd8..f889e332912f 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -23,7 +23,7 @@ extern struct mm_struct *mm_alloc(void);
* will still exist later on and mmget_not_zero() has to be used before
* accessing it.
*
- * This is a preferred way to to pin @mm for a longer/unbounded amount
+ * This is a preferred way to pin @mm for a longer/unbounded amount
* of time.
*
* Use mmdrop() to release the reference acquired by mmgrab().
@@ -49,8 +49,6 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
-void mmdrop(struct mm_struct *mm);
-
/*
* This has to be called after a get_task_mm()/mmget_not_zero()
* followed by taking the mmap_lock for writing before modifying the
@@ -177,24 +175,20 @@ static inline bool in_vfork(struct task_struct *tsk)
* Applies per-task gfp context to the given allocation flags.
* PF_MEMALLOC_NOIO implies GFP_NOIO
* PF_MEMALLOC_NOFS implies GFP_NOFS
- * PF_MEMALLOC_NOCMA implies no allocation from CMA region.
*/
static inline gfp_t current_gfp_context(gfp_t flags)
{
- if (unlikely(current->flags &
- (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) {
+ unsigned int pflags = READ_ONCE(current->flags);
+
+ if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
/*
* NOIO implies both NOIO and NOFS and it is a weaker context
 		 * so always make sure it takes precedence
*/
- if (current->flags & PF_MEMALLOC_NOIO)
+ if (pflags & PF_MEMALLOC_NOIO)
flags &= ~(__GFP_IO | __GFP_FS);
- else if (current->flags & PF_MEMALLOC_NOFS)
+ else if (pflags & PF_MEMALLOC_NOFS)
flags &= ~__GFP_FS;
-#ifdef CONFIG_CMA
- if (current->flags & PF_MEMALLOC_NOCMA)
- flags &= ~__GFP_MOVABLE;
-#endif
}
return flags;
}
@@ -234,7 +228,7 @@ static inline unsigned int memalloc_noio_save(void)
* @flags: Flags to restore.
*
* Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
* pairing memalloc_noio_save call.
*/
static inline void memalloc_noio_restore(unsigned int flags)
@@ -265,7 +259,7 @@ static inline unsigned int memalloc_nofs_save(void)
* @flags: Flags to restore.
*
* Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
- * Always make sure that that the given flags is the return value from the
+ * Always make sure that the given flags is the return value from the
* pairing memalloc_nofs_save call.
*/
static inline void memalloc_nofs_restore(unsigned int flags)
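A minimal sketch of the scoped-GFP pattern these save/restore helpers describe; my_fs_alloc is an illustrative name:

/* Hedged sketch: scoped GFP_NOFS via memalloc_nofs_save()/restore(). */
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *my_fs_alloc(size_t size)
{
	unsigned int flags;
	void *p;

	flags = memalloc_nofs_save();	/* allocations below behave as GFP_NOFS */
	p = kmalloc(size, GFP_KERNEL);	/* __GFP_FS stripped by current_gfp_context() */
	memalloc_nofs_restore(flags);	/* must be passed the saved value */

	return p;
}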
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0ee5e696c5d8..1bad18a1d8ba 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -674,6 +674,8 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
+extern bool thread_group_exited(struct pid *pid);
+
extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
unsigned long *flags);
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 660ac49f2b53..3c31ba88aca5 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -61,9 +61,13 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
+extern unsigned int sysctl_sched_dl_period_max;
+extern unsigned int sysctl_sched_dl_period_min;
+
#ifdef CONFIG_UCLAMP_TASK
extern unsigned int sysctl_sched_uclamp_util_min;
extern unsigned int sysctl_sched_uclamp_util_max;
+extern unsigned int sysctl_sched_uclamp_util_min_rt_default;
#endif
#ifdef CONFIG_CFS_BANDWIDTH
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 38359071236a..a98965007eef 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -55,6 +55,7 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
@@ -65,22 +66,9 @@ extern void fork_init(void);
extern void release_task(struct task_struct * p);
-#ifdef CONFIG_HAVE_COPY_THREAD_TLS
-extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
- struct task_struct *, unsigned long);
-#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
- struct task_struct *);
+ struct task_struct *, unsigned long);
-/* Architectures that haven't opted into copy_thread_tls get the tls argument
- * via pt_regs, so ignore the tls argument passed via C. */
-static inline int copy_thread_tls(
- unsigned long clone_flags, unsigned long sp, unsigned long arg,
- struct task_struct *p, unsigned long tls)
-{
- return copy_thread(clone_flags, sp, arg, p);
-}
-#endif
extern void flush_thread(void);
#ifdef CONFIG_HAVE_EXIT_THREAD
@@ -96,12 +84,11 @@ extern void exit_files(struct task_struct *);
extern void exit_itimers(struct signal_struct *);
extern long _do_fork(struct kernel_clone_args *kargs);
-extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs);
-extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
+int kernel_wait(pid_t pid, int *stat);
extern void free_task(struct task_struct *tsk);
@@ -126,6 +113,12 @@ static inline void put_task_struct(struct task_struct *t)
__put_task_struct(t);
}
+static inline void put_task_struct_many(struct task_struct *t, int nr)
+{
+ if (refcount_sub_and_test(nr, &t->usage))
+ __put_task_struct(t);
+}
+
void put_task_struct_rcu_user(struct task_struct *task);
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index fb11091129b3..820511289857 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -217,6 +217,16 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
#endif /* !CONFIG_SMP */
#ifndef arch_scale_cpu_capacity
+/**
+ * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
+ * @cpu: the CPU in question.
+ *
+ * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
+ *
+ * max_perf(cpu)
+ * ----------------------------- * SCHED_CAPACITY_SCALE
+ * max(max_perf(c) : c \in CPUs)
+ */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
@@ -232,6 +242,13 @@ unsigned long arch_scale_thermal_pressure(int cpu)
}
#endif
+#ifndef arch_set_thermal_pressure
+static __always_inline
+void arch_set_thermal_pressure(const struct cpumask *cpus,
+ unsigned long th_pressure)
+{ }
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 917d88edb7b9..a8ec3b6093fc 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -36,6 +36,9 @@ struct user_struct {
defined(CONFIG_NET) || defined(CONFIG_IO_URING)
atomic_long_t locked_vm;
#endif
+#ifdef CONFIG_WATCH_QUEUE
+ atomic_t nr_watches; /* The number of watches this user currently has */
+#endif
/* Miscellaneous per-user rate limit */
struct ratelimit_state ratelimit;
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 0bb04a96a6d4..528718e4ed52 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -6,6 +6,34 @@
#define LINUX_SCHED_CLOCK
#ifdef CONFIG_GENERIC_SCHED_CLOCK
+/**
+ * struct clock_read_data - data required to read from sched_clock()
+ *
+ * @epoch_ns: sched_clock() value at last update
+ * @epoch_cyc: Clock cycle value at last update.
+ * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit
+ * clocks.
+ * @read_sched_clock: Current clock source (or dummy source when suspended).
+ * @mult: Multiplier for scaled math conversion.
+ * @shift: Shift value for scaled math conversion.
+ *
+ * Care must be taken when updating this structure; it is read by
+ * some very hot code paths. It occupies <=40 bytes and, when combined
+ * with the seqcount used to synchronize access, comfortably fits into
+ * a 64 byte cache line.
+ */
+struct clock_read_data {
+ u64 epoch_ns;
+ u64 epoch_cyc;
+ u64 sched_clock_mask;
+ u64 (*read_sched_clock)(void);
+ u32 mult;
+ u32 shift;
+};
+
+extern struct clock_read_data *sched_clock_read_begin(unsigned int *seq);
+extern int sched_clock_read_retry(unsigned int seq);
+
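A hedged sketch of the intended read pattern for sched_clock_read_begin()/sched_clock_read_retry(), mirroring the cycles-to-nanoseconds conversion the generic sched_clock() performs internally; my_read_ns is an illustrative name:

/* Hedged sketch: retry loop around the exported clock_read_data. */
#include <linux/math64.h>
#include <linux/sched_clock.h>

static u64 my_read_ns(void)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 cyc, ns;

	do {
		rd = sched_clock_read_begin(&seq);
		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		ns = rd->epoch_ns + mul_u64_u32_shr(cyc, rd->mult, rd->shift);
	} while (sched_clock_read_retry(seq));

	return ns;
}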
extern void generic_sched_clock_init(void);
extern void sched_clock_register(u64 (*read)(void), int bits,
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index ce2f5c28b2df..7e5dd7d1e221 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -9,6 +9,7 @@
#define _LINUX_SCMI_PROTOCOL_H
#include <linux/device.h>
+#include <linux/notifier.h>
#include <linux/types.h>
#define SCMI_MAX_STR_SIZE 16
@@ -118,6 +119,8 @@ struct scmi_perf_ops {
unsigned long *rate, bool poll);
int (*est_power_get)(const struct scmi_handle *handle, u32 domain,
unsigned long *rate, unsigned long *power);
+ bool (*fast_switch_possible)(const struct scmi_handle *handle,
+ struct device *dev);
};
/**
@@ -173,18 +176,13 @@ enum scmi_sensor_class {
*
* @count_get: get the count of sensors provided by SCMI
* @info_get: get the information of the specified sensor
- * @trip_point_notify: control notifications on cross-over events for
- * the trip-points
* @trip_point_config: selects and configures a trip-point of interest
* @reading_get: gets the current value of the sensor
*/
struct scmi_sensor_ops {
int (*count_get)(const struct scmi_handle *handle);
-
const struct scmi_sensor_info *(*info_get)
(const struct scmi_handle *handle, u32 sensor_id);
- int (*trip_point_notify)(const struct scmi_handle *handle,
- u32 sensor_id, bool enable);
int (*trip_point_config)(const struct scmi_handle *handle,
u32 sensor_id, u8 trip_id, u64 trip_value);
int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id,
@@ -212,6 +210,49 @@ struct scmi_reset_ops {
};
/**
+ * struct scmi_notify_ops - represents notifications' operations provided by
+ * SCMI core
+ * @register_event_notifier: Register a notifier_block for the requested event
+ * @unregister_event_notifier: Unregister a notifier_block for the requested
+ * event
+ *
+ * A user can register/unregister its own notifier_block for the desired
+ * event on a given platform instance, identified by the
+ * tuple (proto_id, evt_id, src_id), using the provided register/unregister
+ * interface where:
+ *
+ * @handle: The handle identifying the platform instance to use
+ * @proto_id: The protocol ID as in SCMI Specification
+ * @evt_id: The message ID of the desired event as in SCMI Specification
+ * @src_id: A pointer to the desired source ID if different sources are
+ * possible for the protocol (like domain_id, sensor_id...etc)
+ *
+ * @src_id can be provided as NULL if it simply does NOT make sense for
+ * the protocol at hand, OR if the user is explicitly interested in
+ * receiving notifications from ANY existing source associated with the
+ * specified proto_id / evt_id.
+ *
+ * Received notifications are finally delivered to the registered users,
+ * invoking the callback provided with the notifier_block *nb as follows:
+ *
+ * int user_cb(nb, evt_id, report)
+ *
+ * with:
+ *
+ * @nb: The notifier block provided by the user
+ * @evt_id: The message ID of the delivered event
+ * @report: A custom struct describing the specific event delivered
+ */
+struct scmi_notify_ops {
+ int (*register_event_notifier)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb);
+ int (*unregister_event_notifier)(const struct scmi_handle *handle,
+ u8 proto_id, u8 evt_id, u32 *src_id,
+ struct notifier_block *nb);
+};
+
+/**
* struct scmi_handle - Handle returned to ARM SCMI clients for usage.
*
* @dev: pointer to the SCMI device
@@ -221,6 +262,7 @@ struct scmi_reset_ops {
* @clk_ops: pointer to set of clock protocol operations
* @sensor_ops: pointer to set of sensor protocol operations
* @reset_ops: pointer to set of reset protocol operations
+ * @notify_ops: pointer to set of notifications related operations
* @perf_priv: pointer to private data structure specific to performance
* protocol(for internal use only)
* @clk_priv: pointer to private data structure specific to clock
@@ -231,6 +273,8 @@ struct scmi_reset_ops {
* protocol(for internal use only)
* @reset_priv: pointer to private data structure specific to reset
* protocol(for internal use only)
+ * @notify_priv: pointer to private data structure specific to notifications
+ * (for internal use only)
*/
struct scmi_handle {
struct device *dev;
@@ -240,12 +284,14 @@ struct scmi_handle {
struct scmi_power_ops *power_ops;
struct scmi_sensor_ops *sensor_ops;
struct scmi_reset_ops *reset_ops;
+ struct scmi_notify_ops *notify_ops;
/* for protocol internal use */
void *perf_priv;
void *clk_priv;
void *power_priv;
void *sensor_priv;
void *reset_priv;
+ void *notify_priv;
};
enum scmi_std_protocol {
@@ -324,4 +370,58 @@ typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *);
int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn);
void scmi_protocol_unregister(int protocol_id);
+/* SCMI Notification API - Custom Event Reports */
+enum scmi_notification_events {
+ SCMI_EVENT_POWER_STATE_CHANGED = 0x0,
+ SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0,
+ SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1,
+ SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0,
+ SCMI_EVENT_RESET_ISSUED = 0x0,
+ SCMI_EVENT_BASE_ERROR_EVENT = 0x0,
+};
+
+struct scmi_power_state_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power_state;
+};
+
+struct scmi_perf_limits_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int range_max;
+ unsigned int range_min;
+};
+
+struct scmi_perf_level_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int performance_level;
+};
+
+struct scmi_sensor_trip_point_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int sensor_id;
+ unsigned int trip_point_desc;
+};
+
+struct scmi_reset_issued_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int reset_state;
+};
+
+struct scmi_base_error_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ bool fatal;
+ unsigned int cmd_count;
+ unsigned long long reports[];
+};
+
#endif /* _LINUX_SCMI_PROTOCOL_H */
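A minimal sketch of a consumer of the new notify_ops, registering for performance-level-changed events as described in the kernel-doc above; the my_* names are illustrative, SCMI_PROTOCOL_PERF comes from enum scmi_std_protocol in this header:

/* Hedged sketch: registering an SCMI performance-level notifier. */
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/scmi_protocol.h>

static int my_perf_level_cb(struct notifier_block *nb, unsigned long evt_id,
			    void *report)
{
	struct scmi_perf_level_report *r = report;

	pr_info("perf domain %u now at level %u\n",
		r->domain_id, r->performance_level);
	return NOTIFY_OK;
}

static struct notifier_block my_perf_nb = {
	.notifier_call = my_perf_level_cb,
};

static int my_register(const struct scmi_handle *handle, u32 domain_id)
{
	return handle->notify_ops->register_event_notifier(handle,
			SCMI_PROTOCOL_PERF,
			SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
			&domain_id, &my_perf_nb);
}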
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 4192369b8418..02aef2844c38 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -10,9 +10,14 @@
SECCOMP_FILTER_FLAG_NEW_LISTENER | \
SECCOMP_FILTER_FLAG_TSYNC_ESRCH)
+/* sizeof() the first published struct seccomp_notif_addfd */
+#define SECCOMP_NOTIFY_ADDFD_SIZE_VER0 24
+#define SECCOMP_NOTIFY_ADDFD_SIZE_LATEST SECCOMP_NOTIFY_ADDFD_SIZE_VER0
+
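For reference, the 24 bytes correspond to the first published UAPI layout of the addfd request; the field list below is reproduced from memory as a sketch, the authoritative definition lives in include/uapi/linux/seccomp.h:

/*
 * Sketch of the layout SECCOMP_NOTIFY_ADDFD_SIZE_VER0 refers to:
 *
 *	struct seccomp_notif_addfd {
 *		__u64 id;		//  8 bytes
 *		__u32 flags;		//  4 bytes
 *		__u32 srcfd;		//  4 bytes
 *		__u32 newfd;		//  4 bytes
 *		__u32 newfd_flags;	//  4 bytes
 *	};				// 24 bytes total
 */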
#ifdef CONFIG_SECCOMP
#include <linux/thread_info.h>
+#include <linux/atomic.h>
#include <asm/seccomp.h>
struct seccomp_filter;
@@ -29,6 +34,7 @@ struct seccomp_filter;
*/
struct seccomp {
int mode;
+ atomic_t filter_count;
struct seccomp_filter *filter;
};
@@ -58,9 +64,11 @@ static inline int seccomp_mode(struct seccomp *s)
struct seccomp { };
struct seccomp_filter { };
+struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
static inline int secure_computing(void) { return 0; }
+static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
@@ -82,10 +90,10 @@ static inline int seccomp_mode(struct seccomp *s)
#endif /* CONFIG_SECCOMP */
#ifdef CONFIG_SECCOMP_FILTER
-extern void put_seccomp_filter(struct task_struct *tsk);
+extern void seccomp_filter_release(struct task_struct *tsk);
extern void get_seccomp_filter(struct task_struct *tsk);
#else /* CONFIG_SECCOMP_FILTER */
-static inline void put_seccomp_filter(struct task_struct *tsk)
+static inline void seccomp_filter_release(struct task_struct *tsk)
{
return;
}
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 8b97204f35a7..962d9768945f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1,48 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
+
/*
- * Reader/writer consistent mechanism without starving writers. This type of
- * lock for data where the reader wants a consistent set of information
- * and is willing to retry if the information changes. There are two types
- * of readers:
- * 1. Sequence readers which never block a writer but they may have to retry
- * if a writer is in progress by detecting change in sequence number.
- * Writers do not wait for a sequence reader.
- * 2. Locking readers which will wait if a writer or another locking reader
- * is in progress. A locking reader in progress will also block a writer
- * from going forward. Unlike the regular rwlock, the read lock here is
- * exclusive so that only one locking reader can get it.
- *
- * This is not as cache friendly as brlock. Also, this may not work well
- * for data that contains pointers, because any writer could
- * invalidate a pointer that a reader was following.
- *
- * Expected non-blocking reader usage:
- * do {
- * seq = read_seqbegin(&foo);
- * ...
- * } while (read_seqretry(&foo, seq));
+ * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
+ * lockless readers (read-only retry loops), and no writer starvation.
*
+ * See Documentation/locking/seqlock.rst
*
- * On non-SMP the spin locks disappear but the writer still needs
- * to increment the sequence variables because an interrupt routine could
- * change the state of the data.
- *
- * Based on x86_64 vsyscall gettimeofday
- * by Keith Owens and Andrea Arcangeli
+ * Copyrights:
+ * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
+ * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
*/
-#include <linux/spinlock.h>
-#include <linux/preempt.h>
-#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
#include <asm/processor.h>
/*
- * The seqlock interface does not prescribe a precise sequence of read
- * begin/retry/end. For readers, typically there is a call to
+ * The seqlock seqcount_t interface does not prescribe a precise sequence of
+ * read begin/retry/end. For readers, typically there is a call to
* read_seqcount_begin() and read_seqcount_retry(), however, there are more
* esoteric cases which do not follow this pattern.
*
@@ -50,16 +32,34 @@
* via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
* pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
* atomics; if there is a matching read_seqcount_retry() call, no following
- * memory operations are considered atomic. Usage of seqlocks via seqlock_t
- * interface is not affected.
+ * memory operations are considered atomic. Usage of the seqlock_t interface
+ * is not affected.
*/
#define KCSAN_SEQLOCK_REGION_MAX 1000
/*
- * Version using sequence counter only.
- * This can be used when code has its own mutex protecting the
- * updating starting before the write_seqcountbeqin() and ending
- * after the write_seqcount_end().
+ * Sequence counters (seqcount_t)
+ *
+ * This is the raw counting mechanism, without any writer protection.
+ *
+ * Write side critical sections must be serialized and non-preemptible.
+ *
+ * If readers can be invoked from hardirq or softirq contexts,
+ * interrupts or bottom halves must also be respectively disabled before
+ * entering the write section.
+ *
+ * This mechanism can't be used if the protected data contains pointers,
+ * as the writer can invalidate a pointer that a reader is following.
+ *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKTYPE_t) instead.
+ *
+ * If it's desired to automatically handle the sequence counter writer
+ * serialization and non-preemptibility requirements, use a sequential
+ * lock (seqlock_t) instead.
+ *
+ * See Documentation/locking/seqlock.rst
*/
typedef struct seqcount {
unsigned sequence;
@@ -79,13 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SEQCOUNT_DEP_MAP_INIT(lockname) \
- .dep_map = { .name = #lockname } \
-# define seqcount_init(s) \
- do { \
- static struct lock_class_key __key; \
- __seqcount_init((s), #s, &__key); \
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+ .dep_map = { .name = #lockname }
+
+/**
+ * seqcount_init() - runtime initializer for seqcount_t
+ * @s: Pointer to the seqcount_t instance
+ */
+# define seqcount_init(s) \
+ do { \
+ static struct lock_class_key __key; \
+ __seqcount_init((s), #s, &__key); \
} while (0)
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
@@ -105,13 +110,149 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
# define seqcount_lockdep_reader_access(x)
#endif
-#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
+/**
+ * SEQCNT_ZERO() - static initializer for seqcount_t
+ * @name: Name of the seqcount_t instance
+ */
+#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
+
+/*
+ * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
+ *
+ * A sequence counter which associates the lock used for writer
+ * serialization at initialization time. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * For associated locks which do not implicitly disable preemption,
+ * preemption protection is enforced in the write side function.
+ *
+ * Lockdep is never used in any of the raw write variants.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+
+#ifdef CONFIG_LOCKDEP
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+/**
+ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated
+ * @seqcount: The real sequence counter
+ * @lock: Pointer to the associated spinlock
+ *
+ * A plain sequence counter with external writer synchronization by a
+ * spinlock. The spinlock is associated to the sequence count in the
+ * static initializer or init function. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ */
+
+/**
+ * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+ * @s: Pointer to the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKTYPE
+ */
+
+/*
+ * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
+ * @locktype: actual typename
+ * @lockname: name
+ * @preemptible: preemptibility of above locktype
+ * @lockmember: argument for lockdep_assert_held()
+ */
+#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
+typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t; \
+ \
+static __always_inline void \
+seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
+{ \
+ seqcount_init(&s->seqcount); \
+ __SEQ_LOCK(s->lock = lock); \
+} \
+ \
+static __always_inline seqcount_t * \
+__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
+} \
+ \
+static __always_inline bool \
+__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \
+{ \
+ return preemptible; \
+} \
+ \
+static __always_inline void \
+__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
+{ \
+ __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+}
+
+/*
+ * __seqprop() for seqcount_t
+ */
+
+static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
+{
+ return s;
+}
+
+static inline bool __seqcount_preemptible(seqcount_t *s)
+{
+ return false;
+}
+
+static inline void __seqcount_assert(seqcount_t *s)
+{
+ lockdep_assert_preemption_disabled();
+}
+
+SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
+SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
+
+/**
+ * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
+ * @name: Name of the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKTYPE
+ */
+
+#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
+ __SEQ_LOCK(.lock = (assoc_lock)) \
+}
+
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define __seqprop_case(s, lockname, prop) \
+ seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+
+#define __seqprop(s, prop) _Generic(*(s), \
+ seqcount_t: __seqcount_##prop((void *)(s)), \
+ __seqprop_case((s), raw_spinlock, prop), \
+ __seqprop_case((s), spinlock, prop), \
+ __seqprop_case((s), rwlock, prop), \
+ __seqprop_case((s), mutex, prop), \
+ __seqprop_case((s), ww_mutex, prop))
+
+#define __seqcount_ptr(s) __seqprop(s, ptr)
+#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
+#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
+
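A minimal sketch of the lock-associated seqcount machinery defined above, using a seqcount_spinlock_t so that lockdep can verify the writer holds the associated lock; the my_* names are illustrative:

/* Hedged sketch: seqcount_spinlock_t writer/reader pair. */
#include <linux/seqlock.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static seqcount_spinlock_t my_seq = SEQCNT_SPINLOCK_ZERO(my_seq, &my_lock);
static u64 my_a, my_b;

static void my_write(u64 a, u64 b)
{
	spin_lock(&my_lock);
	write_seqcount_begin(&my_seq);	/* lockdep asserts my_lock is held */
	my_a = a;
	my_b = b;
	write_seqcount_end(&my_seq);
	spin_unlock(&my_lock);
}

static u64 my_read(void)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqcount_begin(&my_seq);
		sum = my_a + my_b;
	} while (read_seqcount_retry(&my_seq, seq));

	return sum;
}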
/**
- * __read_seqcount_begin - begin a seq-read critical section (without barrier)
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -120,8 +261,13 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*
* Use carefully, only in critical code, and comment how the barrier is
* provided.
+ *
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned __read_seqcount_begin(const seqcount_t *s)
+#define __read_seqcount_begin(s) \
+ __read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
{
unsigned ret;
@@ -136,80 +282,91 @@ repeat:
}
/**
- * raw_read_seqcount - Read the raw seqcount
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
- * raw_read_seqcount opens a read critical section of the given
- * seqcount without any lockdep checking and without checking or
- * masking the LSB. Calling code is responsible for handling that.
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount(const seqcount_t *s)
+#define raw_read_seqcount_begin(s) \
+ raw_read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
{
- unsigned ret = READ_ONCE(s->sequence);
+ unsigned ret = __read_seqcount_t_begin(s);
smp_rmb();
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
/**
- * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * read_seqcount_begin() - begin a seqcount_t read critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
- * raw_read_seqcount_begin opens a read critical section of the given
- * seqcount, but without any lockdep checking. Validity of the critical
- * section is tested by checking read_seqcount_retry function.
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
+#define read_seqcount_begin(s) \
+ read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
{
- unsigned ret = __read_seqcount_begin(s);
- smp_rmb();
- return ret;
+ seqcount_lockdep_reader_access(s);
+ return raw_read_seqcount_t_begin(s);
}
/**
- * read_seqcount_begin - begin a seq-read critical section
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * raw_read_seqcount() - read the raw seqcount_t counter value
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
- * read_seqcount_begin opens a read critical section of the given seqcount.
- * Validity of the critical section is tested by checking read_seqcount_retry
- * function.
+ * raw_read_seqcount opens a read critical section of the given
+ * seqcount_t, without any lockdep checking, and without checking or
+ * masking the sequence counter LSB. Calling code is responsible for
+ * handling that.
+ *
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
+#define raw_read_seqcount(s) \
+ raw_read_seqcount_t(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
{
- seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_begin(s);
+ unsigned ret = READ_ONCE(s->sequence);
+ smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
+ return ret;
}
/**
- * raw_seqcount_begin - begin a seq-read critical section
- * @s: pointer to seqcount_t
- * Returns: count to be passed to read_seqcount_retry
+ * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
+ * lockdep and w/o counter stabilization
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
- * raw_seqcount_begin opens a read critical section of the given seqcount.
- * Validity of the critical section is tested by checking read_seqcount_retry
- * function.
+ * raw_seqcount_begin opens a read critical section of the given
+ * seqcount_t. Unlike read_seqcount_begin(), this function will not wait
+ * for the count to stabilize. If a writer is active when it begins, it
+ * will fail the read_seqcount_retry() at the end of the read critical
+ * section instead of stabilizing at the beginning of it.
*
- * Unlike read_seqcount_begin(), this function will not wait for the count
- * to stabilize. If a writer is active when we begin, we will fail the
- * read_seqcount_retry() instead of stabilizing at the beginning of the
- * critical section.
+ * Use this only in special kernel hot paths where the read section is
+ * small and has a high probability of success through other external
+ * means. It will save a single branching instruction.
+ *
+ * Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+#define raw_seqcount_begin(s) \
+ raw_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret & ~1;
+ /*
+ * If the counter is odd, let read_seqcount_retry() fail
+ * by decrementing the counter.
+ */
+ return raw_read_seqcount_t(s) & ~1;
}
/**
- * __read_seqcount_retry - end a seq-read critical section (without barrier)
- * @s: pointer to seqcount_t
- * @start: count, from read_seqcount_begin
- * Returns: 1 if retry is required, else 0
+ * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @start: count, from read_seqcount_begin()
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -218,39 +375,70 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*
* Use carefully, only in critical code, and comment how the barrier is
* provided.
+ *
+ * Return: true if a read section retry is required, else false
*/
-static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define __read_seqcount_retry(s, start) \
+ __read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
kcsan_atomic_next(0);
return unlikely(READ_ONCE(s->sequence) != start);
}
/**
- * read_seqcount_retry - end a seq-read critical section
- * @s: pointer to seqcount_t
- * @start: count, from read_seqcount_begin
- * Returns: 1 if retry is required, else 0
+ * read_seqcount_retry() - end a seqcount_t read critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @start: count, from read_seqcount_begin()
*
- * read_seqcount_retry closes a read critical section of the given seqcount.
- * If the critical section was invalid, it must be ignored (and typically
- * retried).
+ * read_seqcount_retry closes the read critical section of given
+ * seqcount_t. If the critical section was invalid, it must be ignored
+ * (and typically retried).
+ *
+ * Return: true if a read section retry is required, else false
*/
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define read_seqcount_retry(s, start) \
+ read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
- return __read_seqcount_retry(s, start);
+ return __read_seqcount_t_retry(s, start);
}
-
-
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+/**
+ * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ */
+#define raw_write_seqcount_begin(s) \
+do { \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_disable(); \
+ \
+ raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
+} while (0)
+
+static inline void raw_write_seqcount_t_begin(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
-static inline void raw_write_seqcount_end(seqcount_t *s)
+/**
+ * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ */
+#define raw_write_seqcount_end(s) \
+do { \
+ raw_write_seqcount_t_end(__seqcount_ptr(s)); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void raw_write_seqcount_t_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
@@ -258,47 +446,120 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
}
/**
- * raw_write_seqcount_barrier - do a seq write barrier
- * @s: pointer to seqcount_t
+ * write_seqcount_begin_nested() - start a seqcount_t write section with
+ * custom lockdep nesting level
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ * @subclass: lockdep nesting level
+ *
+ * See Documentation/locking/lockdep-design.rst
+ */
+#define write_seqcount_begin_nested(s, subclass) \
+do { \
+ __seqcount_assert_lock_held(s); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_disable(); \
+ \
+ write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
+} while (0)
+
+static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
+{
+ raw_write_seqcount_t_begin(s);
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
+/**
+ * write_seqcount_begin() - start a seqcount_t write side critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ *
+ * write_seqcount_begin opens a write side critical section of the given
+ * seqcount_t.
+ *
+ * Context: seqcount_t write side critical sections must be serialized and
+ * non-preemptible. If readers can be invoked from hardirq or softirq
+ * context, interrupts or bottom halves must be respectively disabled.
+ */
+#define write_seqcount_begin(s) \
+do { \
+ __seqcount_assert_lock_held(s); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_disable(); \
+ \
+ write_seqcount_t_begin(__seqcount_ptr(s)); \
+} while (0)
+
+static inline void write_seqcount_t_begin(seqcount_t *s)
+{
+ write_seqcount_t_begin_nested(s, 0);
+}
+
+/**
+ * write_seqcount_end() - end a seqcount_t write side critical section
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
- * This can be used to provide an ordering guarantee instead of the
- * usual consistency guarantee. It is one wmb cheaper, because we can
- * collapse the two back-to-back wmb()s.
+ * The write section must've been opened with write_seqcount_begin().
+ */
+#define write_seqcount_end(s) \
+do { \
+ write_seqcount_t_end(__seqcount_ptr(s)); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void write_seqcount_t_end(seqcount_t *s)
+{
+ seqcount_release(&s->dep_map, _RET_IP_);
+ raw_write_seqcount_t_end(s);
+}
+
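As a quick illustration of the write side API above, here is a minimal sketch; it assumes a plain seqcount_t whose writers are serialized by a caller-owned spinlock (foo_writer_lock, foo_seq, foo_a and foo_b are invented names), with a reader built on read_seqcount_begin()/read_seqcount_retry() from earlier in this header:

#include <linux/seqlock.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_writer_lock);	/* caller-owned writer serialization */
static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
static u64 foo_a, foo_b;

static void foo_write(u64 a, u64 b)
{
	spin_lock(&foo_writer_lock);		/* also disables preemption */
	write_seqcount_begin(&foo_seq);
	foo_a = a;
	foo_b = b;
	write_seqcount_end(&foo_seq);
	spin_unlock(&foo_writer_lock);
}

static u64 foo_read(void)
{
	unsigned int seq;
	u64 a, b;

	do {
		seq = read_seqcount_begin(&foo_seq);
		a = foo_a;
		b = foo_b;
	} while (read_seqcount_retry(&foo_seq, seq));

	return a + b;
}

With the seqcount_locktype_t variants introduced by this series, the same write side would additionally get a lockdep assertion that the associated lock is held, as the write_seqcount_begin() macro above shows.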
+/**
+ * raw_write_seqcount_barrier() - do a seqcount_t write barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ *
+ * This can be used to provide an ordering guarantee instead of the usual
+ * consistency guarantee. It is one wmb cheaper, because it can collapse
+ * the two back-to-back wmb()s.
*
* Note that writes surrounding the barrier should be declared atomic (e.g.
* via WRITE_ONCE): a) to ensure the writes become visible to other threads
* atomically, avoiding compiler optimizations; b) to document which writes are
* meant to propagate to the reader critical section. This is necessary because
* neither the writes before nor those after the barrier are enclosed in a seq-writer
- * critical section that would ensure readers are aware of ongoing writes.
+ * critical section that would ensure readers are aware of ongoing writes::
*
- * seqcount_t seq;
- * bool X = true, Y = false;
+ * seqcount_t seq;
+ * bool X = true, Y = false;
*
- * void read(void)
- * {
- * bool x, y;
+ * void read(void)
+ * {
+ * bool x, y;
*
- * do {
- * int s = read_seqcount_begin(&seq);
+ * do {
+ * int s = read_seqcount_begin(&seq);
*
- * x = X; y = Y;
+ * x = X; y = Y;
*
- * } while (read_seqcount_retry(&seq, s));
+ * } while (read_seqcount_retry(&seq, s));
*
- * BUG_ON(!x && !y);
+ * BUG_ON(!x && !y);
* }
*
* void write(void)
* {
- * WRITE_ONCE(Y, true);
+ * WRITE_ONCE(Y, true);
*
- * raw_write_seqcount_barrier(seq);
+ * raw_write_seqcount_barrier(seq);
*
- * WRITE_ONCE(X, false);
+ * WRITE_ONCE(X, false);
* }
*/
-static inline void raw_write_seqcount_barrier(seqcount_t *s)
+#define raw_write_seqcount_barrier(s) \
+ raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
@@ -307,7 +568,44 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
kcsan_nestable_atomic_end();
}
-static inline int raw_read_seqcount_latch(seqcount_t *s)
+/**
+ * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
+ * side operations
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ *
+ * After write_seqcount_invalidate, no seqcount_t read side operations
+ * will complete successfully and see data older than this.
+ */
+#define write_seqcount_invalidate(s) \
+ write_seqcount_t_invalidate(__seqcount_ptr(s))
+
+static inline void write_seqcount_t_invalidate(seqcount_t *s)
+{
+ smp_wmb();
+ kcsan_nestable_atomic_begin();
+ s->sequence+=2;
+ kcsan_nestable_atomic_end();
+}
+
+/**
+ * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
+ *
+ * Use seqcount_t latching to switch between two storage places protected
+ * by a sequence counter. Doing so allows having interruptible, preemptible,
+ * seqcount_t write side critical sections.
+ *
+ * Check raw_write_seqcount_latch() for more details and a full reader and
+ * writer usage example.
+ *
+ * Return: sequence counter raw value. Use the lowest bit as an index for
+ * picking which data copy to read. The full counter value must then be
+ * checked with read_seqcount_retry().
+ */
+#define raw_read_seqcount_latch(s) \
+ raw_read_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline int raw_read_seqcount_t_latch(seqcount_t *s)
{
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
int seq = READ_ONCE(s->sequence); /* ^^^ */
@@ -315,8 +613,8 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
}
/**
- * raw_write_seqcount_latch - redirect readers to even/odd copy
- * @s: pointer to seqcount_t
+ * raw_write_seqcount_latch() - redirect readers to even/odd copy
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -332,66 +630,73 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
* Very simply put: we first modify one copy and then the other. This ensures
* there is always one copy in a stable state, ready to give us an answer.
*
- * The basic form is a data structure like:
+ * The basic form is a data structure like::
*
- * struct latch_struct {
- * seqcount_t seq;
- * struct data_struct data[2];
- * };
+ * struct latch_struct {
+ * seqcount_t seq;
+ * struct data_struct data[2];
+ * };
*
* Where a modification, which is assumed to be externally serialized, does the
- * following:
+ * following::
*
- * void latch_modify(struct latch_struct *latch, ...)
- * {
- * smp_wmb(); <- Ensure that the last data[1] update is visible
- * latch->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
+ * void latch_modify(struct latch_struct *latch, ...)
+ * {
+ * smp_wmb(); // Ensure that the last data[1] update is visible
+ * latch->seq++;
+ * smp_wmb(); // Ensure that the seqcount update is visible
*
- * modify(latch->data[0], ...);
+ * modify(latch->data[0], ...);
*
- * smp_wmb(); <- Ensure that the data[0] update is visible
- * latch->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
+ * smp_wmb(); // Ensure that the data[0] update is visible
+ * latch->seq++;
+ * smp_wmb(); // Ensure that the seqcount update is visible
*
- * modify(latch->data[1], ...);
- * }
+ * modify(latch->data[1], ...);
+ * }
*
- * The query will have a form like:
+ * The query will have a form like::
*
- * struct entry *latch_query(struct latch_struct *latch, ...)
- * {
- * struct entry *entry;
- * unsigned seq, idx;
+ * struct entry *latch_query(struct latch_struct *latch, ...)
+ * {
+ * struct entry *entry;
+ * unsigned seq, idx;
*
- * do {
- * seq = raw_read_seqcount_latch(&latch->seq);
+ * do {
+ * seq = raw_read_seqcount_latch(&latch->seq);
*
- * idx = seq & 0x01;
- * entry = data_query(latch->data[idx], ...);
+ * idx = seq & 0x01;
+ * entry = data_query(latch->data[idx], ...);
*
- * smp_rmb();
- * } while (seq != latch->seq);
+ * // read_seqcount_retry() includes needed smp_rmb()
+ * } while (read_seqcount_retry(&latch->seq, seq));
*
- * return entry;
- * }
+ * return entry;
+ * }
*
* So during the modification, queries are first redirected to data[1]. Then we
* modify data[0]. When that is complete, we redirect queries back to data[0]
* and we can modify data[1].
*
- * NOTE: The non-requirement for atomic modifications does _NOT_ include
- * the publishing of new entries in the case where data is a dynamic
- * data structure.
+ * NOTE:
+ *
+ * The non-requirement for atomic modifications does _NOT_ include
+ * the publishing of new entries in the case where data is a dynamic
+ * data structure.
*
- * An iteration might start in data[0] and get suspended long enough
- * to miss an entire modification sequence, once it resumes it might
- * observe the new entry.
+ * An iteration might start in data[0] and get suspended long enough
+ * to miss an entire modification sequence, once it resumes it might
+ * observe the new entry.
*
- * NOTE: When data is a dynamic data structure; one should use regular RCU
- * patterns to manage the lifetimes of the objects within.
+ * NOTE:
+ *
+ * When data is a dynamic data structure; one should use regular RCU
+ * patterns to manage the lifetimes of the objects within.
*/
-static inline void raw_write_seqcount_latch(seqcount_t *s)
+#define raw_write_seqcount_latch(s) \
+ raw_write_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_latch(seqcount_t *s)
{
smp_wmb(); /* prior stores before incrementing "sequence" */
s->sequence++;
@@ -399,67 +704,48 @@ static inline void raw_write_seqcount_latch(seqcount_t *s)
}
/*
- * Sequence counter only version assumes that callers are using their
- * own mutexing.
- */
-static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
- raw_write_seqcount_begin(s);
- seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
-}
-
-static inline void write_seqcount_begin(seqcount_t *s)
-{
- write_seqcount_begin_nested(s, 0);
-}
-
-static inline void write_seqcount_end(seqcount_t *s)
-{
- seqcount_release(&s->dep_map, _RET_IP_);
- raw_write_seqcount_end(s);
-}
-
-/**
- * write_seqcount_invalidate - invalidate in-progress read-side seq operations
- * @s: pointer to seqcount_t
+ * Sequential locks (seqlock_t)
+ *
+ * Sequence counters with an embedded spinlock for writer serialization
+ * and non-preemptibility.
*
- * After write_seqcount_invalidate, no read-side seq operations will complete
- * successfully and see data older than this.
+ * For more info, see:
+ * - Comments on top of seqcount_t
+ * - Documentation/locking/seqlock.rst
*/
-static inline void write_seqcount_invalidate(seqcount_t *s)
-{
- smp_wmb();
- kcsan_nestable_atomic_begin();
- s->sequence+=2;
- kcsan_nestable_atomic_end();
-}
-
typedef struct {
struct seqcount seqcount;
spinlock_t lock;
} seqlock_t;
-/*
- * These macros triggered gcc-3.x compile-time problems. We think these are
- * OK now. Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
- { \
- .seqcount = SEQCNT_ZERO(lockname), \
- .lock = __SPIN_LOCK_UNLOCKED(lockname) \
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { \
+ .seqcount = SEQCNT_ZERO(lockname), \
+ .lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
-#define seqlock_init(x) \
- do { \
- seqcount_init(&(x)->seqcount); \
- spin_lock_init(&(x)->lock); \
+/**
+ * seqlock_init() - dynamic initializer for seqlock_t
+ * @sl: Pointer to the seqlock_t instance
+ */
+#define seqlock_init(sl) \
+ do { \
+ seqcount_init(&(sl)->seqcount); \
+ spin_lock_init(&(sl)->lock); \
} while (0)
-#define DEFINE_SEQLOCK(x) \
- seqlock_t x = __SEQLOCK_UNLOCKED(x)
+/**
+ * DEFINE_SEQLOCK() - Define a statically allocated seqlock_t
+ * @sl: Name of the seqlock_t instance
+ */
+#define DEFINE_SEQLOCK(sl) \
+ seqlock_t sl = __SEQLOCK_UNLOCKED(sl)
-/*
- * Read side functions for starting and finalizing a read side section.
+/**
+ * read_seqbegin() - start a seqlock_t read side critical section
+ * @sl: Pointer to seqlock_t
+ *
+ * Return: count, to be passed to read_seqretry()
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
@@ -470,6 +756,17 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
return ret;
}
+/**
+ * read_seqretry() - end a seqlock_t read side section
+ * @sl: Pointer to seqlock_t
+ * @start: count, from read_seqbegin()
+ *
+ * read_seqretry closes the read side critical section of given seqlock_t.
+ * If the critical section was invalid, it must be ignored (and typically
+ * retried).
+ *
+ * Return: true if a read section retry is required, else false
+ */
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
/*
@@ -481,44 +778,88 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
return read_seqcount_retry(&sl->seqcount, start);
}
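For reference, a lockless reader built on the two helpers above could look like the following minimal sketch (foo_seqlock and foo_value are invented names; the matching writer follows write_sequnlock() below):

static DEFINE_SEQLOCK(foo_seqlock);
static u64 foo_value;

static u64 foo_get(void)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqbegin(&foo_seqlock);
		val = foo_value;	/* read the protected data */
	} while (read_seqretry(&foo_seqlock, seq));

	return val;
}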
-/*
- * Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
+/**
+ * write_seqlock() - start a seqlock_t write side critical section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_seqlock opens a write side critical section for the given
+ * seqlock_t. It also implicitly acquires the spinlock_t embedded inside
+ * that sequential lock. All seqlock_t write side sections are thus
+ * automatically serialized and non-preemptible.
+ *
+ * Context: if the seqlock_t read section, or other write side critical
+ * sections, can be invoked from hardirq or softirq contexts, use the
+ * _irqsave or _bh variants of this function instead.
*/
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
}
+/**
+ * write_sequnlock() - end a seqlock_t write side critical section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_sequnlock closes the (serialized and non-preemptible) write side
+ * critical section of given seqlock_t.
+ */
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock(&sl->lock);
}
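The matching write side for the reader sketched earlier is simply (same invented foo_seqlock/foo_value):

static void foo_set(u64 val)
{
	write_seqlock(&foo_seqlock);	/* acquires the embedded spinlock */
	foo_value = val;
	write_sequnlock(&foo_seqlock);
}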
+/**
+ * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * _bh variant of write_seqlock(). Use only if the read side section, or
+ * other write side sections, can be invoked from softirq contexts.
+ */
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
}
+/**
+ * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_sequnlock_bh closes the serialized, non-preemptible, and
+ * softirqs-disabled, seqlock_t write side critical section opened with
+ * write_seqlock_bh().
+ */
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock_bh(&sl->lock);
}
+/**
+ * write_seqlock_irq() - start a non-interruptible seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * _irq variant of write_seqlock(). Use only if the read side section, or
+ * other write sections, can be invoked from hardirq contexts.
+ */
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
}
+/**
+ * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
+ * @sl: Pointer to seqlock_t
+ *
+ * write_sequnlock_irq closes the serialized and non-interruptible
+ * seqlock_t write side section opened with write_seqlock_irq().
+ */
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -527,79 +868,112 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
return flags;
}
+/**
+ * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
+ * section
+ * @lock: Pointer to seqlock_t
+ * @flags: Stack-allocated storage for saving caller's local interrupt
+ * state, to be passed to write_sequnlock_irqrestore().
+ *
+ * _irqsave variant of write_seqlock(). Use it only if the read side
+ * section, or other write sections, can be invoked from hardirq context.
+ */
#define write_seqlock_irqsave(lock, flags) \
do { flags = __write_seqlock_irqsave(lock); } while (0)
+/**
+ * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
+ * section
+ * @sl: Pointer to seqlock_t
+ * @flags: Caller's saved interrupt state, from write_seqlock_irqsave()
+ *
+ * write_sequnlock_irqrestore closes the serialized and non-interruptible
+ * seqlock_t write section previously opened with write_seqlock_irqsave().
+ */
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
-/*
- * A locking reader exclusively locks out other writers and locking readers,
- * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
+/**
+ * read_seqlock_excl() - begin a seqlock_t locking reader section
+ * @sl: Pointer to seqlock_t
+ *
+ * read_seqlock_excl opens a seqlock_t locking reader critical section. A
+ * locking reader exclusively locks out *both* other writers *and* other
+ * locking readers, but it does not update the embedded sequence number.
+ *
+ * Locking readers act like a normal spin_lock()/spin_unlock().
+ *
+ * Context: if the seqlock_t write section, *or other read sections*, can
+ * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
+ * variant of this function instead.
+ *
+ * The opened read section must be closed with read_sequnlock_excl().
*/
static inline void read_seqlock_excl(seqlock_t *sl)
{
spin_lock(&sl->lock);
}
+/**
+ * read_sequnlock_excl() - end a seqlock_t locking reader critical section
+ * @sl: Pointer to seqlock_t
+ */
static inline void read_sequnlock_excl(seqlock_t *sl)
{
spin_unlock(&sl->lock);
}
/**
- * read_seqbegin_or_lock - begin a sequence number check or locking block
- * @lock: sequence lock
- * @seq : sequence number to be checked
+ * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
+ * softirqs disabled
+ * @sl: Pointer to seqlock_t
*
- * First try it once optimistically without taking the lock. If that fails,
- * take the lock. The sequence number is also used as a marker for deciding
- * whether to be a reader (even) or writer (odd).
- * N.B. seq must be initialized to an even number to begin with.
+ * _bh variant of read_seqlock_excl(). Use this variant only if the
+ * seqlock_t write side section, *or other read sections*, can be invoked
+ * from softirq contexts.
*/
-static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
-{
- if (!(*seq & 1)) /* Even */
- *seq = read_seqbegin(lock);
- else /* Odd */
- read_seqlock_excl(lock);
-}
-
-static inline int need_seqretry(seqlock_t *lock, int seq)
-{
- return !(seq & 1) && read_seqretry(lock, seq);
-}
-
-static inline void done_seqretry(seqlock_t *lock, int seq)
-{
- if (seq & 1)
- read_sequnlock_excl(lock);
-}
-
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
}
+/**
+ * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
+ * reader section
+ * @sl: Pointer to seqlock_t
+ */
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
spin_unlock_bh(&sl->lock);
}
+/**
+ * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
+ * reader section
+ * @sl: Pointer to seqlock_t
+ *
+ * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t
+ * write side section, *or other read sections*, can be invoked from a
+ * hardirq context.
+ */
static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
}
+/**
+ * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
+ * locking reader section
+ * @sl: Pointer to seqlock_t
+ */
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
spin_unlock_irq(&sl->lock);
@@ -613,15 +987,117 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
return flags;
}
+/**
+ * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
+ * locking reader section
+ * @lock: Pointer to seqlock_t
+ * @flags: Stack-allocated storage for saving caller's local interrupt
+ * state, to be passed to read_sequnlock_excl_irqrestore().
+ *
+ * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t
+ * write side section, *or other read sections*, can be invoked from a
+ * hardirq context.
+ */
#define read_seqlock_excl_irqsave(lock, flags) \
do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
+/**
+ * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
+ * locking reader section
+ * @sl: Pointer to seqlock_t
+ * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave()
+ */
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
spin_unlock_irqrestore(&sl->lock, flags);
}
+/**
+ * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
+ * @lock: Pointer to seqlock_t
+ * @seq : Marker and return parameter. If the passed value is even, the
+ * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
+ * If the passed value is odd, the reader will become a *locking* reader
+ * as in read_seqlock_excl(). In the first call to this function, the
+ * caller *must* initialize and pass an even value to @seq; this way, a
+ * lockless read can be optimistically tried first.
+ *
+ * read_seqbegin_or_lock is an API designed to optimistically try a normal
+ * lockless seqlock_t read section first. If an odd counter is found, the
+ * lockless read trial has failed, and the next read iteration transforms
+ * itself into a full seqlock_t locking reader.
+ *
+ * This is typically used to avoid starving lockless seqlock_t readers
+ * (too many retry loops) when there is a sharp spike in write side
+ * activity.
+ *
+ * Context: if the seqlock_t write section, *or other read sections*, can
+ * be invoked from hardirq or softirq contexts, use the _irqsave or _bh
+ * variant of this function instead.
+ *
+ * Check Documentation/locking/seqlock.rst for template example code.
+ *
+ * Return: the encountered sequence counter value, through the @seq
+ * parameter, which is overloaded as a return parameter. This returned
+ * value must be checked with need_seqretry(). If the read section needs to
+ * be retried, this returned value must also be passed as the @seq
+ * parameter of the next read_seqbegin_or_lock() iteration.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl(lock);
+}
+
+/**
+ * need_seqretry() - validate seqlock_t "locking or lockless" read section
+ * @lock: Pointer to seqlock_t
+ * @seq: sequence count, from read_seqbegin_or_lock()
+ *
+ * Return: true if a read section retry is required, false otherwise
+ */
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+ return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+/**
+ * done_seqretry() - end seqlock_t "locking or lockless" reader section
+ * @lock: Pointer to seqlock_t
+ * @seq: count, from read_seqbegin_or_lock()
+ *
+ * done_seqretry finishes the seqlock_t read side critical section started
+ * with read_seqbegin_or_lock() and validated by need_seqretry().
+ */
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+ if (seq & 1)
+ read_sequnlock_excl(lock);
+}
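One way to combine the three helpers above, again using the invented foo_seqlock, is to try a lockless pass first and escalate to a locking reader only if that pass had to be retried:

static void foo_walk(void)
{
	int seq = 0;			/* even: start as a lockless reader */

	do {
		read_seqbegin_or_lock(&foo_seqlock, &seq);

		/* ... read-side critical section ... */

		if (!need_seqretry(&foo_seqlock, seq))
			break;

		seq = 1;		/* odd: retry as a locking reader */
	} while (1);

	done_seqretry(&foo_seqlock, seq);
}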
+
+/**
+ * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
+ * a non-interruptible locking reader
+ * @lock: Pointer to seqlock_t
+ * @seq: Marker and return parameter. Check read_seqbegin_or_lock().
+ *
+ * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
+ * the seqlock_t write section, *or other read sections*, can be invoked
+ * from hardirq context.
+ *
+ * Note: Interrupts will be disabled only for "locking reader" mode.
+ *
+ * Return:
+ *
+ * 1. The saved local interrupts state in case of a locking reader, to
+ * be passed to done_seqretry_irqrestore().
+ *
+ * 2. The encountered sequence counter value, returned through @seq
+ * overloaded as a return parameter. Check read_seqbegin_or_lock().
+ */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
@@ -635,6 +1111,18 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
return flags;
}
+/**
+ * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
+ * non-interruptible locking reader section
+ * @lock: Pointer to seqlock_t
+ * @seq: Count, from read_seqbegin_or_lock_irqsave()
+ * @flags: Caller's saved local interrupt state in case of a locking
+ * reader, also from read_seqbegin_or_lock_irqsave()
+ *
+ * This is the _irqrestore variant of done_seqretry(). The read section
+ * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
+ * by need_seqretry().
+ */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 6545f8cfc8fa..2b70f736b091 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -155,6 +155,8 @@ extern int early_serial_setup(struct uart_port *port);
extern int early_serial8250_setup(struct earlycon_device *device,
const char *options);
+extern void serial8250_update_uartclk(struct uart_port *port,
+ unsigned int uartclk);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
extern void serial8250_do_set_ldisc(struct uart_port *port,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 791f4844efeb..01fc4d9c9c54 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -10,7 +10,6 @@
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/console.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>
@@ -30,6 +29,7 @@
struct uart_port;
struct serial_struct;
struct device;
+struct gpio_desc;
/*
* This structure describes all the operations that can be done on the
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 7a35a6901221..a5a5d1d4d7b1 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -36,6 +36,9 @@ struct shmem_sb_info {
unsigned char huge; /* Whether to try for hugepages */
kuid_t uid; /* Mount uid for root directory */
kgid_t gid; /* Mount gid for root directory */
+ bool full_inums; /* If i_ino should be uint or ino_t */
+ ino_t next_ino; /* The next per-sb inode number to use */
+ ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */
struct mempolicy *mpol; /* default memory policy for mappings */
spinlock_t shrinklist_lock; /* Protects shrinklist */
struct list_head shrinklist; /* List of shrinkable inodes */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0c0377fc00c2..46881d902124 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -238,6 +238,7 @@
SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
@@ -283,6 +284,7 @@ struct nf_bridge_info {
*/
struct tc_skb_ext {
__u32 chain;
+ __u16 mru;
};
#endif
@@ -1328,7 +1330,7 @@ void skb_flow_dissect_meta(const struct sk_buff *skb,
void *target_container);
/* Gets a skb connection tracking info, ctinfo map should be a
- * a map of mapsize to translate enum ip_conntrack_info states
+ * map of mapsize to translate enum ip_conntrack_info states
* to user states.
*/
void
@@ -1342,6 +1344,10 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);
+void skb_flow_dissect_hash(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container);
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -3812,7 +3818,7 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
* must call this function to return the skb back to the stack with a
* timestamp.
*
- * @skb: clone of the the original outgoing packet
+ * @skb: clone of the original outgoing packet
* @hwtstamps: hardware time stamps
*
*/
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6d454886bcaf..24df2393ec03 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,9 +155,6 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name,
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
-void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
-void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
-
/*
* Please use this macro to create slab caches. Simply specify the
* name of the structure and maybe some flags that are listed above.
@@ -186,10 +183,12 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
*/
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
-void kzfree(const void *);
+void kfree_sensitive(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);
+#define kzfree(x) kfree_sensitive(x) /* For backward compatibility */
+
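kfree_sensitive() (the new name behind the kzfree() alias above) clears the allocation before freeing it, which is what callers holding key material want. A minimal hedged sketch, with invented names:

static int foo_set_key(const u8 *user_key, unsigned int keylen)
{
	u8 *key = kmemdup(user_key, keylen, GFP_KERNEL);

	if (!key)
		return -ENOMEM;

	/* ... derive round keys, program the hardware, etc. ... */

	kfree_sensitive(key);	/* zeroes the buffer, then frees it */
	return 0;
}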
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
bool to_user);
@@ -578,8 +577,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
return __kmalloc_node(size, flags, node);
}
-int memcg_update_all_caches(int num_memcgs);
-
/**
* kmalloc_array - allocate memory for an array.
* @n: number of elements.
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index abc7de77b988..9eb430c163c2 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -72,9 +72,6 @@ struct kmem_cache {
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
-#ifdef CONFIG_MEMCG
- struct memcg_cache_params memcg_params;
-#endif
#ifdef CONFIG_KASAN
struct kasan_cache kasan_info;
#endif
@@ -114,4 +111,10 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
+static inline int objs_per_slab_page(const struct kmem_cache *cache,
+ const struct page *page)
+{
+ return cache->num;
+}
+
#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d2153789bd9f..1be0ed5befa1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -8,6 +8,7 @@
* (C) 2007 SGI, Christoph Lameter
*/
#include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -86,6 +87,7 @@ struct kmem_cache {
unsigned long min_partial;
unsigned int size; /* The size of an object including metadata */
unsigned int object_size;/* The size of an object without metadata */
+ struct reciprocal_value reciprocal_size;
unsigned int offset; /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
@@ -106,17 +108,7 @@ struct kmem_cache {
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
- struct work_struct kobj_remove_work;
#endif
-#ifdef CONFIG_MEMCG
- struct memcg_cache_params memcg_params;
- /* For propagation, maximum size of a stored attr */
- unsigned int max_attr_size;
-#ifdef CONFIG_SYSFS
- struct kset *memcg_kset;
-#endif
-#endif
-
#ifdef CONFIG_SLAB_FREELIST_HARDENED
unsigned long random;
#endif
@@ -182,4 +174,23 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
return result;
}
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+ void *addr, void *obj)
+{
+ return reciprocal_divide(kasan_reset_tag(obj) - addr,
+ cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct page *page, void *obj)
+{
+ return __obj_to_index(cache, page_address(page), obj);
+}
+
+static inline int objs_per_slab_page(const struct kmem_cache *cache,
+ const struct page *page)
+{
+ return page->objects;
+}
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index a74c1d5acdf3..2249ecaf77e4 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -121,6 +121,15 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event);
/**
+ * cmdq_pkt_set_event() - append set event command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @event: the desired event to be set
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event);
+
+/**
* cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to
* execute an instruction that waits for a specified
* hardware register to check for the value w/o mask.
@@ -152,6 +161,28 @@ int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
*/
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE
+ * to execute an instruction that set a constant value into
+ * internal register and use as value, mask or address in
+ * read/write instruction.
+ * @pkt: the CMDQ packet
+ * @reg_idx: the CMDQ internal register ID
+ * @value: the specified value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value);
+
+/**
+ * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
+ * @pkt: the CMDQ packet
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
+
/**
* cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
* packet and call back at the end of done packet
diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h
index 26f73df0a524..5a472eca5ee4 100644
--- a/include/linux/soc/ti/k3-ringacc.h
+++ b/include/linux/soc/ti/k3-ringacc.h
@@ -2,7 +2,7 @@
/*
* K3 Ring Accelerator (RA) subsystem interface
*
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com
*/
#ifndef __SOC_TI_K3_RINGACC_API_H_
@@ -107,6 +107,10 @@ struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
int id, u32 flags);
+int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
+ int fwd_id, int compl_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring);
/**
* k3_ringacc_ring_reset - ring reset
* @ring: pointer on Ring
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
index 9745df6ed9d3..c75ef99c99ca 100644
--- a/include/linux/soc/ti/knav_qmss.h
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -1,7 +1,7 @@
/*
* Keystone Navigator Queue Management Sub-System header
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
* Author: Sandeep Nair <sandeep_n@ti.com>
* Cyril Chemparathy <cyril@ti.com>
* Santosh Shilimkar <santosh.shilimkar@ti.com>
diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h
index eac8e0c6fe11..1f6e76d423cf 100644
--- a/include/linux/soc/ti/ti-msgmgr.h
+++ b/include/linux/soc/ti/ti-msgmgr.h
@@ -1,7 +1,7 @@
/*
* Texas Instruments' Message Manager
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h
index 11fb5048f5f6..e3aa8b14612e 100644
--- a/include/linux/soc/ti/ti_sci_inta_msi.h
+++ b/include/linux/soc/ti/ti_sci_inta_msi.h
@@ -2,7 +2,7 @@
/*
* Texas Instruments' K3 TI SCI INTA MSI helper
*
- * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
* Lokesh Vutla <lokeshvutla@ti.com>
*/
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index 9531ec823298..49c5d29cd33c 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -2,7 +2,7 @@
/*
* Texas Instruments System Control Interface Protocol
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
@@ -226,8 +226,8 @@ struct ti_sci_rm_core_ops {
* and destination
* @set_event_map: Set an Event based peripheral irq to Interrupt
* Aggregator.
- * @free_irq: Free an an IRQ route between the requested source
- * destination.
+ * @free_irq: Free an IRQ route between the requested source
+ * and destination.
* @free_event_map: Free an event based peripheral irq to Interrupt
* Aggregator.
*/
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 04d2bc97f497..e9cb30d8cbfb 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -10,6 +10,7 @@
#include <linux/compiler.h> /* __user */
#include <uapi/linux/socket.h>
+struct file;
struct pid;
struct cred;
struct socket;
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
new file mode 100644
index 000000000000..ea193414298b
--- /dev/null
+++ b/include/linux/sockptr.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Christoph Hellwig.
+ *
+ * Support for "universal" pointers that can point to either kernel or userspace
+ * memory.
+ */
+#ifndef _LINUX_SOCKPTR_H
+#define _LINUX_SOCKPTR_H
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+typedef struct {
+ union {
+ void *kernel;
+ void __user *user;
+ };
+ bool is_kernel : 1;
+} sockptr_t;
+
+static inline bool sockptr_is_kernel(sockptr_t sockptr)
+{
+ return sockptr.is_kernel;
+}
+
+static inline sockptr_t KERNEL_SOCKPTR(void *p)
+{
+ return (sockptr_t) { .kernel = p, .is_kernel = true };
+}
+
+static inline sockptr_t USER_SOCKPTR(void __user *p)
+{
+ return (sockptr_t) { .user = p };
+}
+
+static inline bool sockptr_is_null(sockptr_t sockptr)
+{
+ if (sockptr_is_kernel(sockptr))
+ return !sockptr.kernel;
+ return !sockptr.user;
+}
+
+static inline int copy_from_sockptr_offset(void *dst, sockptr_t src,
+ size_t offset, size_t size)
+{
+ if (!sockptr_is_kernel(src))
+ return copy_from_user(dst, src.user + offset, size);
+ memcpy(dst, src.kernel + offset, size);
+ return 0;
+}
+
+static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
+{
+ return copy_from_sockptr_offset(dst, src, 0, size);
+}
+
+static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
+ const void *src, size_t size)
+{
+ if (!sockptr_is_kernel(dst))
+ return copy_to_user(dst.user + offset, src, size);
+ memcpy(dst.kernel + offset, src, size);
+ return 0;
+}
+
+static inline void *memdup_sockptr(sockptr_t src, size_t len)
+{
+ void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_sockptr(p, src, len)) {
+ kfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ return p;
+}
+
+static inline void *memdup_sockptr_nul(sockptr_t src, size_t len)
+{
+ char *p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_sockptr(p, src, len)) {
+ kfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ p[len] = '\0';
+ return p;
+}
+
+static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count)
+{
+ if (sockptr_is_kernel(src)) {
+ size_t len = min(strnlen(src.kernel, count - 1) + 1, count);
+
+ memcpy(dst, src.kernel, len);
+ return len;
+ }
+ return strncpy_from_user(dst, src.user, count);
+}
+
+#endif /* _LINUX_SOCKPTR_H */
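To show how the new type is meant to be consumed, here is a hedged sketch of a setsockopt-style handler that serves kernel and user space callers through the same path (the function and option handling are invented):

static int foo_set_option(sockptr_t optval, unsigned int optlen)
{
	int val;

	if (optlen < sizeof(val))
		return -EINVAL;
	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	/* ... apply val ... */
	return 0;
}

A kernel-internal caller wraps its buffer with KERNEL_SOCKPTR(&val), while the syscall path passes USER_SOCKPTR(user_optval); the handler above does not care which.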
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 9c27a32df9bb..76052f12c9f7 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -88,10 +88,10 @@ enum sdw_slave_status {
* @SDW_CLK_POST_DEPREPARE: post clock stop de-prepare
*/
enum sdw_clk_stop_type {
- SDW_CLK_PRE_PREPARE = 0,
- SDW_CLK_POST_PREPARE,
- SDW_CLK_PRE_DEPREPARE,
- SDW_CLK_POST_DEPREPARE,
+ SDW_CLK_PRE_PREPARE = 0,
+ SDW_CLK_POST_PREPARE,
+ SDW_CLK_PRE_DEPREPARE,
+ SDW_CLK_POST_DEPREPARE,
};
/**
@@ -152,19 +152,19 @@ enum sdw_data_direction {
*
* @SDW_PORT_DATA_MODE_NORMAL: Normal data mode where audio data is received
* and transmitted.
+ * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce
+ * a pseudo random data pattern that is transferred
+ * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of
+ * logic 0. The encoding will result in no signal transitions
* @SDW_PORT_DATA_MODE_STATIC_1: Simple test mode which uses static value of
* logic 1. The encoding will result in signal transitions at every bitslot
* owned by this Port
- * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of
- * logic 0. The encoding will result in no signal transitions
- * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce
- * a pseudo random data pattern that is transferred
*/
enum sdw_port_data_mode {
SDW_PORT_DATA_MODE_NORMAL = 0,
- SDW_PORT_DATA_MODE_STATIC_1 = 1,
+ SDW_PORT_DATA_MODE_PRBS = 1,
SDW_PORT_DATA_MODE_STATIC_0 = 2,
- SDW_PORT_DATA_MODE_PRBS = 3,
+ SDW_PORT_DATA_MODE_STATIC_1 = 3,
};
/*
@@ -426,8 +426,7 @@ int sdw_slave_read_prop(struct sdw_slave *slave);
* struct sdw_slave_id - Slave ID
* @mfg_id: MIPI Manufacturer ID
* @part_id: Device Part ID
- * @class_id: MIPI Class ID, unused now.
- * Currently a placeholder in MIPI SoundWire Spec
+ * @class_id: MIPI Class ID (defined starting with SoundWire 1.2 spec)
* @unique_id: Device unique ID
* @sdw_version: SDW version implemented
*
@@ -659,10 +658,14 @@ struct sdw_driver {
struct device_driver driver;
};
-#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \
- { .mfg_id = (_mfg_id), .part_id = (_part_id), \
+#define SDW_SLAVE_ENTRY_EXT(_mfg_id, _part_id, _version, _c_id, _drv_data) \
+ { .mfg_id = (_mfg_id), .part_id = (_part_id), \
+ .sdw_version = (_version), .class_id = (_c_id), \
.driver_data = (unsigned long)(_drv_data) }
+#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \
+ SDW_SLAVE_ENTRY_EXT((_mfg_id), (_part_id), 0, 0, (_drv_data))
+
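For context, a Slave driver id table would use the extended entry roughly as follows (the manufacturer, part, version and class IDs below are placeholders):

static const struct sdw_device_id foo_sdw_id[] = {
	SDW_SLAVE_ENTRY_EXT(0x025d, 0x1308, 0x2, 0x0, 0),
	{},
};
MODULE_DEVICE_TABLE(sdw, foo_sdw_id);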
int sdw_handle_slave_status(struct sdw_bus *bus,
enum sdw_slave_status status[]);
@@ -952,10 +955,12 @@ int sdw_stream_remove_master(struct sdw_bus *bus,
struct sdw_stream_runtime *stream);
int sdw_stream_remove_slave(struct sdw_slave *slave,
struct sdw_stream_runtime *stream);
+int sdw_startup_stream(void *sdw_substream);
int sdw_prepare_stream(struct sdw_stream_runtime *stream);
int sdw_enable_stream(struct sdw_stream_runtime *stream);
int sdw_disable_stream(struct sdw_stream_runtime *stream);
int sdw_deprepare_stream(struct sdw_stream_runtime *stream);
+void sdw_shutdown_stream(void *sdw_substream);
int sdw_bus_prep_clk_stop(struct sdw_bus *bus);
int sdw_bus_clk_stop(struct sdw_bus *bus);
int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 979b41b5dcb4..120ffddc03d2 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -115,6 +115,7 @@ struct sdw_intel_slave_id {
* links
* @link_list: list to handle interrupts across all links
* @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers.
+ * @shim_mask: flags to track initialization of SHIM shared registers
*/
struct sdw_intel_ctx {
int count;
@@ -126,6 +127,7 @@ struct sdw_intel_ctx {
struct sdw_intel_slave_id *ids;
struct list_head link_list;
struct mutex shim_lock; /* lock for access to shared SHIM registers */
+ u32 shim_mask;
};
/**
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
index a686f7988156..5d3c271af7d1 100644
--- a/include/linux/soundwire/sdw_registers.h
+++ b/include/linux/soundwire/sdw_registers.h
@@ -12,7 +12,7 @@
#define SDW_REG_SHIFT(n) (ffs(n) - 1)
/*
- * SDW registers as defined by MIPI 1.1 Spec
+ * SDW registers as defined by MIPI 1.2 Spec
*/
#define SDW_REGADDR GENMASK(14, 0)
#define SDW_SCP_ADDRPAGE2_MASK GENMASK(22, 15)
@@ -43,6 +43,8 @@
#define SDW_DP0_INT_TEST_FAIL BIT(0)
#define SDW_DP0_INT_PORT_READY BIT(1)
#define SDW_DP0_INT_BRA_FAILURE BIT(2)
+#define SDW_DP0_SDCA_CASCADE BIT(3)
+/* BIT(4) not allocated in SoundWire specification 1.2 */
#define SDW_DP0_INT_IMPDEF1 BIT(5)
#define SDW_DP0_INT_IMPDEF2 BIT(6)
#define SDW_DP0_INT_IMPDEF3 BIT(7)
@@ -106,6 +108,20 @@
#define SDW_SCP_ADDRPAGE2 0x49
#define SDW_SCP_KEEPEREN 0x4A
#define SDW_SCP_BANKDELAY 0x4B
+#define SDW_SCP_COMMIT 0x4C
+
+#define SDW_SCP_BUS_CLOCK_BASE 0x4D
+#define SDW_SCP_BASE_CLOCK_FREQ GENMASK(2, 0)
+#define SDW_SCP_BASE_CLOCK_UNKNOWN 0x0
+#define SDW_SCP_BASE_CLOCK_19200000_HZ 0x1
+#define SDW_SCP_BASE_CLOCK_24000000_HZ 0x2
+#define SDW_SCP_BASE_CLOCK_24576000_HZ 0x3
+#define SDW_SCP_BASE_CLOCK_22579200_HZ 0x4
+#define SDW_SCP_BASE_CLOCK_32000000_HZ 0x5
+#define SDW_SCP_BASE_CLOCK_RESERVED 0x6
+#define SDW_SCP_BASE_CLOCK_IMP_DEF 0x7
+
+/* 0x4E is not allocated in SoundWire specification 1.2 */
#define SDW_SCP_TESTMODE 0x4F
#define SDW_SCP_DEVID_0 0x50
#define SDW_SCP_DEVID_1 0x51
@@ -114,12 +130,111 @@
#define SDW_SCP_DEVID_4 0x54
#define SDW_SCP_DEVID_5 0x55
+/* The INT and STATUS registers are the same */
+#define SDW_SCP_SDCA_INT1 0x58
+#define SDW_SCP_SDCA_INT_SDCA_0 BIT(0)
+#define SDW_SCP_SDCA_INT_SDCA_1 BIT(1)
+#define SDW_SCP_SDCA_INT_SDCA_2 BIT(2)
+#define SDW_SCP_SDCA_INT_SDCA_3 BIT(3)
+#define SDW_SCP_SDCA_INT_SDCA_4 BIT(4)
+#define SDW_SCP_SDCA_INT_SDCA_5 BIT(5)
+#define SDW_SCP_SDCA_INT_SDCA_6 BIT(6)
+#define SDW_SCP_SDCA_INT_SDCA_7 BIT(7)
+
+#define SDW_SCP_SDCA_INT2 0x59
+#define SDW_SCP_SDCA_INT_SDCA_8 BIT(0)
+#define SDW_SCP_SDCA_INT_SDCA_9 BIT(1)
+#define SDW_SCP_SDCA_INT_SDCA_10 BIT(2)
+#define SDW_SCP_SDCA_INT_SDCA_11 BIT(3)
+#define SDW_SCP_SDCA_INT_SDCA_12 BIT(4)
+#define SDW_SCP_SDCA_INT_SDCA_13 BIT(5)
+#define SDW_SCP_SDCA_INT_SDCA_14 BIT(6)
+#define SDW_SCP_SDCA_INT_SDCA_15 BIT(7)
+
+#define SDW_SCP_SDCA_INT3 0x5A
+#define SDW_SCP_SDCA_INT_SDCA_16 BIT(0)
+#define SDW_SCP_SDCA_INT_SDCA_17 BIT(1)
+#define SDW_SCP_SDCA_INT_SDCA_18 BIT(2)
+#define SDW_SCP_SDCA_INT_SDCA_19 BIT(3)
+#define SDW_SCP_SDCA_INT_SDCA_20 BIT(4)
+#define SDW_SCP_SDCA_INT_SDCA_21 BIT(5)
+#define SDW_SCP_SDCA_INT_SDCA_22 BIT(6)
+#define SDW_SCP_SDCA_INT_SDCA_23 BIT(7)
+
+#define SDW_SCP_SDCA_INT4 0x5B
+#define SDW_SCP_SDCA_INT_SDCA_24 BIT(0)
+#define SDW_SCP_SDCA_INT_SDCA_25 BIT(1)
+#define SDW_SCP_SDCA_INT_SDCA_26 BIT(2)
+#define SDW_SCP_SDCA_INT_SDCA_27 BIT(3)
+#define SDW_SCP_SDCA_INT_SDCA_28 BIT(4)
+#define SDW_SCP_SDCA_INT_SDCA_29 BIT(5)
+#define SDW_SCP_SDCA_INT_SDCA_30 BIT(6)
+/* BIT(7) not allocated in SoundWire 1.2 specification */
+
+#define SDW_SCP_SDCA_INTMASK1 0x5C
+#define SDW_SCP_SDCA_INTMASK_SDCA_0 BIT(0)
+#define SDW_SCP_SDCA_INTMASK_SDCA_1 BIT(1)
+#define SDW_SCP_SDCA_INTMASK_SDCA_2 BIT(2)
+#define SDW_SCP_SDCA_INTMASK_SDCA_3 BIT(3)
+#define SDW_SCP_SDCA_INTMASK_SDCA_4 BIT(4)
+#define SDW_SCP_SDCA_INTMASK_SDCA_5 BIT(5)
+#define SDW_SCP_SDCA_INTMASK_SDCA_6 BIT(6)
+#define SDW_SCP_SDCA_INTMASK_SDCA_7 BIT(7)
+
+#define SDW_SCP_SDCA_INTMASK2 0x5D
+#define SDW_SCP_SDCA_INTMASK_SDCA_8 BIT(0)
+#define SDW_SCP_SDCA_INTMASK_SDCA_9 BIT(1)
+#define SDW_SCP_SDCA_INTMASK_SDCA_10 BIT(2)
+#define SDW_SCP_SDCA_INTMASK_SDCA_11 BIT(3)
+#define SDW_SCP_SDCA_INTMASK_SDCA_12 BIT(4)
+#define SDW_SCP_SDCA_INTMASK_SDCA_13 BIT(5)
+#define SDW_SCP_SDCA_INTMASK_SDCA_14 BIT(6)
+#define SDW_SCP_SDCA_INTMASK_SDCA_15 BIT(7)
+
+#define SDW_SCP_SDCA_INTMASK3 0x5E
+#define SDW_SCP_SDCA_INTMASK_SDCA_16 BIT(0)
+#define SDW_SCP_SDCA_INTMASK_SDCA_17 BIT(1)
+#define SDW_SCP_SDCA_INTMASK_SDCA_18 BIT(2)
+#define SDW_SCP_SDCA_INTMASK_SDCA_19 BIT(3)
+#define SDW_SCP_SDCA_INTMASK_SDCA_20 BIT(4)
+#define SDW_SCP_SDCA_INTMASK_SDCA_21 BIT(5)
+#define SDW_SCP_SDCA_INTMASK_SDCA_22 BIT(6)
+#define SDW_SCP_SDCA_INTMASK_SDCA_23 BIT(7)
+
+#define SDW_SCP_SDCA_INTMASK4 0x5F
+#define SDW_SCP_SDCA_INTMASK_SDCA_24 BIT(0)
+#define SDW_SCP_SDCA_INTMASK_SDCA_25 BIT(1)
+#define SDW_SCP_SDCA_INTMASK_SDCA_26 BIT(2)
+#define SDW_SCP_SDCA_INTMASK_SDCA_27 BIT(3)
+#define SDW_SCP_SDCA_INTMASK_SDCA_28 BIT(4)
+#define SDW_SCP_SDCA_INTMASK_SDCA_29 BIT(5)
+#define SDW_SCP_SDCA_INTMASK_SDCA_30 BIT(6)
+/* BIT(7) not allocated in SoundWire 1.2 specification */
+
/* Banked Registers */
#define SDW_SCP_FRAMECTRL_B0 0x60
#define SDW_SCP_FRAMECTRL_B1 (0x60 + SDW_BANK1_OFFSET)
#define SDW_SCP_NEXTFRAME_B0 0x61
#define SDW_SCP_NEXTFRAME_B1 (0x61 + SDW_BANK1_OFFSET)
+#define SDW_SCP_BUSCLOCK_SCALE_B0 0x62
+#define SDW_SCP_BUSCLOCK_SCALE_B1 (0x62 + SDW_BANK1_OFFSET)
+#define SDW_SCP_CLOCK_SCALE GENMASK(3, 0)
+
+/* PHY registers - CTRL and STAT are the same address */
+#define SDW_SCP_PHY_OUT_CTRL_0 0x80
+#define SDW_SCP_PHY_OUT_CTRL_1 0x81
+#define SDW_SCP_PHY_OUT_CTRL_2 0x82
+#define SDW_SCP_PHY_OUT_CTRL_3 0x83
+#define SDW_SCP_PHY_OUT_CTRL_4 0x84
+#define SDW_SCP_PHY_OUT_CTRL_5 0x85
+#define SDW_SCP_PHY_OUT_CTRL_6 0x86
+#define SDW_SCP_PHY_OUT_CTRL_7 0x87
+
+#define SDW_SCP_CAP_LOAD_CTRL GENMASK(2, 0)
+#define SDW_SCP_DRIVE_STRENGTH_CTRL GENMASK(5, 3)
+#define SDW_SCP_SLEW_TIME_CTRL GENMASK(7, 6)
+
/* Both INT and STATUS register is same */
#define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n))
#define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n))
diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h
new file mode 100644
index 000000000000..2d42641499a6
--- /dev/null
+++ b/include/linux/spi/altera.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header File for Altera SPI Driver.
+ */
+#ifndef __LINUX_SPI_ALTERA_H
+#define __LINUX_SPI_ALTERA_H
+
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/**
+ * struct altera_spi_platform_data - Platform data of the Altera SPI driver
+ * @mode_bits: Mode bits of SPI master.
+ * @num_chipselect: Number of chipselects.
+ * @bits_per_word_mask: bitmask of supported bits_per_word for transfers.
+ * @num_devices: Number of devices that shall be added when the driver
+ * is probed.
+ * @devices: The devices to add.
+ */
+struct altera_spi_platform_data {
+ u16 mode_bits;
+ u16 num_chipselect;
+ u32 bits_per_word_mask;
+ u16 num_devices;
+ struct spi_board_info *devices;
+};
+
+#endif /* __LINUX_SPI_ALTERA_H */
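As a rough illustration of how this platform data is filled in (the board info, mode bits and masks below are invented), a platform might describe one attached chip like so:

static struct spi_board_info foo_spi_devices[] = {
	{
		.modalias	= "spidev",
		.max_speed_hz	= 1000000,
		.chip_select	= 0,
	},
};

static struct altera_spi_platform_data foo_spi_pdata = {
	.mode_bits		= SPI_CS_HIGH,
	.num_chipselect		= 1,
	.bits_per_word_mask	= SPI_BPW_RANGE_MASK(1, 16),
	.num_devices		= ARRAY_SIZE(foo_spi_devices),
	.devices		= foo_spi_devices,
};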
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index af9ff2f0f1b2..159463cc659c 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -17,6 +17,7 @@
{ \
.buswidth = __buswidth, \
.opcode = __opcode, \
+ .nbytes = 1, \
}
#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \
@@ -69,11 +70,15 @@ enum spi_mem_data_dir {
/**
* struct spi_mem_op - describes a SPI memory operation
+ * @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is
+ * sent MSB-first.
* @cmd.buswidth: number of IO lines used to transmit the command
* @cmd.opcode: operation opcode
+ * @cmd.dtr: whether the command opcode should be sent in DTR mode or not
* @addr.nbytes: number of address bytes to send. Can be zero if the operation
* does not need to send an address
* @addr.buswidth: number of IO lines used to transmit the address cycles
+ * @addr.dtr: whether the address should be sent in DTR mode or not
* @addr.val: address value. This value is always sent MSB first on the bus.
* Note that only @addr.nbytes are taken into account in this
* address value, so users should make sure the value fits in the
@@ -81,7 +86,9 @@ enum spi_mem_data_dir {
* @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
* be zero if the operation does not require dummy bytes
* @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
+ * @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not
* @data.buswidth: number of IO lanes used to send/receive the data
+ * @data.dtr: whether the data should be sent in DTR mode or not
* @data.dir: direction of the transfer
* @data.nbytes: number of data bytes to send/receive. Can be zero if the
* operation does not involve transferring data
@@ -90,23 +97,28 @@ enum spi_mem_data_dir {
*/
struct spi_mem_op {
struct {
+ u8 nbytes;
u8 buswidth;
- u8 opcode;
+ u8 dtr : 1;
+ u16 opcode;
} cmd;
struct {
u8 nbytes;
u8 buswidth;
+ u8 dtr : 1;
u64 val;
} addr;
struct {
u8 nbytes;
u8 buswidth;
+ u8 dtr : 1;
} dummy;
struct {
u8 buswidth;
+ u8 dtr : 1;
enum spi_mem_data_dir dir;
unsigned int nbytes;
union {
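Putting the new cmd.nbytes and per-phase dtr bits together, a caller might describe an octal-DTR read roughly like this (the 2-byte opcode, dummy length, and the from/len/buf variables are placeholders):

struct spi_mem_op op = {
	.cmd   = { .nbytes = 2, .opcode = 0xee11, .buswidth = 8, .dtr = 1 },
	.addr  = { .nbytes = 4, .val = from, .buswidth = 8, .dtr = 1 },
	.dummy = { .nbytes = 16, .buswidth = 8, .dtr = 1 },
	.data  = { .dir = SPI_MEM_DATA_IN, .nbytes = len,
		   .buswidth = 8, .dtr = 1, .buf.in = buf },
};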
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index aac57b5b7c21..99380c0825db 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -329,6 +329,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* every chipselect is connected to a slave.
* @dma_alignment: SPI controller constraint on DMA buffers alignment.
* @mode_bits: flags understood by this controller driver
+ * @buswidth_override_bits: flags to override for this controller driver
* @bits_per_word_mask: A mask indicating which values of bits_per_word are
* supported by the driver. Bit n indicates that a bits_per_word n+1 is
* supported. If set, the SPI core will reject any transfer with an
@@ -358,8 +359,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
* @queued: whether this controller is providing an internal message queue
- * @kworker: thread struct for message pump
- * @kworker_task: pointer to task for message pump kworker thread
+ * @kworker: pointer to thread struct for message pump
* @pump_messages: work struct for scheduling work to the message pump
* @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
@@ -368,6 +368,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @cur_msg_prepared: spi_prepare_message was called for the currently
* in-flight message
* @cur_msg_mapped: message has been mapped for DMA
+ * @last_cs_enable: was enable true on the last call to set_cs.
+ * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
* @xfer_completion: used by core transfer_one_message()
* @busy: message pump is busy
* @running: message pump is running
@@ -447,6 +449,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* If the driver does not set this, the SPI core takes the snapshot as
* close to the driver hand-over as possible.
* @irq_flags: Interrupt enable state during PTP system timestamping
+ * @fallback: fallback to PIO if a DMA transfer returns failure with
+ * SPI_TRANS_FAIL_NO_START.
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -589,8 +593,7 @@ struct spi_controller {
* Over time we expect SPI drivers to be phased over to this API.
*/
bool queued;
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
+ struct kthread_worker *kworker;
struct kthread_work pump_messages;
spinlock_t queue_lock;
struct list_head queue;
@@ -602,6 +605,9 @@ struct spi_controller {
bool auto_runtime_pm;
bool cur_msg_prepared;
bool cur_msg_mapped;
+ bool last_cs_enable;
+ bool last_cs_mode_high;
+ bool fallback;
struct completion xfer_completion;
size_t max_dma_len;
@@ -841,12 +847,8 @@ extern void spi_res_release(struct spi_controller *ctlr,
* processed the word, i.e. the "pre" timestamp should be taken before
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
- * @timestamped_pre: Set by the SPI controller driver to denote it has acted
- * upon the @ptp_sts request. Not set when the SPI core has taken care of
- * the task. SPI device drivers are free to print a warning if this comes
- * back unset and they need the better resolution.
- * @timestamped_post: See above. The reason why both exist is that these
- * booleans are also used to keep state in the core SPI logic.
+ * @timestamped: true if the transfer has been timestamped
+ * @error: Error status logged by spi controller driver.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -940,6 +942,9 @@ struct spi_transfer {
bool timestamped;
struct list_head transfer_list;
+
+#define SPI_TRANS_FAIL_NO_START BIT(0)
+ u16 error;
};
/**
@@ -962,7 +967,7 @@ struct spi_transfer {
* each represented by a struct spi_transfer. The sequence is "atomic"
* in the sense that no other spi_message may use that SPI bus until that
* sequence completes. On some systems, many such sequences can execute as
- * as single programmed DMA transfer. On all systems, these messages are
+ * a single programmed DMA transfer. On all systems, these messages are
* queued, and might complete after transactions to other devices. Messages
* sent to a given spi_device are always executed in FIFO order.
*
@@ -1225,7 +1230,7 @@ extern int spi_bus_unlock(struct spi_controller *ctlr);
*
* For more specific semantics see spi_sync().
*
- * Return: Return: zero on success, else a negative error code.
+ * Return: zero on success, else a negative error code.
*/
static inline int
spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
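The new @error field and the controller's @fallback flag describe a DMA-to-PIO retry path: a controller driver that cannot start a transfer over DMA records SPI_TRANS_FAIL_NO_START instead of hard-failing the message, and the core is expected to retry it in PIO mode with @fallback set. A minimal sketch of the driver side under that assumption (the foo_* helpers are hypothetical):

static int foo_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Skip DMA entirely once the core has asked for the PIO fallback. */
	if (!ctlr->fallback && foo_can_dma(ctlr, spi, xfer)) {
		if (foo_dma_start(ctlr, xfer)) {
			/* Could not even start: let the core retry via PIO. */
			xfer->error |= SPI_TRANS_FAIL_NO_START;
			return -EAGAIN;
		}
		return 1;	/* transfer in flight, completion comes later */
	}

	return foo_pio_transfer(ctlr, xfer);
}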
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d3770b3f9d9a..f2f12d746dbd 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -56,6 +56,7 @@
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
+#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 6102e6bff3ae..b981caafe8bf 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -15,7 +15,7 @@
# include <linux/spinlock_types_up.h>
#endif
-#include <linux/lockdep.h>
+#include <linux/lockdep_types.h>
typedef struct raw_spinlock {
arch_spinlock_t raw_lock;
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index c28955132234..86f150c2a6b6 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_STRING_HELPERS_H_
#define _LINUX_STRING_HELPERS_H_
+#include <linux/ctype.h>
#include <linux/types.h>
struct file;
@@ -75,6 +76,20 @@ static inline int string_escape_str_any_np(const char *src, char *dst,
return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only);
}
+static inline void string_upper(char *dst, const char *src)
+{
+ do {
+ *dst++ = toupper(*src);
+ } while (*src++);
+}
+
+static inline void string_lower(char *dst, const char *src)
+{
+ do {
+ *dst++ = tolower(*src);
+ } while (*src++);
+}
+
char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
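string_upper() and string_lower() copy byte-for-byte, including the terminating NUL, so the destination must hold at least strlen(src) + 1 bytes. A small usage sketch (the buffer is illustrative):

static void foo_case_demo(void)
{
	char buf[16];			/* must hold strlen(src) + 1 bytes */

	string_upper(buf, "sparx5");	/* buf is now "SPARX5" */
	string_lower(buf, buf);		/* in-place also works: tolower()
					 * never turns a byte into NUL */
}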
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 320c672d84de..4af31bbc8802 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -124,4 +124,78 @@ rpcrdma_decode_buffer_size(u8 val)
return ((unsigned int)val + 1) << 10;
}
+/**
+ * xdr_encode_rdma_segment - Encode contents of an RDMA segment
+ * @p: Pointer into a send buffer
+ * @handle: The RDMA handle to encode
+ * @length: The RDMA length to encode
+ * @offset: The RDMA offset to encode
+ *
+ * Return value:
+ * Pointer to the XDR position that follows the encoded RDMA segment
+ */
+static inline __be32 *xdr_encode_rdma_segment(__be32 *p, u32 handle,
+ u32 length, u64 offset)
+{
+ *p++ = cpu_to_be32(handle);
+ *p++ = cpu_to_be32(length);
+ return xdr_encode_hyper(p, offset);
+}
+
+/**
+ * xdr_encode_read_segment - Encode contents of a Read segment
+ * @p: Pointer into a send buffer
+ * @position: The position to encode
+ * @handle: The RDMA handle to encode
+ * @length: The RDMA length to encode
+ * @offset: The RDMA offset to encode
+ *
+ * Return value:
+ * Pointer to the XDR position that follows the encoded Read segment
+ */
+static inline __be32 *xdr_encode_read_segment(__be32 *p, u32 position,
+ u32 handle, u32 length,
+ u64 offset)
+{
+ *p++ = cpu_to_be32(position);
+ return xdr_encode_rdma_segment(p, handle, length, offset);
+}
+
+/**
+ * xdr_decode_rdma_segment - Decode contents of an RDMA segment
+ * @p: Pointer to the undecoded RDMA segment
+ * @handle: Upon return, the RDMA handle
+ * @length: Upon return, the RDMA length
+ * @offset: Upon return, the RDMA offset
+ *
+ * Return value:
+ * Pointer to the XDR item that follows the RDMA segment
+ */
+static inline __be32 *xdr_decode_rdma_segment(__be32 *p, u32 *handle,
+ u32 *length, u64 *offset)
+{
+ *handle = be32_to_cpup(p++);
+ *length = be32_to_cpup(p++);
+ return xdr_decode_hyper(p, offset);
+}
+
+/**
+ * xdr_decode_read_segment - Decode contents of a Read segment
+ * @p: Pointer to the undecoded Read segment
+ * @position: Upon return, the segment's position
+ * @handle: Upon return, the RDMA handle
+ * @length: Upon return, the RDMA length
+ * @offset: Upon return, the RDMA offset
+ *
+ * Return value:
+ * Pointer to the XDR item that follows the Read segment
+ */
+static inline __be32 *xdr_decode_read_segment(__be32 *p, u32 *position,
+ u32 *handle, u32 *length,
+ u64 *offset)
+{
+ *position = be32_to_cpup(p++);
+ return xdr_decode_rdma_segment(p, handle, length, offset);
+}
+
#endif /* _LINUX_SUNRPC_RPC_RDMA_H */
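The four helpers are symmetric: a plain RDMA segment occupies four XDR words (handle, length, and a two-word offset), a Read segment adds one more word for the position, and every helper returns the advanced pointer. A hedged round-trip sketch using a local scratch buffer:

static void foo_segment_roundtrip(void)
{
	__be32 buf[4], *p;
	u32 handle, length;
	u64 offset;

	p = xdr_encode_rdma_segment(buf, 0x1234, 256, 0xdeadbeefULL);
	/* p == buf + 4: one word each for handle and length, two for offset */

	p = xdr_decode_rdma_segment(buf, &handle, &length, &offset);
	/* handle == 0x1234, length == 256, offset == 0xdeadbeef */
}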
diff --git a/include/linux/sunrpc/rpc_rdma_cid.h b/include/linux/sunrpc/rpc_rdma_cid.h
new file mode 100644
index 000000000000..be24ab2baa6a
--- /dev/null
+++ b/include/linux/sunrpc/rpc_rdma_cid.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ */
+
+#ifndef RPC_RDMA_CID_H
+#define RPC_RDMA_CID_H
+
+/*
+ * The rpc_rdma_cid struct records completion ID information. A
+ * completion ID matches an incoming Send or Receive completion
+ * to a Completion Queue and to a previous ib_post_*(). The ID
+ * can then be displayed in an error message or recorded in a
+ * trace record.
+ *
+ * This struct is shared between the server and client RPC/RDMA
+ * transport implementations.
+ */
+struct rpc_rdma_cid {
+ u32 ci_queue_id;
+ int ci_completion_id;
+};
+
+#endif /* RPC_RDMA_CID_H */
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 7ed82625dc0b..9dc3a3b88391 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -46,6 +46,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/rpc_rdma.h>
+#include <linux/sunrpc/rpc_rdma_cid.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
@@ -109,6 +110,8 @@ struct svcxprt_rdma {
struct work_struct sc_work;
struct llist_head sc_recv_ctxts;
+
+ atomic_t sc_completion_ids;
};
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
@@ -129,6 +132,7 @@ struct svc_rdma_recv_ctxt {
struct list_head rc_list;
struct ib_recv_wr rc_recv_wr;
struct ib_cqe rc_cqe;
+ struct rpc_rdma_cid rc_cid;
struct ib_sge rc_recv_sge;
void *rc_recv_buf;
struct xdr_buf rc_arg;
@@ -147,6 +151,8 @@ struct svc_rdma_recv_ctxt {
struct svc_rdma_send_ctxt {
struct list_head sc_list;
+ struct rpc_rdma_cid sc_cid;
+
struct ib_send_wr sc_send_wr;
struct ib_cqe sc_cqe;
struct xdr_buf sc_hdrbuf;
@@ -190,20 +196,21 @@ extern struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma);
extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
-extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr);
+extern int svc_rdma_send(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
const struct svc_rdma_recv_ctxt *rctxt,
struct xdr_buf *xdr);
+extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *sctxt,
+ struct svc_rdma_recv_ctxt *rctxt,
+ int status);
extern int svc_rdma_sendto(struct svc_rqst *);
extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
unsigned int length);
/* svc_rdma_transport.c */
-extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
-extern void svc_sq_reap(struct svcxprt_rdma *);
-extern void svc_rq_reap(struct svcxprt_rdma *);
-
extern struct svc_xprt_class svc_rdma_class;
#ifdef CONFIG_SUNRPC_BACKCHANNEL
extern struct svc_xprt_class svc_rdma_bc_class;
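The new sc_completion_ids counter above is the per-transport source for the rc_cid/sc_cid values: each context picks up a fresh completion ID when it is set up, so a flushed completion can later be matched back to its queue in an error message or trace record. A hedged sketch of such an initializer (the CQ-id source is an assumption, not part of this header):

static void foo_cid_init(struct svcxprt_rdma *rdma, struct ib_cq *cq,
			 struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = cq->res.id;	/* assumed: restrack id of the CQ */
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}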
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 22c207b2425f..5a6a81b7cd9f 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -475,6 +475,32 @@ xdr_stream_encode_uint32_array(struct xdr_stream *xdr,
}
/**
+ * xdr_item_is_absent - symbolically handle XDR discriminators
+ * @p: pointer to undecoded discriminator
+ *
+ * Return values:
+ * %true if the following XDR item is absent
+ * %false if the following XDR item is present
+ */
+static inline bool xdr_item_is_absent(const __be32 *p)
+{
+ return *p == xdr_zero;
+}
+
+/**
+ * xdr_item_is_present - symbolically handle XDR discriminators
+ * @p: pointer to undecoded discriminator
+ *
+ * Return values:
+ * %true if the following XDR item is present
+ * %false if the following XDR item is absent
+ */
+static inline bool xdr_item_is_present(const __be32 *p)
+{
+ return *p != xdr_zero;
+}
+
+/**
* xdr_stream_decode_u32 - Decode a 32-bit integer
* @xdr: pointer to xdr_stream
* @ptr: location to store integer
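xdr_item_is_absent()/xdr_item_is_present() simply compare a discriminator word against xdr_zero, so tests for XDR optional data read symbolically rather than as raw comparisons. A hedged sketch of a decoder using them (xdr_inline_decode() is the usual way to peek at the next word):

static int foo_decode_optional(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (!p)
		return -EIO;
	if (xdr_item_is_absent(p))
		return 0;	/* optional body omitted by the sender */

	/* ... decode the body that follows a "present" discriminator ... */
	return 1;
}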
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index e64bd8222f55..a603d48d2b2c 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -101,6 +101,7 @@ struct rpc_rqst {
* used in the softirq.
*/
unsigned long rq_majortimeo; /* major timeout alarm */
+ unsigned long rq_minortimeo; /* minor timeout alarm */
unsigned long rq_timeout; /* Current timeout value */
ktime_t rq_rtt; /* round-trip time */
unsigned int rq_retries; /* # of retries */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index b960098acfb0..cb9afad82a90 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -453,6 +453,8 @@ extern bool hibernation_available(void);
asmlinkage int swsusp_save(void);
extern struct pbe *restore_pblist;
int pfn_is_nosave(unsigned long pfn);
+
+int hibernate_quiet_exec(int (*func)(void *data), void *data);
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
@@ -464,6 +466,10 @@ static inline void hibernation_set_ops(const struct platform_hibernation_ops *op
static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
static inline bool hibernation_available(void) { return false; }
+
+static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
+ return -ENOTSUPP;
+}
#endif /* CONFIG_HIBERNATION */
#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
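hibernate_quiet_exec() runs a callback while the system is quiesced as if it were about to hibernate, and the !CONFIG_HIBERNATION stub keeps callers buildable by returning -ENOTSUPP. A hedged caller sketch:

static int foo_quiesced_work(void *data)
{
	/* Executed with tasks frozen and devices quiesced (assumed). */
	return 0;
}

static int foo_run_quiesced(void)
{
	int ret = hibernate_quiet_exec(foo_quiesced_work, NULL);

	if (ret == -ENOTSUPP)
		pr_warn("hibernation support not built in\n");
	return ret;
}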
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 5b3216ba39a9..661046994db4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -328,7 +328,6 @@ void workingset_update_node(struct xa_node *node);
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
-extern unsigned long nr_free_pagecache_pages(void);
/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
@@ -353,7 +352,7 @@ extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
-extern void lru_cache_add_active_or_unevictable(struct page *page,
+extern void lru_cache_add_inactive_or_unevictable(struct page *page,
struct vm_area_struct *vma);
/* linux/mm/vmscan.c */
@@ -372,7 +371,6 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
-extern unsigned long vm_total_pages;
extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
@@ -416,9 +414,14 @@ extern struct address_space *swapper_spaces[];
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
-extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
-extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
+extern void *get_shadow_from_swap_cache(swp_entry_t entry);
+extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
+ gfp_t gfp, void **shadowp);
+extern void __delete_from_swap_cache(struct page *page,
+ swp_entry_t entry, void *shadow);
extern void delete_from_swap_cache(struct page *);
+extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ unsigned long end);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
@@ -571,14 +574,19 @@ static inline int add_to_swap(struct page *page)
return 0;
}
+static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+ return NULL;
+}
+
static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, void **shadowp)
{
return -1;
}
static inline void __delete_from_swap_cache(struct page *page,
- swp_entry_t entry)
+ swp_entry_t entry, void *shadow)
{
}
@@ -586,6 +594,11 @@ static inline void delete_from_swap_cache(struct page *page)
{
}
+static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
+ unsigned long end)
+{
+}
+
static inline int page_swapcount(struct page *page)
{
return 0;
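The extra void * threads workingset shadow entries through the swap cache: adding a page can now hand back the shadow it displaced, and deletion records a replacement shadow. A hedged sketch of the add side on swap-in (the refault call is an assumed consumer and the gfp choice is illustrative):

static int foo_swapin_add(struct page *page, swp_entry_t entry)
{
	void *shadow = NULL;
	int err;

	err = add_to_swap_cache(page, entry, GFP_KERNEL, &shadow);
	if (!err && shadow)
		workingset_refault(page, shadow);	/* assumed consumer */
	return err;
}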
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b951a87da987..75ac7f8ae93c 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -47,7 +47,6 @@ struct stat64;
struct statfs;
struct statfs64;
struct statx;
-struct __sysctl_args;
struct sysinfo;
struct timespec;
struct __kernel_old_timeval;
@@ -263,7 +262,7 @@ static inline void addr_limit_user_check(void)
return;
#endif
- if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS),
+ if (CHECK_DATA_CORRUPTION(uaccess_kernel(),
"Invalid address limit on user-mode return"))
force_sig(SIGKILL);
@@ -444,6 +443,8 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
asmlinkage long sys_openat2(int dfd, const char __user *filename,
struct open_how *how, size_t size);
asmlinkage long sys_close(unsigned int fd);
+asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd,
+ unsigned int flags);
asmlinkage long sys_vhangup(void);
/* fs/pipe.c */
@@ -1115,7 +1116,6 @@ asmlinkage long sys_send(int, void __user *, size_t, unsigned);
asmlinkage long sys_bdflush(int func, long data);
asmlinkage long sys_oldumount(char __user *name);
asmlinkage long sys_uselib(const char __user *library);
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args);
asmlinkage long sys_sysfs(int option,
unsigned long arg1, unsigned long arg2);
asmlinkage long sys_fork(void);
@@ -1235,18 +1235,8 @@ asmlinkage long sys_ni_syscall(void);
* Instead, use one of the functions which work equivalently, such as
* the ksys_xyzyyz() functions prototyped below.
*/
-
-int ksys_umount(char __user *name, int flags);
-int ksys_dup(unsigned int fildes);
-int ksys_chroot(const char __user *filename);
ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count);
-int ksys_chdir(const char __user *filename);
-int ksys_fchmod(unsigned int fd, umode_t mode);
int ksys_fchown(unsigned int fd, uid_t user, gid_t group);
-int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
- unsigned int count);
-int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
-off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence);
ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count);
void ksys_sync(void);
int ksys_unshare(unsigned long unshare_flags);
@@ -1280,68 +1270,6 @@ int compat_ksys_ipc(u32 call, int first, int second,
* The following kernel syscall equivalents are just wrappers to fs-internal
* functions. Therefore, provide stubs to be inlined at the callsites.
*/
-extern long do_unlinkat(int dfd, struct filename *name);
-
-static inline long ksys_unlink(const char __user *pathname)
-{
- return do_unlinkat(AT_FDCWD, getname(pathname));
-}
-
-extern long do_rmdir(int dfd, const char __user *pathname);
-
-static inline long ksys_rmdir(const char __user *pathname)
-{
- return do_rmdir(AT_FDCWD, pathname);
-}
-
-extern long do_mkdirat(int dfd, const char __user *pathname, umode_t mode);
-
-static inline long ksys_mkdir(const char __user *pathname, umode_t mode)
-{
- return do_mkdirat(AT_FDCWD, pathname, mode);
-}
-
-extern long do_symlinkat(const char __user *oldname, int newdfd,
- const char __user *newname);
-
-static inline long ksys_symlink(const char __user *oldname,
- const char __user *newname)
-{
- return do_symlinkat(oldname, AT_FDCWD, newname);
-}
-
-extern long do_mknodat(int dfd, const char __user *filename, umode_t mode,
- unsigned int dev);
-
-static inline long ksys_mknod(const char __user *filename, umode_t mode,
- unsigned int dev)
-{
- return do_mknodat(AT_FDCWD, filename, mode, dev);
-}
-
-extern int do_linkat(int olddfd, const char __user *oldname, int newdfd,
- const char __user *newname, int flags);
-
-static inline long ksys_link(const char __user *oldname,
- const char __user *newname)
-{
- return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
-}
-
-extern int do_fchmodat(int dfd, const char __user *filename, umode_t mode);
-
-static inline int ksys_chmod(const char __user *filename, umode_t mode)
-{
- return do_fchmodat(AT_FDCWD, filename, mode);
-}
-
-long do_faccessat(int dfd, const char __user *filename, int mode, int flags);
-
-static inline long ksys_access(const char __user *filename, int mode)
-{
- return do_faccessat(AT_FDCWD, filename, mode, 0);
-}
-
extern int do_fchownat(int dfd, const char __user *filename, uid_t user,
gid_t group, int flag);
@@ -1377,17 +1305,6 @@ static inline int ksys_close(unsigned int fd)
return __close_fd(current->files, fd);
}
-extern long do_sys_open(int dfd, const char __user *filename, int flags,
- umode_t mode);
-
-static inline long ksys_open(const char __user *filename, int flags,
- umode_t mode)
-{
- if (force_o_largefile())
- flags |= O_LARGEFILE;
- return do_sys_open(AT_FDCWD, filename, flags, mode);
-}
-
extern long do_sys_truncate(const char __user *pathname, loff_t length);
static inline long ksys_truncate(const char __user *pathname, loff_t length)
@@ -1424,4 +1341,8 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
const struct old_timespec32 __user *timeout);
+int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
+ int __user *optlen);
+int __sys_setsockopt(int fd, int level, int optname, char __user *optval,
+ int optlen);
#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 50bb7f383a1b..51298a4f4623 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -74,15 +74,13 @@ int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
* sysctl names can be mirrored automatically under /proc/sys. The
* procname supplied controls /proc naming.
*
- * The table's mode will be honoured both for sys_sysctl(2) and
- * proc-fs access.
+ * The table's mode will be honoured for proc-fs access.
*
* Leaf nodes in the sysctl tree will be represented by a single file
* under /proc; non-leaf nodes will be represented by directories. A
* null procname disables /proc mirroring at this node.
*
- * sysctl(2) can automatically manage read and write requests through
- * the sysctl table. The data and maxlen fields of the ctl_table
+ * The data and maxlen fields of the ctl_table
* struct enable minimal validation of the values being written to be
* performed, and the mode field allows minimal authentication.
*
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 86067dbe7745..34e84122f635 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -123,6 +123,13 @@ struct attribute_group {
.show = _name##_show, \
}
+#define __ATTR_RW_MODE(_name, _mode) { \
+ .attr = { .name = __stringify(_name), \
+ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
+ .show = _name##_show, \
+ .store = _name##_store, \
+}
+
#define __ATTR_WO(_name) { \
.attr = { .name = __stringify(_name), .mode = 0200 }, \
.store = _name##_store, \
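__ATTR_RW_MODE() sits between __ATTR_RW() (fixed 0644) and open-coding the initializer: it still wires up _name##_show and _name##_store but lets the caller choose the permissions. A typical use (names are illustrative):

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return count;
}

/* Readable and writable by root only. */
static struct device_attribute dev_attr_foo = __ATTR_RW_MODE(foo, 0600);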
diff --git a/include/linux/tboot.h b/include/linux/tboot.h
index c7e424766360..5146d2574e85 100644
--- a/include/linux/tboot.h
+++ b/include/linux/tboot.h
@@ -44,7 +44,7 @@ struct tboot_acpi_generic_address {
/*
* combines Sx info from FADT and FACS tables per ACPI 2.0+ spec
- * (http://www.acpi.info/)
+ * (https://uefi.org/specifications)
*/
struct tboot_acpi_sleep_info {
struct tboot_acpi_generic_address pm1a_cnt_blk;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a1bbaa1c1a3a..14b62d7df942 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -484,7 +484,8 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
tp->saved_syn = NULL;
}
-struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
+struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
+ const struct sk_buff *orig_skb);
static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
@@ -501,6 +502,7 @@ int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
void tcp_sock_set_cork(struct sock *sk, bool on);
int tcp_sock_set_keepcnt(struct sock *sk, int val);
+int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
int tcp_sock_set_keepidle(struct sock *sk, int val);
int tcp_sock_set_keepintvl(struct sock *sk, int val);
void tcp_sock_set_nodelay(struct sock *sk);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 216185bb3014..42ef807e5d84 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -37,18 +37,6 @@ struct thermal_cooling_device;
struct thermal_instance;
struct thermal_attr;
-enum thermal_device_mode {
- THERMAL_DEVICE_DISABLED = 0,
- THERMAL_DEVICE_ENABLED,
-};
-
-enum thermal_trip_type {
- THERMAL_TRIP_ACTIVE = 0,
- THERMAL_TRIP_PASSIVE,
- THERMAL_TRIP_HOT,
- THERMAL_TRIP_CRITICAL,
-};
-
enum thermal_trend {
THERMAL_TREND_STABLE, /* temperature is stable */
THERMAL_TREND_RAISING, /* temperature is raising */
@@ -76,9 +64,7 @@ struct thermal_zone_device_ops {
struct thermal_cooling_device *);
int (*get_temp) (struct thermal_zone_device *, int *);
int (*set_trips) (struct thermal_zone_device *, int, int);
- int (*get_mode) (struct thermal_zone_device *,
- enum thermal_device_mode *);
- int (*set_mode) (struct thermal_zone_device *,
+ int (*change_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
int (*get_trip_type) (struct thermal_zone_device *, int,
enum thermal_trip_type *);
@@ -128,6 +114,7 @@ struct thermal_cooling_device {
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
+ * @mode: current mode of this thermal zone
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
* @trips_disabled: bitmap for disabled trips
@@ -170,6 +157,7 @@ struct thermal_zone_device {
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
struct thermal_attr *trip_hyst_attrs;
+ enum thermal_device_mode mode;
void *devdata;
int trips;
unsigned long trips_disabled; /* bitmap for disabled trips */
@@ -303,11 +291,6 @@ struct thermal_zone_params {
int offset;
};
-struct thermal_genl_event {
- u32 orig;
- enum events event;
-};
-
/**
* struct thermal_zone_of_device_ops - callbacks for handling DT based zones
*
@@ -416,6 +399,8 @@ int thermal_zone_get_offset(struct thermal_zone_device *tz);
void thermal_cdev_update(struct thermal_cooling_device *);
void thermal_notify_framework(struct thermal_zone_device *, int);
+int thermal_zone_device_enable(struct thermal_zone_device *tz);
+int thermal_zone_device_disable(struct thermal_zone_device *tz);
#else
static inline struct thermal_zone_device *thermal_zone_device_register(
const char *type, int trips, int mask, void *devdata,
@@ -463,6 +448,12 @@ static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
static inline void thermal_notify_framework(struct thermal_zone_device *tz,
int trip)
{ }
+
+static inline int thermal_zone_device_enable(struct thermal_zone_device *tz)
+{ return -ENODEV; }
+
+static inline int thermal_zone_device_disable(struct thermal_zone_device *tz)
+{ return -ENODEV; }
#endif /* CONFIG_THERMAL */
#endif /* __THERMAL_H__ */
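With get_mode/set_mode removed from the ops and @mode tracked by the core, drivers switch a zone on and off through the new helpers rather than flipping a private flag. A hedged registration sketch (foo_tz_ops and the delay values are illustrative):

static struct thermal_zone_device *foo_register_zone(void *priv)
{
	struct thermal_zone_device *tzd;
	int ret;

	tzd = thermal_zone_device_register("foo_thermal", 0, 0, priv,
					   &foo_tz_ops, NULL, 0, 1000);
	if (IS_ERR(tzd))
		return tzd;

	ret = thermal_zone_device_enable(tzd);	/* replaces ->set_mode(ENABLED) */
	if (ret) {
		thermal_zone_device_unregister(tzd);
		return ERR_PTR(ret);
	}
	return tzd;
}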
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index ff397c0d5c07..5db2b11ab085 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -504,8 +504,6 @@ struct tb_ring {
#define RING_FLAG_NO_SUSPEND BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME BIT(1)
-/* Enable end-to-end flow control */
-#define RING_FLAG_E2E BIT(2)
struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
diff --git a/include/linux/time.h b/include/linux/time.h
index 4c325bf44ce0..b142cb5f5a53 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -3,7 +3,6 @@
#define _LINUX_TIME_H
# include <linux/cache.h>
-# include <linux/seqlock.h>
# include <linux/math64.h>
# include <linux/time64.h>
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
index 824d54e057eb..5b6031385db0 100644
--- a/include/linux/time_namespace.h
+++ b/include/linux/time_namespace.h
@@ -33,6 +33,7 @@ extern struct time_namespace init_time_ns;
#ifdef CONFIG_TIME_NS
extern int vdso_join_timens(struct task_struct *task,
struct time_namespace *ns);
+extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns);
static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
@@ -96,6 +97,11 @@ static inline int vdso_join_timens(struct task_struct *task,
return 0;
}
+static inline void timens_commit(struct task_struct *tsk,
+ struct time_namespace *ns)
+{
+}
+
static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
return NULL;
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 629b66e6c161..7f65bd1dd307 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -55,6 +55,11 @@ struct torture_random_state {
#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
DEFINE_PER_CPU(struct torture_random_state, name)
unsigned long torture_random(struct torture_random_state *trsp);
+static inline void torture_random_init(struct torture_random_state *trsp)
+{
+ trsp->trs_state = 0;
+ trsp->trs_count = 0;
+}
/* Task shuffler, which causes CPUs to occasionally go idle. */
void torture_shuffle_task_register(struct task_struct *tp);
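torture_random_init() gives torture kthreads a way to put their per-thread RNG state back into a known (zeroed) condition before use. A hedged sketch of a kthread loop using it:

static int foo_torture_kthread(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	torture_random_init(&rand);		/* start from a known state */
	while (!torture_must_stop()) {
		if (torture_random(&rand) & 0x1)
			schedule_timeout_interruptible(1);
	}
	torture_kthread_stopping("foo_torture_kthread");
	return 0;
}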
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 03e9b184411b..8f4ff39f51e7 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -96,6 +96,7 @@ struct tpm_space {
u8 *context_buf;
u32 session_tbl[3];
u8 *session_buf;
+ u32 buf_size;
};
struct tpm_bios_log {
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 64356b199e94..739ba9a03ec1 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -211,9 +211,16 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
- /* Check if event is malformed. */
+ /*
+ * Perform validation of the event in order to identify malformed
+ * events. This function may be asked to parse arbitrary byte sequences
+ * immediately following a valid event log. The caller expects this
+ * function to recognize that the byte sequence is not a valid event
+ * and to return an event size of 0.
+ */
if (memcmp(efispecid->signature, TCG_SPECID_SIG,
- sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
+ sizeof(TCG_SPECID_SIG)) ||
+ !efispecid->num_algs || count != efispecid->num_algs) {
size = 0;
goto out;
}
diff --git a/include/linux/trace.h b/include/linux/trace.h
index 7fd86d3c691f..36d255d66f88 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -29,6 +29,7 @@ struct trace_array;
void trace_printk_init_buffers(void);
int trace_array_printk(struct trace_array *tr, unsigned long ip,
const char *fmt, ...);
+int trace_array_init_printk(struct trace_array *tr);
void trace_array_put(struct trace_array *tr);
struct trace_array *trace_array_get_by_name(const char *name);
int trace_array_destroy(struct trace_array *tr);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index a1fecf311621..598fec9f9dbf 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -116,8 +116,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define __TRACEPOINT_ENTRY(name) \
static tracepoint_ptr_t __tracepoint_ptr_##name __used \
- __attribute__((section("__tracepoints_ptrs"))) = \
- &__tracepoint_##name
+ __section(__tracepoints_ptrs) = &__tracepoint_##name
#endif
#endif /* _LINUX_TRACEPOINT_H */
@@ -280,9 +279,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
*/
#define DEFINE_TRACE_FN(name, reg, unreg) \
static const char __tpstrtab_##name[] \
- __attribute__((section("__tracepoints_strings"))) = #name; \
- struct tracepoint __tracepoint_##name \
- __attribute__((section("__tracepoints"), used)) = \
+ __section(__tracepoints_strings) = #name; \
+ struct tracepoint __tracepoint_##name __used \
+ __section(__tracepoints) = \
{ __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
__TRACEPOINT_ENTRY(name);
@@ -361,7 +360,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
static const char *___tp_str __tracepoint_string = str; \
___tp_str; \
})
-#define __tracepoint_string __attribute__((section("__tracepoint_str")))
+#define __tracepoint_string __used __section(__tracepoint_str)
#else
/*
* tracepoint_string() is used to save the string address for userspace
diff --git a/include/linux/types.h b/include/linux/types.h
index d3021c879179..a147977602b5 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -167,6 +167,8 @@ typedef struct {
int counter;
} atomic_t;
+#define ATOMIC_INIT(i) { (i) }
+
#ifdef CONFIG_64BIT
typedef struct {
s64 counter;
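With ATOMIC_INIT() living next to the atomic_t definition itself, a static initializer no longer needs anything beyond linux/types.h; the operations still come from linux/atomic.h. For example:

static atomic_t foo_refcount = ATOMIC_INIT(1);

static void foo_put(void)
{
	if (atomic_dec_and_test(&foo_refcount))
		pr_debug("last reference dropped\n");
}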
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 0a76ddc07d59..94b285411659 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -6,11 +6,27 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
-#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
-
#include <asm/uaccess.h>
/*
+ * Force the uaccess routines to be wired up for actual userspace access,
+ * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone
+ * using force_uaccess_end below.
+ */
+static inline mm_segment_t force_uaccess_begin(void)
+{
+ mm_segment_t fs = get_fs();
+
+ set_fs(USER_DS);
+ return fs;
+}
+
+static inline void force_uaccess_end(mm_segment_t oldfs)
+{
+ set_fs(oldfs);
+}
+
+/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
* __copy_{to,from}_user{,_inatomic}().
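force_uaccess_begin()/force_uaccess_end() bracket a userspace access in contexts where a stale set_fs(KERNEL_DS) might still be in effect, replacing the open-coded get_fs()/set_fs(USER_DS)/set_fs(oldfs) pattern. A minimal sketch:

static int foo_read_user_u32(u32 __user *uptr, u32 *val)
{
	mm_segment_t oldfs = force_uaccess_begin();
	int ret = 0;

	if (copy_from_user(val, uptr, sizeof(*val)))
		ret = -EFAULT;

	force_uaccess_end(oldfs);
	return ret;
}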
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 9576fd8158d7..3835a8a8e9ea 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/thread_info.h>
-#include <crypto/hash.h>
#include <uapi/linux/uio.h>
struct page;
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 0c08de356d0d..244aff638220 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -22,10 +22,8 @@ struct subprocess_info {
const char *path;
char **argv;
char **envp;
- struct file *file;
int wait;
int retval;
- pid_t pid;
int (*init)(struct subprocess_info *info, struct cred *new);
void (*cleanup)(struct subprocess_info *info);
void *data;
@@ -40,19 +38,6 @@ call_usermodehelper_setup(const char *path, char **argv, char **envp,
int (*init)(struct subprocess_info *info, struct cred *new),
void (*cleanup)(struct subprocess_info *), void *data);
-struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
- int (*init)(struct subprocess_info *info, struct cred *new),
- void (*cleanup)(struct subprocess_info *), void *data);
-struct umh_info {
- const char *cmdline;
- struct file *pipe_to_umh;
- struct file *pipe_from_umh;
- struct list_head list;
- void (*cleanup)(struct umh_info *info);
- pid_t pid;
-};
-int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
-
extern int
call_usermodehelper_exec(struct subprocess_info *info, int wait);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 9f3c721c70dc..20c555db4621 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -341,7 +341,7 @@ struct usb_interface_cache {
* @interface: array of pointers to usb_interface structures, one for each
* interface in the configuration. The number of interfaces is stored
* in desc.bNumInterfaces. These pointers are valid only while the
- * the configuration is active.
+ * configuration is active.
* @intf_cache: array of pointers to usb_interface_cache structures, one
* for each interface in the configuration. These structures exist
* for the entire life of the device.
@@ -422,7 +422,7 @@ struct usb_devmap {
* Allocated per bus (tree of devices) we have:
*/
struct usb_bus {
- struct device *controller; /* host/master side hardware */
+ struct device *controller; /* host side hardware */
struct device *sysdev; /* as seen from firmware or bus */
int busnum; /* Bus number (in order of reg) */
const char *bus_name; /* stable id (PCI slot_name etc) */
@@ -620,9 +620,9 @@ struct usb3_lpm_parameters {
* Management to be disabled for this usb_device. This count should only
* be manipulated by those functions, with the bandwidth_mutex held.
* @hub_delay: cached value consisting of:
- * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns)
- *
+ * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns)
* Will be used as wValue for SetIsochDelay requests.
+ * @use_generic_driver: ask driver core to reprobe using the generic driver.
*
* Notes:
* Usbcore drivers should not set usbdev->state directly. Instead use
@@ -1215,6 +1215,7 @@ struct usb_driver {
* struct usb_device_driver - identifies USB device driver to usbcore
* @name: The driver name should be unique among USB drivers,
* and should normally be the same as the module name.
+ * @match: If set, used for better device/driver matching.
* @probe: Called to see if the driver is willing to manage a particular
* device. If it is, probe returns zero and uses dev_set_drvdata()
* to associate driver-specific data with the device. If unwilling
@@ -1227,13 +1228,16 @@ struct usb_driver {
* @dev_groups: Attributes attached to the device that will be created once it
* is bound to the driver.
* @drvwrap: Driver-model core structure wrapper.
+ * @id_table: used with @match() to select better matching driver at
+ * probe() time.
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
* for devices bound to this driver.
* @generic_subclass: if set to 1, the generic USB driver's probe, disconnect,
* resume and suspend functions will be called in addition to the driver's
* own, so this part of the setup does not need to be replicated.
*
- * USB drivers must provide all the fields listed above except drvwrap.
+ * USB drivers must provide all the fields listed above except drvwrap,
+ * match, and id_table.
*/
struct usb_device_driver {
const char *name;
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 58b83066bea4..604c6c514a50 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -6,13 +6,13 @@
* Wireless USB 1.0 (spread around). Linux has several APIs in C that
* need these:
*
- * - the master/host side Linux-USB kernel driver API;
+ * - the host side Linux-USB kernel driver API;
* - the "usbfs" user space API; and
- * - the Linux "gadget" slave/device/peripheral side driver API.
+ * - the Linux "gadget" device/peripheral side driver API.
*
* USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems
- * act either as a USB master/host or as a USB slave/device. That means
- * the master and slave side APIs benefit from working well together.
+ * act either as a USB host or as a USB device. That means the host and
+ * device side APIs benefit from working well together.
*
* There's also "Wireless USB", using low power short range radios for
* peripheral interconnection but otherwise building on the USB framework.
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 54167a2d28ea..025b41687ce9 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -99,5 +99,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
struct ci_hdrc_platform_data *platdata);
/* Remove ci hdrc device */
void ci_hdrc_remove_device(struct platform_device *pdev);
+/* Get current available role */
+enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev);
#endif
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 6a178177e4c9..52ce1f6b8f83 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -4,7 +4,8 @@
*
* We call the USB code inside a Linux-based peripheral device a "gadget"
* driver, except for the hardware-specific bus glue. One USB host can
- * master many USB gadgets, but the gadgets are only slaved to one host.
+ * talk to many USB gadgets, but the gadgets are only able to communicate
+ * with one host.
*
*
* (C) Copyright 2002-2004 by David Brownell
@@ -328,7 +329,7 @@ struct usb_gadget_ops {
};
/**
- * struct usb_gadget - represents a usb slave device
+ * struct usb_gadget - represents a usb device
* @work: (internal use) Workqueue to be used for sysfs_notify()
* @udc: struct usb_udc pointer for this gadget
* @ops: Function pointers used to access hardware-specific operations.
@@ -602,7 +603,7 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
/*-------------------------------------------------------------------------*/
/**
- * struct usb_gadget_driver - driver for usb 'slave' devices
+ * struct usb_gadget_driver - driver for usb gadget devices
* @function: String describing the gadget's function
* @max_speed: Highest speed the driver handles.
* @setup: Invoked for ep0 control requests that aren't handled by
@@ -730,7 +731,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver);
* it will first disconnect(). The driver is also requested
* to unbind() and clean up any device state, before this procedure
* finally returns. It's expected that the unbind() functions
- * will in in exit sections, so may not be linked in some kernels.
+ * will be in exit sections, so may not be linked in some kernels.
*/
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver);
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index a665d7f21142..b6c233e79bd4 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -483,4 +483,5 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
#define PD_N_HARD_RESET_COUNT 2
+#define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */
#endif /* __LINUX_USB_PD_H */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index 35b8e15efaa0..68bdc4e2f5a9 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -249,7 +249,7 @@
* SVDM Discover SVIDs request -> response
*
* Request is properly formatted VDM Header with discover SVIDs command.
- * Response is a set of SVIDs of all all supported SVIDs with all zero's to
+ * Response is a set of SVIDs of all supported SVIDs with all zero's to
* mark the end of SVIDs. If more than 12 SVIDs are supported command SHOULD be
* repeated.
*/
diff --git a/include/linux/usb/phy_companion.h b/include/linux/usb/phy_companion.h
index 407f530061cd..263196f05015 100644
--- a/include/linux/usb/phy_companion.h
+++ b/include/linux/usb/phy_companion.h
@@ -2,7 +2,7 @@
/*
* phy-companion.h -- phy companion to indicate the comparator part of PHY
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 22c1f579afe3..5e4c497f54d6 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -69,7 +69,7 @@
/* Hub needs extra delay after resetting its port. */
#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
-/* device has blacklisted endpoints */
-#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
+/* device has endpoints that should be ignored */
+#define USB_QUIRK_ENDPOINT_IGNORE BIT(15)
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 14cac4a1ae8f..8e67eff9039f 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -17,7 +17,6 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/serial.h>
-#include <linux/sysrq.h>
#include <linux/kfifo.h>
/* The maximum number of ports one device can grab at once */
@@ -213,7 +212,7 @@ struct usb_serial_endpoints {
* Return 0 to continue on with the initialization sequence. Anything
* else will abort it.
* @attach: pointer to the driver's attach function.
- * This will be called when the struct usb_serial structure is fully set
+ * This will be called when the struct usb_serial structure is fully
* set up. Do any local initialization of the device, or any private
* memory structure allocation at this point in time.
* @disconnect: pointer to the driver's disconnect function. This will be
@@ -316,19 +315,19 @@ struct usb_serial_driver {
#define to_usb_serial_driver(d) \
container_of(d, struct usb_serial_driver, driver)
-extern int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
+int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
const char *name, const struct usb_device_id *id_table);
-extern void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]);
-extern void usb_serial_port_softint(struct usb_serial_port *port);
+void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]);
+void usb_serial_port_softint(struct usb_serial_port *port);
-extern int usb_serial_suspend(struct usb_interface *intf, pm_message_t message);
-extern int usb_serial_resume(struct usb_interface *intf);
+int usb_serial_suspend(struct usb_interface *intf, pm_message_t message);
+int usb_serial_resume(struct usb_interface *intf);
/* USB Serial console functions */
#ifdef CONFIG_USB_SERIAL_CONSOLE
-extern void usb_serial_console_init(int minor);
-extern void usb_serial_console_exit(void);
-extern void usb_serial_console_disconnect(struct usb_serial *serial);
+void usb_serial_console_init(int minor);
+void usb_serial_console_exit(void);
+void usb_serial_console_disconnect(struct usb_serial *serial);
#else
static inline void usb_serial_console_init(int minor) { }
static inline void usb_serial_console_exit(void) { }
@@ -336,45 +335,49 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {}
#endif
/* Functions needed by other parts of the usbserial core */
-extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor);
-extern void usb_serial_put(struct usb_serial *serial);
-extern int usb_serial_generic_open(struct tty_struct *tty,
- struct usb_serial_port *port);
-extern int usb_serial_generic_write_start(struct usb_serial_port *port,
- gfp_t mem_flags);
-extern int usb_serial_generic_write(struct tty_struct *tty,
- struct usb_serial_port *port, const unsigned char *buf, int count);
-extern void usb_serial_generic_close(struct usb_serial_port *port);
-extern int usb_serial_generic_resume(struct usb_serial *serial);
-extern int usb_serial_generic_write_room(struct tty_struct *tty);
-extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
-extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty,
- long timeout);
-extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
-extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
-extern void usb_serial_generic_throttle(struct tty_struct *tty);
-extern void usb_serial_generic_unthrottle(struct tty_struct *tty);
-extern int usb_serial_generic_tiocmiwait(struct tty_struct *tty,
- unsigned long arg);
-extern int usb_serial_generic_get_icount(struct tty_struct *tty,
- struct serial_icounter_struct *icount);
-extern int usb_serial_generic_register(void);
-extern void usb_serial_generic_deregister(void);
-extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port,
- gfp_t mem_flags);
-extern void usb_serial_generic_process_read_urb(struct urb *urb);
-extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size);
-extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
- unsigned int ch);
-extern int usb_serial_handle_break(struct usb_serial_port *port);
-extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
- struct tty_struct *tty,
- unsigned int status);
+struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor);
+void usb_serial_put(struct usb_serial *serial);
+int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port);
+int usb_serial_generic_write_start(struct usb_serial_port *port, gfp_t mem_flags);
+int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+void usb_serial_generic_close(struct usb_serial_port *port);
+int usb_serial_generic_resume(struct usb_serial *serial);
+int usb_serial_generic_write_room(struct tty_struct *tty);
+int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
+void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout);
+void usb_serial_generic_read_bulk_callback(struct urb *urb);
+void usb_serial_generic_write_bulk_callback(struct urb *urb);
+void usb_serial_generic_throttle(struct tty_struct *tty);
+void usb_serial_generic_unthrottle(struct tty_struct *tty);
+int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg);
+int usb_serial_generic_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount);
+int usb_serial_generic_register(void);
+void usb_serial_generic_deregister(void);
+int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, gfp_t mem_flags);
+void usb_serial_generic_process_read_urb(struct urb *urb);
+int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size);
+
+#if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch);
+int usb_serial_handle_break(struct usb_serial_port *port);
+#else
+static inline int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
+{
+ return 0;
+}
+static inline int usb_serial_handle_break(struct usb_serial_port *port)
+{
+ return 0;
+}
+#endif
+
+void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+ struct tty_struct *tty, unsigned int status);
-extern int usb_serial_bus_register(struct usb_serial_driver *device);
-extern void usb_serial_bus_deregister(struct usb_serial_driver *device);
+int usb_serial_bus_register(struct usb_serial_driver *device);
+void usb_serial_bus_deregister(struct usb_serial_driver *device);
extern struct bus_type usb_serial_bus_type;
extern struct tty_driver *usb_serial_tty_driver;
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index e7979c01c351..89f58760cf48 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -79,6 +79,7 @@ enum tcpm_transmit_type {
* @try_role: Optional; called to set a preferred role
* @pd_transmit:Called to transmit PD message
* @mux: Pointer to multiplexer data
+ * @set_bist_data: Turn on/off bist data mode for compliance testing
*/
struct tcpc_dev {
struct fwnode_handle *fwnode;
@@ -103,6 +104,7 @@ struct tcpc_dev {
int (*try_role)(struct tcpc_dev *dev, int role);
int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
const struct pd_message *msg);
+ int (*set_bist_data)(struct tcpc_dev *dev, bool on);
};
struct tcpm_port;
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 5daa1c49761c..9cb1bec94b71 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -73,6 +73,20 @@ enum typec_orientation {
};
/*
+ * struct enter_usb_data - Enter_USB Message details
+ * @eudo: Enter_USB Data Object
+ * @active_link_training: Active Cable Plug Link Training
+ *
+ * @active_link_training is a flag that should be set with uni-directional SBRX
+ * communication, and left 0 with passive cables and with bi-directional SBRX
+ * communication.
+ */
+struct enter_usb_data {
+ u32 eudo;
+ unsigned char active_link_training:1;
+};
+
+/*
* struct usb_pd_identity - USB Power Delivery identity data
* @id_header: ID Header VDO
* @cert_stat: Cert Stat VDO
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index d834e236c6df..a4b65eaa0f62 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -95,13 +95,7 @@ enum {
*
* Port drivers can use TYPEC_MODE_AUDIO and TYPEC_MODE_DEBUG as the mode
* value for typec_set_mode() when accessory modes are supported.
- */
-enum {
- TYPEC_MODE_AUDIO = TYPEC_STATE_MODAL, /* Audio Accessory */
- TYPEC_MODE_DEBUG, /* Debug Accessory */
-};
-
-/*
+ *
* USB4 also requires that the pins on the connector are repurposed, just like
* Alternate Modes. USB4 mode is however not entered with the Enter Mode Command
* like the Alternate Modes are, but instead with a special Enter_USB Message.
@@ -112,9 +106,11 @@ enum {
* state values, just like the Accessory Modes.
*/
enum {
- TYPEC_MODE_USB2 = TYPEC_MODE_DEBUG, /* USB 2.0 mode */
+ TYPEC_MODE_USB2 = TYPEC_STATE_MODAL, /* USB 2.0 mode */
TYPEC_MODE_USB3, /* USB 3.2 mode */
- TYPEC_MODE_USB4 /* USB4 mode */
+ TYPEC_MODE_USB4, /* USB4 mode */
+ TYPEC_MODE_AUDIO, /* Audio Accessory */
+ TYPEC_MODE_DEBUG, /* Debug Accessory */
};
#define TYPEC_MODAL_STATE(_state_) ((_state_) + TYPEC_STATE_MODAL)
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index b0bff3083278..2e4f7721fc4e 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -207,6 +207,7 @@ struct cdc_state {
struct usb_interface *data;
};
+extern void usbnet_cdc_update_filter(struct usbnet *dev);
extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf);
extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
@@ -273,6 +274,7 @@ extern int usbnet_set_link_ksettings(struct net_device *net,
extern u32 usbnet_get_link(struct net_device *net);
extern u32 usbnet_get_msglevel(struct net_device *);
extern void usbnet_set_msglevel(struct net_device *, u32);
+extern void usbnet_set_rx_mode(struct net_device *net);
extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
extern int usbnet_nway_reset(struct net_device *net);
diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h
new file mode 100644
index 000000000000..073a9e0ec07d
--- /dev/null
+++ b/include/linux/usermode_driver.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_USERMODE_DRIVER_H__
+#define __LINUX_USERMODE_DRIVER_H__
+
+#include <linux/umh.h>
+#include <linux/path.h>
+
+struct umd_info {
+ const char *driver_name;
+ struct file *pipe_to_umh;
+ struct file *pipe_from_umh;
+ struct path wd;
+ struct pid *tgid;
+};
+int umd_load_blob(struct umd_info *info, const void *data, size_t len);
+int umd_unload_blob(struct umd_info *info);
+int fork_usermode_driver(struct umd_info *info);
+
+#endif /* __LINUX_USERMODE_DRIVER_H__ */
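Compared with the removed fork_usermode_blob()/umh_info pair (see the umh.h hunk above), the new interface splits loading the embedded executable from starting it. A hedged caller sketch; the blob start/end symbols are hypothetical linker-provided markers:

extern char foo_umd_start[], foo_umd_end[];	/* hypothetical blob bounds */

static struct umd_info foo_umd = {
	.driver_name	= "foo_umd",
};

static int foo_start_umd(void)
{
	int err;

	err = umd_load_blob(&foo_umd, foo_umd_start,
			    foo_umd_end - foo_umd_start);
	if (err)
		return err;

	err = fork_usermode_driver(&foo_umd);
	if (err)
		umd_unload_blob(&foo_umd);
	/* On success, talk to the helper via pipe_to_umh/pipe_from_umh. */
	return err;
}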
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index d41b0d3e9474..8cdc0d3567cd 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -98,8 +98,6 @@ int guid_parse(const char *uuid, guid_t *u);
int uuid_parse(const char *uuid, uuid_t *u);
/* backwards compatibility, don't use in new code */
-#define uuid_le_to_bin(guid, u) guid_parse(guid, u)
-
static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
{
return memcmp(&u1, &u2, sizeof(guid_t));
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index ff56c443180c..db8a7d118093 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -16,6 +16,7 @@ struct vbg_dev;
__printf(1, 2) void vbg_info(const char *fmt, ...);
__printf(1, 2) void vbg_warn(const char *fmt, ...);
__printf(1, 2) void vbg_err(const char *fmt, ...);
+__printf(1, 2) void vbg_err_ratelimited(const char *fmt, ...);
/* Only use backdoor logging for non-dynamic debug builds */
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 239db794357c..eae0bfd87d91 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -28,17 +28,28 @@ struct vdpa_notification_area {
};
/**
+ * vDPA vq_state definition
+ * @avail_index: available index
+ */
+struct vdpa_vq_state {
+ u16 avail_index;
+};
+
+/**
* vDPA device - representation of a vDPA device
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
* @config: the configuration ops for this device.
* @index: device index
+ * @features_valid: were features initialized? for legacy guests
*/
struct vdpa_device {
struct device dev;
struct device *dma_dev;
const struct vdpa_config_ops *config;
unsigned int index;
+ bool features_valid;
+ int nvqs;
};
/**
@@ -77,16 +88,22 @@ struct vdpa_device {
* @set_vq_state: Set the state for a virtqueue
* @vdev: vdpa device
* @idx: virtqueue index
- * @state: virtqueue state (last_avail_idx)
+ * @state: pointer to set virtqueue state (last_avail_idx)
* Returns integer: success (0) or error (< 0)
* @get_vq_state: Get the state for a virtqueue
* @vdev: vdpa device
* @idx: virtqueue index
- * Returns virtqueue state (last_avail_idx)
+ * @state: pointer to returned state (last_avail_idx)
* @get_vq_notification: Get the notification area for a virtqueue
* @vdev: vdpa device
* @idx: virtqueue index
* Returns the notification area
+ * @get_vq_irq: Get the irq number of a virtqueue (optional,
+ * but must be implemented if vq irq offloading is required)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns int: irq number of a virtqueue,
+ * negative number if no irq assigned.
* @get_vq_align: Get the virtqueue align requirement
* for the device
* @vdev: vdpa device
@@ -174,10 +191,14 @@ struct vdpa_config_ops {
struct vdpa_callback *cb);
void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready);
bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
- int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state);
- u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
+ int (*set_vq_state)(struct vdpa_device *vdev, u16 idx,
+ const struct vdpa_vq_state *state);
+ int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
+ struct vdpa_vq_state *state);
struct vdpa_notification_area
(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
+ /* vq irq is not expected to be changed once DRIVER_OK is set */
+ int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx);
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
@@ -208,11 +229,12 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ int nvqs,
size_t size);
-#define vdpa_alloc_device(dev_struct, member, parent, config) \
+#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \
container_of(__vdpa_alloc_device( \
- parent, config, \
+ parent, config, nvqs, \
sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
dev_struct, member))), \
@@ -266,4 +288,36 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
{
return vdev->dma_dev;
}
+
+static inline void vdpa_reset(struct vdpa_device *vdev)
+{
+ const struct vdpa_config_ops *ops = vdev->config;
+
+ vdev->features_valid = false;
+ ops->set_status(vdev, 0);
+}
+
+static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
+{
+ const struct vdpa_config_ops *ops = vdev->config;
+
+ vdev->features_valid = true;
+ return ops->set_features(vdev, features);
+}
+
+
+static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset,
+ void *buf, unsigned int len)
+{
+ const struct vdpa_config_ops *ops = vdev->config;
+
+ /*
+ * Config accesses aren't supposed to trigger before features are set.
+ * If it does happen we assume a legacy guest.
+ */
+ if (!vdev->features_valid)
+ vdpa_set_features(vdev, 0);
+ ops->get_config(vdev, offset, buf, len);
+}
+
#endif /* _LINUX_VDPA_H */
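vdpa_reset(), vdpa_set_features() and vdpa_get_config() are thin wrappers that keep @features_valid consistent with what the config ops observe, so bus drivers use them instead of calling ->config directly. A hedged sketch of a caller (the config size and feature choice are illustrative):

static int foo_setup_vdpa(struct vdpa_device *vdev)
{
	u8 cfg[8];		/* device-specific config bytes */

	vdpa_reset(vdev);	/* marks features invalid, clears status */

	if (vdpa_set_features(vdev, BIT_ULL(VIRTIO_F_VERSION_1)))
		return -EINVAL;

	/* Had set_features() been skipped, vdpa_get_config() would have
	 * forced features to 0 and treated the caller as a legacy guest. */
	vdpa_get_config(vdev, 0, cfg, sizeof(cfg));
	return 0;
}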
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 553b34c8b5f7..977caf96c8d2 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -110,12 +110,6 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
}
#if defined(CONFIG_VGA_ARB)
-extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
-#else
-static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
-#endif
-
-#if defined(CONFIG_VGA_ARB)
extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
#else
#define vga_put(pdev, rsrc)
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 16c0ed6c50a7..219037f4c08d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -57,6 +57,7 @@
#define __LINUX_VIDEODEV2_H
#include <linux/time.h> /* need struct timeval */
+#include <linux/kernel.h>
#include <uapi/linux/videodev2.h>
#endif /* __LINUX_VIDEODEV2_H */
diff --git a/include/linux/virtio_caif.h b/include/linux/virtio_caif.h
index 5d2d3124ca3d..ea722479510c 100644
--- a/include/linux/virtio_caif.h
+++ b/include/linux/virtio_caif.h
@@ -11,9 +11,9 @@
#include <linux/types.h>
struct virtio_caif_transf_config {
- u16 headroom;
- u16 tailroom;
- u32 mtu;
+ __virtio16 headroom;
+ __virtio16 tailroom;
+ __virtio32 mtu;
u8 reserved[4];
};
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index bb4cc4910750..8fe857e27ef3 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -6,6 +6,7 @@
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
+#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>
struct irq_affinity;
@@ -162,16 +163,16 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
}
/**
- * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * virtio_has_dma_quirk - determine whether this device has the DMA quirk
* @vdev: the device
*/
-static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
/*
* Note the reverse polarity of the quirk feature (compared to most
* other features), this is for compatibility with legacy systems.
*/
- return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+ return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}
static inline
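A minimal sketch of how a transport might consult the renamed helper (illustrative only; the in-tree vring code has additional special cases, and my_use_dma_api is a made-up name):

static bool my_use_dma_api(const struct virtio_device *vdev)
{
	/*
	 * Devices carrying the legacy quirk bypass the IOMMU/DMA API and
	 * are addressed by guest physical address directly.
	 */
	return !virtio_has_dma_quirk(vdev);
}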
@@ -287,70 +288,133 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
+#define virtio_to_cpu(vdev, x) \
+ _Generic((x), \
+ __u8: (x), \
+ __virtio16: virtio16_to_cpu((vdev), (x)), \
+ __virtio32: virtio32_to_cpu((vdev), (x)), \
+ __virtio64: virtio64_to_cpu((vdev), (x)) \
+ )
+
+#define cpu_to_virtio(vdev, x, m) \
+ _Generic((m), \
+ __u8: (x), \
+ __virtio16: cpu_to_virtio16((vdev), (x)), \
+ __virtio32: cpu_to_virtio32((vdev), (x)), \
+ __virtio64: cpu_to_virtio64((vdev), (x)) \
+ )
+
+#define __virtio_native_type(structname, member) \
+ typeof(virtio_to_cpu(NULL, ((structname*)0)->member))
+
/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr) \
do { \
+ typeof(((structname*)0)->member) virtio_cread_v; \
+ \
might_sleep(); \
- /* Must match the member's type, and be integer */ \
- if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
- (*ptr) = 1; \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
\
- switch (sizeof(*ptr)) { \
+ switch (sizeof(virtio_cread_v)) { \
case 1: \
- *(ptr) = virtio_cread8(vdev, \
- offsetof(structname, member)); \
- break; \
case 2: \
- *(ptr) = virtio_cread16(vdev, \
- offsetof(structname, member)); \
- break; \
case 4: \
- *(ptr) = virtio_cread32(vdev, \
- offsetof(structname, member)); \
- break; \
- case 8: \
- *(ptr) = virtio_cread64(vdev, \
- offsetof(structname, member)); \
+ vdev->config->get((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ sizeof(virtio_cread_v)); \
break; \
default: \
- BUG(); \
+ __virtio_cread_many((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ 1, \
+ sizeof(virtio_cread_v)); \
+ break; \
} \
+ *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \
} while(0)
/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr) \
do { \
+ typeof(((structname*)0)->member) virtio_cwrite_v = \
+ cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
+ \
+ might_sleep(); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
+ \
+ vdev->config->set((vdev), offsetof(structname, member), \
+ &virtio_cwrite_v, \
+ sizeof(virtio_cwrite_v)); \
+ } while(0)
+
+/*
+ * Nothing virtio-specific about these, but let's worry about generalizing
+ * these later.
+ */
+#define virtio_le_to_cpu(x) \
+ _Generic((x), \
+ __u8: (u8)(x), \
+ __le16: (u16)le16_to_cpu(x), \
+ __le32: (u32)le32_to_cpu(x), \
+ __le64: (u64)le64_to_cpu(x) \
+ )
+
+#define virtio_cpu_to_le(x, m) \
+ _Generic((m), \
+ __u8: (x), \
+ __le16: cpu_to_le16(x), \
+ __le32: cpu_to_le32(x), \
+ __le64: cpu_to_le64(x) \
+ )
+
+/* LE (e.g. modern) Config space accessors. */
+#define virtio_cread_le(vdev, structname, member, ptr) \
+ do { \
+ typeof(((structname*)0)->member) virtio_cread_v; \
+ \
might_sleep(); \
- /* Must match the member's type, and be integer */ \
- if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
- BUG_ON((*ptr) == 1); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
\
- switch (sizeof(*ptr)) { \
+ switch (sizeof(virtio_cread_v)) { \
case 1: \
- virtio_cwrite8(vdev, \
- offsetof(structname, member), \
- *(ptr)); \
- break; \
case 2: \
- virtio_cwrite16(vdev, \
- offsetof(structname, member), \
- *(ptr)); \
- break; \
case 4: \
- virtio_cwrite32(vdev, \
- offsetof(structname, member), \
- *(ptr)); \
- break; \
- case 8: \
- virtio_cwrite64(vdev, \
- offsetof(structname, member), \
- *(ptr)); \
+ vdev->config->get((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ sizeof(virtio_cread_v)); \
break; \
default: \
- BUG(); \
+ __virtio_cread_many((vdev), \
+ offsetof(structname, member), \
+ &virtio_cread_v, \
+ 1, \
+ sizeof(virtio_cread_v)); \
+ break; \
} \
+ *(ptr) = virtio_le_to_cpu(virtio_cread_v); \
+ } while(0)
+
+#define virtio_cwrite_le(vdev, structname, member, ptr) \
+ do { \
+ typeof(((structname*)0)->member) virtio_cwrite_v = \
+ virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
+ \
+ might_sleep(); \
+ /* Sanity check: must match the member's type */ \
+ typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
+ \
+ vdev->config->set((vdev), offsetof(structname, member), \
+ &virtio_cwrite_v, \
+ sizeof(virtio_cwrite_v)); \
} while(0)
+
/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
unsigned int offset,
@@ -399,53 +463,60 @@ static inline void virtio_cwrite8(struct virtio_device *vdev,
static inline u16 virtio_cread16(struct virtio_device *vdev,
unsigned int offset)
{
- u16 ret;
+ __virtio16 ret;
might_sleep();
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return virtio16_to_cpu(vdev, (__force __virtio16)ret);
+ return virtio16_to_cpu(vdev, ret);
}
static inline void virtio_cwrite16(struct virtio_device *vdev,
unsigned int offset, u16 val)
{
+ __virtio16 v;
+
might_sleep();
- val = (__force u16)cpu_to_virtio16(vdev, val);
- vdev->config->set(vdev, offset, &val, sizeof(val));
+ v = cpu_to_virtio16(vdev, val);
+ vdev->config->set(vdev, offset, &v, sizeof(v));
}
static inline u32 virtio_cread32(struct virtio_device *vdev,
unsigned int offset)
{
- u32 ret;
+ __virtio32 ret;
might_sleep();
vdev->config->get(vdev, offset, &ret, sizeof(ret));
- return virtio32_to_cpu(vdev, (__force __virtio32)ret);
+ return virtio32_to_cpu(vdev, ret);
}
static inline void virtio_cwrite32(struct virtio_device *vdev,
unsigned int offset, u32 val)
{
+ __virtio32 v;
+
might_sleep();
- val = (__force u32)cpu_to_virtio32(vdev, val);
- vdev->config->set(vdev, offset, &val, sizeof(val));
+ v = cpu_to_virtio32(vdev, val);
+ vdev->config->set(vdev, offset, &v, sizeof(v));
}
static inline u64 virtio_cread64(struct virtio_device *vdev,
unsigned int offset)
{
- u64 ret;
+ __virtio64 ret;
+
__virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
- return virtio64_to_cpu(vdev, (__force __virtio64)ret);
+ return virtio64_to_cpu(vdev, ret);
}
static inline void virtio_cwrite64(struct virtio_device *vdev,
unsigned int offset, u64 val)
{
+ __virtio64 v;
+
might_sleep();
- val = (__force u64)cpu_to_virtio64(vdev, val);
- vdev->config->set(vdev, offset, &val, sizeof(val));
+ v = cpu_to_virtio64(vdev, val);
+ vdev->config->set(vdev, offset, &v, sizeof(v));
}
/* Conditional config space accessors. */
@@ -459,4 +530,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev,
_r; \
})
+/* Conditional config space accessors. */
+#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \
+ ({ \
+ int _r = 0; \
+ if (!virtio_has_feature(vdev, fbit)) \
+ _r = -ENOENT; \
+ else \
+ virtio_cread_le((vdev), structname, member, ptr); \
+ _r; \
+ })
#endif /* _LINUX_VIRTIO_CONFIG_H */
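To see what the reworked accessors buy a driver, here is a hedged sketch (not from the patch; struct my_cfg, its members and the MY_F_CAPACITY feature bit are hypothetical). Fields declared with __virtio types go through the endianness-aware virtio_cread()/virtio_cwrite(), while always-little-endian fields use the new _le variants; _Generic picks the conversion from the member's type, so a mismatched pointer type now fails the typecheck at build time.

struct my_cfg {
	__virtio16 queue_pairs;	/* guest-endian on legacy devices */
	__le32 capacity;	/* always little-endian */
};

static void my_read_config(struct virtio_device *vdev)
{
	u16 qp;
	u32 cap = 0;

	virtio_cread(vdev, struct my_cfg, queue_pairs, &qp);

	/* hypothetical feature bit guarding the field */
	if (virtio_cread_le_feature(vdev, MY_F_CAPACITY, struct my_cfg,
				    capacity, &cap) < 0)
		cap = 0;

	pr_debug("queue_pairs=%u capacity=%u\n", qp, cap);
}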
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 3dc70adfe5f5..b485b13fa50b 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -46,16 +46,15 @@ static inline void virtio_wmb(bool weak_barriers)
dma_wmb();
}
-static inline void virtio_store_mb(bool weak_barriers,
- __virtio16 *p, __virtio16 v)
-{
- if (weak_barriers) {
- virt_store_mb(*p, v);
- } else {
- WRITE_ONCE(*p, v);
- mb();
- }
-}
+#define virtio_store_mb(weak_barriers, p, v) \
+do { \
+ if (weak_barriers) { \
+ virt_store_mb(*p, v); \
+ } else { \
+ WRITE_ONCE(*p, v); \
+ mb(); \
+ } \
+} while (0)
struct virtio_device;
struct virtqueue;
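Making virtio_store_mb() a macro removes the hard dependency on __virtio16, so callers can store other ring field types through it as well. A minimal usage sketch, with the caller supplying the weak_barriers flag (my_publish_idx is illustrative):

static inline void my_publish_idx(struct virtio_device *vdev,
				  bool weak_barriers,
				  __virtio16 *idx_ptr, u16 new_idx)
{
	/* store the index, then order it against later loads/stores */
	virtio_store_mb(weak_barriers, idx_ptr,
			cpu_to_virtio16(vdev, new_idx));
}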
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 24fc7c3ae7d6..2e6ca53b9bbd 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -56,6 +56,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#endif
#ifdef CONFIG_MIGRATION
PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
+ THP_MIGRATION_SUCCESS,
+ THP_MIGRATION_FAIL,
+ THP_MIGRATION_SPLIT,
#endif
#ifdef CONFIG_COMPACTION
COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index aa961088c551..91220ace31da 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -8,6 +8,7 @@
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
+#include <linux/mmdebug.h>
extern int sysctl_stat_interval;
@@ -192,7 +193,8 @@ static inline unsigned long global_zone_page_state(enum zone_stat_item item)
return x;
}
-static inline unsigned long global_node_page_state(enum node_stat_item item)
+static inline
+unsigned long global_node_page_state_pages(enum node_stat_item item)
{
long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
@@ -202,6 +204,13 @@ static inline unsigned long global_node_page_state(enum node_stat_item item)
return x;
}
+static inline unsigned long global_node_page_state(enum node_stat_item item)
+{
+ VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
+ return global_node_page_state_pages(item);
+}
+
static inline unsigned long zone_page_state(struct zone *zone,
enum zone_stat_item item)
{
@@ -242,9 +251,12 @@ extern unsigned long sum_zone_node_page_state(int node,
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item);
+extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
+ enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
+#define node_page_state_pages(node, item) global_node_page_state_pages(item)
#endif /* CONFIG_NUMA */
#ifdef CONFIG_SMP
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index fefb5292403b..be0afe6f379b 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -159,7 +159,7 @@ static inline bool vmci_handle_is_invalid(struct vmci_handle h)
*/
#define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
-static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
+static const struct vmci_handle __maybe_unused VMCI_ANON_SRC_HANDLE = {
.context = VMCI_ANON_SRC_CONTEXT_ID,
.resource = VMCI_ANON_SRC_RESOURCE_ID
};
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index abf5bccf906a..349e39c3ab60 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -74,8 +74,6 @@ int con_set_default_unimap(struct vc_data *vc);
void con_free_unimap(struct vc_data *vc);
int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
-#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \
- ((vc)->vc_toggle_meta ? 0x80 : 0)])
#else
static inline int con_set_trans_old(unsigned char __user *table)
{
@@ -124,7 +122,6 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
return 0;
}
-#define vc_translate(vc, c) (c)
#endif
/* vt.c */
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 1464ce6ffa31..9b19e6bb68b5 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -210,6 +210,8 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
+int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int);
+
/* devres register variant */
int devm_watchdog_register_device(struct device *dev, struct watchdog_device *);
diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h
index 4dd2c1cea6a9..cdae052bcdcd 100644
--- a/include/linux/wimax/debug.h
+++ b/include/linux/wimax/debug.h
@@ -184,8 +184,8 @@ do { \
/*
- * CPP sintatic sugar to generate A_B like symbol names when one of
- * the arguments is a a preprocessor #define.
+ * CPP syntactic sugar to generate A_B like symbol names when one of
+ * the arguments is a preprocessor #define.
*/
#define __D_PASTE__(varname, modulename) varname##_##modulename
#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h
index e497e621dbb7..3f496967b538 100644
--- a/include/linux/wkup_m3_ipc.h
+++ b/include/linux/wkup_m3_ipc.h
@@ -1,7 +1,7 @@
/*
* TI Wakeup M3 for AMx3 SoCs Power Management Routines
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach <d-gerlach@ti.com>
*
* This program is free software; you can redistribute it and/or
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index d7554252404c..850424e5d030 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -48,14 +48,6 @@ struct ww_acquire_ctx {
#endif
};
-struct ww_mutex {
- struct mutex base;
- struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- struct ww_class *ww_class;
-#endif
-};
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
, .ww_class = class
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index c5afaf8ca7a2..10b4dc2709f0 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -52,14 +52,18 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
+int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **);
int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
int __vfs_removexattr(struct dentry *, const char *);
+int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **);
int vfs_removexattr(struct dentry *, const char *);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
char **xattr_value, size_t size, gfp_t flags);
+int xattr_supported_namespace(struct inode *inode, const char *prefix);
+
static inline const char *xattr_prefix(const struct xattr_handler *handler)
{
return handler->prefix ?: handler->name;
diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h
index 52b073fea17f..df42511438d0 100644
--- a/include/linux/xxhash.h
+++ b/include/linux/xxhash.h
@@ -34,7 +34,7 @@
* ("BSD").
*
* You can contact the author at:
- * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
* - xxHash source repository: https://github.com/Cyan4973/xxHash
*/
diff --git a/include/linux/xz.h b/include/linux/xz.h
index 64cffa6ddfce..9884c8440188 100644
--- a/include/linux/xz.h
+++ b/include/linux/xz.h
@@ -2,7 +2,7 @@
* XZ decompressor
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
- * Igor Pavlov <http://7-zip.org/>
+ * Igor Pavlov <https://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
@@ -28,7 +28,7 @@
* enum xz_mode - Operation mode
*
* @XZ_SINGLE: Single-call mode. This uses less RAM than
- * than multi-call modes, because the LZMA2
+ * multi-call modes, because the LZMA2
* dictionary doesn't need to be allocated as
* part of the decoder state. All required data
* structures are allocated at initialization,
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index c757d848a758..78ede944c082 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -23,7 +23,7 @@
The data format used by the zlib library is described by RFCs (Request for
- Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt
+ Comments) 1950 to 1952 in the files https://www.ietf.org/rfc/rfc1950.txt
(zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
*/
diff --git a/include/media/cec.h b/include/media/cec.h
index 972bc8cd4384..c48b5f2e4b50 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -144,6 +144,60 @@ struct cec_adap_ops {
*/
#define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1)
+/**
+ * struct cec_adapter - cec adapter structure
+ * @owner: module owner
+ * @name: name of the CEC adapter
+ * @devnode: device node for the /dev/cecX device
+ * @lock: mutex controlling access to this structure
+ * @rc: remote control device
+ * @transmit_queue: queue of pending transmits
+ * @transmit_queue_sz: number of pending transmits
+ * @wait_queue: queue of transmits waiting for a reply
+ * @transmitting: CEC messages currently being transmitted
+ * @transmit_in_progress: true if a transmit is in progress
+ * @kthread_config: kthread used to configure a CEC adapter
+ * @config_completion: used to signal completion of the config kthread
+ * @kthread: main CEC processing thread
+ * @kthread_waitq: main CEC processing wait_queue
+ * @ops: cec adapter ops
+ * @priv: cec driver's private data
+ * @capabilities: cec adapter capabilities
+ * @available_log_addrs: maximum number of available logical addresses
+ * @phys_addr: the current physical address
+ * @needs_hpd: if true, then the HDMI HotPlug Detect pin must be high
+ * in order to transmit or receive CEC messages. This is usually a HW
+ * limitation.
+ * @is_configuring: the CEC adapter is configuring (i.e. claiming LAs)
+ * @is_configured: the CEC adapter is configured (i.e. has claimed LAs)
+ * @cec_pin_is_high: if true then the CEC pin is high. Only used with the
+ * CEC pin framework.
+ * @adap_controls_phys_addr: if true, then the CEC adapter controls the
+ * physical address, i.e. the CEC hardware can detect HPD changes and
+ * read the EDID and is not dependent on an external HDMI driver.
+ * Drivers that need this can set this field to true after the
+ * cec_allocate_adapter() call.
+ * @last_initiator: the initiator of the last transmitted message.
+ * @monitor_all_cnt: number of filehandles monitoring all msgs
+ * @monitor_pin_cnt: number of filehandles monitoring pin changes
+ * @follower_cnt: number of filehandles in follower mode
+ * @cec_follower: filehandle of the exclusive follower
+ * @cec_initiator: filehandle of the exclusive initiator
+ * @passthrough: if true, then the exclusive follower is in
+ * passthrough mode.
+ * @log_addrs: current logical addresses
+ * @conn_info: current connector info
+ * @tx_timeouts: number of transmit timeouts
+ * @notifier: CEC notifier
+ * @pin: CEC pin status struct
+ * @cec_dir: debugfs cec directory
+ * @status_file: debugfs cec status file
+ * @error_inj_file: debugfs cec error injection file
+ * @sequence: transmit sequence counter
+ * @input_phys: remote control input_phys name
+ *
+ * This structure represents a cec adapter.
+ */
struct cec_adapter {
struct module *owner;
char name[32];
@@ -162,7 +216,6 @@ struct cec_adapter {
struct task_struct *kthread;
wait_queue_head_t kthread_waitq;
- wait_queue_head_t waitq;
const struct cec_adap_ops *ops;
void *priv;
@@ -174,6 +227,7 @@ struct cec_adapter {
bool is_configuring;
bool is_configured;
bool cec_pin_is_high;
+ bool adap_controls_phys_addr;
u8 last_initiator;
u32 monitor_all_cnt;
u32 monitor_pin_cnt;
@@ -197,7 +251,6 @@ struct cec_adapter {
struct dentry *status_file;
struct dentry *error_inj_file;
- u16 phys_addrs[15];
u32 sequence;
char input_phys[32];
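A hedged sketch of the new adap_controls_phys_addr opt-in. The cec_allocate_adapter() call, CEC_CAP_DEFAULTS and my_cec_ops are assumed from the CEC core and are not shown in this hunk:

static int my_create_adapter(struct device *dev, void *priv)
{
	struct cec_adapter *adap;

	adap = cec_allocate_adapter(&my_cec_ops, priv, "my-cec",
				    CEC_CAP_DEFAULTS, 1);
	if (IS_ERR(adap))
		return PTR_ERR(adap);

	/* the hardware tracks HPD and reads the EDID itself */
	adap->adap_controls_phys_addr = true;

	/* ... cec_register_adapter(adap, dev), error handling, ... */
	return 0;
}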
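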
diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h
index 56d05a855140..6d2a93740130 100644
--- a/include/media/davinci/vpbe_display.h
+++ b/include/media/davinci/vpbe_display.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef VPBE_DISPLAY_H
#define VPBE_DISPLAY_H
diff --git a/include/media/drv-intf/soc_mediabus.h b/include/media/drv-intf/soc_mediabus.h
deleted file mode 100644
index 361f8852c9fc..000000000000
--- a/include/media/drv-intf/soc_mediabus.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SoC-camera Media Bus API extensions
- *
- * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- */
-
-#ifndef SOC_MEDIABUS_H
-#define SOC_MEDIABUS_H
-
-#include <linux/videodev2.h>
-#include <linux/v4l2-mediabus.h>
-
-/**
- * enum soc_mbus_packing - data packing types on the media-bus
- * @SOC_MBUS_PACKING_NONE: no packing, bit-for-bit transfer to RAM, one
- * sample represents one pixel
- * @SOC_MBUS_PACKING_2X8_PADHI: 16 bits transferred in 2 8-bit samples, in the
- * possibly incomplete byte high bits are padding
- * @SOC_MBUS_PACKING_2X8_PADLO: as above, but low bits are padding
- * @SOC_MBUS_PACKING_EXTEND16: sample width (e.g., 10 bits) has to be extended
- * to 16 bits
- * @SOC_MBUS_PACKING_VARIABLE: compressed formats with variable packing
- * @SOC_MBUS_PACKING_1_5X8: used for packed YUV 4:2:0 formats, where 4
- * pixels occupy 6 bytes in RAM
- * @SOC_MBUS_PACKING_EXTEND32: sample width (e.g., 24 bits) has to be extended
- * to 32 bits
- */
-enum soc_mbus_packing {
- SOC_MBUS_PACKING_NONE,
- SOC_MBUS_PACKING_2X8_PADHI,
- SOC_MBUS_PACKING_2X8_PADLO,
- SOC_MBUS_PACKING_EXTEND16,
- SOC_MBUS_PACKING_VARIABLE,
- SOC_MBUS_PACKING_1_5X8,
- SOC_MBUS_PACKING_EXTEND32,
-};
-
-/**
- * enum soc_mbus_order - sample order on the media bus
- * @SOC_MBUS_ORDER_LE: least significant sample first
- * @SOC_MBUS_ORDER_BE: most significant sample first
- */
-enum soc_mbus_order {
- SOC_MBUS_ORDER_LE,
- SOC_MBUS_ORDER_BE,
-};
-
-/**
- * enum soc_mbus_layout - planes layout in memory
- * @SOC_MBUS_LAYOUT_PACKED: color components packed
- * @SOC_MBUS_LAYOUT_PLANAR_2Y_U_V: YUV components stored in 3 planes (4:2:2)
- * @SOC_MBUS_LAYOUT_PLANAR_2Y_C: YUV components stored in a luma and a
- * chroma plane (C plane is half the size
- * of Y plane)
- * @SOC_MBUS_LAYOUT_PLANAR_Y_C: YUV components stored in a luma and a
- * chroma plane (C plane is the same size
- * as Y plane)
- */
-enum soc_mbus_layout {
- SOC_MBUS_LAYOUT_PACKED = 0,
- SOC_MBUS_LAYOUT_PLANAR_2Y_U_V,
- SOC_MBUS_LAYOUT_PLANAR_2Y_C,
- SOC_MBUS_LAYOUT_PLANAR_Y_C,
-};
-
-/**
- * struct soc_mbus_pixelfmt - Data format on the media bus
- * @fourcc: Fourcc code, that will be obtained if the data is
- * stored in memory in the following way:
- * @packing: Type of sample-packing, that has to be used
- * @order: Sample order when storing in memory
- * @bits_per_sample: How many bits the bridge has to sample
- */
-struct soc_mbus_pixelfmt {
- u32 fourcc;
- enum soc_mbus_packing packing;
- enum soc_mbus_order order;
- enum soc_mbus_layout layout;
- u8 bits_per_sample;
-};
-
-/**
- * struct soc_mbus_lookup - Lookup FOURCC IDs by mediabus codes for pass-through
- * @code: mediabus pixel-code
- * @fmt: pixel format description
- */
-struct soc_mbus_lookup {
- u32 code;
- struct soc_mbus_pixelfmt fmt;
-};
-
-const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc(
- u32 code,
- const struct soc_mbus_lookup *lookup,
- int n);
-const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
- u32 code);
-s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf);
-s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
- u32 bytes_per_line, u32 height);
-int soc_mbus_samples_per_pixel(const struct soc_mbus_pixelfmt *mf,
- unsigned int *numerator, unsigned int *denominator);
-unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
- unsigned int flags);
-
-#endif
diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
index 551325858de3..e547cbeee431 100644
--- a/include/media/dvbdev.h
+++ b/include/media/dvbdev.h
@@ -293,8 +293,8 @@ static inline void dvb_register_media_controller(struct dvb_adapter *adap,
*
* @adap: pointer to &struct dvb_adapter
*/
-static inline struct media_device
-*dvb_get_media_controller(struct dvb_adapter *adap)
+static inline struct media_device *
+dvb_get_media_controller(struct dvb_adapter *adap)
{
return adap->mdev;
}
@@ -385,7 +385,7 @@ struct i2c_client;
* with dvb_module_probe() should use dvb_module_release() to unbind.
*
* Return:
- * On success, return an &struct i2c_client, pointing the the bound
+ * On success, return an &struct i2c_client, pointing to the bound
* I2C device. %NULL otherwise.
*
* .. note::
diff --git a/include/media/media-device.h b/include/media/media-device.h
index fa0895430720..1345e6da688a 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -128,7 +128,7 @@ struct media_device_ops {
*
* Use-case: find tuner entity connected to the decoder
* entity and check if it is available, and activate the
- * the link between them from @enable_source and deactivate
+ * link between them from @enable_source and deactivate
* from @disable_source.
*
* .. note::
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 6393842c6b21..d27c1c646c28 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -39,7 +39,7 @@ struct media_device;
* @poll: pointer to the function that implements poll() syscall
* @ioctl: pointer to the function that implements ioctl() syscall
* @compat_ioctl: pointer to the function that will handle 32 bits userspace
- * calls to the the ioctl() syscall on a Kernel compiled with 64 bits.
+ * calls to the ioctl() syscall on a Kernel compiled with 64 bits.
* @open: pointer to the function that implements open() syscall
* @release: pointer to the function that will release the resources allocated
* by the @open function.
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index cde80ad029b7..cbdfcb79d0d0 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -803,7 +803,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags);
* @flags: the requested new link flags
*
* The only configurable property is the %MEDIA_LNK_FL_ENABLED link flag
- * flag to enable/disable a link. Links marked with the
+ * to enable/disable a link. Links marked with the
* %MEDIA_LNK_FL_IMMUTABLE link flag can not be enabled or disabled.
*
* When a link is enabled or disabled, the media framework calls the
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
deleted file mode 100644
index 331c343a5b5a..000000000000
--- a/include/media/soc_camera.h
+++ /dev/null
@@ -1,397 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * camera image capture (abstract) bus driver header
- *
- * Copyright (C) 2006, Sascha Hauer, Pengutronix
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- */
-
-#ifndef SOC_CAMERA_H
-#define SOC_CAMERA_H
-
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/pm.h>
-#include <linux/videodev2.h>
-#include <media/videobuf2-v4l2.h>
-#include <media/v4l2-async.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-
-struct file;
-struct soc_camera_desc;
-struct soc_camera_async_client;
-
-struct soc_camera_device {
- struct list_head list; /* list of all registered devices */
- struct soc_camera_desc *sdesc;
- struct device *pdev; /* Platform device */
- struct device *parent; /* Camera host device */
- struct device *control; /* E.g., the i2c client */
- s32 user_width;
- s32 user_height;
- u32 bytesperline; /* for padding, zero if unused */
- u32 sizeimage;
- enum v4l2_colorspace colorspace;
- unsigned char iface; /* Host number */
- unsigned char devnum; /* Device number per host */
- struct soc_camera_sense *sense; /* See comment in struct definition */
- struct video_device *vdev;
- struct v4l2_ctrl_handler ctrl_handler;
- const struct soc_camera_format_xlate *current_fmt;
- struct soc_camera_format_xlate *user_formats;
- int num_user_formats;
- enum v4l2_field field; /* Preserve field over close() */
- void *host_priv; /* Per-device host private data */
- /* soc_camera.c private count. Only accessed with .host_lock held */
- int use_count;
- struct file *streamer; /* stream owner */
- struct v4l2_clk *clk;
- /* Asynchronous subdevice management */
- struct soc_camera_async_client *sasc;
- /* video buffer queue */
- struct vb2_queue vb2_vidq;
-};
-
-/* Host supports programmable stride */
-#define SOCAM_HOST_CAP_STRIDE (1 << 0)
-
-enum soc_camera_subdev_role {
- SOCAM_SUBDEV_DATA_SOURCE = 1,
- SOCAM_SUBDEV_DATA_SINK,
- SOCAM_SUBDEV_DATA_PROCESSOR,
-};
-
-struct soc_camera_async_subdev {
- struct v4l2_async_subdev asd;
- enum soc_camera_subdev_role role;
-};
-
-struct soc_camera_host {
- struct v4l2_device v4l2_dev;
- struct list_head list;
- struct mutex host_lock; /* Main synchronisation lock */
- struct mutex clk_lock; /* Protect pipeline modifications */
- unsigned char nr; /* Host number */
- u32 capabilities;
- struct soc_camera_device *icd; /* Currently attached client */
- void *priv;
- const char *drv_name;
- struct soc_camera_host_ops *ops;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- unsigned int *asd_sizes; /* 0-terminated array of asd group sizes */
-};
-
-struct soc_camera_host_ops {
- struct module *owner;
- int (*add)(struct soc_camera_device *);
- void (*remove)(struct soc_camera_device *);
- int (*clock_start)(struct soc_camera_host *);
- void (*clock_stop)(struct soc_camera_host *);
- /*
- * .get_formats() is called for each client device format, but
- * .put_formats() is only called once. Further, if any of the calls to
- * .get_formats() fail, .put_formats() will not be called at all, the
- * failing .get_formats() must then clean up internally.
- */
- int (*get_formats)(struct soc_camera_device *, unsigned int,
- struct soc_camera_format_xlate *);
- void (*put_formats)(struct soc_camera_device *);
- int (*get_selection)(struct soc_camera_device *, struct v4l2_selection *);
- int (*set_selection)(struct soc_camera_device *, struct v4l2_selection *);
- /*
- * The difference to .set_selection() is, that .set_liveselection is not allowed
- * to change the output sizes
- */
- int (*set_liveselection)(struct soc_camera_device *, struct v4l2_selection *);
- int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *);
- int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *);
- int (*init_videobuf2)(struct vb2_queue *,
- struct soc_camera_device *);
- int (*querycap)(struct soc_camera_host *, struct v4l2_capability *);
- int (*set_bus_param)(struct soc_camera_device *);
- int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
- int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
- int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *);
- __poll_t (*poll)(struct file *, poll_table *);
-};
-
-#define SOCAM_SENSOR_INVERT_PCLK (1 << 0)
-#define SOCAM_SENSOR_INVERT_MCLK (1 << 1)
-#define SOCAM_SENSOR_INVERT_HSYNC (1 << 2)
-#define SOCAM_SENSOR_INVERT_VSYNC (1 << 3)
-#define SOCAM_SENSOR_INVERT_DATA (1 << 4)
-
-struct i2c_board_info;
-struct regulator_bulk_data;
-
-struct soc_camera_subdev_desc {
- /* Per camera SOCAM_SENSOR_* bus flags */
- unsigned long flags;
-
- /* sensor driver private platform data */
- void *drv_priv;
-
- /*
- * Set unbalanced_power to true to deal with legacy drivers, failing to
- * balance their calls to subdevice's .s_power() method. clock_state is
- * then used internally by helper functions, it shouldn't be touched by
- * drivers or the platform code.
- */
- bool unbalanced_power;
- unsigned long clock_state;
-
- /* Optional callbacks to power on or off and reset the sensor */
- int (*power)(struct device *, int);
- int (*reset)(struct device *);
-
- /*
- * some platforms may support different data widths than the sensors
- * native ones due to different data line routing. Let the board code
- * overwrite the width flags.
- */
- int (*set_bus_param)(struct soc_camera_subdev_desc *, unsigned long flags);
- unsigned long (*query_bus_param)(struct soc_camera_subdev_desc *);
- void (*free_bus)(struct soc_camera_subdev_desc *);
-
- /* Optional regulators that have to be managed on power on/off events */
- struct v4l2_subdev_platform_data sd_pdata;
-};
-
-struct soc_camera_host_desc {
- /* Camera bus id, used to match a camera and a bus */
- int bus_id;
- int i2c_adapter_id;
- struct i2c_board_info *board_info;
- const char *module_name;
-
- /*
- * For non-I2C devices platform has to provide methods to add a device
- * to the system and to remove it
- */
- int (*add_device)(struct soc_camera_device *);
- void (*del_device)(struct soc_camera_device *);
-};
-
-/*
- * Platform data for "soc-camera-pdrv"
- * This MUST be kept binary-identical to struct soc_camera_link below, until
- * it is completely replaced by this one, after which we can split it into its
- * two components.
- */
-struct soc_camera_desc {
- struct soc_camera_subdev_desc subdev_desc;
- struct soc_camera_host_desc host_desc;
-};
-
-/* Prepare to replace this struct: don't change its layout any more! */
-struct soc_camera_link {
- /*
- * Subdevice part - keep at top and compatible to
- * struct soc_camera_subdev_desc
- */
-
- /* Per camera SOCAM_SENSOR_* bus flags */
- unsigned long flags;
-
- void *priv;
-
- /* Set by platforms to handle misbehaving drivers */
- bool unbalanced_power;
- /* Used by soc-camera helper functions */
- unsigned long clock_state;
-
- /* Optional callbacks to power on or off and reset the sensor */
- int (*power)(struct device *, int);
- int (*reset)(struct device *);
- /*
- * some platforms may support different data widths than the sensors
- * native ones due to different data line routing. Let the board code
- * overwrite the width flags.
- */
- int (*set_bus_param)(struct soc_camera_link *, unsigned long flags);
- unsigned long (*query_bus_param)(struct soc_camera_link *);
- void (*free_bus)(struct soc_camera_link *);
-
- /* Optional regulators that have to be managed on power on/off events */
- struct regulator_bulk_data *regulators;
- int num_regulators;
-
- void *host_priv;
-
- /*
- * Host part - keep at bottom and compatible to
- * struct soc_camera_host_desc
- */
-
- /* Camera bus id, used to match a camera and a bus */
- int bus_id;
- int i2c_adapter_id;
- struct i2c_board_info *board_info;
- const char *module_name;
-
- /*
- * For non-I2C devices platform has to provide methods to add a device
- * to the system and to remove it
- */
- int (*add_device)(struct soc_camera_device *);
- void (*del_device)(struct soc_camera_device *);
-};
-
-static inline struct soc_camera_host *to_soc_camera_host(
- const struct device *dev)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
-
- return container_of(v4l2_dev, struct soc_camera_host, v4l2_dev);
-}
-
-static inline struct soc_camera_desc *to_soc_camera_desc(
- const struct soc_camera_device *icd)
-{
- return icd->sdesc;
-}
-
-static inline struct device *to_soc_camera_control(
- const struct soc_camera_device *icd)
-{
- return icd->control;
-}
-
-static inline struct v4l2_subdev *soc_camera_to_subdev(
- const struct soc_camera_device *icd)
-{
- struct device *control = to_soc_camera_control(icd);
- return dev_get_drvdata(control);
-}
-
-int soc_camera_host_register(struct soc_camera_host *ici);
-void soc_camera_host_unregister(struct soc_camera_host *ici);
-
-const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
- struct soc_camera_device *icd, unsigned int fourcc);
-
-/**
- * struct soc_camera_format_xlate - match between host and sensor formats
- * @code: code of a sensor provided format
- * @host_fmt: host format after host translation from code
- *
- * Host and sensor translation structure. Used in table of host and sensor
- * formats matchings in soc_camera_device. A host can override the generic list
- * generation by implementing get_formats(), and use it for format checks and
- * format setup.
- */
-struct soc_camera_format_xlate {
- u32 code;
- const struct soc_mbus_pixelfmt *host_fmt;
-};
-
-#define SOCAM_SENSE_PCLK_CHANGED (1 << 0)
-
-/**
- * This struct can be attached to struct soc_camera_device by the host driver
- * to request sense from the camera, for example, when calling .set_fmt(). The
- * host then can check which flags are set and verify respective values if any.
- * For example, if SOCAM_SENSE_PCLK_CHANGED is set, it means, pixclock has
- * changed during this operation. After completion the host should detach sense.
- *
- * @flags ored SOCAM_SENSE_* flags
- * @master_clock if the host wants to be informed about pixel-clock
- * change, it better set master_clock.
- * @pixel_clock_max maximum pixel clock frequency supported by the host,
- * camera is not allowed to exceed this.
- * @pixel_clock if the camera driver changed pixel clock during this
- * operation, it sets SOCAM_SENSE_PCLK_CHANGED, uses
- * master_clock to calculate the new pixel-clock and
- * sets this field.
- */
-struct soc_camera_sense {
- unsigned long flags;
- unsigned long master_clock;
- unsigned long pixel_clock_max;
- unsigned long pixel_clock;
-};
-
-#define SOCAM_DATAWIDTH(x) BIT((x) - 1)
-#define SOCAM_DATAWIDTH_4 SOCAM_DATAWIDTH(4)
-#define SOCAM_DATAWIDTH_8 SOCAM_DATAWIDTH(8)
-#define SOCAM_DATAWIDTH_9 SOCAM_DATAWIDTH(9)
-#define SOCAM_DATAWIDTH_10 SOCAM_DATAWIDTH(10)
-#define SOCAM_DATAWIDTH_12 SOCAM_DATAWIDTH(12)
-#define SOCAM_DATAWIDTH_15 SOCAM_DATAWIDTH(15)
-#define SOCAM_DATAWIDTH_16 SOCAM_DATAWIDTH(16)
-#define SOCAM_DATAWIDTH_18 SOCAM_DATAWIDTH(18)
-#define SOCAM_DATAWIDTH_24 SOCAM_DATAWIDTH(24)
-
-#define SOCAM_DATAWIDTH_MASK (SOCAM_DATAWIDTH_4 | SOCAM_DATAWIDTH_8 | \
- SOCAM_DATAWIDTH_9 | SOCAM_DATAWIDTH_10 | \
- SOCAM_DATAWIDTH_12 | SOCAM_DATAWIDTH_15 | \
- SOCAM_DATAWIDTH_16 | SOCAM_DATAWIDTH_18 | \
- SOCAM_DATAWIDTH_24)
-
-static inline void soc_camera_limit_side(int *start, int *length,
- unsigned int start_min,
- unsigned int length_min, unsigned int length_max)
-{
- if (*length < length_min)
- *length = length_min;
- else if (*length > length_max)
- *length = length_max;
-
- if (*start < start_min)
- *start = start_min;
- else if (*start > start_min + length_max - *length)
- *start = start_min + length_max - *length;
-}
-
-unsigned long soc_camera_apply_board_flags(struct soc_camera_subdev_desc *ssdd,
- const struct v4l2_mbus_config *cfg);
-
-int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd);
-int soc_camera_power_on(struct device *dev, struct soc_camera_subdev_desc *ssdd,
- struct v4l2_clk *clk);
-int soc_camera_power_off(struct device *dev, struct soc_camera_subdev_desc *ssdd,
- struct v4l2_clk *clk);
-
-static inline int soc_camera_set_power(struct device *dev,
- struct soc_camera_subdev_desc *ssdd, struct v4l2_clk *clk, bool on)
-{
- return on ? soc_camera_power_on(dev, ssdd, clk)
- : soc_camera_power_off(dev, ssdd, clk);
-}
-
-/* This is only temporary here - until v4l2-subdev begins to link to video_device */
-#include <linux/i2c.h>
-static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
- return icd ? icd->vdev : NULL;
-}
-
-static inline struct soc_camera_subdev_desc *soc_camera_i2c_to_desc(const struct i2c_client *client)
-{
- return client->dev.platform_data;
-}
-
-static inline struct v4l2_subdev *soc_camera_vdev_to_subdev(struct video_device *vdev)
-{
- struct soc_camera_device *icd = video_get_drvdata(vdev);
- return soc_camera_to_subdev(icd);
-}
-
-static inline struct soc_camera_device *soc_camera_from_vb2q(const struct vb2_queue *vq)
-{
- return container_of(vq, struct soc_camera_device, vb2_vidq);
-}
-
-static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
-{
- return (icd->iface << 8) | (icd->devnum + 1);
-}
-
-void soc_camera_lock(struct vb2_queue *vq);
-void soc_camera_unlock(struct vb2_queue *vq);
-
-#endif
diff --git a/include/media/tpg/v4l2-tpg.h b/include/media/tpg/v4l2-tpg.h
index eb191e85d363..0b0ddb87380e 100644
--- a/include/media/tpg/v4l2-tpg.h
+++ b/include/media/tpg/v4l2-tpg.h
@@ -241,7 +241,7 @@ void tpg_log_status(struct tpg_data *tpg);
void tpg_set_font(const u8 *f);
void tpg_gen_text(const struct tpg_data *tpg,
- u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
+ u8 *basep[TPG_MAX_PLANES][2], int y, int x, const char *text);
void tpg_calc_text_basep(struct tpg_data *tpg,
u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line);
@@ -252,6 +252,7 @@ void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std,
bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
const struct v4l2_rect *compose);
+const char *tpg_g_color_order(const struct tpg_data *tpg);
static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern)
{
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index 246eed398648..bdaa5f2f8ca2 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -89,8 +89,8 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q);
* v4l2_create_fwnode_links_to_pad - Create fwnode-based links from a
* source subdev to a sink subdev pad.
*
- * @src_sd - pointer to a source subdev
- * @sink - pointer to a subdev sink pad
+ * @src_sd: pointer to a source subdev
+ * @sink: pointer to a subdev sink pad
*
* This function searches for fwnode endpoint connections from a source
* subdevice to a single sink pad, and if suitable connections are found,
@@ -113,8 +113,8 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
* v4l2_create_fwnode_links - Create fwnode-based links from a source
* subdev to a sink subdev.
*
- * @src_sd - pointer to a source subdevice
- * @sink_sd - pointer to a sink subdevice
+ * @src_sd: pointer to a source subdevice
+ * @sink_sd: pointer to a sink subdevice
*
* This function searches for any and all fwnode endpoint connections
* between source and sink subdevices, and translates them into media
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
index 8800a640c224..bd587d0c0dc3 100644
--- a/include/media/v4l2-rect.h
+++ b/include/media/v4l2-rect.h
@@ -184,4 +184,24 @@ static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1,
return true;
}
+/**
+ * v4l2_rect_enclosed() - is r1 enclosed in r2?
+ * @r1: rectangle.
+ * @r2: rectangle.
+ *
+ * Returns true if @r1 is enclosed in @r2.
+ */
+static inline bool v4l2_rect_enclosed(struct v4l2_rect *r1,
+ struct v4l2_rect *r2)
+{
+ if (r1->left < r2->left || r1->top < r2->top)
+ return false;
+ if (r1->left + r1->width > r2->left + r2->width)
+ return false;
+ if (r1->top + r1->height > r2->top + r2->height)
+ return false;
+
+ return true;
+}
+
#endif
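A small usage sketch for the new helper (my_validate_compose is illustrative):

static int my_validate_compose(struct v4l2_rect *req,
			       struct v4l2_rect *bounds)
{
	/* the requested rectangle must lie entirely inside the bounds */
	return v4l2_rect_enclosed(req, bounds) ? 0 : -ERANGE;
}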
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index f7fe78a6f65a..d4e3b44cf14c 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -566,9 +566,9 @@ struct v4l2_subdev_ir_parameters {
*
* @rx_read: Reads received codes or pulse width data.
* The semantics are similar to a non-blocking read() call.
- * @rx_g_parameters: Get the current operating parameters and state of the
+ * @rx_g_parameters: Get the current operating parameters and state of
* the IR receiver.
- * @rx_s_parameters: Set the current operating parameters and state of the
+ * @rx_s_parameters: Set the current operating parameters and state of
* the IR receiver. It is recommended to call
* [rt]x_g_parameters first to fill out the current state, and only change
* the fields that need to be changed. Upon return, the actual device
@@ -582,9 +582,9 @@ struct v4l2_subdev_ir_parameters {
*
* @tx_write: Writes codes or pulse width data for transmission.
* The semantics are similar to a non-blocking write() call.
- * @tx_g_parameters: Get the current operating parameters and state of the
+ * @tx_g_parameters: Get the current operating parameters and state of
* the IR transmitter.
- * @tx_s_parameters: Set the current operating parameters and state of the
+ * @tx_s_parameters: Set the current operating parameters and state of
* the IR transmitter. It is recommended to call
* [rt]x_g_parameters first to fill out the current state, and only change
* the fields that need to be changed. Upon return, the actual device
@@ -930,10 +930,10 @@ struct v4l2_subdev_fh {
* @cfg: pointer to &struct v4l2_subdev_pad_config array.
* @pad: index of the pad in the @cfg array.
*/
-static inline struct v4l2_mbus_framefmt
-*v4l2_subdev_get_try_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
+static inline struct v4l2_mbus_framefmt *
+v4l2_subdev_get_try_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
{
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
@@ -948,10 +948,10 @@ static inline struct v4l2_mbus_framefmt
* @cfg: pointer to &struct v4l2_subdev_pad_config array.
* @pad: index of the pad in the @cfg array.
*/
-static inline struct v4l2_rect
-*v4l2_subdev_get_try_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
+static inline struct v4l2_rect *
+v4l2_subdev_get_try_crop(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
{
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
@@ -959,22 +959,23 @@ static inline struct v4l2_rect
}
/**
- * v4l2_subdev_get_try_crop - ancillary routine to call
+ * v4l2_subdev_get_try_compose - ancillary routine to call
* &struct v4l2_subdev_pad_config->try_compose
*
* @sd: pointer to &struct v4l2_subdev
* @cfg: pointer to &struct v4l2_subdev_pad_config array.
* @pad: index of the pad in the @cfg array.
*/
-static inline struct v4l2_rect
-*v4l2_subdev_get_try_compose(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
+static inline struct v4l2_rect *
+v4l2_subdev_get_try_compose(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
{
if (WARN_ON(pad >= sd->entity.num_pads))
pad = 0;
return &cfg[pad].try_compose;
}
+
#endif
extern const struct v4l2_file_operations v4l2_subdev_fops;
@@ -1031,8 +1032,8 @@ static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd)
* v4l2_subdev_get_fwnode_pad_1_to_1 - Get pad number from a subdev fwnode
* endpoint, assuming 1:1 port:pad
*
- * @entity - Pointer to the subdev entity
- * @endpoint - Pointer to a parsed fwnode endpoint
+ * @entity: Pointer to the subdev entity
+ * @endpoint: Pointer to a parsed fwnode endpoint
*
* This function can be used as the .get_fwnode_pad operation for
* subdevices that map port numbers and pad indexes 1:1. If the endpoint
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index b89d5e31f172..34450f7ad510 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -31,7 +31,7 @@
* does memory allocation too using vmalloc_32().
*
* videobuf_dma_*()
- * see Documentation/DMA-API-HOWTO.txt, these functions to
+ * see Documentation/core-api/dma-api-howto.rst, these functions do
* basically the same. The map function does also build a
* scatterlist for the buffer (and unmap frees it ...)
*
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index f11b96514cf7..52ef92049073 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -263,6 +263,10 @@ struct vb2_buffer {
* after the 'buf_finish' op is called.
* copied_timestamp: the timestamp of this capture buffer was copied
* from an output buffer.
+ * need_cache_sync_on_prepare: when set, the buffer's ->prepare() function
+ * performs cache sync/invalidation.
+ * need_cache_sync_on_finish: when set, the buffer's ->finish() function
+ * performs cache sync/invalidation.
* queued_entry: entry on the queued buffers list, which holds
* all buffers queued from userspace
* done_entry: entry on the list that stores all buffers ready
@@ -273,6 +277,8 @@ struct vb2_buffer {
unsigned int synced:1;
unsigned int prepared:1;
unsigned int copied_timestamp:1;
+ unsigned int need_cache_sync_on_prepare:1;
+ unsigned int need_cache_sync_on_finish:1;
struct vb2_plane planes[VB2_MAX_PLANES];
struct list_head queued_entry;
@@ -491,6 +497,9 @@ struct vb2_buf_ops {
* @uses_requests: requests are used for this queue. Set to 1 the first time
* a request is queued. Set to 0 when the queue is canceled.
* If this is 1, then you cannot queue buffers directly.
+ * @allow_cache_hints: when set, user-space can pass cache management hints in
+ * order to skip cache flush/invalidation on ->prepare() and/or
+ * ->finish().
* @lock: pointer to a mutex that protects the &struct vb2_queue. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -552,21 +561,24 @@ struct vb2_buf_ops {
* when a buffer with the %V4L2_BUF_FLAG_LAST is dequeued.
* @fileio: file io emulator internal data, used only if emulator is active
* @threadio: thread io internal data, used only if thread is active
+ * @name: queue name, used for logging purposes. Initialized automatically
+ * if left empty by drivers.
*/
struct vb2_queue {
unsigned int type;
unsigned int io_modes;
struct device *dev;
unsigned long dma_attrs;
- unsigned bidirectional:1;
- unsigned fileio_read_once:1;
- unsigned fileio_write_immediately:1;
- unsigned allow_zero_bytesused:1;
- unsigned quirk_poll_must_check_waiting_for_buffers:1;
- unsigned supports_requests:1;
- unsigned requires_requests:1;
- unsigned uses_qbuf:1;
- unsigned uses_requests:1;
+ unsigned int bidirectional:1;
+ unsigned int fileio_read_once:1;
+ unsigned int fileio_write_immediately:1;
+ unsigned int allow_zero_bytesused:1;
+ unsigned int quirk_poll_must_check_waiting_for_buffers:1;
+ unsigned int supports_requests:1;
+ unsigned int requires_requests:1;
+ unsigned int uses_qbuf:1;
+ unsigned int uses_requests:1;
+ unsigned int allow_cache_hints:1;
struct mutex *lock;
void *owner;
@@ -612,6 +624,8 @@ struct vb2_queue {
struct vb2_fileio_data *fileio;
struct vb2_threadio_data *threadio;
+ char name[32];
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
* Counters for how often these queue-related ops are
@@ -626,6 +640,17 @@ struct vb2_queue {
};
/**
+ * vb2_queue_allows_cache_hints() - Return true if the queue allows cache
+ * and memory consistency hints.
+ *
+ * @q: pointer to &struct vb2_queue with videobuf2 queue
+ */
+static inline bool vb2_queue_allows_cache_hints(struct vb2_queue *q)
+{
+ return q->allow_cache_hints && q->memory == VB2_MEMORY_MMAP;
+}
+
+/**
* vb2_plane_vaddr() - Return a kernel virtual address of a given plane.
* @vb: pointer to &struct vb2_buffer to which the plane in
* question belongs to.
@@ -719,6 +744,8 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
* vb2_core_reqbufs() - Initiate streaming.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @memory: memory type, as defined by &enum vb2_memory.
+ * @flags: auxiliary queue/buffer management flags. Currently, the only
+ * used flag is %V4L2_FLAG_MEMORY_NON_CONSISTENT.
* @count: requested buffer count.
*
* Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called
@@ -743,12 +770,13 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count);
+ unsigned int flags, unsigned int *count);
/**
* vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
* @q: pointer to &struct vb2_queue with videobuf2 queue.
* @memory: memory type, as defined by &enum vb2_memory.
+ * @flags: auxiliary queue/buffer management flags.
* @count: requested buffer count.
* @requested_planes: number of planes requested.
* @requested_sizes: array with the size of the planes.
@@ -766,7 +794,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count, unsigned int requested_planes,
+ unsigned int flags, unsigned int *count,
+ unsigned int requested_planes,
const unsigned int requested_sizes[]);
/**
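A hedged sketch of how a driver opts in to the new cache-management hints at queue-setup time (my_setup_queue is illustrative; the remaining mandatory vb2_queue fields are elided):

static int my_setup_queue(struct vb2_queue *q)
{
	/*
	 * Hints are only honoured for MMAP memory, see
	 * vb2_queue_allows_cache_hints() above.
	 */
	q->allow_cache_hints = 1;

	/* ... q->type, q->io_modes, q->ops, q->mem_ops, q->lock, ... */
	return vb2_queue_init(q);
}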
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 59bf33a12648..b7b5a9cb5a28 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -237,6 +237,19 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
int __must_check vb2_queue_init(struct vb2_queue *q);
/**
+ * vb2_queue_init_name() - initialize a videobuf2 queue with a name
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @name: the queue name
+ *
+ * This function initializes the vb2_queue exactly like vb2_queue_init(),
+ * and additionally sets the queue name. The queue name is used for logging
+ * purposes, and should uniquely identify the queue within the context of the
+ * device it belongs to. This is useful to attribute kernel log messages to the
+ * right queue for m2m devices or other devices that handle multiple queues.
+ */
+int __must_check vb2_queue_init_name(struct vb2_queue *q, const char *name);
+
+/**
* vb2_queue_release() - stop streaming, release the queue and free memory
* @q: pointer to &struct vb2_queue with videobuf2 queue.
*
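A short usage sketch for the name-aware init added above (the queue names "cap" and "out" are arbitrary, as an m2m driver might pick them):

static int my_init_queues(struct vb2_queue *cap_q, struct vb2_queue *out_q)
{
	int ret;

	ret = vb2_queue_init_name(cap_q, "cap");
	if (ret)
		return ret;
	return vb2_queue_init_name(out_q, "out");
}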
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
new file mode 100644
index 000000000000..9ad136682c47
--- /dev/null
+++ b/include/memory/renesas-rpc-if.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RPC-IF core driver
+ *
+ * Copyright (C) 2018~2019 Renesas Solutions Corp.
+ * Copyright (C) 2019 Macronix International Co., Ltd.
+ * Copyright (C) 2019-2020 Cogent Embedded, Inc.
+ */
+
+#ifndef __RENESAS_RPC_IF_H
+#define __RENESAS_RPC_IF_H
+
+#include <linux/types.h>
+
+enum rpcif_data_dir {
+ RPCIF_NO_DATA,
+ RPCIF_DATA_IN,
+ RPCIF_DATA_OUT,
+};
+
+struct rpcif_op {
+ struct {
+ u8 buswidth;
+ u8 opcode;
+ bool ddr;
+ } cmd, ocmd;
+
+ struct {
+ u8 nbytes;
+ u8 buswidth;
+ bool ddr;
+ u64 val;
+ } addr;
+
+ struct {
+ u8 ncycles;
+ u8 buswidth;
+ } dummy;
+
+ struct {
+ u8 nbytes;
+ u8 buswidth;
+ bool ddr;
+ u32 val;
+ } option;
+
+ struct {
+ u8 buswidth;
+ unsigned int nbytes;
+ enum rpcif_data_dir dir;
+ bool ddr;
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ } data;
+};
+
+struct rpcif {
+ struct device *dev;
+ void __iomem *dirmap;
+ struct regmap *regmap;
+ struct reset_control *rstc;
+ size_t size;
+ enum rpcif_data_dir dir;
+ u8 bus_size;
+ void *buffer;
+ u32 xferlen;
+ u32 smcr;
+ u32 smadr;
+ u32 command; /* DRCMR or SMCMR */
+ u32 option; /* DROPR or SMOPR */
+ u32 enable; /* DRENR or SMENR */
+ u32 dummy; /* DRDMCR or SMDMCR */
+ u32 ddr; /* DRDRENR or SMDRENR */
+};
+
+int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
+void rpcif_hw_init(struct rpcif *rpc, bool hyperflash);
+void rpcif_enable_rpm(struct rpcif *rpc);
+void rpcif_disable_rpm(struct rpcif *rpc);
+void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
+ size_t *len);
+int rpcif_manual_xfer(struct rpcif *rpc);
+ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf);
+
+#endif // __RENESAS_RPC_IF_H
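
A hedged sketch of how a client driver might drive a manual transfer through this interface; the opcode, the buffer length, the offset/length handling and the omitted runtime-PM/clock management are all assumptions made for illustration, not code taken from the Renesas drivers.

#include <linux/types.h>
#include <memory/renesas-rpc-if.h>

/* Issue a simple 1-bit-wide command and read back @len bytes of data in
 * manual transfer mode. Purely illustrative. */
static int example_rpcif_read(struct rpcif *rpc, u8 opcode,
			      void *buf, unsigned int len)
{
	struct rpcif_op op = {
		.cmd  = { .opcode = opcode, .buswidth = 1 },
		.data = {
			.dir      = RPCIF_DATA_IN,
			.buswidth = 1,
			.nbytes   = len,
			.buf.in   = buf,
		},
	};
	u64 offs = 0;		/* no address phase in this sketch */
	size_t sz = len;

	rpcif_prepare(rpc, &op, &offs, &sz);
	return rpcif_manual_xfer(rpc);
}
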
diff --git a/include/misc/ocxl-config.h b/include/misc/ocxl-config.h
index 3526fa996a22..ccfd3b463517 100644
--- a/include/misc/ocxl-config.h
+++ b/include/misc/ocxl-config.h
@@ -41,5 +41,6 @@
#define OCXL_DVSEC_VENDOR_CFG_VERS 0x0C
#define OCXL_DVSEC_VENDOR_TLX_VERS 0x10
#define OCXL_DVSEC_VENDOR_DLX_VERS 0x20
+#define OCXL_DVSEC_VENDOR_RESET_RELOAD 0x38
#endif /* _OCXL_CONFIG_H_ */
diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h
index 06dd5839e438..357ef1aadbc0 100644
--- a/include/misc/ocxl.h
+++ b/include/misc/ocxl.h
@@ -62,8 +62,7 @@ struct ocxl_context;
// Device detection & initialisation
/**
- * Open an OpenCAPI function on an OpenCAPI device
- *
+ * ocxl_function_open() - Open an OpenCAPI function on an OpenCAPI device
* @dev: The PCI device that contains the function
*
* Returns an opaque pointer to the function, or an error pointer (check with IS_ERR)
@@ -71,8 +70,7 @@ struct ocxl_context;
struct ocxl_fn *ocxl_function_open(struct pci_dev *dev);
/**
- * Get the list of AFUs associated with a PCI function device
- *
+ * ocxl_function_afu_list() - Get the list of AFUs associated with a PCI function device
* Returns a list of struct ocxl_afu *
*
* @fn: The OpenCAPI function containing the AFUs
@@ -80,8 +78,7 @@ struct ocxl_fn *ocxl_function_open(struct pci_dev *dev);
struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn);
/**
- * Fetch an AFU instance from an OpenCAPI function
- *
+ * ocxl_function_fetch_afu() - Fetch an AFU instance from an OpenCAPI function
* @fn: The OpenCAPI function to get the AFU from
* @afu_idx: The index of the AFU to get
*
@@ -92,23 +89,20 @@ struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn);
struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx);
/**
- * Take a reference to an AFU
- *
+ * ocxl_afu_get() - Take a reference to an AFU
* @afu: The AFU to increment the reference count on
*/
void ocxl_afu_get(struct ocxl_afu *afu);
/**
- * Release a reference to an AFU
- *
+ * ocxl_afu_put() - Release a reference to an AFU
* @afu: The AFU to decrement the reference count on
*/
void ocxl_afu_put(struct ocxl_afu *afu);
/**
- * Get the configuration information for an OpenCAPI function
- *
+ * ocxl_function_config() - Get the configuration information for an OpenCAPI function
* @fn: The OpenCAPI function to get the config for
*
* Returns the function config, or NULL on error
@@ -116,8 +110,7 @@ void ocxl_afu_put(struct ocxl_afu *afu);
const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn);
/**
- * Close an OpenCAPI function
- *
+ * ocxl_function_close() - Close an OpenCAPI function
* This will free any AFUs previously retrieved from the function, and
* detach any associated contexts. The contexts must be freed by the caller.
*
@@ -129,8 +122,7 @@ void ocxl_function_close(struct ocxl_fn *fn);
// Context allocation
/**
- * Allocate an OpenCAPI context
- *
+ * ocxl_context_alloc() - Allocate an OpenCAPI context
* @context: The OpenCAPI context to allocate, must be freed with ocxl_context_free
* @afu: The AFU the context belongs to
* @mapping: The mapping to unmap when the context is closed (may be NULL)
@@ -139,14 +131,13 @@ int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
struct address_space *mapping);
/**
- * Free an OpenCAPI context
- *
+ * ocxl_context_free() - Free an OpenCAPI context
* @ctx: The OpenCAPI context to free
*/
void ocxl_context_free(struct ocxl_context *ctx);
/**
- * Grant access to an MM to an OpenCAPI context
+ * ocxl_context_attach() - Grant access to an MM to an OpenCAPI context
* @ctx: The OpenCAPI context to attach
* @amr: The value of the AMR register to restrict access
* @mm: The mm to attach to the context
@@ -157,7 +148,7 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr,
struct mm_struct *mm);
/**
- * Detach an MM from an OpenCAPI context
+ * ocxl_context_detach() - Detach an MM from an OpenCAPI context
* @ctx: The OpenCAPI context to attach
*
* Returns 0 on success, negative on failure
@@ -167,25 +158,25 @@ int ocxl_context_detach(struct ocxl_context *ctx);
// AFU IRQs
/**
- * Allocate an IRQ associated with an AFU context
+ * ocxl_afu_irq_alloc() - Allocate an IRQ associated with an AFU context
* @ctx: the AFU context
* @irq_id: out, the IRQ ID
*
* Returns 0 on success, negative on failure
*/
-extern int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id);
+int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id);
/**
- * Frees an IRQ associated with an AFU context
+ * ocxl_afu_irq_free() - Frees an IRQ associated with an AFU context
* @ctx: the AFU context
* @irq_id: the IRQ ID
*
* Returns 0 on success, negative on failure
*/
-extern int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id);
+int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id);
/**
- * Gets the address of the trigger page for an IRQ
+ * ocxl_afu_irq_get_addr() - Gets the address of the trigger page for an IRQ
* This can then be provided to an AFU which will write to that
* page to trigger the IRQ.
* @ctx: The AFU context that the IRQ is associated with
@@ -193,10 +184,10 @@ extern int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id);
*
* returns the trigger page address, or 0 if the IRQ is not valid
*/
-extern u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id);
+u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id);
/**
- * Provide a callback to be called when an IRQ is triggered
+ * ocxl_irq_set_handler() - Provide a callback to be called when an IRQ is triggered
* @ctx: The AFU context that the IRQ is associated with
* @irq_id: The IRQ ID
* @handler: the callback to be called when the IRQ is triggered
@@ -213,8 +204,7 @@ int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id,
// AFU Metadata
/**
- * Get a pointer to the config for an AFU
- *
+ * ocxl_afu_config() - Get a pointer to the config for an AFU
* @afu: a pointer to the AFU to get the config for
*
* Returns a pointer to the AFU config
@@ -222,27 +212,24 @@ int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id,
struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu);
/**
- * Assign opaque hardware specific information to an OpenCAPI AFU.
- *
- * @dev: The PCI device associated with the OpenCAPI device
+ * ocxl_afu_set_private() - Assign opaque hardware specific information to an OpenCAPI AFU.
+ * @afu: The OpenCAPI AFU
* @private: the opaque hardware specific information to assign to the driver
*/
void ocxl_afu_set_private(struct ocxl_afu *afu, void *private);
/**
- * Fetch the hardware specific information associated with an external OpenCAPI
- * AFU. This may be consumed by an external OpenCAPI driver.
- *
- * @afu: The AFU
+ * ocxl_afu_get_private() - Fetch the hardware specific information associated with
+ * an external OpenCAPI AFU. This may be consumed by an external OpenCAPI driver.
+ * @afu: The OpenCAPI AFU
*
* Returns the opaque pointer associated with the device, or NULL if not set
*/
-void *ocxl_afu_get_private(struct ocxl_afu *dev);
+void *ocxl_afu_get_private(struct ocxl_afu *afu);
// Global MMIO
/**
- * Read a 32 bit value from global MMIO
- *
+ * ocxl_global_mmio_read32() - Read a 32 bit value from global MMIO
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -251,11 +238,10 @@ void *ocxl_afu_get_private(struct ocxl_afu *dev);
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u32 *val);
+ enum ocxl_endian endian, u32 *val);
/**
- * Read a 64 bit value from global MMIO
- *
+ * ocxl_global_mmio_read64() - Read a 64 bit value from global MMIO
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -264,11 +250,10 @@ int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u64 *val);
+ enum ocxl_endian endian, u64 *val);
/**
- * Write a 32 bit value to global MMIO
- *
+ * ocxl_global_mmio_write32() - Write a 32 bit value to global MMIO
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -277,11 +262,10 @@ int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u32 val);
+ enum ocxl_endian endian, u32 val);
/**
- * Write a 64 bit value to global MMIO
- *
+ * ocxl_global_mmio_write64() - Write a 64 bit value to global MMIO
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -290,11 +274,10 @@ int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u64 val);
+ enum ocxl_endian endian, u64 val);
/**
- * Set bits in a 32 bit global MMIO register
- *
+ * ocxl_global_mmio_set32() - Set bits in a 32 bit global MMIO register
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -303,11 +286,10 @@ int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u32 mask);
+ enum ocxl_endian endian, u32 mask);
/**
- * Set bits in a 64 bit global MMIO register
- *
+ * ocxl_global_mmio_set64() - Set bits in a 64 bit global MMIO register
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -316,11 +298,10 @@ int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u64 mask);
+ enum ocxl_endian endian, u64 mask);
/**
- * Set bits in a 32 bit global MMIO register
- *
+ * ocxl_global_mmio_clear32() - Set bits in a 32 bit global MMIO register
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -329,11 +310,10 @@ int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u32 mask);
+ enum ocxl_endian endian, u32 mask);
/**
- * Set bits in a 64 bit global MMIO register
- *
+ * ocxl_global_mmio_clear64() - Set bits in a 64 bit global MMIO register
* @afu: The AFU
* @offset: The Offset from the start of MMIO
* @endian: the endianness that the MMIO data is in
@@ -342,7 +322,7 @@ int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset,
* Returns 0 for success, negative on error
*/
int ocxl_global_mmio_clear64(struct ocxl_afu *afu, size_t offset,
- enum ocxl_endian endian, u64 mask);
+ enum ocxl_endian endian, u64 mask);
// Functions left here are for compatibility with the cxlflash driver
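
A hedged sketch of the renamed kernel-doc entries in use: open a function, fetch its first AFU and read one 32-bit register from global MMIO. The offset and endianness are placeholders and error handling is kept minimal.

#include <linux/err.h>
#include <linux/pci.h>
#include <misc/ocxl.h>

static int example_ocxl_read_reg(struct pci_dev *pdev, u32 *val)
{
	struct ocxl_fn *fn;
	struct ocxl_afu *afu;
	int rc;

	fn = ocxl_function_open(pdev);
	if (IS_ERR(fn))
		return PTR_ERR(fn);

	afu = ocxl_function_fetch_afu(fn, 0);
	if (!afu) {
		ocxl_function_close(fn);
		return -ENODEV;
	}
	ocxl_afu_get(afu);

	/* Placeholder offset; the endianness value comes from enum ocxl_endian. */
	rc = ocxl_global_mmio_read32(afu, 0x0, OCXL_LITTLE_ENDIAN, val);

	ocxl_afu_put(afu);
	ocxl_function_close(fn);
	return rc;
}
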
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 98a2be2de04a..3eb4261b2958 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -25,7 +25,7 @@
* @request: member function to issue a request to the transport
* @cancel: member function to cancel a request (if it hasn't been sent)
* @cancelled: member function to notify that a cancelled request will not
- * not receive a reply
+ * receive a reply
*
* This is the basic API for a transport module which is registered by the
* transport module with the 9P core network module and used by the client
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 8c3934880670..cb382a89ea58 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -106,7 +106,7 @@ struct tc_action_ops {
struct netlink_callback *, int,
const struct tc_action_ops *,
struct netlink_ext_ack *);
- void (*stats_update)(struct tc_action *, u64, u32, u64, bool);
+ void (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool);
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a,
tc_action_priv_destructor *destructor);
@@ -232,8 +232,8 @@ static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
spin_unlock(&a->tcfa_lock);
}
-void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
- bool drop, bool hw);
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
+ u64 drops, bool hw);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
@@ -244,13 +244,14 @@ struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
- u64 packets, u64 lastuse, bool hw)
+ u64 packets, u64 drops,
+ u64 lastuse, bool hw)
{
#ifdef CONFIG_NET_CLS_ACT
if (!a->ops->stats_update)
return;
- a->ops->stats_update(a, bytes, packets, lastuse, hw);
+ a->ops->stats_update(a, bytes, packets, drops, lastuse, hw);
#endif
}
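
A hedged sketch of a tc action's .stats_update callback against the widened prototype above: the hardware-reported drop counter is now forwarded to tcf_action_update_stats(). The action itself is hypothetical and its lastuse bookkeeping is elided.

#include <net/act_api.h>

static void example_act_stats_update(struct tc_action *a, u64 bytes,
				     u64 packets, u64 drops, u64 lastuse,
				     bool hw)
{
	tcf_action_update_stats(a, bytes, packets, drops, hw);
	/* Per-action lastuse handling would go here. */
}

static struct tc_action_ops example_act_ops = {
	/* other fields omitted */
	.stats_update = example_act_stats_update,
};
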
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index fdb07105384c..18f783dcd55f 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -97,6 +97,9 @@ bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
+struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
+ struct net_device *dev);
+
struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
const struct in6_addr *addr,
struct net_device *dev, int strict);
@@ -274,6 +277,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+void __ipv6_sock_ac_close(struct sock *sk);
void ipv6_sock_ac_close(struct sock *sk);
int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 18190055374c..9125effbf448 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -41,6 +41,8 @@
#define BLUETOOTH_VER_1_1 1
#define BLUETOOTH_VER_1_2 2
#define BLUETOOTH_VER_2_0 3
+#define BLUETOOTH_VER_2_1 4
+#define BLUETOOTH_VER_4_0 6
/* Reserv for core and drivers use */
#define BT_SKB_RESERVE 8
@@ -147,6 +149,10 @@ struct bt_voice {
#define BT_MODE_LE_FLOWCTL 0x03
#define BT_MODE_EXT_FLOWCTL 0x04
+#define BT_PKT_STATUS 16
+
+#define BT_SCM_PKT_STATUS 0x03
+
__printf(1, 2)
void bt_info(const char *fmt, ...);
__printf(1, 2)
@@ -286,6 +292,7 @@ struct bt_sock {
struct sock *parent;
unsigned long flags;
void (*skb_msg_name)(struct sk_buff *, void *, int *);
+ void (*skb_put_cmsg)(struct sk_buff *, struct msghdr *, struct sock *);
};
enum {
@@ -335,6 +342,10 @@ struct l2cap_ctrl {
struct l2cap_chan *chan;
};
+struct sco_ctrl {
+ u8 pkt_status;
+};
+
struct hci_dev;
typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
@@ -361,6 +372,7 @@ struct bt_skb_cb {
u8 incoming:1;
union {
struct l2cap_ctrl l2cap;
+ struct sco_ctrl sco;
struct hci_ctrl hci;
};
};
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 16ab6ce87883..c8e67042a3b1 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -227,6 +227,17 @@ enum {
* supported.
*/
HCI_QUIRK_VALID_LE_STATES,
+
+ /* When this quirk is set, then erroneous data reporting
+ * is ignored. This is mainly due to the fact that the HCI
+ * Read Default Erroneous Data Reporting command is advertised,
+ * but not supported; these controllers often reply with unknown
+ * command and tend to lock up randomly, needing a hard reset.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
};
/* HCI device flags */
@@ -307,6 +318,7 @@ enum {
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
HCI_LL_RPA_RESOLUTION,
+ HCI_ENABLE_LL_PRIVACY,
HCI_CMD_PENDING,
HCI_FORCE_NO_MITM,
@@ -1637,6 +1649,8 @@ struct hci_rp_le_read_resolv_list_size {
#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d
+#define HCI_OP_LE_SET_RPA_TIMEOUT 0x202e
+
#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
struct hci_rp_le_read_max_data_len {
__u8 status;
@@ -2268,8 +2282,10 @@ struct hci_ev_le_conn_complete {
#define LE_EXT_ADV_SCAN_RSP 0x0008
#define LE_EXT_ADV_LEGACY_PDU 0x0010
-#define ADDR_LE_DEV_PUBLIC 0x00
-#define ADDR_LE_DEV_RANDOM 0x01
+#define ADDR_LE_DEV_PUBLIC 0x00
+#define ADDR_LE_DEV_RANDOM 0x01
+#define ADDR_LE_DEV_PUBLIC_RESOLVED 0x02
+#define ADDR_LE_DEV_RANDOM_RESOLVED 0x03
#define HCI_EV_LE_ADVERTISING_REPORT 0x02
struct hci_ev_le_advertising_info {
@@ -2516,4 +2532,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
#define hci_iso_data_len(h) ((h) & 0x3fff)
#define hci_iso_data_flags(h) ((h) >> 14)
+/* le24 support */
+static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
+{
+ dst[0] = val & 0xff;
+ dst[1] = (val & 0xff00) >> 8;
+ dst[2] = (val & 0xff0000) >> 16;
+}
+
#endif /* __HCI_H */
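
A tiny illustration of the new le24 helper (the value is arbitrary): 0x123456 is stored least-significant byte first.

#include <net/bluetooth/hci.h>

static void example_pack_le24(__u8 dst[3])
{
	hci_cpu_to_le24(0x123456, dst);	/* dst = { 0x56, 0x34, 0x12 } */
}
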
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index cdd4f1db8670..8caac20556b4 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -25,6 +25,7 @@
#ifndef __HCI_CORE_H
#define __HCI_CORE_H
+#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>
@@ -136,6 +137,23 @@ struct bdaddr_list_with_irk {
u8 local_irk[16];
};
+struct bdaddr_list_with_flags {
+ struct list_head list;
+ bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u32 current_flags;
+};
+
+enum hci_conn_flags {
+ HCI_CONN_FLAG_REMOTE_WAKEUP,
+ HCI_CONN_FLAG_MAX
+};
+
+#define hci_conn_test_flag(nr, flags) ((flags) & (1U << (nr)))
+
+/* Make sure the number of flags doesn't exceed the bit width of current_flags */
+static_assert(HCI_CONN_FLAG_MAX < 32);
+
struct bt_uuid {
struct list_head list;
u8 uuid[16];
@@ -220,6 +238,24 @@ struct adv_info {
#define HCI_MAX_ADV_INSTANCES 5
#define HCI_DEFAULT_ADV_DURATION 2
+struct adv_pattern {
+ struct list_head list;
+ __u8 ad_type;
+ __u8 offset;
+ __u8 length;
+ __u8 value[HCI_MAX_AD_LENGTH];
+};
+
+struct adv_monitor {
+ struct list_head patterns;
+ bool active;
+ __u16 handle;
+};
+
+#define HCI_MIN_ADV_MONITOR_HANDLE 1
+#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32
+#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16
+
#define HCI_MAX_SHORT_NAME_LENGTH 10
/* Min encryption key size to match with SMP */
@@ -295,6 +331,14 @@ struct hci_dev {
__u8 le_scan_type;
__u16 le_scan_interval;
__u16 le_scan_window;
+ __u16 le_scan_int_suspend;
+ __u16 le_scan_window_suspend;
+ __u16 le_scan_int_discovery;
+ __u16 le_scan_window_discovery;
+ __u16 le_scan_int_adv_monitor;
+ __u16 le_scan_window_adv_monitor;
+ __u16 le_scan_int_connect;
+ __u16 le_scan_window_connect;
__u16 le_conn_min_interval;
__u16 le_conn_max_interval;
__u16 le_conn_latency;
@@ -323,6 +367,17 @@ struct hci_dev {
__u16 devid_product;
__u16 devid_version;
+ __u8 def_page_scan_type;
+ __u16 def_page_scan_int;
+ __u16 def_page_scan_window;
+ __u8 def_inq_scan_type;
+ __u16 def_inq_scan_int;
+ __u16 def_inq_scan_window;
+ __u16 def_br_lsto;
+ __u16 def_page_timeout;
+ __u16 def_multi_adv_rotation_duration;
+ __u16 def_le_autoconnect_timeout;
+
__u16 pkt_type;
__u16 esco_type;
__u16 link_policy;
@@ -438,7 +493,6 @@ struct hci_dev {
struct list_head mgmt_pending;
struct list_head blacklist;
struct list_head whitelist;
- struct list_head wakeable;
struct list_head uuids;
struct list_head link_keys;
struct list_head long_term_keys;
@@ -477,6 +531,9 @@ struct hci_dev {
__u16 adv_instance_timeout;
struct delayed_work adv_instance_expire;
+ struct idr adv_monitors_idr;
+ unsigned int adv_monitors_cnt;
+
__u8 irk[16];
__u32 rpa_timeout;
struct delayed_work rpa_expired;
@@ -508,6 +565,12 @@ struct hci_dev {
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
+enum conn_reasons {
+ CONN_REASON_PAIR_DEVICE,
+ CONN_REASON_L2CAP_CHAN,
+ CONN_REASON_SCO_CONNECT,
+};
+
struct hci_conn {
struct list_head list;
@@ -559,6 +622,8 @@ struct hci_conn {
__s8 max_tx_power;
unsigned long flags;
+ enum conn_reasons conn_reason;
+
__u32 clock;
__u16 clock_accuracy;
@@ -626,7 +691,7 @@ struct hci_conn_params {
struct hci_conn *conn;
bool explicit_connect;
- bool wakeable;
+ u32 current_flags;
};
extern struct list_head hci_dev_list;
@@ -984,12 +1049,14 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level,
- u16 conn_timeout);
+ u16 conn_timeout,
+ enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
u8 role, bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
- u8 sec_level, u8 auth_type);
+ u8 sec_level, u8 auth_type,
+ enum conn_reasons conn_reason);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u16 setting);
int hci_conn_check_link_mode(struct hci_conn *conn);
@@ -1151,12 +1218,19 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
struct list_head *list, bdaddr_t *bdaddr,
u8 type);
+struct bdaddr_list_with_flags *
+hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
- u8 type, u8 *peer_irk, u8 *local_irk);
+ u8 type, u8 *peer_irk, u8 *local_irk);
+int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type, u32 flags);
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
- u8 type);
+ u8 type);
+int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
+ u8 type);
void hci_bdaddr_list_clear(struct list_head *list);
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
@@ -1217,6 +1291,12 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
+void hci_adv_monitors_clear(struct hci_dev *hdev);
+void hci_free_adv_monitor(struct adv_monitor *monitor);
+int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
+int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle);
+bool hci_is_adv_monitoring(struct hci_dev *hdev);
+
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
void hci_init_sysfs(struct hci_dev *hdev);
@@ -1279,6 +1359,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+/* Use LL Privacy based address resolution if supported */
+#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+
/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40))
@@ -1387,7 +1470,7 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
__u8 encrypt;
if (conn->state == BT_CONFIG) {
- if (status)
+ if (!status)
conn->state = BT_CONNECTED;
hci_connect_cfm(conn, status);
@@ -1402,11 +1485,13 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
else
encrypt = 0x01;
- if (conn->sec_level == BT_SECURITY_SDP)
- conn->sec_level = BT_SECURITY_LOW;
+ if (!status) {
+ if (conn->sec_level == BT_SECURITY_SDP)
+ conn->sec_level = BT_SECURITY_LOW;
- if (conn->pending_sec_level > conn->sec_level)
- conn->sec_level = conn->pending_sec_level;
+ if (conn->pending_sec_level > conn->sec_level)
+ conn->sec_level = conn->pending_sec_level;
+ }
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
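
A hedged sketch tying the new flag helpers together: add an entry carrying HCI_CONN_FLAG_REMOTE_WAKEUP to a flag-aware address list and test it afterwards. The list, address and address type are supplied by the caller.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static bool example_mark_wakeable(struct list_head *list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *entry;
	u32 flags = 1U << HCI_CONN_FLAG_REMOTE_WAKEUP;

	if (hci_bdaddr_list_add_with_flags(list, bdaddr, type, flags))
		return false;

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
	return entry && hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					   entry->current_flags);
}
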
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
index 9352bb1bf34c..9949870f7d78 100644
--- a/include/net/bluetooth/hci_sock.h
+++ b/include/net/bluetooth/hci_sock.h
@@ -31,8 +31,8 @@
#define HCI_TIME_STAMP 3
/* CMSG flags */
-#define HCI_CMSG_DIR 0x0001
-#define HCI_CMSG_TSTAMP 0x0002
+#define HCI_CMSG_DIR 0x01
+#define HCI_CMSG_TSTAMP 0x02
struct sockaddr_hci {
sa_family_t hci_family;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 16e0d87bd8fa..beae5c3980f0 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -52,6 +52,12 @@ struct mgmt_hdr {
__le16 len;
} __packed;
+struct mgmt_tlv {
+ __le16 type;
+ __u8 length;
+ __u8 value[];
+} __packed;
+
struct mgmt_addr_info {
bdaddr_t bdaddr;
__u8 type;
@@ -702,6 +708,78 @@ struct mgmt_rp_set_exp_feature {
__le32 flags;
} __packed;
+#define MGMT_OP_READ_DEF_SYSTEM_CONFIG 0x004b
+#define MGMT_READ_DEF_SYSTEM_CONFIG_SIZE 0
+
+#define MGMT_OP_SET_DEF_SYSTEM_CONFIG 0x004c
+#define MGMT_SET_DEF_SYSTEM_CONFIG_SIZE 0
+
+#define MGMT_OP_READ_DEF_RUNTIME_CONFIG 0x004d
+#define MGMT_READ_DEF_RUNTIME_CONFIG_SIZE 0
+
+#define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e
+#define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0
+
+#define MGMT_OP_GET_DEVICE_FLAGS 0x004F
+#define MGMT_GET_DEVICE_FLAGS_SIZE 7
+struct mgmt_cp_get_device_flags {
+ struct mgmt_addr_info addr;
+} __packed;
+struct mgmt_rp_get_device_flags {
+ struct mgmt_addr_info addr;
+ __le32 supported_flags;
+ __le32 current_flags;
+} __packed;
+
+#define MGMT_OP_SET_DEVICE_FLAGS 0x0050
+#define MGMT_SET_DEVICE_FLAGS_SIZE 11
+struct mgmt_cp_set_device_flags {
+ struct mgmt_addr_info addr;
+ __le32 current_flags;
+} __packed;
+struct mgmt_rp_set_device_flags {
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS BIT(0)
+
+#define MGMT_OP_READ_ADV_MONITOR_FEATURES 0x0051
+#define MGMT_READ_ADV_MONITOR_FEATURES_SIZE 0
+struct mgmt_rp_read_adv_monitor_features {
+ __le32 supported_features;
+ __le32 enabled_features;
+ __le16 max_num_handles;
+ __u8 max_num_patterns;
+ __le16 num_handles;
+ __le16 handles[];
+} __packed;
+
+struct mgmt_adv_pattern {
+ __u8 ad_type;
+ __u8 offset;
+ __u8 length;
+ __u8 value[31];
+} __packed;
+
+#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052
+struct mgmt_cp_add_adv_patterns_monitor {
+ __u8 pattern_count;
+ struct mgmt_adv_pattern patterns[];
+} __packed;
+#define MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE 1
+struct mgmt_rp_add_adv_patterns_monitor {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_OP_REMOVE_ADV_MONITOR 0x0053
+struct mgmt_cp_remove_adv_monitor {
+ __le16 monitor_handle;
+} __packed;
+#define MGMT_REMOVE_ADV_MONITOR_SIZE 2
+struct mgmt_rp_remove_adv_monitor {
+ __le16 monitor_handle;
+} __packed;
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -933,3 +1011,20 @@ struct mgmt_ev_exp_feature_changed {
__u8 uuid[16];
__le32 flags;
} __packed;
+
+#define MGMT_EV_DEVICE_FLAGS_CHANGED 0x002a
+struct mgmt_ev_device_flags_changed {
+ struct mgmt_addr_info addr;
+ __le32 supported_flags;
+ __le32 current_flags;
+} __packed;
+
+#define MGMT_EV_ADV_MONITOR_ADDED 0x002b
+struct mgmt_ev_adv_monitor_added {
+ __le16 monitor_handle;
+} __packed;
+
+#define MGMT_EV_ADV_MONITOR_REMOVED 0x002c
+struct mgmt_ev_adv_monitor_removed {
+ __le16 monitor_handle;
+} __packed;
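
A hedged sketch of building the variable-length Add Advertisement Patterns Monitor request: the single pattern below (manufacturer-specific data with two placeholder bytes) is made up, and the caller is assumed to have allocated enough room for the command plus its patterns.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/mgmt.h>

static size_t example_fill_monitor_cmd(struct mgmt_cp_add_adv_patterns_monitor *cp)
{
	cp->pattern_count = 1;
	cp->patterns[0].ad_type = 0xff;		/* manufacturer specific data */
	cp->patterns[0].offset = 0;
	cp->patterns[0].length = 2;
	cp->patterns[0].value[0] = 0x12;	/* placeholder bytes */
	cp->patterns[0].value[1] = 0x34;

	return sizeof(*cp) + cp->pattern_count * sizeof(cp->patterns[0]);
}
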
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index f40ddb4264fc..1aa2e14b6c94 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -46,4 +46,6 @@ struct sco_conninfo {
__u8 dev_class[3];
};
+#define SCO_CMSG_PKT_STATUS 0x01
+
#endif /* __SCO_H */
diff --git a/include/net/bonding.h b/include/net/bonding.h
index aa854a9c01e2..7d132cc1e584 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -86,6 +86,11 @@
#define bond_for_each_slave_rcu(bond, pos, iter) \
netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
+#ifdef CONFIG_XFRM_OFFLOAD
+#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+#endif /* CONFIG_XFRM_OFFLOAD */
+
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -238,6 +243,9 @@ struct bonding {
struct dentry *debug_dir;
#endif /* CONFIG_DEBUG_FS */
struct rtnl_link_stats64 bond_stats;
+#ifdef CONFIG_XFRM_OFFLOAD
+ struct xfrm_state *xs;
+#endif /* CONFIG_XFRM_OFFLOAD */
};
#define bond_slave_get_rcu(dev) \
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 86e028388bad..b001fa91c14e 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -114,7 +114,11 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- skb->napi_id = napi->napi_id;
+ /* If the skb was already marked with a valid NAPI ID, avoid overwriting
+ * it.
+ */
+ if (skb->napi_id < MIN_NAPI_ID)
+ skb->napi_id = napi->napi_id;
#endif
}
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 064094101cb5..51f7bb42a936 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -156,7 +156,7 @@ struct cflayer {
* CAIF packets upwards in the stack.
* Packet handling rules:
* - The CAIF packet (cfpkt) ownership is passed to the
- * called receive function. This means that the the
+ * called receive function. This means that the
* packet cannot be accessed after passing it to the
* above layer using up->receive().
*
@@ -184,7 +184,7 @@ struct cflayer {
* CAIF packet downwards in the stack.
* Packet handling rules:
* - The CAIF packet (cfpkt) ownership is passed to the
- * transmit function. This means that the the packet
+ * transmit function. This means that the packet
* cannot be accessed after passing it to the below
* layer using dn->transmit().
*
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index fc7e8807838d..d9e6b9fbd95b 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -267,12 +267,12 @@ struct ieee80211_he_obss_pd {
* struct cfg80211_he_bss_color - AP settings for BSS coloring
*
* @color: the current color.
- * @disabled: is the feature disabled.
+ * @enabled: HE BSS color is used
* @partial: define the AID equation.
*/
struct cfg80211_he_bss_color {
u8 color;
- bool disabled;
+ bool enabled;
bool partial;
};
@@ -418,12 +418,28 @@ struct ieee80211_edmg {
};
/**
+ * struct ieee80211_sta_s1g_cap - STA's S1G capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ah S1G capabilities for a STA.
+ *
+ * @s1g: is STA an S1G STA
+ * @cap: S1G capabilities information
+ * @nss_mcs: Supported NSS MCS set
+ */
+struct ieee80211_sta_s1g_cap {
+ bool s1g;
+ u8 cap[10]; /* use S1G_CAPAB_ */
+ u8 nss_mcs[5];
+};
+
+/**
* struct ieee80211_supported_band - frequency band definition
*
* This structure describes a frequency band a wiphy
* is able to operate in.
*
- * @channels: Array of channels the hardware can operate in
+ * @channels: Array of channels the hardware can operate with
* in this band.
* @band: the band this structure represents
* @n_channels: Number of channels in @channels
@@ -448,6 +464,7 @@ struct ieee80211_supported_band {
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_s1g_cap s1g_cap;
struct ieee80211_edmg edmg_cap;
u16 n_iftype_data;
const struct ieee80211_sband_iftype_data *iftype_data;
@@ -1581,6 +1598,7 @@ struct cfg80211_tid_stats {
* an FCS error. This counter should be incremented only when TA of the
* received packet with an FCS error matches the peer MAC address.
* @airtime_link_metric: mesh airtime link metric.
+ * @connected_to_as: true if mesh STA has a path to authentication server
*/
struct station_info {
u64 filled;
@@ -1638,6 +1656,8 @@ struct station_info {
u32 fcs_err_count;
u32 airtime_link_metric;
+
+ u8 connected_to_as;
};
#if IS_ENABLED(CONFIG_CFG80211)
@@ -1853,6 +1873,11 @@ struct bss_parameters {
* connected to a mesh gate in mesh formation info. If false, the
* value in mesh formation is determined by the presence of root paths
* in the mesh path table
+ * @dot11MeshNolearn: Try to avoid multi-hop path discovery (e.g. PREQ/PREP
+ * for HWMP) if the destination is a direct neighbor. Note that this might
+ * not be the optimal decision as a multi-hop route might be better. So
+ * if using this setting you will likely also want to disable
+ * dot11MeshForwarding and use another mesh routing protocol on top.
*/
struct mesh_config {
u16 dot11MeshRetryTimeout;
@@ -1873,6 +1898,7 @@ struct mesh_config {
u16 dot11MeshHWMPnetDiameterTraversalTime;
u8 dot11MeshHWMPRootMode;
bool dot11MeshConnectedToMeshGate;
+ bool dot11MeshConnectedToAuthServer;
u16 dot11MeshHWMPRannInterval;
bool dot11MeshGateAnnouncementProtocol;
bool dot11MeshForwarding;
@@ -1884,6 +1910,7 @@ struct mesh_config {
enum nl80211_mesh_power_mode power_mode;
u16 dot11MeshAwakeWindowDuration;
u32 plink_timeout;
+ bool dot11MeshNolearn;
};
/**
@@ -5510,7 +5537,7 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
*
* @skb: The input A-MSDU frame without any headers.
* @list: The output list of 802.3 frames. It must be allocated and
- * initialized by by the caller.
+ * initialized by the caller.
* @addr: The device MAC address.
* @iftype: The device interface type.
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
@@ -7882,4 +7909,10 @@ void cfg80211_update_owe_info_event(struct net_device *netdev,
struct cfg80211_update_owe_info *owe_info,
gfp_t gfp);
+/**
+ * cfg80211_bss_flush - resets all the scan entries
+ * @wiphy: the wiphy
+ */
+void cfg80211_bss_flush(struct wiphy *wiphy);
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 428b6725b248..53dd7d988a2d 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -150,18 +150,6 @@ static inline int cipso_v4_doi_walk(u32 *skip_cnt,
{
return 0;
}
-
-static inline int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def,
- const char *domain)
-{
- return -ENOSYS;
-}
-
-static inline int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
- const char *domain)
-{
- return 0;
-}
#endif /* CONFIG_NETLABEL */
/*
diff --git a/include/net/compat.h b/include/net/compat.h
index f241666117d8..745db0d605b6 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -61,7 +61,6 @@ int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg,
compat_size_t *len);
int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
-struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
int put_cmsg_compat(struct msghdr*, int, int, int, void *);
int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 1df6dfec26c2..8f3c8a443238 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -40,7 +40,9 @@ struct devlink {
struct xarray snapshot_ids;
struct device *dev;
possible_net_t _net;
- struct mutex lock;
+ struct mutex lock; /* Serializes access to devlink instance specific objects such as
+ * port, sb, dpipe, resource, params, region, traps and more.
+ */
u8 reload_failed:1,
reload_enabled:1,
registered:1;
@@ -52,7 +54,7 @@ struct devlink_port_phys_attrs {
* A physical port which is visible to the user
* for a given port flavour.
*/
- u32 split_subport_number;
+ u32 split_subport_number; /* If the port is split, this is the subport number. */
};
struct devlink_port_pci_pf_attrs {
@@ -64,10 +66,18 @@ struct devlink_port_pci_vf_attrs {
u16 vf; /* Associated PCI VF of the PCI PF for this port. */
};
+/**
+ * struct devlink_port_attrs - devlink port object
+ * @flavour: flavour of the port
+ * @split: indicates if this is a split port
+ * @splittable: indicates if the port can be split.
+ * @lanes: maximum number of lanes the port supports. A value of 0 is not passed to netlink.
+ * @switch_id: if the port is part of a switch, this is the buffer with its ID, otherwise this is NULL
+ */
struct devlink_port_attrs {
- u8 set:1,
- split:1,
- switch_port:1;
+ u8 split:1,
+ splittable:1;
+ u32 lanes;
enum devlink_port_flavour flavour;
struct netdev_phys_item_id switch_id;
union {
@@ -90,7 +100,11 @@ struct devlink_port {
enum devlink_port_type desired_type;
void *type_dev;
struct devlink_port_attrs attrs;
+ u8 attrs_set:1,
+ switch_port:1;
struct delayed_work type_warn_dw;
+ struct list_head reporter_list;
+ struct mutex reporters_lock; /* Protects reporter_list */
};
struct devlink_sb_pool_info {
@@ -689,6 +703,7 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL,
DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE,
DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP,
+ DEVLINK_TRAP_GENERIC_ID_EARLY_DROP,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -718,6 +733,7 @@ enum devlink_trap_group_generic_id {
DEVLINK_TRAP_GROUP_GENERIC_ID_PIM,
DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB,
DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY,
DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6,
DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT,
DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL,
@@ -876,6 +892,8 @@ enum devlink_trap_group_generic_id {
"flow_action_sample"
#define DEVLINK_TRAP_GENERIC_NAME_FLOW_ACTION_TRAP \
"flow_action_trap"
+#define DEVLINK_TRAP_GENERIC_NAME_EARLY_DROP \
+ "early_drop"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
@@ -915,6 +933,8 @@ enum devlink_trap_group_generic_id {
"uc_loopback"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LOCAL_DELIVERY \
"local_delivery"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_EXTERNAL_DELIVERY \
+ "external_delivery"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_IPV6 \
"ipv6"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_EVENT \
@@ -1057,7 +1077,8 @@ struct devlink_ops {
*/
int (*trap_action_set)(struct devlink *devlink,
const struct devlink_trap *trap,
- enum devlink_trap_action action);
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
/**
* @trap_group_init: Trap group initialization function.
*
@@ -1074,7 +1095,8 @@ struct devlink_ops {
*/
int (*trap_group_set)(struct devlink *devlink,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer);
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack);
/**
* @trap_policer_init: Trap policer initialization function.
*
@@ -1107,6 +1129,28 @@ struct devlink_ops {
int (*trap_policer_counter_get)(struct devlink *devlink,
const struct devlink_trap_policer *policer,
u64 *p_drops);
+ /**
+ * @port_function_hw_addr_get: Port function's hardware address get function.
+ *
+ * Should be used by device drivers to report the hardware address of a function managed
+ * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
+ * function handling for a particular port.
+ *
+ * Note: @extack can be NULL when port notifier queries the port function.
+ */
+ int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack);
+ /**
+ * @port_function_hw_addr_set: Port function's hardware address set function.
+ *
+ * Should be used by device drivers to set the hardware address of a function managed
+ * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
+ * function handling for a particular port.
+ */
+ int (*port_function_hw_addr_set)(struct devlink *devlink, struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack);
};
static inline void *devlink_priv(struct devlink *devlink)
@@ -1158,17 +1202,9 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
struct ib_device *ibdev);
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_attrs_set(struct devlink_port *devlink_port,
- enum devlink_port_flavour flavour,
- u32 port_number, bool split,
- u32 split_subport_number,
- const unsigned char *switch_id,
- unsigned char switch_id_len);
-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port,
- const unsigned char *switch_id,
- unsigned char switch_id_len, u16 pf);
+ struct devlink_port_attrs *devlink_port_attrs);
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf);
void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
- const unsigned char *switch_id,
- unsigned char switch_id_len,
u16 pf, u16 vf);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
@@ -1262,6 +1298,8 @@ int devlink_info_serial_number_put(struct devlink_info_req *req,
const char *sn);
int devlink_info_driver_name_put(struct devlink_info_req *req,
const char *name);
+int devlink_info_board_serial_number_put(struct devlink_info_req *req,
+ const char *bsn);
int devlink_info_version_fixed_put(struct devlink_info_req *req,
const char *version_name,
const char *version_value);
@@ -1310,9 +1348,18 @@ struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
u64 graceful_period, void *priv);
+
+struct devlink_health_reporter *
+devlink_port_health_reporter_create(struct devlink_port *port,
+ const struct devlink_health_reporter_ops *ops,
+ u64 graceful_period, void *priv);
+
void
devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
+void
+devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter);
+
void *
devlink_health_reporter_priv(struct devlink_health_reporter *reporter);
int devlink_health_report(struct devlink_health_reporter *reporter,
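
A hedged sketch of the reworked attribute API: a driver now fills a devlink_port_attrs structure and hands it to devlink_port_attrs_set() in one call. Only fields visible in this hunk are set; the flavour, lane count and splittability below are placeholders.

#include <net/devlink.h>

static void example_set_port_attrs(struct devlink_port *dl_port)
{
	struct devlink_port_attrs attrs = {
		.flavour    = DEVLINK_PORT_FLAVOUR_PHYSICAL,
		.lanes      = 4,
		.splittable = true,
	};

	devlink_port_attrs_set(dl_port, &attrs);
}
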
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 50389772c597..75c8fac82017 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -44,6 +44,7 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_KSZ8795_VALUE 14
#define DSA_TAG_PROTO_OCELOT_VALUE 15
#define DSA_TAG_PROTO_AR9331_VALUE 16
+#define DSA_TAG_PROTO_RTL4_A_VALUE 17
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -63,6 +64,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
+ DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE,
};
struct packet_type;
@@ -84,6 +86,16 @@ struct dsa_device_ops {
enum dsa_tag_protocol proto;
};
+/* This structure defines the control interfaces that are overlayed by the
+ * DSA layer on top of the DSA CPU/management net_device instance. This is
+ * used by the core net_device layer while calling various net_device_ops
+ * function pointers.
+ */
+struct dsa_netdevice_ops {
+ int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr,
+ int cmd);
+};
+
#define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \
MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
@@ -144,7 +156,7 @@ struct dsa_mall_mirror_tc_entry {
/* TC port policer entry */
struct dsa_mall_policer_tc_entry {
- s64 burst;
+ u32 burst;
u64 rate_bytes_per_sec;
};
@@ -215,7 +227,7 @@ struct dsa_port {
/*
* Original copy of the master netdev net_device_ops
*/
- const struct net_device_ops *orig_ndo_ops;
+ const struct dsa_netdevice_ops *netdev_ops;
bool setup;
};
@@ -610,7 +622,7 @@ struct dsa_switch_ops {
* MTU change functionality. Switches can also adjust their MRU through
* this method. By MTU, one understands the SDU (L2 payload) length.
* If the switch needs to account for the DSA tag on the CPU port, this
- * method needs to to do so privately.
+ * method needs to do so privately.
*/
int (*port_change_mtu)(struct dsa_switch *ds, int port,
int new_mtu);
@@ -677,6 +689,42 @@ static inline bool dsa_can_decode(const struct sk_buff *skb,
return false;
}
+#if IS_ENABLED(CONFIG_NET_DSA)
+static inline int __dsa_netdevice_ops_check(struct net_device *dev)
+{
+ int err = -EOPNOTSUPP;
+
+ if (!dev->dsa_ptr)
+ return err;
+
+ if (!dev->dsa_ptr->netdev_ops)
+ return err;
+
+ return 0;
+}
+
+static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ const struct dsa_netdevice_ops *ops;
+ int err;
+
+ err = __dsa_netdevice_ops_check(dev);
+ if (err)
+ return err;
+
+ ops = dev->dsa_ptr->netdev_ops;
+
+ return ops->ndo_do_ioctl(dev, ifr, cmd);
+}
+#else
+static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
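
A hedged sketch of the new hook: instead of cloning the master's whole net_device_ops, a switch driver or tagger provides a small dsa_netdevice_ops and attaches it to the CPU port; the ioctl handler here is only a stub.

#include <linux/netdevice.h>
#include <net/dsa.h>

static int example_master_ioctl(struct net_device *dev, struct ifreq *ifr,
				int cmd)
{
	/* e.g. service SIOCSHWTSTAMP/SIOCGHWTSTAMP for the switch PHC */
	return -EOPNOTSUPP;
}

static const struct dsa_netdevice_ops example_netdev_ops = {
	.ndo_do_ioctl = example_master_ioctl,
};

/* Attached during setup, e.g.: cpu_dp->netdev_ops = &example_netdev_ops; */
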
diff --git a/include/net/dst.h b/include/net/dst.h
index 852d8fb36ab7..6ae2e625050d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -535,14 +535,4 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
-static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
- struct dst_entry *encap_dst,
- int headroom)
-{
- u32 encap_mtu = dst_mtu(encap_dst);
-
- if (skb->len > encap_mtu - headroom)
- skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
-}
-
#endif /* _NET_DST_H */
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index a259050f84af..4b10676c69d1 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -10,6 +10,7 @@
#include <net/flow.h>
#include <net/rtnetlink.h>
#include <net/fib_notifier.h>
+#include <linux/indirect_call_wrapper.h>
struct fib_kuid_range {
kuid_t start;
@@ -203,4 +204,21 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
+
+INDIRECT_CALLABLE_DECLARE(int fib6_rule_match(struct fib_rule *rule,
+ struct flowi *fl, int flags));
+INDIRECT_CALLABLE_DECLARE(int fib4_rule_match(struct fib_rule *rule,
+ struct flowi *fl, int flags));
+
+INDIRECT_CALLABLE_DECLARE(int fib6_rule_action(struct fib_rule *rule,
+ struct flowi *flp, int flags,
+ struct fib_lookup_arg *arg));
+INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
+ struct flowi *flp, int flags,
+ struct fib_lookup_arg *arg));
+
+INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+ struct fib_lookup_arg *arg));
+INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+ struct fib_lookup_arg *arg));
#endif
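
A hedged sketch of the pattern these declarations enable (it mirrors, but is not copied from, the fib_rules core): with retpolines, the indirect ->match() call can be turned into direct calls to the two most likely targets. Config guards for IPv6-less builds are omitted here.

#include <linux/indirect_call_wrapper.h>
#include <net/fib_rules.h>

static int example_rule_match(struct fib_rules_ops *ops, struct fib_rule *rule,
			      struct flowi *fl, int flags)
{
	if (!ops->match)
		return 1;

	return INDIRECT_CALL_2(ops->match, fib6_rule_match, fib4_rule_match,
			       rule, fl, flags);
}
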
diff --git a/include/net/flow.h b/include/net/flow.h
index a50fb77a0b27..929d3ca614d0 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -204,24 +204,6 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
return container_of(fldn, struct flowi, u.dn);
}
-typedef unsigned long flow_compare_t;
-
-static inline unsigned int flow_key_size(u16 family)
-{
- switch (family) {
- case AF_INET:
- BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
- return sizeof(struct flowi4) / sizeof(flow_compare_t);
- case AF_INET6:
- BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
- return sizeof(struct flowi6) / sizeof(flow_compare_t);
- case AF_DECnet:
- BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
- return sizeof(struct flowidn) / sizeof(flow_compare_t);
- }
- return 0;
-}
-
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
#endif
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 4b6e36288ddd..cc10b10dc3a1 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -243,6 +243,14 @@ struct flow_dissector_key_ct {
u32 ct_labels[4];
};
+/**
+ * struct flow_dissector_key_hash:
+ * @hash: hash value
+ */
+struct flow_dissector_key_hash {
+ u32 hash;
+};
+
enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
@@ -271,6 +279,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */
FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */
+ FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */
FLOW_DISSECTOR_KEY_MAX,
};
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 3eaf25f68b79..123b1e9ea304 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -231,8 +231,10 @@ struct flow_action_entry {
bool truncate;
} sample;
struct { /* FLOW_ACTION_POLICE */
- s64 burst;
+ u32 index;
+ u32 burst;
u64 rate_bytes_ps;
+ u32 mtu;
} police;
struct { /* FLOW_ACTION_CT */
int action;
@@ -304,7 +306,7 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *action_entry;
- u8 uninitialized_var(last_hw_stats);
+ u8 last_hw_stats;
int i;
if (flow_offload_has_one_action(action))
@@ -388,17 +390,20 @@ static inline bool flow_rule_match_key(const struct flow_rule *rule,
struct flow_stats {
u64 pkts;
u64 bytes;
+ u64 drops;
u64 lastused;
enum flow_action_hw_stats used_hw_stats;
bool used_hw_stats_valid;
};
static inline void flow_stats_update(struct flow_stats *flow_stats,
- u64 bytes, u64 pkts, u64 lastused,
+ u64 bytes, u64 pkts,
+ u64 drops, u64 lastused,
enum flow_action_hw_stats used_hw_stats)
{
flow_stats->pkts += pkts;
flow_stats->bytes += bytes;
+ flow_stats->drops += drops;
flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused);
/* The driver should pass value with a maximum of one bit set.
@@ -418,6 +423,8 @@ enum flow_block_binder_type {
FLOW_BLOCK_BINDER_TYPE_UNSPEC,
FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
+ FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
+ FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};
struct flow_block {
@@ -436,6 +443,7 @@ struct flow_block_offload {
struct list_head cb_list;
struct list_head *driver_block_list;
struct netlink_ext_ack *extack;
+ struct Qdisc *sch;
};
enum tc_setup_type;
@@ -447,6 +455,7 @@ struct flow_block_cb;
struct flow_block_indr {
struct list_head list;
struct net_device *dev;
+ struct Qdisc *sch;
enum flow_block_binder_type binder_type;
void *data;
void *cb_priv;
@@ -471,7 +480,8 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
void *cb_ident, void *cb_priv,
void (*release)(void *cb_priv),
struct flow_block_offload *bo,
- struct net_device *dev, void *data,
+ struct net_device *dev,
+ struct Qdisc *sch, void *data,
void *indr_cb_priv,
void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);
@@ -545,7 +555,7 @@ static inline void flow_block_init(struct flow_block *flow_block)
INIT_LIST_HEAD(&flow_block->cb_list);
}
-typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
+typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb));
@@ -553,7 +563,7 @@ typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
void (*release)(void *cb_priv));
-int flow_indr_dev_setup_offload(struct net_device *dev,
+int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
enum tc_setup_type type, void *data,
struct flow_block_offload *bo,
void (*cleanup)(struct flow_block_cb *block_cb));
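
A hedged sketch of a driver reporting hardware statistics with the extra drop counter added above; the counter values come from device-specific code and the stats type is a placeholder.

#include <net/flow_offload.h>

static void example_report_flow_stats(struct flow_cls_offload *cls,
				      u64 bytes, u64 packets, u64 drops,
				      u64 lastused)
{
	flow_stats_update(&cls->stats, bytes, packets, drops, lastused,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
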
diff --git a/include/net/fq.h b/include/net/fq.h
index 2ad85e683041..e39f3f8d5f8a 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -69,7 +69,6 @@ struct fq {
struct list_head backlogs;
spinlock_t lock;
u32 flows_cnt;
- siphash_key_t perturbation;
u32 limit;
u32 memory_limit;
u32 memory_usage;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 38a9a3d1222b..e73d74d2fabf 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -108,7 +108,7 @@ begin:
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
- u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
+ u32 hash = skb_get_hash(skb);
return reciprocal_scale(hash, fq->flows_cnt);
}
@@ -308,7 +308,6 @@ static int fq_init(struct fq *fq, int flows_cnt)
INIT_LIST_HEAD(&fq->backlogs);
spin_lock_init(&fq->lock);
fq->flows_cnt = max_t(u32, flows_cnt, 1);
- get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
fq->quantum = 300;
fq->limit = 8192;
fq->memory_limit = 16 << 20; /* 16 MBytes */
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 459d355f6506..19c00d100096 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -117,6 +117,7 @@ enum ieee80211_radiotap_tx_flags {
IEEE80211_RADIOTAP_F_TX_CTS = 0x0002,
IEEE80211_RADIOTAP_F_TX_RTS = 0x0004,
IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008,
+ IEEE80211_RADIOTAP_F_TX_NOSEQNO = 0x0010,
};
/* for IEEE80211_RADIOTAP_MCS "have" flags */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index e5b388f5fa20..aa8893c68c50 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -16,6 +16,7 @@
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
+#include <linux/sockptr.h>
#include <net/inet_sock.h>
#include <net/request_sock.h>
@@ -45,17 +46,9 @@ struct inet_connection_sock_af_ops {
u16 net_frag_header_len;
u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+ sockptr_t optval, unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-#ifdef CONFIG_COMPAT
- int (*compat_setsockopt)(struct sock *sk,
- int level, int optname,
- char __user *optval, unsigned int optlen);
- int (*compat_getsockopt)(struct sock *sk,
- int level, int optname,
- char __user *optval, int __user *optlen);
-#endif
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
void (*mtu_reduced)(struct sock *sk);
};
@@ -311,10 +304,9 @@ void inet_csk_listen_stop(struct sock *sk);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+/* update the fast reuse flag when adding a socket */
+void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
+ struct sock *sk);
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a7ce00af6c44..a3702d1d4875 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -225,6 +225,7 @@ struct inet_sock {
mc_all:1,
nodefrag:1;
__u8 bind_address_no_port:1,
+ recverr_rfc4884:1,
defer_connect:1; /* Indicates that fastopen_connect is set
* and cookie exists so we defer connect
* until first data frame is written
diff --git a/include/net/ip.h b/include/net/ip.h
index 04ebe7bf54c6..b09c48d862cc 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -23,6 +23,7 @@
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
+#include <linux/sockptr.h>
#include <net/inet_sock.h>
#include <net/route.h>
@@ -231,11 +232,7 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct ipcm_cookie *ipc, struct rtable **rtp,
struct inet_cork *cork, unsigned int flags);
-static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
- struct flowi *fl)
-{
- return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
-}
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
@@ -711,9 +708,7 @@ int __ip_options_compile(struct net *net, struct ip_options *opt,
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
- unsigned char *data, int optlen);
-int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
- unsigned char __user *data, int optlen);
+ sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
@@ -727,14 +722,10 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
-int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int __user *optlen);
-int compat_ip_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-int compat_ip_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *));
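
Throughout this series a `char __user *optval` in the setsockopt path becomes a sockptr_t, which can wrap either a user or a kernel pointer. A minimal sketch of what a handler built on this looks like, using only the <linux/sockptr.h> helpers (the handler itself is hypothetical):

    #include <linux/sockptr.h>

    /* hypothetical: copy one int option from either a user or kernel pointer */
    static int example_set_int_opt(struct sock *sk, sockptr_t optval,
                                   unsigned int optlen)
    {
            int val;

            if (optlen < sizeof(val))
                    return -EINVAL;
            if (copy_from_sockptr(&val, optval, sizeof(val)))
                    return -EFAULT;
            /* ... apply val to sk ... */
            return 0;
    }

In-kernel callers can pass KERNEL_SOCKPTR(&val) into the same path, which is why the separate compat_{set,get}sockopt hooks above can be removed.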
diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h
index 27ec612cd4a4..b3f4eaa88672 100644
--- a/include/net/ip6_checksum.h
+++ b/include/net/ip6_checksum.h
@@ -85,15 +85,6 @@ static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
}
-#if IS_ENABLED(CONFIG_IPV6)
-static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
-{
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
-}
-#endif
-
static inline __sum16 udp_v6_check(int len,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3f615a29766e..ac5ff3c3afb1 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -19,6 +19,7 @@
#include <net/netlink.h>
#include <net/inetpeer.h>
#include <net/fib_notifier.h>
+#include <linux/indirect_call_wrapper.h>
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB6_TABLE_HASHSZ 256
@@ -165,7 +166,7 @@ struct fib6_info {
struct fib6_node __rcu *fib6_node;
/* Multipath routes:
- * siblings is a list of fib6_info that have the the same metric/weight,
+ * siblings is a list of fib6_info that have the same metric/weight,
* destination, but not the same gateway. nsiblings is just a cache
* to speed up lookup.
*/
@@ -552,6 +553,41 @@ struct bpf_iter__ipv6_route {
};
#endif
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_output(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_input(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *__ip6_route_redirect(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_lookup(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags));
+static inline struct rt6_info *pol_lookup_func(pol_lookup_t lookup,
+ struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ const struct sk_buff *skb,
+ int flags)
+{
+ return INDIRECT_CALL_4(lookup,
+ ip6_pol_route_output,
+ ip6_pol_route_input,
+ ip6_pol_route_lookup,
+ __ip6_route_redirect,
+ net, table, fl6, skb, flags);
+}
+
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
static inline bool fib6_has_custom_rules(const struct net *net)
{
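
pol_lookup_func() above relies on INDIRECT_CALL_4() so that, on retpoline kernels, the four common fib6 lookup functions are matched by direct comparison and called directly instead of through an indirect branch. The same pattern in miniature, as a sketch with made-up callbacks (only the macro from <linux/indirect_call_wrapper.h> is real):

    #include <linux/indirect_call_wrapper.h>

    typedef int (*demo_cb_t)(int);

    static int demo_fast(int x) { return x + 1; }
    static int demo_slow(int x) { return x - 1; }

    static int demo_dispatch(demo_cb_t cb, int x)
    {
            /* expands to: if (cb == demo_fast) demo_fast(x); else ... */
            return INDIRECT_CALL_2(cb, demo_fast, demo_slow, x);
    }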
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 36025dea7612..02ccd32542d0 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -420,6 +420,8 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
u8 tos, u8 ttl, __be16 df, bool xnet);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
+int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
+ int headroom, bool reply);
int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 83be2d93b407..9a59a33787cb 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -14,6 +14,7 @@
#include <linux/spinlock.h> /* for struct rwlock_t */
#include <linux/atomic.h> /* for struct atomic_t */
#include <linux/refcount.h> /* for struct refcount_t */
+#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/timer.h>
@@ -874,6 +875,7 @@ struct netns_ipvs {
struct ip_vs_stats tot_stats; /* Statistics & est. */
int num_services; /* no of virtual services */
+ int num_services6; /* IPv6 virtual services */
/* Trash for destinations */
struct list_head dest_trash;
@@ -885,6 +887,8 @@ struct netns_ipvs {
atomic_t conn_out_counter;
#ifdef CONFIG_SYSCTL
+ /* delayed work for expiring no dest connections */
+ struct delayed_work expire_nodest_conn_work;
/* 1/rate drop and drop-entry variables */
struct delayed_work defense_work; /* Work handler */
int drop_rate;
@@ -960,6 +964,7 @@ struct netns_ipvs {
* are not supported when synchronization is enabled.
*/
unsigned int mixed_address_family_dests;
+ unsigned int hooks_afmask; /* &1=AF_INET, &2=AF_INET6 */
};
#define DEFAULT_SYNC_THRESHOLD 3
@@ -1049,6 +1054,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
return ipvs->sysctl_conn_reuse_mode;
}
+static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
+{
+ return ipvs->sysctl_expire_nodest_conn;
+}
+
static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
return ipvs->sysctl_schedule_icmp;
@@ -1136,6 +1146,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
return 1;
}
+static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
+{
+ return 0;
+}
+
static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
return 0;
@@ -1505,6 +1520,22 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif
+#ifdef CONFIG_SYSCTL
+/* Enqueue delayed work for expiring no-destination connections
+ * Only runs when the expire_nodest_conn sysctl is enabled
+/* Enqueue delayed work for expiring no-destination connections
+ * Only runs when the expire_nodest_conn sysctl is enabled
+ */
+static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs)
+{
+ if (sysctl_expire_nodest_conn(ipvs))
+ queue_delayed_work(system_long_wq,
+ &ipvs->expire_nodest_conn_work, 1);
+}
+
+void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs);
+#else
+static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) {}
+#endif
+
#define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \
IP_VS_CONN_F_FWD_MASK)
@@ -1624,18 +1655,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
}
#endif /* CONFIG_IP_VS_NFCT */
-/* Really using conntrack? */
-static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
- struct sk_buff *skb)
+/* Using old conntrack that can not be redirected to another real server? */
+static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp,
+ struct sk_buff *skb)
{
#ifdef CONFIG_IP_VS_NFCT
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
- if (!(cp->flags & IP_VS_CONN_F_NFCT))
- return false;
ct = nf_ct_get(skb, &ctinfo);
- if (ct)
+ if (ct && nf_ct_is_confirmed(ct))
return true;
#endif
return false;
@@ -1670,6 +1699,9 @@ static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
#endif
}
+int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af);
+void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af);
+
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 5e65bf2fd32d..bd1f396cc9c7 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -406,7 +406,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
struct ipv6_txoptions *fopt);
void fl6_free_socklist(struct sock *sk);
-int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
+int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen);
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
int flags);
int ip6_flowlabel_init(void);
@@ -1084,14 +1084,10 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
* socket options (ipv6_sockglue.c)
*/
-int ipv6_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
int ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
int addr_len);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index e942372b077b..031c661aa14d 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -10,6 +10,16 @@
#include <net/dst.h>
#include <net/fib_rules.h>
+enum l3mdev_type {
+ L3MDEV_TYPE_UNSPEC,
+ L3MDEV_TYPE_VRF,
+ __L3MDEV_TYPE_MAX
+};
+
+#define L3MDEV_TYPE_MAX (__L3MDEV_TYPE_MAX - 1)
+
+typedef int (*lookup_by_table_id_t)(struct net *net, u32 table_id);
+
/**
* struct l3mdev_ops - l3mdev operations
*
@@ -37,6 +47,15 @@ struct l3mdev_ops {
#ifdef CONFIG_NET_L3_MASTER_DEV
+int l3mdev_table_lookup_register(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn);
+
+void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn);
+
+int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
+ u32 table_id);
+
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg);
@@ -281,6 +300,26 @@ struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
}
static inline
+int l3mdev_table_lookup_register(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
+ lookup_by_table_id_t fn)
+{
+}
+
+static inline
+int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
+ u32 table_id)
+{
+ return -ENODEV;
+}
+
+static inline
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg)
{
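
The new l3mdev table-lookup hooks are meant to be filled in by an L3 master device driver such as VRF; a rough sketch, with the resolver body left hypothetical and only the registration calls taken from the declarations above:

    /* hypothetical resolver: map a routing table id to the owning ifindex */
    static int demo_ifindex_lookup_by_table_id(struct net *net, u32 table_id)
    {
            /* ... look the table id up in the driver's own mapping ... */
            return -ENODEV;
    }

    static int __init demo_l3mdev_init(void)
    {
            return l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
                                                demo_ifindex_lookup_by_table_id);
    }

    static void __exit demo_l3mdev_exit(void)
    {
            l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF,
                                           demo_ifindex_lookup_by_table_id);
    }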
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 11d5610d2ad5..66e2bfd165e8 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -825,6 +825,8 @@ enum mac80211_tx_info_flags {
* @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup
* @IEEE80211_TX_CTRL_HW_80211_ENCAP: This frame uses hardware encapsulation
* (header conversion)
+ * @IEEE80211_TX_CTRL_NO_SEQNO: Do not overwrite the sequence number that
+ * has already been assigned to this frame.
*
* These flags are used in tx_info->control.flags.
*/
@@ -836,6 +838,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5),
IEEE80211_TX_CTRL_HW_80211_ENCAP = BIT(6),
+ IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
};
/*
@@ -2727,7 +2730,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* for devices that support offload of data packets (e.g. ARP responses).
*
* Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
- * when they are able to replace in-use PTK keys according to to following
+ * when they are able to replace in-use PTK keys according to the following
* requirements:
* 1) They do not hand over frames decrypted with the old key to
mac80211 once the call to set_key() with command %DISABLE_KEY has been
@@ -4358,6 +4361,31 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
void ieee80211_restart_hw(struct ieee80211_hw *hw);
/**
+ * ieee80211_rx_list - receive frame and store processed skbs in a list
+ *
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. If a paged
+ * @skb is used, the driver is recommended to put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled and the RCU read lock held.
+ *
+ * @hw: the hardware this frame came in on
+ * @sta: the station the frame was received from, or %NULL
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @list: the destination list
+ */
+void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct list_head *list);
+
+/**
* ieee80211_rx_napi - receive frame from NAPI context
*
* Use this function to hand received frames to mac80211. The receive
@@ -4709,7 +4737,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
*
* Call this function for all transmitted data frames after their transmit
* completion. This callback should only be called for data frames which
- * are are using driver's (or hardware's) offload capability of encap/decap
+ * are using driver's (or hardware's) offload capability of encap/decap
* 802.11 frames.
*
* This function may not be called in IRQ context. Calls to this function
@@ -6236,6 +6264,14 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
int band, struct ieee80211_sta **sta);
/**
+ * Sanity-check and parse the radiotap header of injected frames
+ * @skb: packet injected by userspace
+ * @dev: the &struct net_device of this 802.11 device
+ */
+bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ struct net_device *dev);
+
+/**
* struct ieee80211_noa_data - holds temporary data for tracking P2P NoA state
*
* @next_tsf: TSF timestamp of the next absent state change
@@ -6344,7 +6380,7 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
*
* Note that this must be called in an rcu_read_lock() critical section,
* which can only be released after the SKB was handled. Some pointers in
- * skb->cb, e.g. the key pointer, are protected by by RCU and thus the
+ * skb->cb, e.g. the key pointer, are protected by RCU and thus the
* critical section must persist not just for the duration of this call
* but for the duration of the frame handling.
* However, also note that while in the wake_tx_queue() method,
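
As a sketch of how a driver might use the new batched receive entry point (locking per the comment above; everything except the mac80211 and core-network calls is hypothetical):

    static void demo_rx_complete(struct ieee80211_hw *hw,
                                 struct sk_buff_head *frames)
    {
            LIST_HEAD(list);
            struct sk_buff *skb;

            local_bh_disable();
            rcu_read_lock();
            while ((skb = __skb_dequeue(frames)) != NULL)
                    ieee80211_rx_list(hw, NULL, skb, &list);
            /* hand the fully processed frames to the stack in one batch */
            netif_receive_skb_list(&list);
            rcu_read_unlock();
            local_bh_enable();
    }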
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 46d0487d2b22..3525d2822abe 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -58,6 +58,7 @@ struct mptcp_out_options {
};
#ifdef CONFIG_MPTCP
+extern struct request_sock_ops mptcp_subflow_request_sock_ops;
void mptcp_init(void);
@@ -130,6 +131,9 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
}
void mptcp_seq_show(struct seq_file *seq);
+int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ const struct sock *sk_listener,
+ struct sk_buff *skb);
#else
static inline void mptcp_init(void)
@@ -164,10 +168,6 @@ static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
return false;
}
-static inline void mptcp_rcv_synsent(struct sock *sk)
-{
-}
-
static inline bool mptcp_synack_options(const struct request_sock *req,
unsigned int *size,
struct mptcp_out_options *opts)
@@ -203,6 +203,13 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { }
static inline void mptcp_seq_show(struct seq_file *seq) { }
+
+static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
+ const struct sock *sk_listener,
+ struct sk_buff *skb)
+{
+ return 0; /* TCP fallback */
+}
#endif /* CONFIG_MPTCP */
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 90690e37a56f..439379ca9ffa 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -279,6 +279,18 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
!nf_ct_is_dying(ct);
}
+#define NF_CT_DAY (86400 * HZ)
+
+/* Set an arbitrary timeout large enough not to ever expire, this saves
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static inline void nf_ct_offload_timeout(struct nf_conn *ct)
+{
+ if (nf_ct_expires(ct) < NF_CT_DAY / 2)
+ ct->timeout = nfct_time_stamp + NF_CT_DAY;
+}
+
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
@@ -286,7 +298,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize);
extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
-extern seqcount_t nf_conntrack_generation;
+extern seqcount_spinlock_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;
/* must be called with rcu read lock held */
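
nf_ct_offload_timeout() is aimed at flowtable-style offload paths: instead of testing IPS_OFFLOAD_BIT on every packet, the garbage collector periodically pushes the timeout of offloaded entries far into the future. A sketch of that GC step (the helper and surrounding loop are illustrative only):

    /* illustrative: refresh one conntrack entry during a GC pass */
    static void demo_refresh_offloaded(struct nf_conn *ct)
    {
            if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
                    nf_ct_offload_timeout(ct); /* keeps nf_ct_is_expired() false */
    }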
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 6f0f6fca9ac3..bf9491b77d16 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -899,6 +899,8 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
return (void *)&rule->data[rule->dlen];
}
+void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule);
+
static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -921,11 +923,6 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
(expr) != (last); \
(expr) = nft_expr_next(expr))
-enum nft_chain_flags {
- NFT_BASE_CHAIN = 0x1,
- NFT_CHAIN_HW_OFFLOAD = 0x2,
-};
-
#define NFT_CHAIN_POLICY_UNSET U8_MAX
/**
@@ -949,7 +946,8 @@ struct nft_chain {
struct nft_table *table;
u64 handle;
u32 use;
- u8 flags:6,
+ u8 flags:5,
+ bound:1,
genmask:2;
char *name;
@@ -994,6 +992,14 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
int nft_chain_validate_hooks(const struct nft_chain *chain,
unsigned int hook_flags);
+static inline bool nft_chain_is_bound(struct nft_chain *chain)
+{
+ return (chain->flags & NFT_CHAIN_BINDING) && chain->bound;
+}
+
+void nft_chain_del(struct nft_chain *chain);
+void nf_tables_chain_destroy(struct nft_ctx *ctx);
+
struct nft_stats {
u64 bytes;
u64 pkts;
@@ -1036,7 +1042,7 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai
static inline bool nft_is_base_chain(const struct nft_chain *chain)
{
- return chain->flags & NFT_BASE_CHAIN;
+ return chain->flags & NFT_CHAIN_BASE;
}
int __nft_release_basechain(struct nft_ctx *ctx);
@@ -1433,6 +1439,7 @@ struct nft_trans_chain {
char *name;
struct nft_stats __percpu *stats;
u8 policy;
+ u32 chain_id;
};
#define nft_trans_chain_update(trans) \
@@ -1443,6 +1450,8 @@ struct nft_trans_chain {
(((struct nft_trans_chain *)trans->data)->stats)
#define nft_trans_chain_policy(trans) \
(((struct nft_trans_chain *)trans->data)->policy)
+#define nft_trans_chain_id(trans) \
+ (((struct nft_trans_chain *)trans->data)->chain_id)
struct nft_trans_table {
bool update;
@@ -1498,4 +1507,6 @@ void nft_chain_filter_fini(void);
void __init nft_chain_route_init(void);
void nft_chain_route_fini(void);
+
+void nf_tables_trans_destroy_flush_work(void);
#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ed65619cbc47..d4d461236351 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -32,6 +32,12 @@ struct tcf_block_ext_info {
u32 block_index;
};
+struct tcf_qevent {
+ struct tcf_block *block;
+ struct tcf_block_ext_info info;
+ struct tcf_proto __rcu *filter_chain;
+};
+
struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
@@ -262,7 +268,7 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
- u64 bytes, u64 packets, u64 lastuse,
+ u64 bytes, u64 packets, u64 drops, u64 lastuse,
u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
@@ -273,7 +279,8 @@ tcf_exts_stats_update(const struct tcf_exts *exts,
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- tcf_action_stats_update(a, bytes, packets, lastuse, true);
+ tcf_action_stats_update(a, bytes, packets, drops,
+ lastuse, true);
a->used_hw_stats = used_hw_stats;
a->used_hw_stats_valid = used_hw_stats_valid;
}
@@ -552,6 +559,49 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
+#ifdef CONFIG_NET_CLS_ACT
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+ enum flow_block_binder_type binder_type,
+ struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack);
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack);
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+ struct sk_buff **to_free, int *ret);
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
+#else
+static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+ enum flow_block_binder_type binder_type,
+ struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+}
+
+static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static inline struct sk_buff *
+tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+ struct sk_buff **to_free, int *ret)
+{
+ return skb;
+}
+
+static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+ return 0;
+}
+#endif
+
struct tc_cls_u32_knode {
struct tcf_exts *exts;
struct tcf_result *res;
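
The tcf_qevent helpers let a qdisc attach a filter block to a qdisc-internal event (for example an early drop) and run it when that event fires. A sketch of the intended usage, based only on the prototypes above; the qdisc private struct, the netlink attribute and the binder type are assumptions:

    /* in ->init(): bind an optional qevent block */
    static int demo_init_qevent(struct demo_sched_data *q, struct Qdisc *sch,
                                struct nlattr *block_attr,
                                struct netlink_ext_ack *extack)
    {
            return tcf_qevent_init(&q->qe_drop, sch,
                                   FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
                                   block_attr, extack);
    }

    /* in ->enqueue(): run the block's filters when the event fires */
    static int demo_report_drop(struct demo_sched_data *q, struct Qdisc *sch,
                                struct sk_buff *skb, struct sk_buff **to_free)
    {
            int ret;

            skb = tcf_qevent_handle(&q->qe_drop, sch, skb, to_free, &ret);
            if (!skb)
                    return ret;     /* consumed by the block (trap/drop) */
            return qdisc_drop(skb, sch, to_free);
    }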
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 09a3099886e5..47f06f6f5a67 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -44,7 +44,7 @@ enum environment_cap {
* and potentially inform users of which devices specifically
* cased the conflicts.
* @initiator: indicates who sent this request, could be any of
- * of those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*)
+ * those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*)
* @alpha2: the ISO / IEC 3166 alpha2 country code of the requested
* regulatory domain. We have a few special codes:
* 00 - World regulatory domain
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index cf8b33213bbc..b2eb8b4ba697 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -54,7 +54,7 @@ struct request_sock {
struct request_sock *dl_next;
u16 mss;
u8 num_retrans; /* number of retransmits */
- u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
+ u8 syncookie:1; /* syncookie: encode tcpopts in timestamp */
u8 num_timeout:7; /* number of timeouts */
u32 ts_recent;
struct timer_list rsk_timer;
diff --git a/include/net/rpl.h b/include/net/rpl.h
index dceff60e8baf..308ef0a05cae 100644
--- a/include/net/rpl.h
+++ b/include/net/rpl.h
@@ -26,12 +26,6 @@ static inline void rpl_exit(void) {}
/* Worst decompression memory usage ipv6 address (16) + pad 7 */
#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
-static inline size_t ipv6_rpl_srh_alloc_size(unsigned char n)
-{
- return sizeof(struct ipv6_rpl_sr_hdr) +
- ((n + 1) * sizeof(struct in6_addr));
-}
-
size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
unsigned char cmpre);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c510b03b9751..d60e7c39d60c 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -384,6 +384,7 @@ struct qdisc_skb_cb {
};
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
+ u16 mru;
};
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
@@ -463,7 +464,7 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
- BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index f8bcb75bb044..4fc747b778eb 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -291,7 +291,7 @@ atomic_dec(&sctp_dbg_objcnt_## name)
#define SCTP_DBG_OBJCNT(name) \
atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
-/* Macro to help create new entries in in the global array of
+/* Macro to help create new entries in the global array of
* objcnt counters.
*/
#define SCTP_DBG_OBJCNT_ENTRY(name) \
@@ -412,7 +412,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
/* Tests if the list has one and only one entry. */
static inline int sctp_list_single_entry(struct list_head *head)
{
- return (head->next != head) && (head->next == head->prev);
+ return list_is_singular(head);
}
static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index fb42c90348d3..b33f1aefad09 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -431,23 +431,13 @@ struct sctp_af {
int (*setsockopt) (struct sock *sk,
int level,
int optname,
- char __user *optval,
+ sockptr_t optval,
unsigned int optlen);
int (*getsockopt) (struct sock *sk,
int level,
int optname,
char __user *optval,
int __user *optlen);
- int (*compat_setsockopt) (struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- unsigned int optlen);
- int (*compat_getsockopt) (struct sock *sk,
- int level,
- int optname,
- char __user *optval,
- int __user *optlen);
void (*get_dst) (struct sctp_transport *t,
union sctp_addr *saddr,
struct flowi *fl,
@@ -1398,7 +1388,7 @@ struct sctp_stream_priorities {
struct list_head prio_sched;
/* List of streams scheduled */
struct list_head active;
- /* The next stream stream in line */
+ /* The next stream in line */
struct sctp_stream_out_ext *next;
__u16 prio;
};
@@ -1460,7 +1450,7 @@ struct sctp_stream {
struct {
/* List of streams scheduled */
struct list_head rr_list;
- /* The next stream stream in line */
+ /* The next stream in line */
struct sctp_stream_out_ext *rr_next;
};
};
@@ -1770,7 +1760,7 @@ struct sctp_association {
int max_burst;
/* This is the max_retrans value for the association. This value will
- * be initialized initialized from system defaults, but can be
+ * be initialized from system defaults, but can be
* modified by the SCTP_ASSOCINFO socket option.
*/
int max_retrans;
diff --git a/include/net/sock.h b/include/net/sock.h
index 1183507df95b..064637d1ddf6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -59,6 +59,7 @@
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>
+#include <linux/sockptr.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -880,6 +881,15 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
__clear_bit(flag, &sk->sk_flags);
}
+static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
+ int valbool)
+{
+ if (valbool)
+ sock_set_flag(sk, bit);
+ else
+ sock_reset_flag(sk, bit);
+}
+
static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
return test_bit(flag, &sk->sk_flags);
@@ -891,6 +901,8 @@ static inline int sk_memalloc_socks(void)
{
return static_branch_unlikely(&memalloc_socks_key);
}
+
+void __receive_sock(struct file *file);
#else
static inline int sk_memalloc_socks(void)
@@ -898,6 +910,8 @@ static inline int sk_memalloc_socks(void)
return 0;
}
+static inline void __receive_sock(struct file *file)
+{ }
#endif
static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
@@ -1131,21 +1145,13 @@ struct proto {
void (*destroy)(struct sock *sk);
void (*shutdown)(struct sock *sk, int how);
int (*setsockopt)(struct sock *sk, int level,
- int optname, char __user *optval,
+ int optname, sockptr_t optval,
unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level,
int optname, char __user *optval,
int __user *option);
void (*keepalive)(struct sock *sk, int valbool);
#ifdef CONFIG_COMPAT
- int (*compat_setsockopt)(struct sock *sk,
- int level,
- int optname, char __user *optval,
- unsigned int optlen);
- int (*compat_getsockopt)(struct sock *sk,
- int level,
- int optname, char __user *optval,
- int __user *option);
int (*compat_ioctl)(struct sock *sk,
unsigned int cmd, unsigned long arg);
#endif
@@ -1668,7 +1674,7 @@ void sock_pfree(struct sk_buff *skb);
#endif
int sock_setsockopt(struct socket *sock, int level, int op,
- char __user *optval, unsigned int optlen);
+ sockptr_t optval, unsigned int optlen);
int sock_getsockopt(struct socket *sock, int level, int op,
char __user *optval, int __user *optlen);
@@ -1713,8 +1719,6 @@ int sock_no_getname(struct socket *, struct sockaddr *, int);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
int sock_no_shutdown(struct socket *, int);
-int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
-int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
@@ -1734,11 +1738,7 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname,
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int sock_common_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen);
-int compat_sock_common_getsockopt(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen);
-int compat_sock_common_setsockopt(struct socket *sock, int level,
- int optname, char __user *optval, unsigned int optlen);
+ sockptr_t optval, unsigned int optlen);
void sk_common_release(struct sock *sk);
@@ -2696,6 +2696,7 @@ void sock_no_linger(struct sock *sk);
void sock_set_keepalive(struct sock *sk);
void sock_set_priority(struct sock *sk, u32 priority);
void sock_set_rcvbuf(struct sock *sk, int val);
+void sock_set_mark(struct sock *sk, u32 val);
void sock_set_reuseaddr(struct sock *sk);
void sock_set_reuseport(struct sock *sk);
void sock_set_sndtimeo(struct sock *sk, s64 secs);
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index b8c059b4e06d..ff2246914301 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -76,6 +76,10 @@ enum switchdev_obj_id {
SWITCHDEV_OBJ_ID_RING_TEST_MRP,
SWITCHDEV_OBJ_ID_RING_ROLE_MRP,
SWITCHDEV_OBJ_ID_RING_STATE_MRP,
+ SWITCHDEV_OBJ_ID_IN_TEST_MRP,
+ SWITCHDEV_OBJ_ID_IN_ROLE_MRP,
+ SWITCHDEV_OBJ_ID_IN_STATE_MRP,
+
#endif
};
@@ -155,6 +159,40 @@ struct switchdev_obj_ring_state_mrp {
#define SWITCHDEV_OBJ_RING_STATE_MRP(OBJ) \
container_of((OBJ), struct switchdev_obj_ring_state_mrp, obj)
+/* SWITCHDEV_OBJ_ID_IN_TEST_MRP */
+struct switchdev_obj_in_test_mrp {
+ struct switchdev_obj obj;
+ /* The interval value is in us; a value of 0 stops the test */
+ u32 interval;
+ u32 in_id;
+ u32 period;
+ u8 max_miss;
+};
+
+#define SWITCHDEV_OBJ_IN_TEST_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_in_test_mrp, obj)
+
+/* SWITCHDEV_OBJ_ID_IN_ROLE_MRP */
+struct switchdev_obj_in_role_mrp {
+ struct switchdev_obj obj;
+ struct net_device *i_port;
+ u32 ring_id;
+ u16 in_id;
+ u8 in_role;
+};
+
+#define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_in_role_mrp, obj)
+
+struct switchdev_obj_in_state_mrp {
+ struct switchdev_obj obj;
+ u32 in_id;
+ u8 in_state;
+};
+
+#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \
+ container_of((OBJ), struct switchdev_obj_in_state_mrp, obj)
+
#endif
typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index f098ad4424be..6d1e26b709b5 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -59,14 +59,52 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
return params->rate.rate_bytes_ps;
}
-static inline s64 tcf_police_tcfp_burst(const struct tc_action *act)
+static inline u32 tcf_police_burst(const struct tc_action *act)
{
struct tcf_police *police = to_police(act);
struct tcf_police_params *params;
+ u32 burst;
params = rcu_dereference_protected(police->params,
lockdep_is_held(&police->tcf_lock));
- return params->tcfp_burst;
+
+ /*
+ *   "rate" bytes     "burst" nanoseconds
+ *   ------------  *  -------------------
+ *     1 second            2^6 ticks
+ *
+ * --------------------------------------
+ *        NSEC_PER_SEC nanoseconds
+ *        ------------------------
+ *              2^6 ticks
+ *
+ *   "rate" bytes     "burst" nanoseconds          2^6 ticks
+ * = ------------  *  -------------------  *  ------------------------
+ *     1 second            2^6 ticks          NSEC_PER_SEC nanoseconds
+ *
+ *    "rate" * "burst"
+ * = ------------------  bytes/nanosecond
+ *      NSEC_PER_SEC^2
+ *
+ *
+ *    "rate" * "burst"
+ * = ------------------  bytes/second
+ *       NSEC_PER_SEC
+ */
+ burst = div_u64(params->tcfp_burst * params->rate.rate_bytes_ps,
+ NSEC_PER_SEC);
+
+ return burst;
+}
+
+static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_protected(police->params,
+ lockdep_is_held(&police->tcf_lock));
+ return params->tcfp_mtu;
}
#endif /* __NET_TC_POLICE_H */
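
Worked example of the conversion implemented by tcf_police_burst() above, assuming tcfp_burst is stored in nanoseconds: with rate.rate_bytes_ps = 125,000,000 bytes/s (1 Gbit/s) and tcfp_burst = 80,000 ns, div_u64(80,000 * 125,000,000, NSEC_PER_SEC) = 10,000, i.e. the token bucket may accumulate roughly 10,000 bytes of burst credit over the configured burst time.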
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4de9485f73d9..eab6c7510b5b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -25,6 +25,7 @@
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -398,12 +399,8 @@ __poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-int tcp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
-int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen);
-int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen);
+int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+ unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
@@ -472,6 +469,8 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
+struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+ struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -906,6 +905,8 @@ static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
}
+extern const struct inet_connection_sock_af_ops ipv4_specific;
+
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
* as TCP moves IP6CB into a different location in skb->cb[]
@@ -931,6 +932,13 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
#endif
return 0;
}
+
+extern const struct inet_connection_sock_af_ops ipv6_specific;
+
+INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
+
#endif
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
@@ -1664,6 +1672,8 @@ void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
void *primary_key, void *backup_key);
+int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
+ u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
@@ -1935,6 +1945,7 @@ struct tcp_iter_state {
struct seq_net_private p;
enum tcp_seq_states state;
struct sock *syn_wait_sk;
+ struct tcp_seq_afinfo *bpf_seq_afinfo;
int bucket, offset, sbucket, num;
loff_t last_pos;
};
@@ -1947,6 +1958,10 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
int tcp_gro_complete(struct sk_buff *skb);
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
@@ -1991,7 +2006,7 @@ struct tcp_sock_af_ops {
const struct sk_buff *skb);
int (*md5_parse)(struct sock *sk,
int optname,
- char __user *optval,
+ sockptr_t optval,
int optlen);
#endif
};
diff --git a/include/net/tls.h b/include/net/tls.h
index 3212d3c214a9..e5dac7e74e79 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -291,11 +291,19 @@ struct tlsdev_ops {
enum tls_offload_sync_type {
TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
+ TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};
#define TLS_DEVICE_RESYNC_NH_START_IVAL 2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128
+#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13
+struct tls_offload_resync_async {
+ atomic64_t req;
+ u32 loglen;
+ u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
+};
+
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
@@ -314,6 +322,10 @@ struct tls_offload_context_rx {
u32 decrypted_failed;
u32 decrypted_tgt;
} resync_nh;
+ /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
+ struct {
+ struct tls_offload_resync_async *resync_async;
+ };
};
u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
@@ -606,9 +618,9 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
}
#endif
+#define RESYNC_REQ BIT(0)
+#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
-#define RESYNC_REQ (1 << 0)
-#define RESYNC_REQ_FORCE (1 << 1)
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -617,12 +629,26 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
-static inline void tls_offload_rx_force_resync_request(struct sock *sk)
+/* Log all TLS record header TCP sequences in [seq, seq+len] */
+static inline void
+tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+ atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+ ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
+ rx_ctx->resync_async->loglen = 0;
+}
+
+static inline void
+tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
- atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE);
+ atomic64_set(&rx_ctx->resync_async->req,
+ ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
static inline void
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index a8f6020f1196..da06613c9603 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -56,9 +56,6 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
-/* address family specific functions */
-extern const struct inet_connection_sock_af_ops ipv4_specific;
-
void inet6_destroy_sock(struct sock *sk);
#define IPV6_SEQ_DGRAM_HEADER \
diff --git a/include/net/tso.h b/include/net/tso.h
index 7e166a570349..62c98a9c60f1 100644
--- a/include/net/tso.h
+++ b/include/net/tso.h
@@ -4,21 +4,22 @@
#include <net/ip.h>
-#define TSO_HEADER_SIZE 128
+#define TSO_HEADER_SIZE 256
struct tso_t {
- int next_frag_idx;
- void *data;
- size_t size;
- u16 ip_id;
- bool ipv6;
- u32 tcp_seq;
+ int next_frag_idx;
+ int size;
+ void *data;
+ u16 ip_id;
+ u8 tlen; /* transport header len */
+ bool ipv6;
+ u32 tcp_seq;
};
-int tso_count_descs(struct sk_buff *skb);
-void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+int tso_count_descs(const struct sk_buff *skb);
+void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
int size, bool is_last);
-void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
-void tso_start(struct sk_buff *skb, struct tso_t *tso);
+void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
+int tso_start(struct sk_buff *skb, struct tso_t *tso);
#endif /* _TSO_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index a8fa6c0c6ded..295d52a73598 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -27,6 +27,7 @@
#include <linux/ipv6.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
+#include <linux/indirect_call_wrapper.h>
/**
* struct udp_skb_cb - UDP(-Lite) private variables
@@ -166,6 +167,12 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
__be16 dport);
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
@@ -299,7 +306,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
- char __user *optval, unsigned int optlen,
+ sockptr_t optval, unsigned int optlen,
int (*push_pending_frames)(struct sock *));
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif);
@@ -440,6 +447,7 @@ struct udp_seq_afinfo {
struct udp_iter_state {
struct seq_net_private p;
int bucket;
+ struct udp_seq_afinfo *bpf_seq_afinfo;
};
void *udp_seq_start(struct seq_file *seq, loff_t *pos);
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index e7312ceb2794..94bb7a882250 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -106,15 +106,16 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
* call this function to perform Tx offloads on outgoing traffic.
*/
enum udp_parsable_tunnel_type {
- UDP_TUNNEL_TYPE_VXLAN, /* RFC 7348 */
- UDP_TUNNEL_TYPE_GENEVE, /* draft-ietf-nvo3-geneve */
- UDP_TUNNEL_TYPE_VXLAN_GPE, /* draft-ietf-nvo3-vxlan-gpe */
+ UDP_TUNNEL_TYPE_VXLAN = BIT(0), /* RFC 7348 */
+ UDP_TUNNEL_TYPE_GENEVE = BIT(1), /* draft-ietf-nvo3-geneve */
+ UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */
};
struct udp_tunnel_info {
unsigned short type;
sa_family_t sa_family;
__be16 port;
+ u8 hw_priv;
};
/* Notify network devices of offloadable types */
@@ -181,4 +182,166 @@ static inline void udp_tunnel_encap_enable(struct socket *sock)
udp_encap_enable();
}
+#define UDP_TUNNEL_NIC_MAX_TABLES 4
+
+enum udp_tunnel_nic_info_flags {
+ /* Device callbacks may sleep */
+ UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0),
+ /* Device only supports offloads when it's open, all ports
+ * will be removed before close and re-added after open.
+ */
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1),
+ /* Device supports only IPv4 tunnels */
+ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2),
+ /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
+ * This port must not be counted towards n_entries of any table.
+ * Driver will not receive any callback associated with port 4789.
+ */
+ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
+};
+
+/**
+ * struct udp_tunnel_nic_info - driver UDP tunnel offload information
+ * @set_port: callback for adding a new port
+ * @unset_port: callback for removing a port
+ * @sync_table: callback for syncing the entire port table at once
+ * @flags: device flags from enum udp_tunnel_nic_info_flags
+ * @tables: UDP port tables this device has
+ * @tables.n_entries: number of entries in this table
+ * @tables.tunnel_types: types of tunnels this table accepts
+ *
+ * Drivers are expected to provide either @set_port and @unset_port callbacks
+ * or the @sync_table callback. Callbacks are invoked with rtnl lock held.
+ *
+ * Known limitations:
+ * - UDP tunnel port notifications are fundamentally best-effort -
+ * it is likely the driver will see both skbs which use a UDP tunnel port
+ * while not actually being tunneled, and tunnel skbs from other ports -
+ * drivers should only use these ports for non-critical RX-side offloads,
+ * e.g. the checksum offload;
+ * - none of the devices care about the socket family at present, so we don't
+ * track it. Please extend this code if you care.
+ */
+struct udp_tunnel_nic_info {
+ /* one-by-one */
+ int (*set_port)(struct net_device *dev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti);
+ int (*unset_port)(struct net_device *dev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti);
+
+ /* all at once */
+ int (*sync_table)(struct net_device *dev, unsigned int table);
+
+ unsigned int flags;
+
+ struct udp_tunnel_nic_table_info {
+ unsigned int n_entries;
+ unsigned int tunnel_types;
+ } tables[UDP_TUNNEL_NIC_MAX_TABLES];
+};
+
+/* UDP tunnel module dependencies
+ *
+ * Tunnel drivers are expected to have a hard dependency on the udp_tunnel
+ * module. NIC drivers are not, they just attach their
+ * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come.
+ * Loading a tunnel driver will cause the udp_tunnel module to be loaded
+ * and only then will all the required state structures be allocated.
+ * Since we want a weak dependency from the drivers and the core to udp_tunnel
+ * we call things through the following stubs.
+ */
+struct udp_tunnel_nic_ops {
+ void (*get_port)(struct net_device *dev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
+ void (*set_port_priv)(struct net_device *dev, unsigned int table,
+ unsigned int idx, u8 priv);
+ void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti);
+ void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti);
+ void (*reset_ntf)(struct net_device *dev);
+
+ size_t (*dump_size)(struct net_device *dev, unsigned int table);
+ int (*dump_write)(struct net_device *dev, unsigned int table,
+ struct sk_buff *skb);
+};
+
+#ifdef CONFIG_INET
+extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
+#else
+#define udp_tunnel_nic_ops ((struct udp_tunnel_nic_ops *)NULL)
+#endif
+
+static inline void
+udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ /* This helper is used from .sync_table; empty entries are indicated
+ * by a zeroed @ti. Drivers which need to know the details of a port
+ * when it gets deleted should use the .set_port / .unset_port
+ * callbacks.
+ * Zero out here, otherwise !CONFIG_INET causes uninitialized warnings.
+ */
+ memset(ti, 0, sizeof(*ti));
+
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
+}
+
+static inline void
+udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
+ unsigned int idx, u8 priv)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
+}
+
+static inline void
+udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->add_port(dev, ti);
+}
+
+static inline void
+udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->del_port(dev, ti);
+}
+
+/**
+ * udp_tunnel_nic_reset_ntf() - device-originating reset notification
+ * @dev: network interface device structure
+ *
+ * Called by the driver to inform the core that the entire UDP tunnel port
+ * state has been lost, usually due to device reset. Core will assume device
+ * forgot all the ports and issue .set_port and .sync_table callbacks as
+ * necessary.
+ *
+ * This function must be called with rtnl lock held, and will issue all
+ * the callbacks before returning.
+ */
+static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->reset_ntf(dev);
+}
+
+static inline size_t
+udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
+{
+ if (!udp_tunnel_nic_ops)
+ return 0;
+ return udp_tunnel_nic_ops->dump_size(dev, table);
+}
+
+static inline int
+udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
+ struct sk_buff *skb)
+{
+ if (!udp_tunnel_nic_ops)
+ return 0;
+ return udp_tunnel_nic_ops->dump_write(dev, table, skb);
+}
#endif
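
A NIC driver opting into this infrastructure describes its port tables with a static udp_tunnel_nic_info; a sketch, with hypothetical callbacks and table sizes, and with the attach point (the netdev's udp_tunnel_nic_info pointer) taken as an assumption:

    static int demo_udp_tunnel_set_port(struct net_device *dev,
                                        unsigned int table, unsigned int entry,
                                        struct udp_tunnel_info *ti)
    {
            /* program ti->port / ti->type into hardware slot (table, entry) */
            return 0;
    }

    static int demo_udp_tunnel_unset_port(struct net_device *dev,
                                          unsigned int table, unsigned int entry,
                                          struct udp_tunnel_info *ti)
    {
            /* clear the hardware slot */
            return 0;
    }

    static const struct udp_tunnel_nic_info demo_udp_tunnel_nic_info = {
            .set_port       = demo_udp_tunnel_set_port,
            .unset_port     = demo_udp_tunnel_unset_port,
            .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
            .tables         = {
                    { .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
                    { .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
            },
    };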
diff --git a/include/net/wimax.h b/include/net/wimax.h
index 24ba7e89c26c..f6e31d2f47aa 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -28,7 +28,7 @@
*
* USAGE
*
- * Embed a `struct wimax_dev` at the beginning of the the device's
+ * Embed a `struct wimax_dev` at the beginning of the device's
* private structure, initialize and register it. For details, see
* `struct wimax_dev`s documentation.
*
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 609f819ed08b..3814fb631d52 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -85,6 +85,12 @@ struct xdp_buff {
((xdp)->data_hard_start + (xdp)->frame_sz - \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+static inline struct skb_shared_info *
+xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
+{
+ return (struct skb_shared_info *)xdp_data_hard_end(xdp);
+}
+
struct xdp_frame {
void *data;
u16 len;
@@ -98,6 +104,22 @@ struct xdp_frame {
struct net_device *dev_rx; /* used by cpumap */
};
+
+static inline struct skb_shared_info *
+xdp_get_shared_info_from_frame(struct xdp_frame *frame)
+{
+ void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);
+
+ return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+}
+
+struct xdp_cpumap_stats {
+ unsigned int redirect;
+ unsigned int pass;
+ unsigned int drop;
+};
+
/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
@@ -121,39 +143,48 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
xdp->frame_sz = frame->frame_sz;
}
-/* Convert xdp_buff to xdp_frame */
static inline
-struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
+int xdp_update_frame_from_buff(struct xdp_buff *xdp,
+ struct xdp_frame *xdp_frame)
{
- struct xdp_frame *xdp_frame;
- int metasize;
- int headroom;
-
- if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
- return xdp_convert_zc_to_xdp_frame(xdp);
+ int metasize, headroom;
/* Assure headroom is available for storing info */
headroom = xdp->data - xdp->data_hard_start;
metasize = xdp->data - xdp->data_meta;
metasize = metasize > 0 ? metasize : 0;
if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
- return NULL;
+ return -ENOSPC;
/* Catch if driver didn't reserve tailroom for skb_shared_info */
if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
XDP_WARN("Driver BUG: missing reserved tailroom");
- return NULL;
+ return -ENOSPC;
}
- /* Store info in top of packet */
- xdp_frame = xdp->data_hard_start;
-
xdp_frame->data = xdp->data;
xdp_frame->len = xdp->data_end - xdp->data;
xdp_frame->headroom = headroom - sizeof(*xdp_frame);
xdp_frame->metasize = metasize;
xdp_frame->frame_sz = xdp->frame_sz;
+ return 0;
+}
+
+/* Convert xdp_buff to xdp_frame */
+static inline
+struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdp_frame;
+
+ if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+ return xdp_convert_zc_to_xdp_frame(xdp);
+
+ /* Store info in top of packet */
+ xdp_frame = xdp->data_hard_start;
+ if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
+ return NULL;
+
/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
xdp_frame->mem = xdp->rxq->mem;
@@ -209,8 +240,6 @@ struct xdp_attachment_info {
};
struct netdev_bpf;
-int xdp_attachment_query(struct xdp_attachment_info *info,
- struct netdev_bpf *bpf);
bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
void xdp_attachment_setup(struct xdp_attachment_info *info,
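
The conversion from xdp_buff to xdp_frame is now split so the metadata fixup (xdp_update_frame_from_buff()) can be reused by callers that already reserved the frame; most drivers keep using the combined helper, roughly as in this sketch (the transmit routine is a placeholder):

    static int demo_xmit_xdp_buff(struct xdp_buff *xdp)
    {
            struct xdp_frame *xdpf;

            xdpf = xdp_convert_buff_to_frame(xdp);
            if (unlikely(!xdpf))
                    return -ENOSPC; /* not enough head/tailroom reserved */

            /* demo_ring_xmit() is hypothetical: queue the frame for TX */
            if (demo_ring_xmit(xdpf)) {
                    xdp_return_frame(xdpf);
                    return -ENOSPC;
            }
            return 0;
    }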
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 96bfc5f5f24e..c9d87cc40c11 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -69,7 +69,11 @@ struct xdp_sock {
spinlock_t tx_completion_lock;
/* Protects generic receive. */
spinlock_t rx_lock;
+
+ /* Statistics */
u64 rx_dropped;
+ u64 rx_queue_full;
+
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index c7d213c9f9d8..2737d24ec244 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -15,6 +15,7 @@
#include <linux/audit.h>
#include <linux/slab.h>
#include <linux/refcount.h>
+#include <linux/sockptr.h>
#include <net/sock.h>
#include <net/dst.h>
@@ -127,6 +128,7 @@ struct xfrm_state_walk {
struct xfrm_state_offload {
struct net_device *dev;
+ struct net_device *real_dev;
unsigned long offload_handle;
unsigned int num_exthdrs;
u8 flags;
@@ -372,7 +374,8 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
struct xfrm_input_afinfo {
- unsigned int family;
+ u8 family;
+ bool is_ipip;
int (*callback)(struct sk_buff *skb, u8 protocol,
int err);
};
@@ -941,7 +944,7 @@ struct xfrm_dst {
static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
{
#ifdef CONFIG_XFRM
- if (dst->xfrm) {
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
return xdst->path;
@@ -953,7 +956,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
{
#ifdef CONFIG_XFRM
- if (dst->xfrm) {
+ if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
return xdst->child;
}
@@ -1415,6 +1418,7 @@ struct xfrm6_protocol {
/* XFRM tunnel handlers. */
struct xfrm_tunnel {
int (*handler)(struct sk_buff *skb);
+ int (*cb_handler)(struct sk_buff *skb, int err);
int (*err_handler)(struct sk_buff *skb, u32 info);
struct xfrm_tunnel __rcu *next;
@@ -1423,6 +1427,7 @@ struct xfrm_tunnel {
struct xfrm6_tunnel {
int (*handler)(struct sk_buff *skb);
+ int (*cb_handler)(struct sk_buff *skb, int err);
int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info);
struct xfrm6_tunnel __rcu *next;
@@ -1608,10 +1613,11 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
-int xfrm_user_policy(struct sock *sk, int optname,
- u8 __user *optval, int optlen);
+int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
+ int optlen);
#else
-static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
+static inline int xfrm_user_policy(struct sock *sk, int optname,
+ sockptr_t optval, int optlen)
{
return -ENOPROTOOPT;
}
@@ -1630,13 +1636,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
- u8 type, int dir,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
+ const struct xfrm_mark *mark,
+ u32 if_id, u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
- int dir, u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net,
+ const struct xfrm_mark *mark, u32 if_id,
+ u8 type, int dir, u32 id, int delete,
+ int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
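For the xfrm_user_policy() change above, a hedged sketch of a handler consuming the new sockptr_t argument; example_set_policy() is a made-up name, and the sketch assumes the copy_from_sockptr() helper from <linux/sockptr.h>, which accepts both kernel and user pointers.

#include <linux/sockptr.h>
#include <linux/xfrm.h>

static int example_set_policy(sockptr_t optval, int optlen)
{
	struct xfrm_userpolicy_info pol;

	if (optlen < sizeof(pol))
		return -EMSGSIZE;
	/* copy_from_sockptr() returns 0 on success, -EFAULT on failure */
	if (copy_from_sockptr(&pol, optval, sizeof(pol)))
		return -EFAULT;
	/* validation and installation of the policy would follow here */
	return 0;
}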
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index fe2fc9e91588..83139b9ce409 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2010 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(_RDMA_IB_H)
+#ifndef _RDMA_IB_H
#define _RDMA_IB_H
#include <linux/types.h>
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 2734c895c1bf..b0e636ac6690 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -1,37 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_ADDR_H)
+#ifndef IB_ADDR_H
#define IB_ADDR_H
#include <linux/in.h>
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index e06d13388ae7..66a8f369a2fa 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -1,35 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _IB_CACHE_H
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 0f1ea5f2d01c..382427add677 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -6,6 +6,7 @@
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
+
#ifndef IB_CM_H
#define IB_CM_H
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index 9a90bd031e8c..57c1ac881d08 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef IB_HDRS_H
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 8c093fc1bb9f..8dfb1ddf345a 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -1,40 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_MAD_H)
+#ifndef IB_MAD_H
#define IB_MAD_H
#include <linux/list.h>
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 8ebf84ae9ed1..1838869aad28 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_USER_MARSHALL_H)
+#ifndef IB_USER_MARSHALL_H
#define IB_USER_MARSHALL_H
#include <rdma/ib_verbs.h>
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index 7ea1382ad0e5..a9162f25beaf 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef IB_PACK_H
diff --git a/include/rdma/ib_pma.h b/include/rdma/ib_pma.h
index 2f8a65c1fca7..44c618203785 100644
--- a/include/rdma/ib_pma.h
+++ b/include/rdma/ib_pma.h
@@ -1,38 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_PMA_H)
+#ifndef IB_PMA_H
#define IB_PMA_H
#include <rdma/ib_mad.h>
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 19520979b84c..693285e76f13 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -1,35 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef IB_SA_H
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index 7be0028f155c..fdb8633cbaff 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -1,40 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_SMI_H)
+#ifndef IB_SMI_H
#define IB_SMI_H
#include <rdma/ib_mad.h>
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index e3518fd6b95b..71f573a418bf 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2007 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef IB_UMEM_H
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 64314ff76612..d16d2c17e733 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2014 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef IB_UMEM_ODP_H
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index ef2f3986c493..c0b2fa7e9b95 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
@@ -6,37 +7,9 @@
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(IB_VERBS_H)
+#ifndef IB_VERBS_H
#define IB_VERBS_H
#include <linux/types.h>
@@ -75,6 +48,7 @@ struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
+struct rdma_cm_id;
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
@@ -1566,9 +1540,8 @@ struct ib_xrcd {
struct ib_device *device;
atomic_t usecnt; /* count all exposed resources */
struct inode *inode;
-
- struct mutex tgt_qp_mutex;
- struct list_head tgt_qp_list;
+ struct rw_semaphore tgt_qps_rwsem;
+ struct xarray tgt_qps;
};
struct ib_ah {
@@ -2270,6 +2243,7 @@ struct rdma_netdev_alloc_params {
struct ib_odp_counters {
atomic64_t faults;
atomic64_t invalidations;
+ atomic64_t prefetch;
};
struct ib_counters {
@@ -2476,7 +2450,7 @@ struct ib_device_ops {
struct ib_pd *pd, struct ib_udata *udata);
int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata);
+ u32 max_num_sg);
struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
u32 max_num_data_sg,
u32 max_num_meta_sg);
@@ -2493,9 +2467,8 @@ struct ib_device_ops {
int (*dealloc_mw)(struct ib_mw *mw);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
- struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
- struct ib_udata *udata);
- int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
+ int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
+ void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
struct ib_flow *(*create_flow)(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain, struct ib_udata *udata);
@@ -2539,9 +2512,9 @@ struct ib_device_ops {
struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
- struct ib_counters *(*create_counters)(
- struct ib_device *device, struct uverbs_attr_bundle *attrs);
- int (*destroy_counters)(struct ib_counters *counters);
+ int (*create_counters)(struct ib_counters *counters,
+ struct uverbs_attr_bundle *attrs);
+ void (*destroy_counters)(struct ib_counters *counters);
int (*read_counters)(struct ib_counters *counters,
struct ib_counters_read_attr *counters_read_attr,
struct uverbs_attr_bundle *attrs);
@@ -2581,8 +2554,13 @@ struct ib_device_ops {
/**
* Allows rdma drivers to add their own restrack attributes.
*/
- int (*fill_res_entry)(struct sk_buff *msg,
- struct rdma_restrack_entry *entry);
+ int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
+ int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
+ int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
+ int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
+ int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
+ int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
+ int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
/* Device lifecycle callbacks */
/*
@@ -2637,14 +2615,19 @@ struct ib_device_ops {
* Allows rdma drivers to add their own restrack attributes
* dumped via 'rdma stat' iproute2 command.
*/
- int (*fill_stat_entry)(struct sk_buff *msg,
- struct rdma_restrack_entry *entry);
+ int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
+
+ /* query driver for its ucontext properties */
+ int (*query_ucontext)(struct ib_ucontext *context,
+ struct uverbs_attr_bundle *attrs);
DECLARE_RDMA_OBJ_SIZE(ib_ah);
+ DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
+ DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
};
struct ib_core_device {
@@ -4263,14 +4246,8 @@ static inline int ib_dereg_mr(struct ib_mr *mr)
return ib_dereg_mr_user(mr, NULL);
}
-struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata);
-
-static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type, u32 max_num_sg)
-{
- return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
-}
+struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
u32 max_num_data_sg,
@@ -4321,21 +4298,9 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
*/
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-/**
- * ib_alloc_xrcd - Allocates an XRC domain.
- * @device: The device on which to allocate the XRC domain.
- * @caller: Module name for kernel consumers
- */
-struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
-#define ib_alloc_xrcd(device) \
- __ib_alloc_xrcd((device), KBUILD_MODNAME)
-
-/**
- * ib_dealloc_xrcd - Deallocates an XRC domain.
- * @xrcd: The XRC domain to deallocate.
- * @udata: Valid user data or NULL for kernel object
- */
-int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
+ struct inode *inode, struct ib_udata *udata);
+int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
static inline int ib_check_mr_access(int flags)
{
@@ -4417,9 +4382,6 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
u32 wq_attr_mask);
-struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
- struct ib_rwq_ind_table_init_attr*
- wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
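A hedged sketch of the alloc_xrcd/dealloc_xrcd convention implied by the ib_device_ops changes above: the core now allocates the object from the size declared via DECLARE_RDMA_OBJ_SIZE(ib_xrcd), so the driver callback only initializes its private part and returns an errno. struct my_xrcd, the callbacks, and hw_handle are hypothetical.

#include <rdma/ib_verbs.h>

struct my_xrcd {
	struct ib_xrcd ibxrcd;
	u32 hw_handle;
};

static int my_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct my_xrcd *xrcd = container_of(ibxrcd, struct my_xrcd, ibxrcd);

	/* the core pre-allocated the object; set up driver state only */
	xrcd->hw_handle = 0;
	return 0;
}

static void my_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	/* tear down hardware state; the core frees the memory afterwards */
}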
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 5aa8a9c76aa0..91975400e1b3 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -1,35 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
+
#ifndef IW_CM_H
#define IW_CM_H
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index c89535047c42..6dbc1a235974 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -1,35 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2014 Intel Corporation. All rights reserved.
* Copyright (c) 2014 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
+
#ifndef _IW_PORTMAP_H
#define _IW_PORTMAP_H
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index 66d4393d339c..c6bfa2640bac 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -1,48 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2017 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef OPA_ADDR_H
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 0d9e6d74c385..73bcac90a048 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2014-2020 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(OPA_PORT_INFO_H)
+#ifndef OPA_PORT_INFO_H
#define OPA_PORT_INFO_H
#include <rdma/opa_smi.h>
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
index c7b2ef12792d..a9f1b5700e98 100644
--- a/include/rdma/opa_smi.h
+++ b/include/rdma/opa_smi.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2014 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(OPA_SMI_H)
+#ifndef OPA_SMI_H
#define OPA_SMI_H
#include <rdma/ib_mad.h>
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h
index 6f244e759b4f..cbe3c2811455 100644
--- a/include/rdma/opa_vnic.h
+++ b/include/rdma/opa_vnic.h
@@ -1,52 +1,11 @@
-#ifndef _OPA_VNIC_H
-#define _OPA_VNIC_H
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2017 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef _OPA_VNIC_H
+#define _OPA_VNIC_H
+
/*
* This file contains Intel Omni-Path (OPA) Virtual Network Interface
* Controller (VNIC) specific declarations.
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 939d7abe026f..cf5da2ae49bf 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -1,37 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(RDMA_CM_H)
+#ifndef RDMA_CM_H
#define RDMA_CM_H
#include <linux/socket.h>
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index 6a69d71a21a5..8354e7de7815 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -1,36 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
-#if !defined(RDMA_CM_IB_H)
+#ifndef RDMA_CM_IB_H
#define RDMA_CM_IB_H
#include <rdma/rdma_cm.h>
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index ab22759de7ea..2758d9df71ee 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef _RDMA_NETLINK_H
#define _RDMA_NETLINK_H
-
#include <linux/netlink.h>
#include <uapi/rdma/rdma_netlink.h>
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index ac5a9430abb6..9fd217b24916 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RDMA_VT_H
-#define DEF_RDMA_VT_H
-
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2016 - 2019 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMA_VT_H
+#define DEF_RDMA_VT_H
+
/*
* Structure that low level drivers will populate in order to register with the
* rdmavt layer.
diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h
index 574eb7278f46..1fe2bb5a63b0 100644
--- a/include/rdma/rdmavt_cq.h
+++ b/include/rdma/rdmavt_cq.h
@@ -1,56 +1,11 @@
-#ifndef DEF_RDMAVT_INCCQ_H
-#define DEF_RDMAVT_INCCQ_H
-
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
* Copyright(c) 2016 - 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMAVT_INCCQ_H
+#define DEF_RDMAVT_INCCQ_H
+
#include <linux/kthread.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index ce6c888f7fe7..c3367e9833ef 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RDMAVT_INCMR_H
-#define DEF_RDMAVT_INCMR_H
-
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMAVT_INCMR_H
+#define DEF_RDMAVT_INCMR_H
+
/*
* For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once
* drivers no longer need access to the MR directly.
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index c4369a6c2951..8275954f5ce6 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -1,53 +1,11 @@
-#ifndef DEF_RDMAVT_INCQP_H
-#define DEF_RDMAVT_INCQP_H
-
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright(c) 2016 - 2020 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
+#ifndef DEF_RDMAVT_INCQP_H
+#define DEF_RDMAVT_INCQP_H
+
#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
@@ -305,6 +263,25 @@ struct rvt_rq {
spinlock_t lock ____cacheline_aligned_in_smp;
};
+/**
+ * rvt_get_rq_count - count the number of request work queue entries
+ *			in the circular buffer
+ * @rq: data structure for the request queue entry
+ * @head: head index of the circular buffer
+ * @tail: tail index of the circular buffer
+ *
+ * Return: total number of entries in the Receive Queue
+ */
+
+static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
+{
+ u32 count = head - tail;
+
+ if ((s32)count < 0)
+ count += rq->size;
+ return count;
+}
+
/*
* This structure holds the information that the send tasklet needs
* to send a RDMA read response or atomic operation.
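
The helper above only appears with its definition in this hunk; as a hedged
illustration (the function name and the concrete numbers are mine, not from
the patch), a caller that already tracks the producer/consumer indices could
use it like this:

static inline u32 example_rq_occupancy(struct rvt_rq *rq, u32 head, u32 tail)
{
	/*
	 * With rq->size == 8, head == 2 and tail == 6, head - tail wraps
	 * negative, so rvt_get_rq_count() adds rq->size and reports 4
	 * posted entries.
	 */
	return rvt_get_rq_count(rq, head, tail);
}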
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 86de10ea30af..b00270c72740 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _UVERBS_IOCTL_
@@ -652,6 +625,7 @@ struct uverbs_attr_bundle {
struct ib_udata ucore;
struct ib_uverbs_file *ufile;
struct ib_ucontext *context;
+ struct ib_uobject *uobject;
DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN);
struct uverbs_attr attrs[];
};
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index 6ae6cf8e4c2e..f04f5126f61e 100644
--- a/include/rdma/uverbs_named_ioctl.h
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _UVERBS_NAMED_IOCTL_
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index bf0392ae15eb..fe0512116958 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _UVERBS_STD_TYPES__
@@ -110,6 +83,20 @@ static inline void uobj_alloc_abort(struct ib_uobject *uobj,
rdma_alloc_abort_uobject(uobj, attrs, false);
}
+static inline void uobj_finalize_uobj_create(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs)
+{
+ /*
+ * Tell the core code that the write() handler has completed
+ * initializing the object and that the core should commit or
+ * abort this object based upon the return code from the write()
+ * method. Similar to what uverbs_finalize_uobj_create() does for
+ * ioctl()
+ */
+ WARN_ON(attrs->uobject);
+ attrs->uobject = uobj;
+}
+
static inline struct ib_uobject *
__uobj_alloc(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev)
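
For orientation, a hedged sketch of a write()-path create handler using the
new uobj_finalize_uobj_create() helper; the uverbs_api_object parameter and
the driver_init_object() step are placeholders, not part of this diff:

/* Hypothetical driver hook that initializes the underlying HW object. */
static int driver_init_object(struct ib_uobject *uobj,
			      struct uverbs_attr_bundle *attrs);

static int example_create_handler(const struct uverbs_api_object *obj,
				  struct uverbs_attr_bundle *attrs)
{
	struct ib_device *ibdev;
	struct ib_uobject *uobj;
	int ret;

	uobj = __uobj_alloc(obj, attrs, &ibdev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = driver_init_object(uobj, attrs);
	if (ret) {
		uobj_alloc_abort(uobj, attrs);
		return ret;
	}

	/*
	 * Hand the object to the core: commit or abort now follows this
	 * handler's return code, mirroring the ioctl() path.
	 */
	uobj_finalize_uobj_create(uobj, attrs);
	return 0;
}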
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index c15b298aa62f..06db27e35f40 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _UVERBS_TYPES_
diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h
index 800d53dc9470..9e273fed0a85 100644
--- a/include/scsi/fc/fc_ms.h
+++ b/include/scsi/fc/fc_ms.h
@@ -63,8 +63,8 @@ enum fc_fdmi_hba_attr_type {
* HBA Attribute Length
*/
#define FC_FDMI_HBA_ATTR_NODENAME_LEN 8
-#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64
-#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64
+#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 80
+#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 80
#define FC_FDMI_HBA_ATTR_MODEL_LEN 256
#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256
#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index 6053d46e794e..ea7848e74d25 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -34,7 +34,7 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
blk_mq_unique_tag_to_tag(tag));
}
- if (!req)
+ if (!req || !blk_mq_request_started(req))
return NULL;
return blk_mq_rq_to_pdu(req);
}
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index bdcb6d69d154..8a26a2ffa952 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -57,7 +57,7 @@ struct iscsi_bus_flash_conn;
* When not offloading the data path, this is called
* from the scsi work queue without the session lock.
* @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
- * the number of bytes transferred on success, and -Exyz
+ * number of bytes transferred on success, and -Exyz
* value on error. When offloading the data path, this
* is called from queuecommand with the session lock, or
* from the iscsi_conn_send_pdu context with the session
diff --git a/include/soc/arc/aux.h b/include/soc/arc/aux.h
index e223c4ffa153..9c2eff6140b6 100644
--- a/include/soc/arc/aux.h
+++ b/include/soc/arc/aux.h
@@ -22,7 +22,7 @@ static inline int read_aux_reg(u32 r)
/*
* function helps elide unused variable warning
- * see: http://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html
+ * see: https://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html
*/
static inline void write_aux_reg(u32 r, u32 v)
{
diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h
index c3c7200ce151..1d7071dc0bca 100644
--- a/include/soc/at91/atmel_tcb.h
+++ b/include/soc/at91/atmel_tcb.h
@@ -36,9 +36,14 @@ struct clk;
/**
* struct atmel_tcb_config - SoC data for a Timer/Counter Block
* @counter_width: size in bits of a timer counter register
+ * @has_gclk: boolean indicating if a timer counter has a generic clock
+ * @has_qdec: boolean indicating if a timer counter has a quadrature
+ * decoder.
*/
struct atmel_tcb_config {
size_t counter_width;
+ bool has_gclk;
+ bool has_qdec;
};
/**
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 4953e9994df3..da369b12005f 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -65,6 +65,21 @@
#define PGID_MCIPV4 62
#define PGID_MCIPV6 63
+#define for_each_unicast_dest_pgid(ocelot, pgid) \
+ for ((pgid) = 0; \
+ (pgid) < (ocelot)->num_phys_ports; \
+ (pgid)++)
+
+#define for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) \
+ for ((pgid) = (ocelot)->num_phys_ports + 1; \
+ (pgid) < PGID_CPU; \
+ (pgid)++)
+
+#define for_each_aggr_pgid(ocelot, pgid) \
+ for ((pgid) = PGID_AGGR; \
+ (pgid) < PGID_SRC; \
+ (pgid)++)
+
/* Aggregation PGIDs, one per Link Aggregation Code */
#define PGID_AGGR 64
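
As a hedged usage sketch of the new iterators (the ANA_PGID_PGID register and
the ocelot_write_rix() helper are assumed from elsewhere in these headers and
are not shown in this hunk), programming a flood mask for every non-reserved
multicast destination PGID might look like:

static void example_flood_unknown_multicast(struct ocelot *ocelot)
{
	u32 front_ports = GENMASK(ocelot->num_phys_ports - 1, 0);
	int pgid;

	for_each_nonreserved_multicast_dest_pgid(ocelot, pgid)
		ocelot_write_rix(ocelot, front_ports, ANA_PGID_PGID, pgid);
}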
@@ -111,6 +126,7 @@ enum ocelot_target {
HSIO,
PTP,
GCB,
+ DEV_GMII,
TARGET_MAX,
};
@@ -393,6 +409,48 @@ enum ocelot_reg {
PTP_CLK_CFG_ADJ_CFG,
PTP_CLK_CFG_ADJ_FREQ,
GCB_SOFT_RST = GCB << TARGET_OFFSET,
+ GCB_MIIM_MII_STATUS,
+ GCB_MIIM_MII_CMD,
+ GCB_MIIM_MII_DATA,
+ DEV_CLOCK_CFG = DEV_GMII << TARGET_OFFSET,
+ DEV_PORT_MISC,
+ DEV_EVENTS,
+ DEV_EEE_CFG,
+ DEV_RX_PATH_DELAY,
+ DEV_TX_PATH_DELAY,
+ DEV_PTP_PREDICT_CFG,
+ DEV_MAC_ENA_CFG,
+ DEV_MAC_MODE_CFG,
+ DEV_MAC_MAXLEN_CFG,
+ DEV_MAC_TAGS_CFG,
+ DEV_MAC_ADV_CHK_CFG,
+ DEV_MAC_IFG_CFG,
+ DEV_MAC_HDX_CFG,
+ DEV_MAC_DBG_CFG,
+ DEV_MAC_FC_MAC_LOW_CFG,
+ DEV_MAC_FC_MAC_HIGH_CFG,
+ DEV_MAC_STICKY,
+ PCS1G_CFG,
+ PCS1G_MODE_CFG,
+ PCS1G_SD_CFG,
+ PCS1G_ANEG_CFG,
+ PCS1G_ANEG_NP_CFG,
+ PCS1G_LB_CFG,
+ PCS1G_DBG_CFG,
+ PCS1G_CDET_CFG,
+ PCS1G_ANEG_STATUS,
+ PCS1G_ANEG_NP_STATUS,
+ PCS1G_LINK_STATUS,
+ PCS1G_LINK_DOWN_CNT,
+ PCS1G_STICKY,
+ PCS1G_DEBUG_STATUS,
+ PCS1G_LPI_CFG,
+ PCS1G_LPI_WAKE_ERROR_CNT,
+ PCS1G_LPI_STATUS,
+ PCS1G_TSTPAT_MODE_CFG,
+ PCS1G_TSTPAT_STATUS,
+ DEV_PCS_FX100_CFG,
+ DEV_PCS_FX100_STATUS,
};
enum ocelot_regfield {
@@ -432,15 +490,30 @@ enum ocelot_regfield {
ANA_TABLES_MACACCESS_B_DOM,
ANA_TABLES_MACTINDX_BUCKET,
ANA_TABLES_MACTINDX_M_INDEX,
+ QSYS_SWITCH_PORT_MODE_PORT_ENA,
+ QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG,
+ QSYS_SWITCH_PORT_MODE_YEL_RSRVD,
+ QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE,
+ QSYS_SWITCH_PORT_MODE_TX_PFC_ENA,
+ QSYS_SWITCH_PORT_MODE_TX_PFC_MODE,
QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
+ SYS_PORT_MODE_DATA_WO_TS,
+ SYS_PORT_MODE_INCL_INJ_HDR,
+ SYS_PORT_MODE_INCL_XTR_HDR,
+ SYS_PORT_MODE_INCL_HDR_ERR,
SYS_RESET_CFG_CORE_ENA,
SYS_RESET_CFG_MEM_ENA,
SYS_RESET_CFG_MEM_INIT,
GCB_SOFT_RST_SWC_RST,
+ GCB_MIIM_MII_STATUS_PENDING,
+ GCB_MIIM_MII_STATUS_BUSY,
+ SYS_PAUSE_CFG_PAUSE_START,
+ SYS_PAUSE_CFG_PAUSE_STOP,
+ SYS_PAUSE_CFG_PAUSE_ENA,
REGFIELD_MAX
};
@@ -468,9 +541,10 @@ struct ocelot;
struct ocelot_ops {
int (*reset)(struct ocelot *ocelot);
+ u16 (*wm_enc)(u16 value);
};
-struct ocelot_acl_block {
+struct ocelot_vcap_block {
struct list_head rules;
int count;
int pol_lpr;
@@ -479,7 +553,7 @@ struct ocelot_acl_block {
struct ocelot_port {
struct ocelot *ocelot;
- void __iomem *regs;
+ struct regmap *target;
bool vlan_aware;
@@ -494,6 +568,8 @@ struct ocelot_port {
u8 ts_id;
phy_interface_t phy_mode;
+
+ u8 *xmit_template;
};
struct ocelot {
@@ -535,7 +611,7 @@ struct ocelot {
struct list_head multicast;
- struct ocelot_acl_block acl_block;
+ struct ocelot_vcap_block block;
const struct vcap_field *vcap_is2_keys;
const struct vcap_field *vcap_is2_actions;
@@ -578,6 +654,11 @@ struct ocelot_policer {
#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
+#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
+#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
+#define ocelot_fields_write(ocelot, id, reg, val) regmap_fields_write((ocelot)->regfields[(reg)], (id), (val))
+#define ocelot_fields_read(ocelot, id, reg, val) regmap_fields_read((ocelot)->regfields[(reg)], (id), (val))
+
/* I/O */
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
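
A hedged sketch of the new regmap-field wrappers (the choice of fields and
the error handling are illustrative only): per-port fields go through
ocelot_fields_write()/ocelot_fields_read() with the port index selecting the
instance, while single-instance fields use ocelot_field_write()/_read().

static int example_enable_port(struct ocelot *ocelot, int port)
{
	unsigned int busy;

	/* Per-port field: the port index picks the regmap_field instance. */
	ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);

	/* Single-instance field: read back into an unsigned int. */
	ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY, &busy);

	return busy ? -EBUSY : 0;
}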
@@ -641,5 +722,9 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress);
int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress);
+int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_port_mdb *mdb);
#endif
diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h
index 7c08437061fc..0c6021f02fee 100644
--- a/include/soc/mscc/ocelot_dev.h
+++ b/include/soc/mscc/ocelot_dev.h
@@ -8,8 +8,6 @@
#ifndef _MSCC_OCELOT_DEV_H_
#define _MSCC_OCELOT_DEV_H_
-#define DEV_CLOCK_CFG 0x0
-
#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
@@ -19,18 +17,12 @@
#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
-#define DEV_PORT_MISC 0x4
-
#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4)
#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3)
#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2)
#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1)
#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0)
-#define DEV_EVENTS 0x8
-
-#define DEV_EEE_CFG 0xc
-
#define DEV_EEE_CFG_EEE_ENA BIT(22)
#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
@@ -43,33 +35,19 @@
#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
#define DEV_EEE_CFG_PORT_LPI BIT(0)
-#define DEV_RX_PATH_DELAY 0x10
-
-#define DEV_TX_PATH_DELAY 0x14
-
-#define DEV_PTP_PREDICT_CFG 0x18
-
#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4))
#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4)
#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4)
#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0))
#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0)
-#define DEV_MAC_ENA_CFG 0x1c
-
#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
-#define DEV_MAC_MODE_CFG 0x20
-
#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0)
-#define DEV_MAC_MAXLEN_CFG 0x24
-
-#define DEV_MAC_TAGS_CFG 0x28
-
#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
@@ -77,12 +55,8 @@
#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1)
#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
-#define DEV_MAC_ADV_CHK_CFG 0x2c
-
#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
-#define DEV_MAC_IFG_CFG 0x30
-
#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
@@ -94,8 +68,6 @@
#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
-#define DEV_MAC_HDX_CFG 0x34
-
#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
#define DEV_MAC_HDX_CFG_OB_ENA BIT(25)
#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24)
@@ -107,17 +79,9 @@
#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
-#define DEV_MAC_DBG_CFG 0x38
-
#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4)
#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
-#define DEV_MAC_FC_MAC_LOW_CFG 0x3c
-
-#define DEV_MAC_FC_MAC_HIGH_CFG 0x40
-
-#define DEV_MAC_STICKY 0x44
-
#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
@@ -129,25 +93,17 @@
#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
-#define PCS1G_CFG 0x48
-
#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
#define PCS1G_CFG_PCS_ENA BIT(0)
-#define PCS1G_MODE_CFG 0x4c
-
#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
-#define PCS1G_SD_CFG 0x50
-
#define PCS1G_SD_CFG_SD_SEL BIT(8)
#define PCS1G_SD_CFG_SD_POL BIT(4)
#define PCS1G_SD_CFG_SD_ENA BIT(0)
-#define PCS1G_ANEG_CFG 0x54
-
#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16)
#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
@@ -155,29 +111,19 @@
#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
-#define PCS1G_ANEG_NP_CFG 0x58
-
#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16))
#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16)
#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16)
#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0)
-#define PCS1G_LB_CFG 0x5c
-
#define PCS1G_LB_CFG_RA_ENA BIT(4)
#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
-#define PCS1G_DBG_CFG 0x60
-
#define PCS1G_DBG_CFG_UDLT BIT(0)
-#define PCS1G_CDET_CFG 0x64
-
#define PCS1G_CDET_CFG_CDET_ENA BIT(0)
-#define PCS1G_ANEG_STATUS 0x68
-
#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16)
#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
@@ -185,10 +131,6 @@
#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
-#define PCS1G_ANEG_NP_STATUS 0x6c
-
-#define PCS1G_LINK_STATUS 0x70
-
#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12))
#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12)
#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12)
@@ -196,17 +138,9 @@
#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
-#define PCS1G_LINK_DOWN_CNT 0x74
-
-#define PCS1G_STICKY 0x78
-
#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
-#define PCS1G_DEBUG_STATUS 0x7c
-
-#define PCS1G_LPI_CFG 0x80
-
#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20)
#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17)
#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16)
@@ -215,10 +149,6 @@
#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4)
#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0)
-#define PCS1G_LPI_WAKE_ERROR_CNT 0x84
-
-#define PCS1G_LPI_STATUS 0x88
-
#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16)
#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12)
#define PCS1G_LPI_STATUS_RX_QUIET BIT(9)
@@ -227,18 +157,12 @@
#define PCS1G_LPI_STATUS_TX_QUIET BIT(1)
#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0)
-#define PCS1G_TSTPAT_MODE_CFG 0x8c
-
-#define PCS1G_TSTPAT_STATUS 0x90
-
#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8))
#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8)
#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8)
#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4)
#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0)
-#define DEV_PCS_FX100_CFG 0x94
-
#define DEV_PCS_FX100_CFG_SD_SEL BIT(26)
#define DEV_PCS_FX100_CFG_SD_POL BIT(25)
#define DEV_PCS_FX100_CFG_SD_ENA BIT(24)
@@ -259,8 +183,6 @@
#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0)
-#define DEV_PCS_FX100_STATUS 0x98
-
#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8))
#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8)
#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8)
diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h
index d8c63aa761be..a814bc2017d8 100644
--- a/include/soc/mscc/ocelot_qsys.h
+++ b/include/soc/mscc/ocelot_qsys.h
@@ -13,19 +13,6 @@
#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0)
-#define QSYS_SWITCH_PORT_MODE_RSZ 0x4
-
-#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11))
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11)
-#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10)
-#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1))
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0)
-
#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5)
#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4)
#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3)
diff --git a/include/soc/mscc/ocelot_sys.h b/include/soc/mscc/ocelot_sys.h
index 16f91e172bcb..79cf40ccdbe6 100644
--- a/include/soc/mscc/ocelot_sys.h
+++ b/include/soc/mscc/ocelot_sys.h
@@ -12,19 +12,6 @@
#define SYS_COUNT_TX_OCTETS_RSZ 0x4
-#define SYS_PORT_MODE_RSZ 0x4
-
-#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5))
-#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5)
-#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5)
-#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3))
-#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3)
-#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3)
-#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1))
-#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1)
-#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1)
-#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0)
-
#define SYS_FRONT_PORT_MODE_RSZ 0x4
#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0)
@@ -56,16 +43,6 @@
#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0))
#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0)
-#define SYS_PAUSE_CFG_RSZ 0x4
-
-#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10))
-#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10)
-#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10)
-#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1))
-#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1)
-#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1)
-#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
-
#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9))
#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9)
#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9)
diff --git a/include/soc/qcom/kryo-l2-accessors.h b/include/soc/qcom/kryo-l2-accessors.h
new file mode 100644
index 000000000000..673c5344afe3
--- /dev/null
+++ b/include/soc/qcom/kryo-l2-accessors.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_ARCH_QCOM_KRYO_L2_ACCESSORS_H
+#define __SOC_ARCH_QCOM_KRYO_L2_ACCESSORS_H
+
+void kryo_l2_set_indirect_reg(u64 reg, u64 val);
+u64 kryo_l2_get_indirect_reg(u64 reg);
+
+#endif
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
index f9ec353d24a5..bdbee1a97d36 100644
--- a/include/soc/qcom/rpmh.h
+++ b/include/soc/qcom/rpmh.h
@@ -20,7 +20,7 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state,
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 *n);
-int rpmh_invalidate(const struct device *dev);
+void rpmh_invalidate(const struct device *dev);
#else
@@ -38,8 +38,9 @@ static inline int rpmh_write_batch(const struct device *dev,
const struct tcs_cmd *cmd, u32 *n)
{ return -ENODEV; }
-static inline int rpmh_invalidate(const struct device *dev)
-{ return -ENODEV; }
+static inline void rpmh_invalidate(const struct device *dev)
+{
+}
#endif /* CONFIG_QCOM_RPMH */
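
With the prototype now returning void, callers simply stop checking the
result; a minimal sketch of the adjustment (the caller name is illustrative):

static void example_flush_cached_votes(const struct device *dev)
{
	/* Previously: ret = rpmh_invalidate(dev); if (ret) ... */
	rpmh_invalidate(dev);
}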
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index 8f8e73e5cd45..bff99f23860c 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -3,28 +3,38 @@
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*/
-#ifndef _ABI_BPMP_ABI_H_
-#define _ABI_BPMP_ABI_H_
+#ifndef ABI_BPMP_ABI_H
+#define ABI_BPMP_ABI_H
-#ifdef LK
+#if defined(LK) || defined(BPMP_ABI_HAVE_STDC)
+#include <stddef.h>
#include <stdint.h>
#endif
-#ifndef __ABI_PACKED
-#define __ABI_PACKED __attribute__((packed))
+#ifndef BPMP_ABI_PACKED
+#ifdef __ABI_PACKED
+#define BPMP_ABI_PACKED __ABI_PACKED
+#else
+#define BPMP_ABI_PACKED __attribute__((packed))
+#endif
#endif
#ifdef NO_GCC_EXTENSIONS
-#define EMPTY char empty;
-#define EMPTY_ARRAY 1
+#define BPMP_ABI_EMPTY char empty;
+#define BPMP_ABI_EMPTY_ARRAY 1
#else
-#define EMPTY
-#define EMPTY_ARRAY 0
+#define BPMP_ABI_EMPTY
+#define BPMP_ABI_EMPTY_ARRAY 0
#endif
-#ifndef __UNION_ANON
-#define __UNION_ANON
+#ifndef BPMP_UNION_ANON
+#ifdef __UNION_ANON
+#define BPMP_UNION_ANON __UNION_ANON
+#else
+#define BPMP_UNION_ANON
+#endif
#endif
+
/**
* @file
*/
@@ -73,6 +83,7 @@
struct mrq_request {
/** @brief MRQ number of the request */
uint32_t mrq;
+
/**
* @brief Flags providing follow up directions to the receiver
*
@@ -82,7 +93,7 @@ struct mrq_request {
* | 0 | should be 1 |
*/
uint32_t flags;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Format
@@ -98,18 +109,18 @@ struct mrq_response {
int32_t err;
/** @brief Reserved for future use */
uint32_t flags;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Format
* Minimum needed size for an IPC message buffer
*/
-#define MSG_MIN_SZ 128
+#define MSG_MIN_SZ 128U
/**
* @ingroup MRQ_Format
* Minimum size guaranteed for data in an IPC message buffer
*/
-#define MSG_DATA_MIN_SZ 120
+#define MSG_DATA_MIN_SZ 120U
/**
* @ingroup MRQ_Codes
@@ -118,36 +129,36 @@ struct mrq_response {
* @{
*/
-#define MRQ_PING 0
-#define MRQ_QUERY_TAG 1
-#define MRQ_MODULE_LOAD 4
-#define MRQ_MODULE_UNLOAD 5
-#define MRQ_TRACE_MODIFY 7
-#define MRQ_WRITE_TRACE 8
-#define MRQ_THREADED_PING 9
-#define MRQ_MODULE_MAIL 11
-#define MRQ_DEBUGFS 19
-#define MRQ_RESET 20
-#define MRQ_I2C 21
-#define MRQ_CLK 22
-#define MRQ_QUERY_ABI 23
-#define MRQ_PG_READ_STATE 25
-#define MRQ_PG_UPDATE_STATE 26
-#define MRQ_THERMAL 27
-#define MRQ_CPU_VHINT 28
-#define MRQ_ABI_RATCHET 29
-#define MRQ_EMC_DVFS_LATENCY 31
-#define MRQ_TRACE_ITER 64
-#define MRQ_RINGBUF_CONSOLE 65
-#define MRQ_PG 66
-#define MRQ_CPU_NDIV_LIMITS 67
-#define MRQ_STRAP 68
-#define MRQ_UPHY 69
-#define MRQ_CPU_AUTO_CC3 70
-#define MRQ_QUERY_FW_TAG 71
-#define MRQ_FMON 72
-#define MRQ_EC 73
-#define MRQ_FBVOLT_STATUS 74
+#define MRQ_PING 0U
+#define MRQ_QUERY_TAG 1U
+#define MRQ_MODULE_LOAD 4U
+#define MRQ_MODULE_UNLOAD 5U
+#define MRQ_TRACE_MODIFY 7U
+#define MRQ_WRITE_TRACE 8U
+#define MRQ_THREADED_PING 9U
+#define MRQ_MODULE_MAIL 11U
+#define MRQ_DEBUGFS 19U
+#define MRQ_RESET 20U
+#define MRQ_I2C 21U
+#define MRQ_CLK 22U
+#define MRQ_QUERY_ABI 23U
+#define MRQ_PG_READ_STATE 25U
+#define MRQ_PG_UPDATE_STATE 26U
+#define MRQ_THERMAL 27U
+#define MRQ_CPU_VHINT 28U
+#define MRQ_ABI_RATCHET 29U
+#define MRQ_EMC_DVFS_LATENCY 31U
+#define MRQ_TRACE_ITER 64U
+#define MRQ_RINGBUF_CONSOLE 65U
+#define MRQ_PG 66U
+#define MRQ_CPU_NDIV_LIMITS 67U
+#define MRQ_STRAP 68U
+#define MRQ_UPHY 69U
+#define MRQ_CPU_AUTO_CC3 70U
+#define MRQ_QUERY_FW_TAG 71U
+#define MRQ_FMON 72U
+#define MRQ_EC 73U
+#define MRQ_DEBUG 75U
/** @} */
@@ -156,7 +167,7 @@ struct mrq_response {
* @brief Maximum MRQ code to be sent by CPU software to
* BPMP. Subject to change in future
*/
-#define MAX_CPU_MRQ_ID 74
+#define MAX_CPU_MRQ_ID 75U
/**
* @addtogroup MRQ_Payloads
@@ -223,7 +234,7 @@ struct mrq_response {
struct mrq_ping_request {
/** @brief Arbitrarily chosen value */
uint32_t challenge;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Ping
@@ -237,7 +248,7 @@ struct mrq_ping_request {
struct mrq_ping_response {
/** @brief Response to the MRQ_PING challenge */
uint32_t reply;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
@@ -264,7 +275,7 @@ struct mrq_ping_response {
struct mrq_query_tag_request {
/** @brief Base address to store the firmware tag */
uint32_t addr;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
@@ -291,15 +302,15 @@ struct mrq_query_tag_request {
struct mrq_query_fw_tag_response {
/** @brief Array to store tag information */
uint8_t tag[32];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
* @def MRQ_MODULE_LOAD
* @brief Dynamically load a BPMP code module
*
- * * Platforms: T210, T214, T186
- * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
+ * * Platforms: T210, T210B01, T186
+ * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_module_load_request
@@ -327,11 +338,11 @@ struct mrq_query_fw_tag_response {
*
*/
struct mrq_module_load_request {
- /** @brief Base address of the code to load. Treated as (void *) */
- uint32_t phys_addr; /* (void *) */
+ /** @brief Base address of the code to load */
+ uint32_t phys_addr;
/** @brief Size in bytes of code to load */
uint32_t size;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Module
@@ -342,7 +353,7 @@ struct mrq_module_load_request {
struct mrq_module_load_response {
/** @brief Handle to the loaded module */
uint32_t base;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/**
@@ -350,8 +361,8 @@ struct mrq_module_load_response {
* @def MRQ_MODULE_UNLOAD
* @brief Unload a previously loaded code module
*
- * * Platforms: T210, T214, T186
- * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
+ * * Platforms: T210, T210B01, T186
+ * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
* * Initiators: CCPLEX
* * Targets: BPMP
* * Request Payload: @ref mrq_module_unload_request
@@ -370,7 +381,7 @@ struct mrq_module_load_response {
struct mrq_module_unload_request {
/** @brief Handle of the module to unload */
uint32_t base;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/**
@@ -378,6 +389,8 @@ struct mrq_module_unload_request {
* @def MRQ_TRACE_MODIFY
* @brief Modify the set of enabled trace events
*
+ * @deprecated
+ *
* * Platforms: All
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -400,7 +413,7 @@ struct mrq_trace_modify_request {
uint32_t clr;
/** @brief Bit mask of trace events to enable */
uint32_t set;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Trace
@@ -414,13 +427,15 @@ struct mrq_trace_modify_request {
struct mrq_trace_modify_response {
/** @brief Bit mask of trace event enable states */
uint32_t mask;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
* @def MRQ_WRITE_TRACE
* @brief Write trace data to a buffer
*
+ * @deprecated
+ *
* * Platforms: All
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -454,7 +469,7 @@ struct mrq_write_trace_request {
uint32_t area;
/** @brief Size in bytes of the output buffer */
uint32_t size;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Trace
@@ -471,25 +486,25 @@ struct mrq_write_trace_response {
 * drained to the output buffer. Value is 0 otherwise.
*/
uint32_t eof;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct mrq_threaded_ping_request {
uint32_t challenge;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct mrq_threaded_ping_response {
uint32_t reply;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
* @def MRQ_MODULE_MAIL
* @brief Send a message to a loadable module
*
- * * Platforms: T210, T214, T186
- * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186)
+ * * Platforms: T210, T210B01, T186
+ * @cond (bpmp_t210 || bpmp_t210b01 || bpmp_t186)
* * Initiators: Any
* * Targets: BPMP
* * Request Payload: @ref mrq_module_mail_request
@@ -510,8 +525,8 @@ struct mrq_module_mail_request {
* The length of data[ ] is unknown to the BPMP core firmware
* but it is limited to the size of an IPC message.
*/
- uint8_t data[EMPTY_ARRAY];
-} __ABI_PACKED;
+ uint8_t data[BPMP_ABI_EMPTY_ARRAY];
+} BPMP_ABI_PACKED;
/**
* @ingroup Module
@@ -523,8 +538,8 @@ struct mrq_module_mail_response {
* The length of data[ ] is unknown to the BPMP core firmware
* but it is limited to the size of an IPC message.
*/
- uint8_t data[EMPTY_ARRAY];
-} __ABI_PACKED;
+ uint8_t data[BPMP_ABI_EMPTY_ARRAY];
+} BPMP_ABI_PACKED;
/** @endcond */
/**
@@ -532,6 +547,8 @@ struct mrq_module_mail_response {
* @def MRQ_DEBUGFS
* @brief Interact with BPMP's debugfs file nodes
*
+ * @deprecated use MRQ_DEBUG instead.
+ *
* * Platforms: T186, T194
* * Initiators: Any
* * Targets: BPMP
@@ -587,7 +604,7 @@ struct cmd_debugfs_fileop_request {
uint32_t dataaddr;
/** @brief Length in bytes of data buffer */
uint32_t datalen;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -598,7 +615,7 @@ struct cmd_debugfs_dumpdir_request {
uint32_t dataaddr;
/** @brief Length in bytes of data buffer */
uint32_t datalen;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -609,7 +626,7 @@ struct cmd_debugfs_fileop_response {
uint32_t reserved;
/** @brief Number of bytes read from or written to data buffer */
uint32_t nbytes;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -620,7 +637,7 @@ struct cmd_debugfs_dumpdir_response {
uint32_t reserved;
/** @brief Number of bytes read from or written to data buffer */
uint32_t nbytes;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -643,8 +660,8 @@ struct mrq_debugfs_request {
union {
struct cmd_debugfs_fileop_request fop;
struct cmd_debugfs_dumpdir_request dumpdir;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup Debugfs
@@ -659,8 +676,8 @@ struct mrq_debugfs_response {
struct cmd_debugfs_fileop_response fop;
/** @brief Response data for CMD_DEBUGFS_DUMPDIR command */
struct cmd_debugfs_dumpdir_response dumpdir;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @addtogroup Debugfs
@@ -673,6 +690,190 @@ struct mrq_debugfs_response {
/**
* @ingroup MRQ_Codes
+ * @def MRQ_DEBUG
+ * @brief Interact with BPMP's debugfs file nodes, exchanging data
+ * through the message payload. This is functionally equivalent to
+ * @ref MRQ_DEBUGFS, but the data is exchanged differently: when
+ * software running on the CPU reads a debugfs file, the file path
+ * and the read data are carried in the message payload itself.
+ * Since the payload size is limited, a debugfs file transaction
+ * might require multiple frames of data exchanged between BPMP and
+ * the CPU before the transaction completes.
+ *
+ * * Platforms: T194
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_debug_request
+ * * Response Payload: @ref mrq_debug_response
+ */
+
+/** @ingroup Debugfs */
+enum mrq_debug_commands {
+ /** @brief Open required file for read operation */
+ CMD_DEBUG_OPEN_RO = 0,
+ /** @brief Open required file for write operation */
+ CMD_DEBUG_OPEN_WO = 1,
+ /** @brief Perform read */
+ CMD_DEBUG_READ = 2,
+ /** @brief Perform write */
+ CMD_DEBUG_WRITE = 3,
+ /** @brief Close file */
+ CMD_DEBUG_CLOSE = 4,
+ /** @brief Not a command */
+ CMD_DEBUG_MAX
+};
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum number of files that can be open at a given time
+ */
+#define DEBUG_MAX_OPEN_FILES 1
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum size of the null-terminated file name string in bytes.
+ *   The value is the memory available for data in the message payload
+ *   when using @ref cmd_debug_fopen_request: the 4 bytes subtracted
+ *   hold the 32-bit @ref mrq_debug_commands field of
+ *   @ref mrq_debug_request, i.e. 120 - 4 = 116.
+ */
+#define DEBUG_FNAME_MAX_SZ (MSG_DATA_MIN_SZ - 4)
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_OPEN command
+ */
+struct cmd_debug_fopen_request {
+ /** @brief File name - Null-terminated string with maximum
+ * length @ref DEBUG_FNAME_MAX_SZ
+ */
+ char name[DEBUG_FNAME_MAX_SZ];
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Response data for CMD_DEBUG_OPEN_RO/WO command
+ */
+struct cmd_debug_fopen_response {
+ /** @brief Identifier for file access */
+ uint32_t fd;
+ /** @brief Data length. File data size for READ command.
+ * Maximum allowed length for WRITE command
+ */
+ uint32_t datalen;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_READ command
+ */
+struct cmd_debug_fread_request {
+ /** @brief File access identifier received in response
+ * to CMD_DEBUG_OPEN_RO request
+ */
+ uint32_t fd;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum size of read data in bytes.
+ * Value is derived from memory available in message payload while
+ * using @ref cmd_debug_fread_response.
+ */
+#define DEBUG_READ_MAX_SZ (MSG_DATA_MIN_SZ - 4)
+
+/**
+ * @ingroup Debugfs
+ * @brief Response data for CMD_DEBUG_READ command
+ */
+struct cmd_debug_fread_response {
+ /** @brief Size of data provided in this response in bytes */
+ uint32_t readlen;
+ /** @brief File data from seek position */
+ char data[DEBUG_READ_MAX_SZ];
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Maximum size of write data in bytes.
+ * Value is derived from memory available in message payload while
+ * using @ref cmd_debug_fwrite_request.
+ */
+#define DEBUG_WRITE_MAX_SZ (MSG_DATA_MIN_SZ - 12)
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_WRITE command
+ */
+struct cmd_debug_fwrite_request {
+ /** @brief File access identifier received in response
+	 *  to a CMD_DEBUG_OPEN_WO request
+ */
+ uint32_t fd;
+ /** @brief Size of write data in bytes */
+ uint32_t datalen;
+ /** @brief Data to be written */
+ char data[DEBUG_WRITE_MAX_SZ];
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Parameters for CMD_DEBUG_CLOSE command
+ */
+struct cmd_debug_fclose_request {
+ /** @brief File access identifier received in response
+	 *  to a CMD_DEBUG_OPEN_RO or CMD_DEBUG_OPEN_WO request
+ */
+ uint32_t fd;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief Request with #MRQ_DEBUG.
+ *
+ * The sender of an MRQ_DEBUG message uses #cmd to specify a debugfs
+ * command to execute. Legal commands are the values of @ref
+ * mrq_debug_commands. Each command requires a specific additional
+ * payload of data.
+ *
+ * |command |payload|
+ * |-------------------|-------|
+ * |CMD_DEBUG_OPEN_RO |fop |
+ * |CMD_DEBUG_OPEN_WO |fop |
+ * |CMD_DEBUG_READ |frd |
+ * |CMD_DEBUG_WRITE |fwr |
+ * |CMD_DEBUG_CLOSE |fcl |
+ */
+struct mrq_debug_request {
+ /** @brief Sub-command (@ref mrq_debug_commands) */
+ uint32_t cmd;
+ union {
+ /** @brief Request payload for CMD_DEBUG_OPEN_RO/WO command */
+ struct cmd_debug_fopen_request fop;
+ /** @brief Request payload for CMD_DEBUG_READ command */
+ struct cmd_debug_fread_request frd;
+ /** @brief Request payload for CMD_DEBUG_WRITE command */
+ struct cmd_debug_fwrite_request fwr;
+ /** @brief Request payload for CMD_DEBUG_CLOSE command */
+ struct cmd_debug_fclose_request fcl;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ */
+struct mrq_debug_response {
+ union {
+ /** @brief Response data for CMD_DEBUG_OPEN_RO/WO command */
+ struct cmd_debug_fopen_response fop;
+ /** @brief Response data for CMD_DEBUG_READ command */
+ struct cmd_debug_fread_response frd;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
+
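+
A hedged, kernel-side sketch of driving the MRQ_DEBUG file protocol defined
above; send_mrq() stands in for the real BPMP IPC transport and is an
assumption, as is the elided error handling:

static void example_debug_open_ro(const char *path)
{
	struct mrq_debug_request req = { .cmd = CMD_DEBUG_OPEN_RO };
	struct mrq_debug_response resp;

	strscpy(req.fop.name, path, sizeof(req.fop.name));

	/* One frame per command: OPEN_RO here, then READ/CLOSE frames
	 * reusing resp.fop.fd until the transaction completes.
	 */
	send_mrq(MRQ_DEBUG, &req, sizeof(req), &resp, sizeof(resp));
}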
+/**
+ * @ingroup MRQ_Codes
* @def MRQ_RESET
* @brief Reset an IP block
*
@@ -687,14 +888,41 @@ struct mrq_debugfs_response {
*/
enum mrq_reset_commands {
- /** @brief Assert module reset */
+ /**
+ * @brief Assert module reset
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
+ * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
+ * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ */
CMD_RESET_ASSERT = 1,
- /** @brief Deassert module reset */
+ /**
+ * @brief Deassert module reset
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
+ * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
+ * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ */
CMD_RESET_DEASSERT = 2,
- /** @brief Assert and deassert the module reset */
+ /**
+ * @brief Assert and deassert the module reset
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_EINVAL if mrq_reset_request::reset_id is invalid @n
+ * -#BPMP_EACCES if mrq master is not an owner of target domain reset @n
+ * -#BPMP_ENOTSUP if target domain h/w state does not allow reset
+ */
CMD_RESET_MODULE = 3,
- /** @brief Get the highest reset ID */
+ /**
+ * @brief Get the highest reset ID
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_ENODEV if no reset domains are supported (number of IDs is 0)
+ */
CMD_RESET_GET_MAX_ID = 4,
+
/** @brief Not part of ABI and subject to change */
CMD_RESET_MAX,
};
@@ -710,7 +938,7 @@ struct mrq_reset_request {
uint32_t cmd;
/** @brief Id of the reset to be affected */
uint32_t reset_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response for MRQ_RESET sub-command CMD_RESET_GET_MAX_ID. When
@@ -720,7 +948,7 @@ struct mrq_reset_request {
struct cmd_reset_get_max_id_response {
/** @brief Max reset id */
uint32_t max_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response with MRQ_RESET
@@ -739,8 +967,8 @@ struct cmd_reset_get_max_id_response {
struct mrq_reset_response {
union {
struct cmd_reset_get_max_id_response reset_get_max_id;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
@@ -758,17 +986,17 @@ struct mrq_reset_response {
* @addtogroup I2C
* @{
*/
-#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12)
-#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4)
+#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12U)
+#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4U)
-#define SERIALI2C_TEN 0x0010
-#define SERIALI2C_RD 0x0001
-#define SERIALI2C_STOP 0x8000
-#define SERIALI2C_NOSTART 0x4000
-#define SERIALI2C_REV_DIR_ADDR 0x2000
-#define SERIALI2C_IGNORE_NAK 0x1000
-#define SERIALI2C_NO_RD_ACK 0x0800
-#define SERIALI2C_RECV_LEN 0x0400
+#define SERIALI2C_TEN 0x0010U
+#define SERIALI2C_RD 0x0001U
+#define SERIALI2C_STOP 0x8000U
+#define SERIALI2C_NOSTART 0x4000U
+#define SERIALI2C_REV_DIR_ADDR 0x2000U
+#define SERIALI2C_IGNORE_NAK 0x1000U
+#define SERIALI2C_NO_RD_ACK 0x0800U
+#define SERIALI2C_RECV_LEN 0x0400U
enum {
CMD_I2C_XFER = 1
@@ -798,7 +1026,7 @@ struct serial_i2c_request {
uint16_t len;
/** @brief For write transactions only, #len bytes of data */
uint8_t data[];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Trigger one or more i2c transactions
@@ -812,7 +1040,7 @@ struct cmd_i2c_xfer_request {
/** @brief Serialized packed instances of @ref serial_i2c_request*/
uint8_t data_buf[TEGRA_I2C_IPC_MAX_IN_BUF_SIZE];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Container for data read from the i2c bus
@@ -826,7 +1054,7 @@ struct cmd_i2c_xfer_response {
uint32_t data_size;
/** @brief I2c read data */
uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Request with #MRQ_I2C
@@ -836,14 +1064,25 @@ struct mrq_i2c_request {
uint32_t cmd;
/** @brief Parameters of the transfer request */
struct cmd_i2c_xfer_request xfer;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response to #MRQ_I2C
+ *
+ * mrq_response::err is
+ * 0: Success
+ * -#BPMP_EBADCMD: if mrq_i2c_request::cmd is other than 1
+ * -#BPMP_EINVAL: if cmd_i2c_xfer_request does not contain a correctly formatted request
+ * -#BPMP_ENODEV: if cmd_i2c_xfer_request::bus_id is not supported by BPMP
+ * -#BPMP_EACCES: if the i2c transaction is not allowed due to firewall rules
+ * -#BPMP_ETIMEDOUT: if the i2c transaction times out
+ * -#BPMP_ENXIO: if the i2c slave device does not reply with ACK to the transaction
+ * -#BPMP_EAGAIN: if an ARB_LOST condition is detected by the i2c controller
+ * -#BPMP_EIO: any i2c controller error code other than NO_ACK or ARB_LOST
*/
struct mrq_i2c_response {
struct cmd_i2c_xfer_response xfer;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
@@ -876,90 +1115,105 @@ enum {
CMD_CLK_MAX,
};
-#define BPMP_CLK_HAS_MUX (1 << 0)
-#define BPMP_CLK_HAS_SET_RATE (1 << 1)
-#define BPMP_CLK_IS_ROOT (1 << 2)
+#define BPMP_CLK_HAS_MUX (1U << 0U)
+#define BPMP_CLK_HAS_SET_RATE (1U << 1U)
+#define BPMP_CLK_IS_ROOT (1U << 2U)
+#define BPMP_CLK_IS_VAR_ROOT (1U << 3U)
-#define MRQ_CLK_NAME_MAXLEN 40
-#define MRQ_CLK_MAX_PARENTS 16
+#define MRQ_CLK_NAME_MAXLEN 40U
+#define MRQ_CLK_MAX_PARENTS 16U
/** @private */
struct cmd_clk_get_rate_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_rate_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_rate_request {
int32_t unused;
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_rate_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_round_rate_request {
int32_t unused;
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_round_rate_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_parent_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_parent_response {
uint32_t parent_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_parent_request {
uint32_t parent_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_clk_set_parent_response {
uint32_t parent_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_is_enabled_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
+/**
+ * @brief Response data to #MRQ_CLK sub-command CMD_CLK_IS_ENABLED
+ */
struct cmd_clk_is_enabled_response {
+ /**
+	 * @brief The state of the clock that has been successfully
+ * requested with CMD_CLK_ENABLE or CMD_CLK_DISABLE by the
+ * master invoking the command earlier.
+ *
+ * The state may not reflect the physical state of the clock
+ * if there are some other masters requesting it to be
+ * enabled.
+ *
+ * Value 0 is disabled, all other values indicate enabled.
+ */
int32_t state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_enable_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_enable_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_disable_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_disable_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_all_info_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_all_info_response {
uint32_t flags;
@@ -967,25 +1221,25 @@ struct cmd_clk_get_all_info_response {
uint32_t parents[MRQ_CLK_MAX_PARENTS];
uint8_t num_parents;
uint8_t name[MRQ_CLK_NAME_MAXLEN];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_max_clk_id_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_max_clk_id_response {
uint32_t max_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_clk_get_fmax_at_vmin_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_clk_get_fmax_at_vmin_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Clocks
@@ -1040,8 +1294,8 @@ struct mrq_clk_request {
struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id;
/** @private */
struct cmd_clk_get_fmax_at_vmin_request clk_get_fmax_at_vmin;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup Clocks
@@ -1082,8 +1336,8 @@ struct mrq_clk_response {
struct cmd_clk_get_all_info_response clk_get_all_info;
struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
struct cmd_clk_get_fmax_at_vmin_response clk_get_fmax_at_vmin;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1109,7 +1363,7 @@ struct mrq_clk_response {
struct mrq_query_abi_request {
/** @brief MRQ code to query */
uint32_t mrq;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup ABI_info
@@ -1121,7 +1375,7 @@ struct mrq_query_abi_request {
struct mrq_query_abi_response {
/** @brief 0 if queried MRQ is supported. Else, -#BPMP_ENODEV */
int32_t status;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup MRQ_Codes
@@ -1146,7 +1400,7 @@ struct mrq_query_abi_response {
struct mrq_pg_read_state_request {
/** @brief ID of partition */
uint32_t partition_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup Powergating
@@ -1161,7 +1415,7 @@ struct mrq_pg_read_state_response {
* * 1 : on
*/
uint32_t logic_state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/** @} */
@@ -1211,7 +1465,7 @@ struct mrq_pg_update_state_request {
* @ref logic_state == 0x3)
*/
uint32_t clock_state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond*/
/**
@@ -1307,25 +1561,38 @@ enum pg_states {
struct cmd_pg_query_abi_request {
/** @ref mrq_pg_cmd */
uint32_t type;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_pg_set_state_request {
/** @ref pg_states */
uint32_t state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+/**
+ * @brief Response data to #MRQ_PG sub command #CMD_PG_GET_STATE
+ */
struct cmd_pg_get_state_response {
- /** @ref pg_states */
+ /**
+ * @brief The state of the power partition that has been
+	 * successfully requested by the master earlier using #MRQ_PG
+ * command #CMD_PG_SET_STATE.
+ *
+ * The state may not reflect the physical state of the power
+ * partition if there are some other masters requesting it to
+ * be enabled.
+ *
+ * See @ref pg_states for possible values
+ */
uint32_t state;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_pg_get_name_response {
uint8_t name[MRQ_PG_NAME_MAXLEN];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_pg_get_max_id_response {
uint32_t max_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Request with #MRQ_PG
@@ -1350,8 +1617,8 @@ struct mrq_pg_request {
union {
struct cmd_pg_query_abi_request query_abi;
struct cmd_pg_set_state_request set_state;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @brief Response to MRQ_PG
@@ -1373,8 +1640,8 @@ struct mrq_pg_response {
struct cmd_pg_get_state_response get_state;
struct cmd_pg_get_name_response get_name;
struct cmd_pg_get_max_id_response get_max_id;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1463,6 +1730,20 @@ enum mrq_thermal_host_to_bpmp_cmd {
*/
CMD_THERMAL_GET_NUM_ZONES = 3,
+ /**
+ * @brief Get the thermtrip of the specified zone.
+ *
+ * Host needs to supply request parameters.
+ *
+ * mrq_response::err is
+ * * 0: Valid zone information returned.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+	 * * -#BPMP_ERANGE: Thermtrip is invalid or disabled.
+ * * -#BPMP_ERANGE if thermtrip is invalid or disabled.
+ * * -#BPMP_EFAULT: Problem reading zone information.
+ */
+ CMD_THERMAL_GET_THERMTRIP = 4,
+
/** @brief: number of supported host-to-bpmp commands. May
* increase in future
*/
@@ -1493,7 +1774,7 @@ enum mrq_thermal_bpmp_to_host_cmd {
*/
struct cmd_thermal_query_abi_request {
uint32_t type;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* Host->BPMP request data for request type CMD_THERMAL_GET_TEMP
@@ -1502,7 +1783,7 @@ struct cmd_thermal_query_abi_request {
*/
struct cmd_thermal_get_temp_request {
uint32_t zone;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host reply data for request CMD_THERMAL_GET_TEMP
@@ -1515,7 +1796,7 @@ struct cmd_thermal_get_temp_request {
*/
struct cmd_thermal_get_temp_response {
int32_t temp;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* Host->BPMP request data for request type CMD_THERMAL_SET_TRIP
@@ -1530,7 +1811,7 @@ struct cmd_thermal_set_trip_request {
int32_t low;
int32_t high;
uint32_t enabled;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host request data for request type CMD_THERMAL_HOST_TRIP_REACHED
@@ -1539,7 +1820,7 @@ struct cmd_thermal_set_trip_request {
*/
struct cmd_thermal_host_trip_reached_request {
uint32_t zone;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host reply data for request type CMD_THERMAL_GET_NUM_ZONES
@@ -1549,7 +1830,25 @@ struct cmd_thermal_host_trip_reached_request {
*/
struct cmd_thermal_get_num_zones_response {
uint32_t num;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_GET_THERMTRIP
+ *
+ * zone: Number of thermal zone.
+ */
+struct cmd_thermal_get_thermtrip_request {
+ uint32_t zone;
+} BPMP_ABI_PACKED;
+
+/*
+ * BPMP->Host reply data for request CMD_THERMAL_GET_THERMTRIP
+ *
+ * thermtrip: HW shutdown temperature in millicelsius.
+ */
+struct cmd_thermal_get_thermtrip_response {
+ int32_t thermtrip;
+} BPMP_ABI_PACKED;
/*
* Host->BPMP request data.
@@ -1565,8 +1864,9 @@ struct mrq_thermal_host_to_bpmp_request {
struct cmd_thermal_query_abi_request query_abi;
struct cmd_thermal_get_temp_request get_temp;
struct cmd_thermal_set_trip_request set_trip;
- } __UNION_ANON;
-} __ABI_PACKED;
+ struct cmd_thermal_get_thermtrip_request get_thermtrip;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/*
* BPMP->Host request data.
@@ -1578,16 +1878,17 @@ struct mrq_thermal_bpmp_to_host_request {
uint32_t type;
union {
struct cmd_thermal_host_trip_reached_request host_trip_reached;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/*
* Data in reply to a Host->BPMP request.
*/
union mrq_thermal_bpmp_to_host_response {
struct cmd_thermal_get_temp_response get_temp;
+ struct cmd_thermal_get_thermtrip_response get_thermtrip;
struct cmd_thermal_get_num_zones_response get_num_zones;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
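
A minimal host-side sketch of the new CMD_THERMAL_GET_THERMTRIP flow, using only the structures defined above. This is not part of the patch: bpmp_transfer() is a hypothetical transport helper standing in for the real MRQ channel, and the direct .get_thermtrip member access assumes BPMP_UNION_ANON expands to an anonymous union.

static int example_get_thermtrip(uint32_t zone, int32_t *millicelsius)
{
	struct mrq_thermal_host_to_bpmp_request req = { 0 };
	union mrq_thermal_bpmp_to_host_response rsp = { 0 };
	int err;

	req.type = CMD_THERMAL_GET_THERMTRIP;
	req.get_thermtrip.zone = zone;	/* assumes an anonymous-union build */

	/* hypothetical MRQ transport call, not defined by this header */
	err = bpmp_transfer(MRQ_THERMAL, &req, sizeof(req), &rsp, sizeof(rsp));
	if (err)
		return err;	/* e.g. -BPMP_ERANGE when thermtrip is disabled */

	*millicelsius = rsp.get_thermtrip.thermtrip;
	return 0;
}
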
/**
@@ -1619,7 +1920,7 @@ struct mrq_cpu_vhint_request {
uint32_t addr;
/** @brief ID of the cluster whose data is requested */
uint32_t cluster_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Description of the CPU v/f relation
@@ -1646,7 +1947,7 @@ struct cpu_vhint_data {
uint16_t vindex_div;
/** reserved for future use */
uint16_t reserved[328];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @endcond */
/** @} */
@@ -1735,11 +2036,11 @@ struct mrq_abi_ratchet_response {
* @brief Used by @ref mrq_emc_dvfs_latency_response
*/
struct emc_dvfs_latency {
- /** @brief EMC frequency in kHz */
+ /** @brief EMC DVFS node frequency in kHz */
uint32_t freq;
/** @brief EMC DVFS latency in nanoseconds */
uint32_t latency;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
#define EMC_DVFS_LATENCY_MAX_SIZE 14
/**
@@ -1748,9 +2049,9 @@ struct emc_dvfs_latency {
struct mrq_emc_dvfs_latency_response {
/** @brief The number valid entries in #pairs */
uint32_t num_pairs;
- /** @brief EMC <frequency, latency> information */
+ /** @brief EMC DVFS node <frequency, latency> information */
struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
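
As a usage sketch, a client that has received an MRQ_EMC_DVFS_LATENCY reply can walk the table as below; rsp is assumed to be an already decoded response and the printf() is purely illustrative.

static void example_dump_emc_latency(const struct mrq_emc_dvfs_latency_response *rsp)
{
	uint32_t i;

	/* num_pairs should not exceed EMC_DVFS_LATENCY_MAX_SIZE, but clamp anyway */
	for (i = 0; i < rsp->num_pairs && i < EMC_DVFS_LATENCY_MAX_SIZE; i++)
		printf("EMC DVFS node %u kHz -> latency %u ns\n",
		       rsp->pairs[i].freq, rsp->pairs[i].latency);
}
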
@@ -1775,7 +2076,7 @@ struct mrq_emc_dvfs_latency_response {
struct mrq_cpu_ndiv_limits_request {
/** @brief Enum cluster_id */
uint32_t cluster_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response to #MRQ_CPU_NDIV_LIMITS
@@ -1791,7 +2092,7 @@ struct mrq_cpu_ndiv_limits_response {
uint16_t ndiv_max;
/** @brief Minimum allowed NDIV value */
uint16_t ndiv_min;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -1823,7 +2124,7 @@ struct mrq_cpu_ndiv_limits_response {
struct mrq_cpu_auto_cc3_request {
/** @brief Enum cluster_id (logical cluster id, known to CCPLEX s/w) */
uint32_t cluster_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @brief Response to #MRQ_CPU_AUTO_CC3
@@ -1837,7 +2138,7 @@ struct mrq_cpu_auto_cc3_response {
* - bit [0] if "1" auto-CC3 is allowed, if "0" auto-CC3 is not allowed
*/
uint32_t auto_cc3_config;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -1847,6 +2148,8 @@ struct mrq_cpu_auto_cc3_response {
* @def MRQ_TRACE_ITER
* @brief Manage the trace iterator
*
+ * @deprecated
+ *
* * Platforms: All
* * Initiators: CCPLEX
* * Targets: BPMP
@@ -1868,7 +2171,7 @@ enum {
struct mrq_trace_iter_request {
/** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */
uint32_t cmd;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
@@ -1944,12 +2247,12 @@ enum mrq_ringbuf_console_host_to_bpmp_cmd {
struct cmd_ringbuf_console_query_abi_req {
/** @brief Command identifier to be queried */
uint32_t cmd;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_ringbuf_console_query_abi_resp {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1960,7 +2263,7 @@ struct cmd_ringbuf_console_read_req {
* @brief Number of bytes requested to be read from the BPMP TX buffer
*/
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1971,7 +2274,7 @@ struct cmd_ringbuf_console_read_resp {
uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_READ_LEN];
/** @brief Number of bytes in cmd_ringbuf_console_read_resp::data */
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1982,7 +2285,7 @@ struct cmd_ringbuf_console_write_req {
uint8_t data[MRQ_RINGBUF_CONSOLE_MAX_WRITE_LEN];
/** @brief Number of bytes in cmd_ringbuf_console_write_req::data */
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -1993,12 +2296,12 @@ struct cmd_ringbuf_console_write_resp {
uint32_t space_avail;
/** @brief Number of bytes that were written to the BPMP RX buffer */
uint8_t len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_ringbuf_console_get_fifo_req {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -2013,7 +2316,7 @@ struct cmd_ringbuf_console_get_fifo_resp {
uint64_t bpmp_tx_tail_addr;
/** @brief Length of the BPMP TX buffer */
uint32_t bpmp_tx_buf_len;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -2033,8 +2336,8 @@ struct mrq_ringbuf_console_host_to_bpmp_request {
struct cmd_ringbuf_console_read_req read;
struct cmd_ringbuf_console_write_req write;
struct cmd_ringbuf_console_get_fifo_req get_fifo;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup RingbufConsole
@@ -2047,7 +2350,7 @@ union mrq_ringbuf_console_bpmp_to_host_response {
struct cmd_ringbuf_console_read_resp read;
struct cmd_ringbuf_console_write_resp write;
struct cmd_ringbuf_console_get_fifo_resp get_fifo;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @} */
/**
@@ -2091,7 +2394,7 @@ struct mrq_strap_request {
uint32_t id;
/** @brief Desired value for strap (if cmd is #STRAP_SET) */
uint32_t value;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @defgroup Strap_Ids Strap Identifiers
@@ -2134,28 +2437,28 @@ struct cmd_uphy_margin_control_request {
uint32_t y;
/** @brief Set number of bit blocks for each margin section */
uint32_t nblks;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_margin_status_response {
/** @brief Number of errors observed */
uint32_t status;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_ep_controller_pll_init_request {
/** @brief EP controller number, valid: 0, 4, 5 */
uint8_t ep_controller;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_pcie_controller_state_request {
/** @brief PCIE controller number, valid: 0, 1, 2, 3, 4 */
uint8_t pcie_controller;
uint8_t enable;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_uphy_ep_controller_pll_off_request {
/** @brief EP controller number, valid: 0, 4, 5 */
uint8_t ep_controller;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup UPHY
@@ -2186,8 +2489,8 @@ struct mrq_uphy_request {
struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init;
struct cmd_uphy_pcie_controller_state_request controller_state;
struct cmd_uphy_ep_controller_pll_off_request ep_ctrlr_pll_off;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup UPHY
@@ -2207,8 +2510,8 @@ struct mrq_uphy_request {
struct mrq_uphy_response {
union {
struct cmd_uphy_margin_status_response uphy_get_margin_status;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -2258,31 +2561,31 @@ enum {
struct cmd_fmon_gear_clamp_request {
int32_t unused;
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_clamp_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_free_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_free_response {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
/** @private */
struct cmd_fmon_gear_get_request {
- EMPTY
-} __ABI_PACKED;
+ BPMP_ABI_EMPTY
+} BPMP_ABI_PACKED;
struct cmd_fmon_gear_get_response {
int64_t rate;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* @ingroup FMON
@@ -2315,8 +2618,8 @@ struct mrq_fmon_request {
struct cmd_fmon_gear_free_request fmon_gear_free;
/** @private */
struct cmd_fmon_gear_get_request fmon_gear_get;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup FMON
@@ -2340,8 +2643,8 @@ struct mrq_fmon_response {
/** @private */
struct cmd_fmon_gear_free_response fmon_gear_free;
struct cmd_fmon_gear_get_response fmon_gear_get;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
@@ -2366,13 +2669,27 @@ struct mrq_fmon_response {
*/
enum {
/**
+ * @cond DEPRECATED
* @brief Retrieve specified EC status.
*
* mrq_response::err is 0 if the operation was successful, or @n
* -#BPMP_ENODEV if target EC is not owned by BPMP @n
- * -#BPMP_EACCES if target EC power domain is turned off
+ * -#BPMP_EACCES if target EC power domain is turned off @n
+ * -#BPMP_EBADCMD if subcommand is not supported
+ * @endcond
*/
- CMD_EC_STATUS_GET = 1,
+ CMD_EC_STATUS_GET = 1, /* deprecated */
+
+ /**
+ * @brief Retrieve specified EC extended status (includes error
+ * counter and user values).
+ *
+ * mrq_response::err is 0 if the operation was successful, or @n
+ * -#BPMP_ENODEV if target EC is not owned by BPMP @n
+ * -#BPMP_EACCES if target EC power domain is turned off @n
+ * -#BPMP_EBADCMD if subcommand is not supported
+ */
+ CMD_EC_STATUS_EX_GET = 2,
CMD_EC_NUM,
};
@@ -2428,13 +2745,13 @@ enum bpmp_ec_err_type {
/** @brief SW Correctable error
*
- * Error descriptor @ref ec_err_simple_desc.
+ * Error descriptor @ref ec_err_sw_error_desc.
*/
EC_ERR_TYPE_SW_CORRECTABLE = 16,
/** @brief SW Uncorrectable error
*
- * Error descriptor @ref ec_err_simple_desc.
+ * Error descriptor @ref ec_err_sw_error_desc.
*/
EC_ERR_TYPE_SW_UNCORRECTABLE = 17,
@@ -2454,9 +2771,9 @@ enum bpmp_ec_err_type {
/** @brief Group of registers with parity error. */
enum ec_registers_group {
/** @brief Functional registers group */
- EC_ERR_GROUP_FUNC_REG = 0,
+ EC_ERR_GROUP_FUNC_REG = 0U,
/** @brief SCR registers group */
- EC_ERR_GROUP_SCR_REG = 1,
+ EC_ERR_GROUP_SCR_REG = 1U,
};
/**
@@ -2465,11 +2782,11 @@ enum ec_registers_group {
* @{
*/
/** @brief No EC error found flag */
-#define EC_STATUS_FLAG_NO_ERROR 0x0001
+#define EC_STATUS_FLAG_NO_ERROR 0x0001U
/** @brief Last EC error found flag */
-#define EC_STATUS_FLAG_LAST_ERROR 0x0002
+#define EC_STATUS_FLAG_LAST_ERROR 0x0002U
/** @brief EC latent error flag */
-#define EC_STATUS_FLAG_LATENT_ERROR 0x0004
+#define EC_STATUS_FLAG_LATENT_ERROR 0x0004U
/** @} */
/**
@@ -2478,9 +2795,9 @@ enum ec_registers_group {
* @{
*/
/** @brief EC descriptor error resolved flag */
-#define EC_DESC_FLAG_RESOLVED 0x0001
+#define EC_DESC_FLAG_RESOLVED 0x0001U
/** @brief EC descriptor failed to retrieve id flag */
-#define EC_DESC_FLAG_NO_ID 0x0002
+#define EC_DESC_FLAG_NO_ID 0x0002U
/** @} */
/**
@@ -2497,7 +2814,7 @@ struct ec_err_fmon_desc {
uint32_t fmon_faults;
/** @brief FMON faults access error */
int32_t fmon_access_error;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* |error type | vmon_adc_id values |
@@ -2513,7 +2830,7 @@ struct ec_err_vmon_desc {
uint32_t vmon_faults;
/** @brief VMON faults access error */
int32_t vmon_access_error;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/**
* |error type | reg_id values |
@@ -2527,7 +2844,22 @@ struct ec_err_reg_parity_desc {
uint16_t reg_id;
/** @brief Register group @ref ec_registers_group */
uint16_t reg_group;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+
+/**
+ * |error type | err_source_id values |
+ * |--------------------------------- |--------------------------|
+ * |@ref EC_ERR_TYPE_SW_CORRECTABLE | @ref bpmp_ec_ce_swd_ids |
+ * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE | @ref bpmp_ec_ue_swd_ids |
+ */
+struct ec_err_sw_error_desc {
+ /** @brief Bitmask of @ref bpmp_ec_desc_flags */
+ uint16_t desc_flags;
+ /** @brief Error source id */
+ uint16_t err_source_id;
+ /** @brief Sw error data */
+ uint32_t sw_error_data;
+} BPMP_ABI_PACKED;
/**
* |error type | err_source_id values |
@@ -2537,34 +2869,36 @@ struct ec_err_reg_parity_desc {
* |@ref EC_ERR_TYPE_ECC_DED_INTERNAL |@ref bpmp_ec_ipath_ids |
* |@ref EC_ERR_TYPE_COMPARATOR |@ref bpmp_ec_comparator_ids|
* |@ref EC_ERR_TYPE_PARITY_SRAM |@ref bpmp_clock_ids |
- * |@ref EC_ERR_TYPE_SW_CORRECTABLE |@ref bpmp_ec_misc_ids |
- * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE |@ref bpmp_ec_misc_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_ids |
- * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
+ * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_hwd_ids |
*/
struct ec_err_simple_desc {
/** @brief Bitmask of @ref bpmp_ec_desc_flags */
uint16_t desc_flags;
/** @brief Error source id. Id space depends on error type. */
uint16_t err_source_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** @brief Union of EC error descriptors */
union ec_err_desc {
struct ec_err_fmon_desc fmon_desc;
struct ec_err_vmon_desc vmon_desc;
struct ec_err_reg_parity_desc reg_parity_desc;
+ struct ec_err_sw_error_desc sw_error_desc;
struct ec_err_simple_desc simple_desc;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
struct cmd_ec_status_get_request {
/** @brief HSM error line number that identifies target EC. */
uint32_t ec_hsm_id;
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
/** EC status maximum number of descriptors */
-#define EC_ERR_STATUS_DESC_MAX_NUM 4
+#define EC_ERR_STATUS_DESC_MAX_NUM 4U
+/**
+ * @cond DEPRECATED
+ */
struct cmd_ec_status_get_response {
/** @brief Target EC id (the same id received with request). */
uint32_t ec_hsm_id;
@@ -2582,7 +2916,33 @@ struct cmd_ec_status_get_response {
uint32_t error_desc_num;
/** @brief EC error descriptors */
union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
-} __ABI_PACKED;
+} BPMP_ABI_PACKED;
+/** @endcond */
+
+struct cmd_ec_status_ex_get_response {
+ /** @brief Target EC id (the same id received with request). */
+ uint32_t ec_hsm_id;
+ /**
+ * @brief Bitmask of @ref bpmp_ec_status_flags
+ *
+ * If NO_ERROR flag is set, error_ fields should be ignored
+ */
+ uint32_t ec_status_flags;
+ /** @brief Found EC error index. */
+ uint32_t error_idx;
+ /** @brief Found EC error type @ref bpmp_ec_err_type. */
+ uint32_t error_type;
+ /** @brief Found EC mission error counter value */
+ uint32_t error_counter;
+ /** @brief Found EC mission error user value */
+ uint32_t error_uval;
+ /** @brief Reserved entry */
+ uint32_t reserved;
+ /** @brief Number of returned EC error descriptors */
+ uint32_t error_desc_num;
+ /** @brief EC error descriptors */
+ union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM];
+} BPMP_ABI_PACKED;
/**
* @ingroup EC
@@ -2591,9 +2951,15 @@ struct cmd_ec_status_get_response {
* Used by the sender of an #MRQ_EC message to access ECs owned
* by BPMP.
*
+ * @cond DEPRECATED
* |sub-command |payload |
* |----------------------------|-----------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
+ * @endcond
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------|
+ * |@ref CMD_EC_STATUS_EX_GET |ec_status_get |
*
*/
@@ -2603,8 +2969,8 @@ struct mrq_ec_request {
union {
struct cmd_ec_status_get_request ec_status_get;
- } __UNION_ANON;
-} __ABI_PACKED;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/**
* @ingroup EC
@@ -2613,49 +2979,28 @@ struct mrq_ec_request {
* Each sub-command supported by @ref mrq_ec_request may return
* sub-command-specific data as indicated below.
*
+ * @cond DEPRECATED
* |sub-command |payload |
* |----------------------------|------------------------|
* |@ref CMD_EC_STATUS_GET |ec_status_get |
+ * @endcond
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------|
+ * |@ref CMD_EC_STATUS_EX_GET |ec_status_ex_get |
*
*/
struct mrq_ec_response {
union {
+ /**
+ * @cond DEPRECATED
+ */
struct cmd_ec_status_get_response ec_status_get;
- } __UNION_ANON;
-} __ABI_PACKED;
-
-/** @} */
-/** @endcond */
-
-/**
- * @ingroup MRQ_Codes
- * @def MRQ_FBVOLT_STATUS
- * @brief Provides status information about voltage state for fuse burning
- *
- * * Platforms: T194 onwards
- * @cond bpmp_t194
- * * Initiators: CCPLEX
- * * Target: BPMP
- * * Request Payload: None
- * * Response Payload: @ref mrq_fbvolt_status_response
- * @{
- */
-
-/**
- * @ingroup Fbvolt_status
- * @brief Response to #MRQ_FBVOLT_STATUS
- *
- * Value of #ready reflects if core voltages are in a suitable state for buring
- * fuses. A value of 0x1 indicates that core voltages are ready for burning
- * fuses. A value of 0x0 indicates that core voltages are not ready.
- */
-struct mrq_fbvolt_status_response {
- /** @brief Bit [0:0] - ready status, bits [31:1] - reserved */
- uint32_t ready;
- /** @brief Reserved */
- uint32_t unused;
-} __ABI_PACKED;
+ /** @endcond */
+ struct cmd_ec_status_ex_get_response ec_status_ex_get;
+ } BPMP_UNION_ANON;
+} BPMP_ABI_PACKED;
/** @} */
/** @endcond */
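
A hedged sketch of decoding the new extended status on the host side; only structures and flags defined in this header are used, and the caller is assumed to have already issued CMD_EC_STATUS_EX_GET.

static void example_decode_ec_status_ex(const struct cmd_ec_status_ex_get_response *r)
{
	uint32_t i;

	if (r->ec_status_flags & EC_STATUS_FLAG_NO_ERROR)
		return;	/* error_* fields carry no information in this case */

	/* error_counter and error_uval are the additions over CMD_EC_STATUS_GET */
	for (i = 0; i < r->error_desc_num && i < EC_ERR_STATUS_DESC_MAX_NUM; i++) {
		const union ec_err_desc *d = &r->error_descs[i];

		/* Pick the variant from r->error_type, e.g. d->sw_error_desc
		 * for EC_ERR_TYPE_SW_CORRECTABLE/UNCORRECTABLE errors. */
		(void)d;
	}
}
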
@@ -2668,6 +3013,8 @@ struct mrq_fbvolt_status_response {
* @{
*/
+/** @brief Operation not permitted */
+#define BPMP_EPERM 1
/** @brief No such file or directory */
#define BPMP_ENOENT 2
/** @brief No MRQ handler */
@@ -2676,12 +3023,16 @@ struct mrq_fbvolt_status_response {
#define BPMP_EIO 5
/** @brief Bad sub-MRQ command */
#define BPMP_EBADCMD 6
+/** @brief Resource temporarily unavailable */
+#define BPMP_EAGAIN 11
/** @brief Not enough memory */
#define BPMP_ENOMEM 12
/** @brief Permission denied */
#define BPMP_EACCES 13
/** @brief Bad address */
#define BPMP_EFAULT 14
+/** @brief Resource busy */
+#define BPMP_EBUSY 16
/** @brief No such device */
#define BPMP_ENODEV 19
/** @brief Argument is a directory */
@@ -2693,10 +3044,18 @@ struct mrq_fbvolt_status_response {
/** @brief Out of range */
#define BPMP_ERANGE 34
/** @brief Function not implemented */
-#define BPMP_ENOSYS 38
+#define BPMP_ENOSYS 38
/** @brief Invalid slot */
#define BPMP_EBADSLT 57
+/** @brief Not supported */
+#define BPMP_ENOTSUP 134
+/** @brief No such device or address */
+#define BPMP_ENXIO 140
/** @} */
+#if defined(BPMP_ABI_CHECKS)
+#include "bpmp_abi_checks.h"
+#endif
+
#endif
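
The BPMP_E* numbers above form BPMP's own error namespace rather than Linux errno values, so a host typically translates them at the MRQ boundary. A sketch of such a mapping follows; the Linux codes chosen on the right are illustrative, not mandated by the ABI.

static int example_bpmp_to_errno(int bpmp_err)
{
	switch (bpmp_err) {
	case 0:			return 0;
	case BPMP_EPERM:	return -EPERM;
	case BPMP_ENOENT:	return -ENOENT;
	case BPMP_EAGAIN:	return -EAGAIN;
	case BPMP_ENOMEM:	return -ENOMEM;
	case BPMP_EACCES:	return -EACCES;
	case BPMP_EFAULT:	return -EFAULT;
	case BPMP_EBUSY:	return -EBUSY;
	case BPMP_ENODEV:	return -ENODEV;
	case BPMP_ERANGE:	return -ERANGE;
	case BPMP_ENOSYS:	return -ENOSYS;
	default:		return -EINVAL;
	}
}
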
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index 252ea20fe4c1..1097feca41ed 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -12,6 +12,8 @@
#define TEGRA124 0x40
#define TEGRA132 0x13
#define TEGRA210 0x21
+#define TEGRA186 0x18
+#define TEGRA194 0x19
#define TEGRA_FUSE_SKU_CALIB_0 0xf0
#define TEGRA30_FUSE_SATA_CALIB 0x124
diff --git a/include/sound/control.h b/include/sound/control.h
index aeaed2a05bae..e128cff10dfa 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -188,20 +188,21 @@ int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
*/
struct snd_kcontrol *snd_ctl_make_virtual_master(char *name,
const unsigned int *tlv);
-int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
- unsigned int flags);
-/* optional flags for slave */
-#define SND_CTL_SLAVE_NEED_UPDATE (1 << 0)
+int _snd_ctl_add_follower(struct snd_kcontrol *master,
+ struct snd_kcontrol *follower,
+ unsigned int flags);
+/* optional flags for follower */
+#define SND_CTL_FOLLOWER_NEED_UPDATE (1 << 0)
/**
- * snd_ctl_add_slave - Add a virtual slave control
+ * snd_ctl_add_follower - Add a virtual follower control
* @master: vmaster element
- * @slave: slave element to add
+ * @follower: follower element to add
*
- * Add a virtual slave control to the given master element created via
+ * Add a virtual follower control to the given master element created via
* snd_ctl_create_virtual_master() beforehand.
*
- * All slaves must be the same type (returning the same information
+ * All followers must be the same type (returning the same information
* via info callback). The function doesn't check it, so it's your
* responsibility.
*
@@ -213,18 +214,18 @@ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
* Return: Zero if successful or a negative error code.
*/
static inline int
-snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave)
+snd_ctl_add_follower(struct snd_kcontrol *master, struct snd_kcontrol *follower)
{
- return _snd_ctl_add_slave(master, slave, 0);
+ return _snd_ctl_add_follower(master, follower, 0);
}
/**
- * snd_ctl_add_slave_uncached - Add a virtual slave control
+ * snd_ctl_add_follower_uncached - Add a virtual follower control
* @master: vmaster element
- * @slave: slave element to add
+ * @follower: follower element to add
*
- * Add a virtual slave control to the given master.
- * Unlike snd_ctl_add_slave(), the element added via this function
+ * Add a virtual follower control to the given master.
+ * Unlike snd_ctl_add_follower(), the element added via this function
* is supposed to have volatile values, and get callback is called
* at each time queried from the master.
*
@@ -235,10 +236,10 @@ snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave)
* Return: Zero if successful or a negative error code.
*/
static inline int
-snd_ctl_add_slave_uncached(struct snd_kcontrol *master,
- struct snd_kcontrol *slave)
+snd_ctl_add_follower_uncached(struct snd_kcontrol *master,
+ struct snd_kcontrol *follower)
{
- return _snd_ctl_add_slave(master, slave, SND_CTL_SLAVE_NEED_UPDATE);
+ return _snd_ctl_add_follower(master, follower, SND_CTL_FOLLOWER_NEED_UPDATE);
}
int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
@@ -246,11 +247,11 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
void *private_data);
void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
-int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
- int (*func)(struct snd_kcontrol *vslave,
- struct snd_kcontrol *slave,
- void *arg),
- void *arg);
+int snd_ctl_apply_vmaster_followers(struct snd_kcontrol *kctl,
+ int (*func)(struct snd_kcontrol *vfollower,
+ struct snd_kcontrol *follower,
+ void *arg),
+ void *arg);
/*
* Helper functions for jack-detection controls
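
A sketch of the renamed vmaster API in use; card, vol_a and vol_b are assumed to be pre-existing controls and error unwinding is abbreviated.

static int example_bind_followers(struct snd_card *card,
				  struct snd_kcontrol *vol_a,
				  struct snd_kcontrol *vol_b)
{
	static char name[] = "Master Playback Volume";
	struct snd_kcontrol *vmaster;
	int err;

	vmaster = snd_ctl_make_virtual_master(name, NULL);
	if (!vmaster)
		return -ENOMEM;

	err = snd_ctl_add(card, vmaster);
	if (err < 0)
		return err;

	err = snd_ctl_add_follower(vmaster, vol_a);
	if (err < 0)
		return err;

	/* volatile element: its get callback runs on every master query */
	return snd_ctl_add_follower_uncached(vmaster, vol_b);
}
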
diff --git a/include/sound/gus.h b/include/sound/gus.h
index 410939ecf3a5..cd8da68cab92 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -613,4 +613,8 @@ int snd_gus_dram_write(struct snd_gus_card *gus, char __user *ptr,
int snd_gus_dram_read(struct snd_gus_card *gus, char __user *ptr,
unsigned int addr, unsigned int size, int rom);
+/* gus_timer.c */
+void snd_gf1_timers_init(struct snd_gus_card *gus);
+void snd_gf1_timers_done(struct snd_gus_card *gus);
+
#endif /* __SOUND_GUS_H */
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index d16a4229209b..0fea49bfc5e8 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -208,7 +208,7 @@ struct hda_codec {
struct mutex control_mutex;
struct snd_array spdif_out;
unsigned int spdif_in_enable; /* SPDIF input enable? */
- const hda_nid_t *slave_dig_outs; /* optional digital out slave widgets */
+ const hda_nid_t *follower_dig_outs; /* optional digital out follower widgets */
struct snd_array init_pins; /* initial (BIOS) pin configurations */
struct snd_array driver_pins; /* pin configs set by codec parser */
struct snd_array cvt_setups; /* audio convert setups */
@@ -415,6 +415,8 @@ __printf(2, 3)
struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec,
const char *fmt, ...);
+void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec);
+
static inline void snd_hda_codec_pcm_get(struct hda_pcm *pcm)
{
kref_get(&pcm->kref);
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index c1f78d9a6e47..6eed61e6cf8a 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -347,6 +347,9 @@ struct hdac_bus {
int bdl_pos_adj; /* BDL position adjustment */
+ /* delay time in us for dma stop */
+ unsigned int dma_stop_delay;
+
/* locks */
spinlock_t reg_lock;
struct mutex cmd_mutex;
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 83b17682e01c..7754631a3102 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -2,7 +2,7 @@
/*
* hdmi-codec.h - HDMI Codec driver API
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Jyri Sarha <jsarha@ti.com>
*/
@@ -76,7 +76,8 @@ struct hdmi_codec_ops {
* Mute/unmute HDMI audio stream.
* Optional
*/
- int (*digital_mute)(struct device *dev, void *data, bool enable);
+ int (*mute_stream)(struct device *dev, void *data,
+ bool enable, int direction);
/*
* Provides EDID-Like-Data from connected HDMI device.
@@ -99,6 +100,9 @@ struct hdmi_codec_ops {
int (*hook_plugged_cb)(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev);
+
+ /* bit field */
+ unsigned int no_capture_mute:1;
};
/* HDMI codec initalization data */
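
With digital_mute() replaced by mute_stream(), an HDMI encoder driver now also receives the stream direction. A hedged sketch; my_hdmi_set_audio_mute() is a made-up hardware helper and the direction values follow the usual SNDRV_PCM_STREAM_* convention.

static int example_mute_stream(struct device *dev, void *data,
			       bool enable, int direction)
{
	/* only playback muting is meaningful for an HDMI encoder */
	if (direction != SNDRV_PCM_STREAM_PLAYBACK)
		return 0;

	return my_hdmi_set_audio_mute(dev_get_drvdata(dev), enable);
}

Setting the new no_capture_mute bit in hdmi_codec_ops tells the core not to call the op for capture at all, which makes the direction check above redundant in that configuration.
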
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 3b47832b1c1f..5daa937684a4 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -94,7 +94,11 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
size_t offset)
{
struct snd_sg_buf *sgbuf = dmab->private_data;
- dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+ dma_addr_t addr;
+
+ if (!sgbuf)
+ return dmab->addr + offset;
+ addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
addr &= ~((dma_addr_t)PAGE_SIZE - 1);
return addr + offset % PAGE_SIZE;
}
@@ -106,6 +110,9 @@ static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
size_t offset)
{
struct snd_sg_buf *sgbuf = dmab->private_data;
+
+ if (!sgbuf)
+ return dmab->area + offset;
return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
}
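
With the !sgbuf fallbacks above, these helpers also work for continuous (non-SG) buffers, whose private_data is NULL, so callers no longer need to branch on the buffer type. A trivial sketch, where dmab is any allocated snd_dma_buffer:

	dma_addr_t page_addr = snd_sgbuf_get_addr(dmab, offset);	/* dmab->addr + offset for non-SG */
	void *page_ptr = snd_sgbuf_get_ptr(dmab, offset);		/* dmab->area + offset for non-SG */
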
diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h
index 16c007b651f4..e5f82044a404 100644
--- a/include/sound/omap-hdmi-audio.h
+++ b/include/sound/omap-hdmi-audio.h
@@ -2,7 +2,7 @@
/*
* hdmi-audio.c -- OMAP4+ DSS HDMI audio support library
*
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Jyri Sarha <jsarha@ti.com>
*/
diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h
deleted file mode 100644
index 02e1d7778354..000000000000
--- a/include/sound/rt5670.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/rt5670.h -- Platform data for RT5670
- *
- * Copyright 2014 Realtek Microelectronics
- */
-
-#ifndef __LINUX_SND_RT5670_H
-#define __LINUX_SND_RT5670_H
-
-struct rt5670_platform_data {
- int jd_mode;
- bool in2_diff;
- bool dev_gpio;
- bool gpio1_is_ext_spk_en;
-
- bool dmic_en;
- unsigned int dmic1_data_pin;
- /* 0 = GPIO6; 1 = IN2P; 3 = GPIO7*/
- unsigned int dmic2_data_pin;
- /* 0 = GPIO8; 1 = IN3N; */
- unsigned int dmic3_data_pin;
- /* 0 = GPIO9; 1 = GPIO10; 2 = GPIO5*/
-};
-
-#endif
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index bbdd1542d6f1..86a1e956991e 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -12,9 +12,9 @@
#include <sound/soc.h>
#define asoc_simple_init_hp(card, sjack, prefix) \
- asoc_simple_init_jack(card, sjack, 1, prefix)
+ asoc_simple_init_jack(card, sjack, 1, prefix, NULL)
#define asoc_simple_init_mic(card, sjack, prefix) \
- asoc_simple_init_jack(card, sjack, 0, prefix)
+ asoc_simple_init_jack(card, sjack, 0, prefix, NULL)
struct asoc_simple_dai {
const char *name;
@@ -131,7 +131,7 @@ int asoc_simple_parse_pin_switches(struct snd_soc_card *card,
int asoc_simple_init_jack(struct snd_soc_card *card,
struct asoc_simple_jack *sjack,
- int is_hp, char *prefix);
+ int is_hp, char *prefix, char *pin);
int asoc_simple_init_priv(struct asoc_simple_priv *priv,
struct link_info *li);
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 5663891148e3..089ea9441fd1 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -2,7 +2,8 @@
*
* soc-component.h
*
- * Copyright (c) 2019 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*/
#ifndef __SOC_COMPONENT_H
#define __SOC_COMPONENT_H
@@ -324,10 +325,12 @@ static inline int snd_soc_component_cache_sync(
return regcache_sync(component->regmap);
}
+void snd_soc_component_set_aux(struct snd_soc_component *component,
+ struct snd_soc_aux_dev *aux);
+int snd_soc_component_init(struct snd_soc_component *component);
+
/* component IO */
-int snd_soc_component_read(struct snd_soc_component *component,
- unsigned int reg, unsigned int *val);
-unsigned int snd_soc_component_read32(struct snd_soc_component *component,
+unsigned int snd_soc_component_read(struct snd_soc_component *component,
unsigned int reg);
int snd_soc_component_write(struct snd_soc_component *component,
unsigned int reg, unsigned int val);
@@ -359,6 +362,7 @@ int snd_soc_component_stream_event(struct snd_soc_component *component,
int snd_soc_component_set_bias_level(struct snd_soc_component *component,
enum snd_soc_bias_level level);
+void snd_soc_component_setup_regmap(struct snd_soc_component *component);
#ifdef CONFIG_REGMAP
void snd_soc_component_init_regmap(struct snd_soc_component *component,
struct regmap *regmap);
@@ -421,16 +425,6 @@ int snd_soc_component_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
int snd_soc_component_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
-int snd_soc_component_prepare(struct snd_soc_component *component,
- struct snd_pcm_substream *substream);
-int snd_soc_component_hw_params(struct snd_soc_component *component,
- struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params);
-int snd_soc_component_hw_free(struct snd_soc_component *component,
- struct snd_pcm_substream *substream);
-int snd_soc_component_trigger(struct snd_soc_component *component,
- struct snd_pcm_substream *substream,
- int cmd);
void snd_soc_component_suspend(struct snd_soc_component *component);
void snd_soc_component_resume(struct snd_soc_component *component);
int snd_soc_component_is_suspended(struct snd_soc_component *component);
@@ -455,5 +449,13 @@ int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd);
void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd);
+int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream);
+int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_component **last);
+void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_component *last);
+int snd_soc_pcm_component_trigger(struct snd_pcm_substream *substream,
+ int cmd);
#endif /* __SOC_COMPONENT_H */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 71e178c89793..776a60529e70 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -39,7 +39,7 @@ struct snd_compr_stream;
/*
* DAI Clock gating.
*
- * DAI bit clocks can be be gated (disabled) when the DAI is not
+ * DAI bit clocks can be gated (disabled) when the DAI is not
* sending or receiving PCM data in a frame. This can be used to save power.
*/
#define SND_SOC_DAIFMT_CONT (1 << 4) /* continuous clock */
@@ -76,12 +76,12 @@ struct snd_compr_stream;
*
* This is wrt the codec, the inverse is true for the interface
* i.e. if the codec is clk and FRM master then the interface is
- * clk and frame slave.
+ * clk and frame secondary.
*/
#define SND_SOC_DAIFMT_CBM_CFM (1 << 12) /* codec clk & FRM master */
-#define SND_SOC_DAIFMT_CBS_CFM (2 << 12) /* codec clk slave & FRM master */
-#define SND_SOC_DAIFMT_CBM_CFS (3 << 12) /* codec clk master & frame slave */
-#define SND_SOC_DAIFMT_CBS_CFS (4 << 12) /* codec clk & FRM slave */
+#define SND_SOC_DAIFMT_CBS_CFM (2 << 12) /* codec clk secondary & FRM master */
+#define SND_SOC_DAIFMT_CBM_CFS (3 << 12) /* codec clk master & frame secondary */
+#define SND_SOC_DAIFMT_CBS_CFS (4 << 12) /* codec clk & FRM secondary */
#define SND_SOC_DAIFMT_FORMAT_MASK 0x000f
#define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0
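
These bits are normally consumed through snd_soc_dai_set_fmt(). A sketch of a machine driver selecting "codec clock & FRM secondary", i.e. the SoC side drives BCLK and LRCLK; codec_dai and ret are assumed to exist in the caller, and the I2S/NB_NF choices are only an example.

	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
					     SND_SOC_DAIFMT_NB_NF |
					     SND_SOC_DAIFMT_CBS_CFS);
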
@@ -247,7 +247,6 @@ struct snd_soc_dai_ops {
* DAI digital mute - optional.
* Called by soc-core to minimise any pops.
*/
- int (*digital_mute)(struct snd_soc_dai *dai, int mute);
int (*mute_stream)(struct snd_soc_dai *dai, int mute, int stream);
/*
@@ -281,6 +280,9 @@ struct snd_soc_dai_ops {
*/
snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
struct snd_soc_dai *);
+
+ /* bit field */
+ unsigned int no_capture_mute:1;
};
struct snd_soc_cdai_ops {
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index cc3dcb815282..c3039e97929a 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -16,6 +16,8 @@
#include <sound/asoc.h>
struct device;
+struct snd_soc_pcm_runtime;
+struct soc_enum;
/* widget has no PM register bit */
#define SND_SOC_NOPM -1
@@ -376,6 +378,24 @@ struct snd_soc_dapm_widget_list;
struct snd_soc_dapm_update;
enum snd_soc_dapm_direction;
+/*
+ * Bias levels
+ *
+ * @ON: Bias is fully on for audio playback and capture operations.
+ * @PREPARE: Prepare for audio operations. Called before DAPM switching for
+ * stream start and stop operations.
+ * @STANDBY: Low power standby state when no playback/capture operations are
+ * in progress. NOTE: The transition time between STANDBY and ON
+ * should be as fast as possible and no longer than 10ms.
+ * @OFF: Power Off. No restrictions on transition times.
+ */
+enum snd_soc_bias_level {
+ SND_SOC_BIAS_OFF = 0,
+ SND_SOC_BIAS_STANDBY = 1,
+ SND_SOC_BIAS_PREPARE = 2,
+ SND_SOC_BIAS_ON = 3,
+};
+
int dapm_regulator_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
int dapm_clock_event(struct snd_soc_dapm_widget *w,
diff --git a/include/sound/soc-link.h b/include/sound/soc-link.h
index 3dd6e33e94ec..337ac5666757 100644
--- a/include/sound/soc-link.h
+++ b/include/sound/soc-link.h
@@ -9,6 +9,7 @@
#define __SOC_LINK_H
int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd);
+void snd_soc_link_exit(struct snd_soc_pcm_runtime *rtd);
int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 3ce7f0f5aa92..5e3919ffb00c 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -368,24 +368,6 @@
#define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \
const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts)
-/*
- * Bias levels
- *
- * @ON: Bias is fully on for audio playback and capture operations.
- * @PREPARE: Prepare for audio operations. Called before DAPM switching for
- * stream start and stop operations.
- * @STANDBY: Low power standby state when no playback/capture operations are
- * in progress. NOTE: The transition time between STANDBY and ON
- * should be as fast as possible and no longer than 10ms.
- * @OFF: Power Off. No restrictions on transition times.
- */
-enum snd_soc_bias_level {
- SND_SOC_BIAS_OFF = 0,
- SND_SOC_BIAS_STANDBY = 1,
- SND_SOC_BIAS_PREPARE = 2,
- SND_SOC_BIAS_ON = 3,
-};
-
struct device_node;
struct snd_jack;
struct snd_soc_card;
@@ -432,11 +414,12 @@ static inline int snd_soc_resume(struct device *dev)
}
#endif
int snd_soc_poweroff(struct device *dev);
-int snd_soc_add_component(struct device *dev,
- struct snd_soc_component *component,
- const struct snd_soc_component_driver *component_driver,
- struct snd_soc_dai_driver *dai_drv,
- int num_dai);
+int snd_soc_component_initialize(struct snd_soc_component *component,
+ const struct snd_soc_component_driver *driver,
+ struct device *dev);
+int snd_soc_add_component(struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ int num_dai);
int snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
@@ -801,6 +784,9 @@ struct snd_soc_dai_link {
/* codec/machine specific init - e.g. add machine controls */
int (*init)(struct snd_soc_pcm_runtime *rtd);
+ /* codec/machine specific exit - dual of init() */
+ void (*exit)(struct snd_soc_pcm_runtime *rtd);
+
/* optional hw_params re-writing for BE and FE sync */
int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params);
@@ -1183,6 +1169,8 @@ struct snd_soc_pcm_runtime {
/* see soc_new_pcm_runtime() */
#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus]
+#define asoc_substream_to_rtd(substream) \
+ (struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream)
#define for_each_rtd_components(rtd, i, component) \
for ((i) = 0, component = NULL; \
diff --git a/include/sound/wm8960.h b/include/sound/wm8960.h
index d22e84805025..275fd5b201ce 100644
--- a/include/sound/wm8960.h
+++ b/include/sound/wm8960.h
@@ -16,6 +16,23 @@ struct wm8960_data {
bool capless; /* Headphone outputs configured in capless mode */
bool shared_lrclk; /* DAC and ADC LRCLKs are wired together */
+
+ /*
+ * Setup for headphone detection
+ *
+ * hp_cfg[0]: HPSEL[1:0] of R48 (Additional Control 4)
+ * hp_cfg[1]: {HPSWEN:HPSWPOL} of R24 (Additional Control 2).
+ * hp_cfg[2]: {TOCLKSEL:TOEN} of R23 (Additional Control 1).
+ */
+ u32 hp_cfg[3];
+
+ /*
+ * Setup for gpio configuration
+ *
+ * gpio_cfg[0]: ALRCGPIO of R9 (Audio interface)
+ * gpio_cfg[1]: {GPIOPOL:GPIOSEL[2:0]} of R48 (Additional Control 4).
+ */
+ u32 gpio_cfg[2];
};
#endif
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 4fda324f4b35..1eccb2ac7d02 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -556,10 +556,11 @@ struct iscsi_conn {
struct socket *sock;
void (*orig_data_ready)(struct sock *);
void (*orig_state_change)(struct sock *);
-#define LOGIN_FLAGS_READ_ACTIVE 1
-#define LOGIN_FLAGS_CLOSED 2
-#define LOGIN_FLAGS_READY 4
-#define LOGIN_FLAGS_INITIAL_PDU 8
+#define LOGIN_FLAGS_READY 0
+#define LOGIN_FLAGS_INITIAL_PDU 1
+#define LOGIN_FLAGS_READ_ACTIVE 2
+#define LOGIN_FLAGS_WRITE_ACTIVE 3
+#define LOGIN_FLAGS_CLOSED 4
unsigned long login_flags;
struct delayed_work login_work;
struct iscsi_login *login;
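
The LOGIN_FLAGS_* values change from bit masks to bit numbers, which implies they are used with the atomic bitops on login_flags rather than plain mask arithmetic. An illustrative (not upstream) helper:

static bool example_try_mark_write_active(struct iscsi_conn *conn)
{
	/* bit numbers, not masks: the 1UL << shift happens inside the bitop */
	return !test_and_set_bit(LOGIN_FLAGS_WRITE_ACTIVE, &conn->login_flags);
}
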
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 75bee29fd7dd..b8feba7ffebc 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -43,7 +43,7 @@ static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
* From iscsi_target_transport.c
*/
-extern int iscsit_register_transport(struct iscsit_transport *);
+extern void iscsit_register_transport(struct iscsit_transport *);
extern void iscsit_unregister_transport(struct iscsit_transport *);
extern struct iscsit_transport *iscsit_get_transport(int);
extern void iscsit_put_transport(struct iscsit_transport *);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index f51452e3b984..6336780d83a7 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -40,6 +40,8 @@ struct target_backend_ops {
ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
+ void (*tmr_notify)(struct se_device *se_dev, enum tcm_tmreq_table,
+ struct list_head *aborted_cmds);
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
sector_t (*get_alignment_offset_lbas)(struct se_device *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 18c3f277b770..549947d407cf 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -207,6 +207,7 @@ enum tcm_tmreq_table {
TMR_LUN_RESET = 5,
TMR_TARGET_WARM_RESET = 6,
TMR_TARGET_COLD_RESET = 7,
+ TMR_LUN_RESET_PRO = 0x80,
TMR_UNKNOWN = 0xff,
};
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 93b114226af8..34d64ca306b1 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -212,6 +212,21 @@ DEFINE_EVENT(block_rq, block_rq_issue,
);
/**
+ * block_rq_merge - merge request with another one in the elevator
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * Called when block operation request @rq from queue @q is merged to another
+ * request queued in the elevator.
+ */
+DEFINE_EVENT(block_rq, block_rq_merge,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq)
+);
+
+/**
* block_bio_bounce - used bounce buffer when processing block operation
* @q: queue holding the block operation
* @bio: block operation
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 360b0f9d2220..863335ecb7e8 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -31,13 +31,6 @@ struct extent_io_tree;
struct prelim_ref;
struct btrfs_space_info;
-TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS_NR);
-TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS);
-TRACE_DEFINE_ENUM(FLUSH_DELALLOC);
-TRACE_DEFINE_ENUM(FLUSH_DELALLOC_WAIT);
-TRACE_DEFINE_ENUM(ALLOC_CHUNK);
-TRACE_DEFINE_ENUM(COMMIT_TRANS);
-
#define show_ref_type(type) \
__print_symbolic(type, \
{ BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
@@ -67,30 +60,72 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
(obj >= BTRFS_ROOT_TREE_OBJECTID && \
obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
-#define show_fi_type(type) \
- __print_symbolic(type, \
- { BTRFS_FILE_EXTENT_INLINE, "INLINE" }, \
- { BTRFS_FILE_EXTENT_REG, "REG" }, \
- { BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC"})
+#define FLUSH_ACTIONS \
+ EM( BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH") \
+ EM( BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT") \
+ EM( BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL") \
+ EMe(BTRFS_RESERVE_FLUSH_ALL_STEAL, "BTRFS_RESERVE_FLUSH_ALL_STEAL")
+
+#define FI_TYPES \
+ EM( BTRFS_FILE_EXTENT_INLINE, "INLINE") \
+ EM( BTRFS_FILE_EXTENT_REG, "REG") \
+ EMe(BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC")
+
+#define QGROUP_RSV_TYPES \
+ EM( BTRFS_QGROUP_RSV_DATA, "DATA") \
+ EM( BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS") \
+ EMe(BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC")
+
+#define IO_TREE_OWNER \
+ EM( IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS") \
+ EM( IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS") \
+ EM( IO_TREE_INODE_IO, "INODE_IO") \
+ EM( IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE") \
+ EM( IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS") \
+ EM( IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES") \
+ EM( IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES") \
+ EM( IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT") \
+ EM( IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE") \
+ EMe(IO_TREE_SELFTEST, "SELFTEST")
+
+#define FLUSH_STATES \
+ EM( FLUSH_DELAYED_ITEMS_NR, "FLUSH_DELAYED_ITEMS_NR") \
+ EM( FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS") \
+ EM( FLUSH_DELALLOC, "FLUSH_DELALLOC") \
+ EM( FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT") \
+ EM( FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR") \
+ EM( FLUSH_DELAYED_REFS, "FLUSH_DELAYED_REFS") \
+ EM( ALLOC_CHUNK, "ALLOC_CHUNK") \
+ EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \
+ EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \
+ EMe(COMMIT_TRANS, "COMMIT_TRANS")
+
+/*
+ * First define the enums in the above macros to be exported to userspace via
+ * TRACE_DEFINE_ENUM().
+ */
+
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+FLUSH_ACTIONS
+FI_TYPES
+QGROUP_RSV_TYPES
+IO_TREE_OWNER
+FLUSH_STATES
+
+/*
+ * Now redefine the EM and EMe macros to map the enums to the strings that will
+ * be printed in the output
+ */
+
+#undef EM
+#undef EMe
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
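
The EM()/EMe() pattern above keeps a single list as the source of truth for both the TRACE_DEFINE_ENUM() exports and the __print_symbolic() tables. A generic illustration for a hypothetical WIDGET_* enum, outside btrfs:

#define WIDGET_STATES				\
	EM( WIDGET_IDLE,	"IDLE")		\
	EM( WIDGET_BUSY,	"BUSY")		\
	EMe(WIDGET_FAILED,	"FAILED")

#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

WIDGET_STATES

#undef EM
#undef EMe
#define EM(a, b)	{a, b},
#define EMe(a, b)	{a, b}

/* later, in TP_printk(): __print_symbolic(__entry->state, WIDGET_STATES) */
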
-#define show_qgroup_rsv_type(type) \
- __print_symbolic(type, \
- { BTRFS_QGROUP_RSV_DATA, "DATA" }, \
- { BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS" }, \
- { BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC" })
-
-#define show_extent_io_tree_owner(owner) \
- __print_symbolic(owner, \
- { IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS" }, \
- { IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS" }, \
- { IO_TREE_INODE_IO, "INODE_IO" }, \
- { IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE" }, \
- { IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS" }, \
- { IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES" }, \
- { IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES" }, \
- { IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT" }, \
- { IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE" }, \
- { IO_TREE_SELFTEST, "SELFTEST" })
#define BTRFS_GROUP_FLAGS \
{ BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
@@ -380,7 +415,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
__entry->disk_isize, __entry->extent_start,
__entry->extent_end, __entry->num_bytes, __entry->ram_bytes,
__entry->disk_bytenr, __entry->disk_num_bytes,
- __entry->extent_offset, show_fi_type(__entry->extent_type),
+ __entry->extent_offset, __print_symbolic(__entry->extent_type, FI_TYPES),
__entry->compression)
);
@@ -421,7 +456,7 @@ DECLARE_EVENT_CLASS(
"extent_type=%s compression=%u",
show_root_type(__entry->root_obj), __entry->ino, __entry->isize,
__entry->disk_isize, __entry->extent_start,
- __entry->extent_end, show_fi_type(__entry->extent_type),
+ __entry->extent_end, __print_symbolic(__entry->extent_type, FI_TYPES),
__entry->compression)
);
@@ -1042,12 +1077,6 @@ TRACE_EVENT(btrfs_space_reservation,
__entry->bytes)
);
-#define show_flush_action(action) \
- __print_symbolic(action, \
- { BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH"}, \
- { BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT"}, \
- { BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL"})
-
TRACE_EVENT(btrfs_trigger_flush,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
@@ -1071,25 +1100,13 @@ TRACE_EVENT(btrfs_trigger_flush,
TP_printk_btrfs("%s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
__get_str(reason), __entry->flush,
- show_flush_action(__entry->flush),
+ __print_symbolic(__entry->flush, FLUSH_ACTIONS),
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS),
__entry->bytes)
);
-#define show_flush_state(state) \
- __print_symbolic(state, \
- { FLUSH_DELAYED_ITEMS_NR, "FLUSH_DELAYED_ITEMS_NR"}, \
- { FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS"}, \
- { FLUSH_DELALLOC, "FLUSH_DELALLOC"}, \
- { FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT"}, \
- { FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR"}, \
- { FLUSH_DELAYED_REFS, "FLUSH_ELAYED_REFS"}, \
- { ALLOC_CHUNK, "ALLOC_CHUNK"}, \
- { ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE"}, \
- { RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS"}, \
- { COMMIT_TRANS, "COMMIT_TRANS"})
TRACE_EVENT(btrfs_flush_space,
@@ -1114,7 +1131,7 @@ TRACE_EVENT(btrfs_flush_space,
TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d",
__entry->state,
- show_flush_state(__entry->state),
+ __print_symbolic(__entry->state, FLUSH_STATES),
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS),
@@ -1690,7 +1707,7 @@ TRACE_EVENT(qgroup_update_reserve,
),
TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
- __entry->qgid, show_qgroup_rsv_type(__entry->type),
+ __entry->qgid, __print_symbolic(__entry->type, QGROUP_RSV_TYPES),
__entry->cur_reserved, __entry->diff)
);
@@ -1714,7 +1731,7 @@ TRACE_EVENT(qgroup_meta_reserve,
TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
show_root_type(__entry->refroot),
- show_qgroup_rsv_type(__entry->type), __entry->diff)
+ __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff)
);
TRACE_EVENT(qgroup_meta_convert,
@@ -1735,8 +1752,8 @@ TRACE_EVENT(qgroup_meta_convert,
TP_printk_btrfs("refroot=%llu(%s) type=%s->%s diff=%lld",
show_root_type(__entry->refroot),
- show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PREALLOC),
- show_qgroup_rsv_type(BTRFS_QGROUP_RSV_META_PERTRANS),
+ __print_symbolic(BTRFS_QGROUP_RSV_META_PREALLOC, QGROUP_RSV_TYPES),
+ __print_symbolic(BTRFS_QGROUP_RSV_META_PERTRANS, QGROUP_RSV_TYPES),
__entry->diff)
);
@@ -1762,7 +1779,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans,
TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
show_root_type(__entry->refroot),
- show_qgroup_rsv_type(__entry->type), __entry->diff)
+ __print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff)
);
DECLARE_EVENT_CLASS(btrfs__prelim_ref,
@@ -1920,7 +1937,7 @@ TRACE_EVENT(btrfs_set_extent_bit,
TP_printk_btrfs(
"io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s",
- show_extent_io_tree_owner(__entry->owner), __entry->ino,
+ __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino,
__entry->rootid, __entry->start, __entry->len,
__print_flags(__entry->set_bits, "|", EXTENT_FLAGS))
);
@@ -1959,7 +1976,7 @@ TRACE_EVENT(btrfs_clear_extent_bit,
TP_printk_btrfs(
"io_tree=%s ino=%llu root=%llu start=%llu len=%llu clear_bits=%s",
- show_extent_io_tree_owner(__entry->owner), __entry->ino,
+ __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino,
__entry->rootid, __entry->start, __entry->len,
__print_flags(__entry->clear_bits, "|", EXTENT_FLAGS))
);
@@ -2000,7 +2017,7 @@ TRACE_EVENT(btrfs_convert_extent_bit,
TP_printk_btrfs(
"io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s clear_bits=%s",
- show_extent_io_tree_owner(__entry->owner), __entry->ino,
+ __print_symbolic(__entry->owner, IO_TREE_OWNER), __entry->ino,
__entry->rootid, __entry->start, __entry->len,
__print_flags(__entry->set_bits , "|", EXTENT_FLAGS),
__print_flags(__entry->clear_bits, "|", EXTENT_FLAGS))
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index cc41d692ae8e..4c8b99ec8606 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -746,24 +746,29 @@ TRACE_EVENT(ext4_mb_release_group_pa,
);
TRACE_EVENT(ext4_discard_preallocations,
- TP_PROTO(struct inode *inode),
+ TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
- TP_ARGS(inode),
+ TP_ARGS(inode, len, needed),
TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( ino_t, ino )
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( unsigned int, len )
+ __field( unsigned int, needed )
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
+ __entry->len = len;
+ __entry->needed = needed;
),
- TP_printk("dev %d,%d ino %lu",
+ TP_printk("dev %d,%d ino %lu len: %u needed %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino)
+ (unsigned long) __entry->ino, __entry->len,
+ __entry->needed)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -1312,18 +1317,34 @@ DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
TP_ARGS(sb, group)
);
-DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
+DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
TP_PROTO(struct super_block *sb, unsigned long group),
TP_ARGS(sb, group)
);
-DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
+TRACE_EVENT(ext4_read_block_bitmap_load,
+ TP_PROTO(struct super_block *sb, unsigned long group, bool prefetch),
- TP_PROTO(struct super_block *sb, unsigned long group),
+ TP_ARGS(sb, group, prefetch),
- TP_ARGS(sb, group)
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u32, group )
+ __field( bool, prefetch )
+
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ __entry->prefetch = prefetch;
+ ),
+
+ TP_printk("dev %d,%d group %u prefetch %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->group, __entry->prefetch)
);
TRACE_EVENT(ext4_direct_IO_enter,
@@ -2726,6 +2747,50 @@ TRACE_EVENT(ext4_error,
__entry->function, __entry->line)
);
+TRACE_EVENT(ext4_prefetch_bitmaps,
+ TP_PROTO(struct super_block *sb, ext4_group_t group,
+ ext4_group_t next, unsigned int prefetch_ios),
+
+ TP_ARGS(sb, group, next, prefetch_ios),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u32, group )
+ __field( __u32, next )
+ __field( __u32, ios )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ __entry->next = next;
+ __entry->ios = prefetch_ios;
+ ),
+
+ TP_printk("dev %d,%d group %u next %u ios %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->group, __entry->next, __entry->ios)
+);
+
+TRACE_EVENT(ext4_lazy_itable_init,
+ TP_PROTO(struct super_block *sb, ext4_group_t group),
+
+ TP_ARGS(sb, group),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( __u32, group )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->group = group;
+ ),
+
+ TP_printk("dev %d,%d group %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->group)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 8639ab962a71..8a1c1311acac 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1891,6 +1891,69 @@ TRACE_EVENT(f2fs_iostat,
__entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio)
);
+TRACE_EVENT(f2fs_bmap,
+
+ TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock),
+
+ TP_ARGS(inode, lblock, pblock),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(sector_t, lblock)
+ __field(sector_t, pblock)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->lblock = lblock;
+ __entry->pblock = pblock;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld",
+ show_dev_ino(__entry),
+ (unsigned long long)__entry->lblock,
+ (unsigned long long)__entry->pblock)
+);
+
+TRACE_EVENT(f2fs_fiemap,
+
+ TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock,
+ unsigned long long len, unsigned int flags, int ret),
+
+ TP_ARGS(inode, lblock, pblock, len, flags, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(sector_t, lblock)
+ __field(sector_t, pblock)
+ __field(unsigned long long, len)
+ __field(unsigned int, flags)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->lblock = lblock;
+ __entry->pblock = pblock;
+ __entry->len = len;
+ __entry->flags = flags;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld, "
+ "len:%llu, flags:%u, ret:%d",
+ show_dev_ino(__entry),
+ (unsigned long long)__entry->lblock,
+ (unsigned long long)__entry->pblock,
+ __entry->len,
+ __entry->flags,
+ __entry->ret)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 2c735a3e6613..9417a34aad08 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -17,7 +17,7 @@
ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI), \
- ERSN(HYPERV)
+ ERSN(HYPERV), ERSN(ARM_NISV)
TRACE_EVENT(kvm_userspace_exit,
TP_PROTO(__u32 reason, int errno),
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 705b33d1e395..4d434398d64d 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -46,13 +46,18 @@ MIGRATE_REASON
TRACE_EVENT(mm_migrate_pages,
TP_PROTO(unsigned long succeeded, unsigned long failed,
- enum migrate_mode mode, int reason),
+ unsigned long thp_succeeded, unsigned long thp_failed,
+ unsigned long thp_split, enum migrate_mode mode, int reason),
- TP_ARGS(succeeded, failed, mode, reason),
+ TP_ARGS(succeeded, failed, thp_succeeded, thp_failed,
+ thp_split, mode, reason),
TP_STRUCT__entry(
__field( unsigned long, succeeded)
__field( unsigned long, failed)
+ __field( unsigned long, thp_succeeded)
+ __field( unsigned long, thp_failed)
+ __field( unsigned long, thp_split)
__field( enum migrate_mode, mode)
__field( int, reason)
),
@@ -60,13 +65,19 @@ TRACE_EVENT(mm_migrate_pages,
TP_fast_assign(
__entry->succeeded = succeeded;
__entry->failed = failed;
+ __entry->thp_succeeded = thp_succeeded;
+ __entry->thp_failed = thp_failed;
+ __entry->thp_split = thp_split;
__entry->mode = mode;
__entry->reason = reason;
),
- TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s",
+ TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s",
__entry->succeeded,
__entry->failed,
+ __entry->thp_succeeded,
+ __entry->thp_failed,
+ __entry->thp_split,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 5fb752034386..939092dbcb8b 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -114,8 +114,6 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
-#elif defined(CONFIG_PPC)
-#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
index 32c10a515e2d..9570a10cb949 100644
--- a/include/trace/events/random.h
+++ b/include/trace/events/random.h
@@ -307,6 +307,23 @@ TRACE_EVENT(urandom_read,
__entry->pool_left, __entry->input_left)
);
+TRACE_EVENT(prandom_u32,
+
+ TP_PROTO(unsigned int ret),
+
+ TP_ARGS(ret),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ ),
+
+ TP_printk("ret=%u", __entry->ret)
+);
+
#endif /* _TRACE_RANDOM_H */
/* This part must be outside protection */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index f9a7811148e2..ced71237b7e4 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -435,11 +435,12 @@ TRACE_EVENT_RCU(rcu_fqs,
#endif /* #if defined(CONFIG_TREE_RCU) */
/*
- * Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode, "Startirq" for
- * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it
- * to irq/NMI, "--=" for events moving towards idle, and "++=" for events
- * moving away from idle.
+ * Tracepoint for dyntick-idle entry/exit events. These take 2 strings
+ * as arguments:
+ * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not
+ * being in dyntick-idle mode.
+ * context: "USER" or "IDLE" or "IRQ".
+ * NMIs nested in IRQs are inferred with dynticks_nesting > 1 in IRQ context.
*
* These events also take a pair of numbers, which indicate the nesting
* depth before and after the event of interest, and a third number that is
@@ -506,13 +507,13 @@ TRACE_EVENT_RCU(rcu_callback,
/*
* Tracepoint for the registration of a single RCU callback of the special
- * kfree() form. The first argument is the RCU type, the second argument
+ * kvfree() form. The first argument is the RCU type, the second argument
* is a pointer to the RCU callback, the third argument is the offset
* of the callback within the enclosing RCU-protected data structure,
* the fourth argument is the number of lazy callbacks queued, and the
* fifth argument is the total number of callbacks queued.
*/
-TRACE_EVENT_RCU(rcu_kfree_callback,
+TRACE_EVENT_RCU(rcu_kvfree_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
long qlen),
@@ -596,12 +597,12 @@ TRACE_EVENT_RCU(rcu_invoke_callback,
/*
* Tracepoint for the invocation of a single RCU callback of the special
- * kfree() form. The first argument is the RCU flavor, the second
+ * kvfree() form. The first argument is the RCU flavor, the second
* argument is a pointer to the RCU callback, and the third argument
* is the offset of the callback within the enclosing RCU-protected
* data structure.
*/
-TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
+TRACE_EVENT_RCU(rcu_invoke_kvfree_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index b9b51a4b1db1..ffdbe6f85da8 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -170,55 +170,144 @@ DECLARE_EVENT_CLASS(rpcgss_ctx_class,
DEFINE_CTX_EVENT(init);
DEFINE_CTX_EVENT(destroy);
+DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ u32 maj_stat
+ ),
+
+ TP_ARGS(rqstp, maj_stat),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, maj_stat)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = __be32_to_cpu(rqstp->rq_xid);
+ __entry->maj_stat = maj_stat;
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x maj_stat=%s",
+ __get_str(addr), __entry->xid,
+ __entry->maj_stat == 0 ?
+ "GSS_S_COMPLETE" : show_gss_status(__entry->maj_stat))
+);
+
+#define DEFINE_SVC_GSSAPI_EVENT(name) \
+ DEFINE_EVENT(rpcgss_svc_gssapi_class, rpcgss_svc_##name, \
+ TP_PROTO( \
+ const struct svc_rqst *rqstp, \
+ u32 maj_stat \
+ ), \
+ TP_ARGS(rqstp, maj_stat))
+
+DEFINE_SVC_GSSAPI_EVENT(unwrap);
+DEFINE_SVC_GSSAPI_EVENT(mic);
+
+TRACE_EVENT(rpcgss_svc_unwrap_failed,
+ TP_PROTO(
+ const struct svc_rqst *rqstp
+ ),
+
+ TP_ARGS(rqstp),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+);
+
+TRACE_EVENT(rpcgss_svc_seqno_bad,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ u32 expected,
+ u32 received
+ ),
+
+ TP_ARGS(rqstp, expected, received),
+
+ TP_STRUCT__entry(
+ __field(u32, expected)
+ __field(u32, received)
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->expected = expected;
+ __entry->received = received;
+ __entry->xid = __be32_to_cpu(rqstp->rq_xid);
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s xid=0x%08x expected seqno %u, received seqno %u",
+ __get_str(addr), __entry->xid,
+ __entry->expected, __entry->received)
+);
+
TRACE_EVENT(rpcgss_svc_accept_upcall,
TP_PROTO(
- __be32 xid,
+ const struct svc_rqst *rqstp,
u32 major_status,
u32 minor_status
),
- TP_ARGS(xid, major_status, minor_status),
+ TP_ARGS(rqstp, major_status, minor_status),
TP_STRUCT__entry(
- __field(u32, xid)
__field(u32, minor_status)
__field(unsigned long, major_status)
+ __field(u32, xid)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(xid);
__entry->minor_status = minor_status;
__entry->major_status = major_status;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
),
- TP_printk("xid=0x%08x major_status=%s (0x%08lx) minor_status=%u",
- __entry->xid, __entry->major_status == 0 ? "GSS_S_COMPLETE" :
- show_gss_status(__entry->major_status),
+ TP_printk("addr=%s xid=0x%08x major_status=%s (0x%08lx) minor_status=%u",
+ __get_str(addr), __entry->xid,
+ (__entry->major_status == 0) ? "GSS_S_COMPLETE" :
+ show_gss_status(__entry->major_status),
__entry->major_status, __entry->minor_status
)
);
-TRACE_EVENT(rpcgss_svc_accept,
+TRACE_EVENT(rpcgss_svc_authenticate,
TP_PROTO(
- __be32 xid,
- size_t len
+ const struct svc_rqst *rqstp,
+ const struct rpc_gss_wire_cred *gc
),
- TP_ARGS(xid, len),
+ TP_ARGS(rqstp, gc),
TP_STRUCT__entry(
+ __field(u32, seqno)
__field(u32, xid)
- __field(size_t, len)
+ __string(addr, rqstp->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(xid);
- __entry->len = len;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->seqno = gc->gc_seq;
+ __assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
),
- TP_printk("xid=0x%08x len=%zu",
- __entry->xid, __entry->len
- )
+ TP_printk("addr=%s xid=0x%08x seqno=%u", __get_str(addr),
+ __entry->xid, __entry->seqno)
);
@@ -371,11 +460,11 @@ TRACE_EVENT(rpcgss_update_slack,
DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class,
TP_PROTO(
- __be32 xid,
+ const struct svc_rqst *rqstp,
u32 seqno
),
- TP_ARGS(xid, seqno),
+ TP_ARGS(rqstp, seqno),
TP_STRUCT__entry(
__field(u32, xid)
@@ -383,25 +472,52 @@ DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class,
),
TP_fast_assign(
- __entry->xid = be32_to_cpu(xid);
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->seqno = seqno;
),
- TP_printk("xid=0x%08x seqno=%u, request discarded",
+ TP_printk("xid=0x%08x seqno=%u",
__entry->xid, __entry->seqno)
);
#define DEFINE_SVC_SEQNO_EVENT(name) \
- DEFINE_EVENT(rpcgss_svc_seqno_class, rpcgss_svc_##name, \
+ DEFINE_EVENT(rpcgss_svc_seqno_class, rpcgss_svc_seqno_##name, \
TP_PROTO( \
- __be32 xid, \
+ const struct svc_rqst *rqstp, \
u32 seqno \
), \
- TP_ARGS(xid, seqno))
+ TP_ARGS(rqstp, seqno))
-DEFINE_SVC_SEQNO_EVENT(large_seqno);
-DEFINE_SVC_SEQNO_EVENT(old_seqno);
+DEFINE_SVC_SEQNO_EVENT(large);
+DEFINE_SVC_SEQNO_EVENT(seen);
+TRACE_EVENT(rpcgss_svc_seqno_low,
+ TP_PROTO(
+ const struct svc_rqst *rqstp,
+ u32 seqno,
+ u32 min,
+ u32 max
+ ),
+
+ TP_ARGS(rqstp, seqno, min, max),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, seqno)
+ __field(u32, min)
+ __field(u32, max)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->seqno = seqno;
+ __entry->min = min;
+ __entry->max = max;
+ ),
+
+ TP_printk("xid=0x%08x seqno=%u window=[%u..%u]",
+ __entry->xid, __entry->seqno, __entry->min, __entry->max)
+);
/**
** gssd upcall related trace events
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 0f05a6e2b9cb..abe942225637 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -11,6 +11,7 @@
#define _TRACE_RPCRDMA_H
#include <linux/scatterlist.h>
+#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/tracepoint.h>
#include <trace/events/rdma.h>
@@ -18,6 +19,46 @@
** Event classes
**/
+DECLARE_EVENT_CLASS(rpcrdma_completion_class,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpc_rdma_cid *cid
+ ),
+
+ TP_ARGS(wc, cid),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status)
+ __entry->vendor_err = wc->vendor_err;
+ else
+ __entry->vendor_err = 0;
+ ),
+
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_completion_class, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(wc, cid))
+
DECLARE_EVENT_CLASS(xprtrdma_reply_event,
TP_PROTO(
const struct rpcrdma_rep *rep
@@ -1328,13 +1369,16 @@ TRACE_DEFINE_ENUM(RDMA_ERROR);
TRACE_EVENT(svcrdma_decode_rqst,
TP_PROTO(
+ const struct svc_rdma_recv_ctxt *ctxt,
__be32 *p,
unsigned int hdrlen
),
- TP_ARGS(p, hdrlen),
+ TP_ARGS(ctxt, p, hdrlen),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(u32, xid)
__field(u32, vers)
__field(u32, proc)
@@ -1343,6 +1387,8 @@ TRACE_EVENT(svcrdma_decode_rqst,
),
TP_fast_assign(
+ __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->rc_cid.ci_completion_id;
__entry->xid = be32_to_cpup(p++);
__entry->vers = be32_to_cpup(p++);
__entry->credits = be32_to_cpup(p++);
@@ -1350,37 +1396,48 @@ TRACE_EVENT(svcrdma_decode_rqst,
__entry->hdrlen = hdrlen;
),
- TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
+ TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
+ __entry->cq_id, __entry->completion_id,
__entry->xid, __entry->vers, __entry->credits,
show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
TRACE_EVENT(svcrdma_decode_short_err,
TP_PROTO(
+ const struct svc_rdma_recv_ctxt *ctxt,
unsigned int hdrlen
),
- TP_ARGS(hdrlen),
+ TP_ARGS(ctxt, hdrlen),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, hdrlen)
),
TP_fast_assign(
+ __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->rc_cid.ci_completion_id;
__entry->hdrlen = hdrlen;
),
- TP_printk("hdrlen=%u", __entry->hdrlen)
+ TP_printk("cq.id=%u cid=%d hdrlen=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->hdrlen)
);
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
TP_PROTO(
+ const struct svc_rdma_recv_ctxt *ctxt,
__be32 *p
),
- TP_ARGS(p),
+ TP_ARGS(ctxt, p),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(u32, xid)
__field(u32, vers)
__field(u32, proc)
@@ -1388,13 +1445,16 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event,
),
TP_fast_assign(
+ __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->rc_cid.ci_completion_id;
__entry->xid = be32_to_cpup(p++);
__entry->vers = be32_to_cpup(p++);
__entry->credits = be32_to_cpup(p++);
__entry->proc = be32_to_cpup(p);
),
- TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
+ TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
+ __entry->cq_id, __entry->completion_id,
__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
@@ -1402,9 +1462,10 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event,
DEFINE_EVENT(svcrdma_badreq_event, \
svcrdma_decode_##name##_err, \
TP_PROTO( \
+ const struct svc_rdma_recv_ctxt *ctxt, \
__be32 *p \
), \
- TP_ARGS(p))
+ TP_ARGS(ctxt, p))
DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
@@ -1716,7 +1777,7 @@ TRACE_EVENT(svcrdma_send_pullup,
TP_printk("len=%u", __entry->len)
);
-TRACE_EVENT(svcrdma_send_failed,
+TRACE_EVENT(svcrdma_send_err,
TP_PROTO(
const struct svc_rqst *rqst,
int status
@@ -1727,167 +1788,127 @@ TRACE_EVENT(svcrdma_send_failed,
TP_STRUCT__entry(
__field(int, status)
__field(u32, xid)
- __field(const void *, xprt)
__string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->status = status;
__entry->xid = __be32_to_cpu(rqst->rq_xid);
- __entry->xprt = rqst->rq_xprt;
__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
- __entry->xprt, __get_str(addr),
+ TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
__entry->xid, __entry->status
)
);
-DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
- TP_PROTO(
- const struct ib_wc *wc
- ),
-
- TP_ARGS(wc),
-
- TP_STRUCT__entry(
- __field(const void *, cqe)
- __field(unsigned int, status)
- __field(unsigned int, vendor_err)
- ),
-
- TP_fast_assign(
- __entry->cqe = wc->wr_cqe;
- __entry->status = wc->status;
- if (wc->status)
- __entry->vendor_err = wc->vendor_err;
- else
- __entry->vendor_err = 0;
- ),
-
- TP_printk("cqe=%p status=%s (%u/0x%x)",
- __entry->cqe, rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
- )
-);
-
-#define DEFINE_SENDCOMP_EVENT(name) \
- DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
- TP_PROTO( \
- const struct ib_wc *wc \
- ), \
- TP_ARGS(wc))
-
TRACE_EVENT(svcrdma_post_send,
TP_PROTO(
- const struct ib_send_wr *wr
+ const struct svc_rdma_send_ctxt *ctxt
),
- TP_ARGS(wr),
+ TP_ARGS(ctxt),
TP_STRUCT__entry(
- __field(const void *, cqe)
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, num_sge)
__field(u32, inv_rkey)
),
TP_fast_assign(
- __entry->cqe = wr->wr_cqe;
+ const struct ib_send_wr *wr = &ctxt->sc_send_wr;
+
+ __entry->cq_id = ctxt->sc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->sc_cid.ci_completion_id;
__entry->num_sge = wr->num_sge;
__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
wr->ex.invalidate_rkey : 0;
),
- TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x",
- __entry->cqe, __entry->num_sge,
- __entry->inv_rkey
+ TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
+ __entry->cq_id, __entry->completion_id,
+ __entry->num_sge, __entry->inv_rkey
)
);
-DEFINE_SENDCOMP_EVENT(send);
+DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
TRACE_EVENT(svcrdma_post_recv,
TP_PROTO(
- const struct ib_recv_wr *wr,
- int status
+ const struct svc_rdma_recv_ctxt *ctxt
),
- TP_ARGS(wr, status),
+ TP_ARGS(ctxt),
TP_STRUCT__entry(
- __field(const void *, cqe)
- __field(int, status)
+ __field(u32, cq_id)
+ __field(int, completion_id)
),
TP_fast_assign(
- __entry->cqe = wr->wr_cqe;
- __entry->status = status;
+ __entry->cq_id = ctxt->rc_cid.ci_queue_id;
+ __entry->completion_id = ctxt->rc_cid.ci_completion_id;
),
- TP_printk("cqe=%p status=%d",
- __entry->cqe, __entry->status
+ TP_printk("cq.id=%d cid=%d",
+ __entry->cq_id, __entry->completion_id
)
);
-TRACE_EVENT(svcrdma_wc_receive,
+DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
+
+TRACE_EVENT(svcrdma_rq_post_err,
TP_PROTO(
- const struct ib_wc *wc
+ const struct svcxprt_rdma *rdma,
+ int status
),
- TP_ARGS(wc),
+ TP_ARGS(rdma, status),
TP_STRUCT__entry(
- __field(const void *, cqe)
- __field(u32, byte_len)
- __field(unsigned int, status)
- __field(u32, vendor_err)
+ __field(int, status)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
- __entry->cqe = wc->wr_cqe;
- __entry->status = wc->status;
- if (wc->status) {
- __entry->byte_len = 0;
- __entry->vendor_err = wc->vendor_err;
- } else {
- __entry->byte_len = wc->byte_len;
- __entry->vendor_err = 0;
- }
+ __entry->status = status;
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
- __entry->cqe, __entry->byte_len,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk("addr=%s status=%d",
+ __get_str(addr), __entry->status
)
);
-TRACE_EVENT(svcrdma_post_rw,
+TRACE_EVENT(svcrdma_post_chunk,
TP_PROTO(
- const void *cqe,
+ const struct rpc_rdma_cid *cid,
int sqecount
),
- TP_ARGS(cqe, sqecount),
+ TP_ARGS(cid, sqecount),
TP_STRUCT__entry(
- __field(const void *, cqe)
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, sqecount)
),
TP_fast_assign(
- __entry->cqe = cqe;
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->sqecount = sqecount;
),
- TP_printk("cqe=%p sqecount=%d",
- __entry->cqe, __entry->sqecount
+ TP_printk("cq.id=%u cid=%d sqecount=%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->sqecount
)
);
-DEFINE_SENDCOMP_EVENT(read);
-DEFINE_SENDCOMP_EVENT(write);
+DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
+DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
TRACE_EVENT(svcrdma_qp_error,
TP_PROTO(
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index ed168b0e2c53..fec25b9cfbaf 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -91,7 +91,7 @@ DEFINE_EVENT(sched_wakeup_template, sched_waking,
/*
* Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
- * It it not always called from the waking context.
+ * It is not always called from the waking context.
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
TP_PROTO(struct task_struct *p),
@@ -634,6 +634,18 @@ DECLARE_TRACE(sched_overutilized_tp,
TP_PROTO(struct root_domain *rd, bool overutilized),
TP_ARGS(rd, overutilized));
+DECLARE_TRACE(sched_util_est_cfs_tp,
+ TP_PROTO(struct cfs_rq *cfs_rq),
+ TP_ARGS(cfs_rq));
+
+DECLARE_TRACE(sched_util_est_se_tp,
+ TP_PROTO(struct sched_entity *se),
+ TP_ARGS(se));
+
+DECLARE_TRACE(sched_update_nr_running_tp,
+ TP_PROTO(struct rq *rq, int change),
+ TP_ARGS(rq, change));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index f076c430d243..f3a4b4d60714 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -35,7 +35,7 @@ TRACE_EVENT(scmi_xfer_begin,
TRACE_EVENT(scmi_xfer_end,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
- u32 status),
+ int status),
TP_ARGS(transfer_id, msg_id, protocol_id, seq, status),
TP_STRUCT__entry(
@@ -43,7 +43,7 @@ TRACE_EVENT(scmi_xfer_end,
__field(u8, msg_id)
__field(u8, protocol_id)
__field(u16, seq)
- __field(u32, status)
+ __field(int, status)
),
TP_fast_assign(
@@ -54,7 +54,7 @@ TRACE_EVENT(scmi_xfer_end,
__entry->status = status;
),
- TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%u",
+ TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%d",
__entry->transfer_id, __entry->msg_id, __entry->protocol_id,
__entry->seq, __entry->status)
);
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 6a12935b8b14..65d7dfbbc9cd 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1250,15 +1250,34 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
DEFINE_SVCXDRBUF_EVENT(recvfrom);
DEFINE_SVCXDRBUF_EVENT(sendto);
+/*
+ * from include/linux/sunrpc/svc.h
+ */
+#define SVC_RQST_FLAG_LIST \
+ svc_rqst_flag(SECURE) \
+ svc_rqst_flag(LOCAL) \
+ svc_rqst_flag(USEDEFERRAL) \
+ svc_rqst_flag(DROPME) \
+ svc_rqst_flag(SPLICE_OK) \
+ svc_rqst_flag(VICTIM) \
+ svc_rqst_flag(BUSY) \
+ svc_rqst_flag(DATA) \
+ svc_rqst_flag_end(AUTHERR)
+
+#undef svc_rqst_flag
+#undef svc_rqst_flag_end
+#define svc_rqst_flag(x) TRACE_DEFINE_ENUM(RQ_##x);
+#define svc_rqst_flag_end(x) TRACE_DEFINE_ENUM(RQ_##x);
+
+SVC_RQST_FLAG_LIST
+
+#undef svc_rqst_flag
+#undef svc_rqst_flag_end
+#define svc_rqst_flag(x) { BIT(RQ_##x), #x },
+#define svc_rqst_flag_end(x) { BIT(RQ_##x), #x }
+
#define show_rqstp_flags(flags) \
- __print_flags(flags, "|", \
- { (1UL << RQ_SECURE), "RQ_SECURE"}, \
- { (1UL << RQ_LOCAL), "RQ_LOCAL"}, \
- { (1UL << RQ_USEDEFERRAL), "RQ_USEDEFERRAL"}, \
- { (1UL << RQ_DROPME), "RQ_DROPME"}, \
- { (1UL << RQ_SPLICE_OK), "RQ_SPLICE_OK"}, \
- { (1UL << RQ_VICTIM), "RQ_VICTIM"}, \
- { (1UL << RQ_BUSY), "RQ_BUSY"})
+ __print_flags(flags, "|", SVC_RQST_FLAG_LIST)
TRACE_EVENT(svc_recv,
TP_PROTO(struct svc_rqst *rqst, int len),
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index 5f300739240d..84841b3a7ffd 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -249,6 +249,37 @@ TRACE_EVENT(ufshcd_command,
)
);
+TRACE_EVENT(ufshcd_uic_command,
+ TP_PROTO(const char *dev_name, const char *str, u32 cmd,
+ u32 arg1, u32 arg2, u32 arg3),
+
+ TP_ARGS(dev_name, str, cmd, arg1, arg2, arg3),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name)
+ __string(str, str)
+ __field(u32, cmd)
+ __field(u32, arg1)
+ __field(u32, arg2)
+ __field(u32, arg3)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name);
+ __assign_str(str, str);
+ __entry->cmd = cmd;
+ __entry->arg1 = arg1;
+ __entry->arg2 = arg2;
+ __entry->arg3 = arg3;
+ ),
+
+ TP_printk(
+ "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
+ __get_str(str), __get_str(dev_name), __entry->cmd,
+ __entry->arg1, __entry->arg2, __entry->arg3
+ )
+);
+
TRACE_EVENT(ufshcd_upiu,
TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf),
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index b73d3e141323..cd24e8a59529 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -177,9 +177,9 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
TRACE_EVENT(xdp_cpumap_kthread,
TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
- int sched),
+ int sched, struct xdp_cpumap_stats *xdp_stats),
- TP_ARGS(map_id, processed, drops, sched),
+ TP_ARGS(map_id, processed, drops, sched, xdp_stats),
TP_STRUCT__entry(
__field(int, map_id)
@@ -188,6 +188,9 @@ TRACE_EVENT(xdp_cpumap_kthread,
__field(unsigned int, drops)
__field(unsigned int, processed)
__field(int, sched)
+ __field(unsigned int, xdp_pass)
+ __field(unsigned int, xdp_drop)
+ __field(unsigned int, xdp_redirect)
),
TP_fast_assign(
@@ -197,16 +200,21 @@ TRACE_EVENT(xdp_cpumap_kthread,
__entry->drops = drops;
__entry->processed = processed;
__entry->sched = sched;
+ __entry->xdp_pass = xdp_stats->pass;
+ __entry->xdp_drop = xdp_stats->drop;
+ __entry->xdp_redirect = xdp_stats->redirect;
),
TP_printk("kthread"
" cpu=%d map_id=%d action=%s"
" processed=%u drops=%u"
- " sched=%d",
+ " sched=%d"
+ " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
__entry->cpu, __entry->map_id,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->processed, __entry->drops,
- __entry->sched)
+ __entry->sched,
+ __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
);
TRACE_EVENT(xdp_cpumap_enqueue,
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 502c7be50b8d..1bc3e7bba9a4 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -210,8 +210,7 @@ TRACE_MAKE_SYSTEM_STR();
#define DEFINE_EVENT(template, name, proto, args)
#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)
@@ -443,12 +442,8 @@ static struct trace_event_fields trace_event_fields_##call[] = { \
tstruct \
{} };
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -523,13 +518,6 @@ static inline notrace int trace_event_get_offsets_##call( \
return __data_size; \
}
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
@@ -721,9 +709,6 @@ static inline void ftrace_test_probe_##call(void) \
check_trace_callback_type_##call(trace_event_raw_event_##template); \
}
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
-
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __entry
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index f4a01305d9a6..995b36c2ea7d 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -606,9 +606,9 @@ __SYSCALL(__NR_sendto, sys_sendto)
#define __NR_recvfrom 207
__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
#define __NR_setsockopt 208
-__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
+__SC_COMP(__NR_setsockopt, sys_setsockopt, sys_setsockopt)
#define __NR_getsockopt 209
-__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
+__SC_COMP(__NR_getsockopt, sys_getsockopt, sys_getsockopt)
#define __NR_shutdown 210
__SYSCALL(__NR_shutdown, sys_shutdown)
#define __NR_sendmsg 211
@@ -850,6 +850,8 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open)
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+#define __NR_close_range 436
+__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4e873dcbe68f..3218576e109d 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -502,15 +502,15 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
-/* Use NC MTYPE instead of default MTYPE */
+/* Use Non Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_NC (1 << 5)
-/* Use WC MTYPE instead of default MTYPE */
+/* Use Write Combine MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_WC (2 << 5)
-/* Use CC MTYPE instead of default MTYPE */
+/* Use Cache Coherent MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_CC (3 << 5)
-/* Use UC MTYPE instead of default MTYPE */
+/* Use UnCached MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
-/* Use RW MTYPE instead of default MTYPE */
+/* Use Read Write MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_RW (5 << 5)
struct drm_amdgpu_gem_va {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 490143500a50..82f327801267 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -236,6 +236,12 @@ extern "C" {
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
+/*
+ * 2 plane YCbCr
+ * index 0 = Y plane, [39:0] Y3:Y2:Y1:Y0 little endian
+ * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
+ */
+#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
@@ -265,6 +271,22 @@ extern "C" {
*/
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
+/* 3 plane non-subsampled (444) YCbCr
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cb plane, [15:0] Cb:x [10:6] little endian
+ * index 2: Cr plane, [15:0] Cr:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q410 fourcc_code('Q', '4', '1', '0')
+
+/* 3 plane non-subsampled (444) YCrCb
+ * 16 bits per component, but only 10 bits are used and 6 bits are padded
+ * index 0: Y plane, [15:0] Y:x [10:6] little endian
+ * index 1: Cr plane, [15:0] Cr:x [10:6] little endian
+ * index 2: Cb plane, [15:0] Cb:x [10:6] little endian
+ */
+#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
+
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
@@ -309,6 +331,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
+#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
/* add more to the end as needed */
@@ -323,8 +346,33 @@ extern "C" {
* When adding a new token please document the layout with a code comment,
* similar to the fourcc codes above. drm_fourcc.h is considered the
* authoritative source for all of these.
+ *
+ * Generic modifier names:
+ *
+ * DRM_FORMAT_MOD_GENERIC_* definitions are used to provide vendor-neutral names
+ * for layouts which are common across multiple vendors. To preserve
+ * compatibility, in cases where a vendor-specific definition already exists and
+ * a generic name for it is desired, the common name is a purely symbolic alias
+ * and must use the same numerical value as the original definition.
+ *
+ * Note that generic names should only be used for modifiers which describe
+ * generic layouts (such as pixel re-ordering), which may have
+ * independently-developed support across multiple vendors.
+ *
+ * In future cases where a generic layout is identified before merging with a
+ * vendor-specific modifier, a new 'GENERIC' vendor or modifier using vendor
+ * 'NONE' could be considered. This should only be for obvious, exceptional
+ * cases to avoid polluting the 'GENERIC' namespace with modifiers which only
+ * apply to a single vendor.
+ *
+ * Generic names should not be used for cases where multiple hardware vendors
+ * have implementations of the same standardised compression scheme (such as
+ * AFBC). In those cases, all implementations should use the same format
+ * modifier(s), reflecting the vendor of the standard.
*/
+#define DRM_FORMAT_MOD_GENERIC_16_16_TILE DRM_FORMAT_MOD_SAMSUNG_16_16_TILE
+
/*
* Invalid Modifier
*
@@ -892,6 +940,18 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
#define AFBC_FORMAT_MOD_BCH (1ULL << 11)
+/* AFBC uncompressed storage mode
+ *
+ * Indicates that the buffer is using AFBC uncompressed storage mode.
+ * In this mode all superblock payloads in the buffer use the uncompressed
+ * storage mode, which is usually only used for data which cannot be compressed.
+ * The buffer layout is the same as for AFBC buffers without USM set; this only
+ * affects the storage mode of the individual superblocks. Note that even a
+ * buffer without USM set may use uncompressed storage mode for some or all
+ * superblocks; USM just guarantees it for all.
+ */
+#define AFBC_FORMAT_MOD_USM (1ULL << 12)
+
/*
* Arm 16x16 Block U-Interleaved modifier
*
@@ -916,6 +976,86 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
#define DRM_FORMAT_MOD_ALLWINNER_TILED fourcc_mod_code(ALLWINNER, 1)
+/*
+ * Amlogic Video Framebuffer Compression modifiers
+ *
+ * Amlogic uses a proprietary lossless image compression protocol and format
+ * for their hardware video codec accelerators, either video decoders or
+ * video input encoders.
+ *
+ * It considerably reduces memory bandwidth while writing and reading
+ * frames in memory.
+ *
+ * The underlying storage is considered to be 3 components, 8-bit or 10-bit
+ * per component YCbCr 420, single plane:
+ * - DRM_FORMAT_YUV420_8BIT
+ * - DRM_FORMAT_YUV420_10BIT
+ *
+ * The first 8 bits of the mode define the layout, then the following 8 bits
+ * define the options changing the layout.
+ *
+ * Not all combinations are valid, and different SoCs may support different
+ * combinations of layout and options.
+ */
+#define __fourcc_mod_amlogic_layout_mask 0xf
+#define __fourcc_mod_amlogic_options_shift 8
+#define __fourcc_mod_amlogic_options_mask 0xf
+
+#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
+ fourcc_mod_code(AMLOGIC, \
+ ((__layout) & __fourcc_mod_amlogic_layout_mask) | \
+ (((__options) & __fourcc_mod_amlogic_options_mask) \
+ << __fourcc_mod_amlogic_options_shift))
+
+/* Amlogic FBC Layouts */
+
+/*
+ * Amlogic FBC Basic Layout
+ *
+ * The basic layout is composed of:
+ * - body content organized in 64x32 superblocks with 4096 bytes per
+ * superblock in default mode.
+ * - a 32-byte header block per 128x64 block
+ *
+ * This layout is transferable between Amlogic SoCs supporting this modifier.
+ */
+#define AMLOGIC_FBC_LAYOUT_BASIC (1ULL)
+
+/*
+ * Amlogic FBC Scatter Memory layout
+ *
+ * Indicates the header contains IOMMU references to the compressed
+ * frames content to optimize memory access and layout.
+ *
+ * In this mode, only the header memory address is needed, thus the
+ * content memory organization is tied to the current producer
+ * execution and cannot be saved/dumped nor transferred between
+ * Amlogic SoCs supporting this modifier.
+ *
+ * Due to the nature of the layout, these buffers are not expected to
+ * be accessible by the user-space clients, but only accessible by the
+ * hardware producers and consumers.
+ *
+ * The user-space clients should expect a failure while trying to mmap
+ * the DMA-BUF handle returned by the producer.
+ */
+#define AMLOGIC_FBC_LAYOUT_SCATTER (2ULL)
+
+/* Amlogic FBC Layout Options Bit Mask */
+
+/*
+ * Amlogic FBC Memory Saving mode
+ *
+ * Indicates the storage is packed when the pixel size is a multiple of word
+ * boundaries, i.e. 8-bit data should be stored in this mode to save allocation
+ * memory.
+ *
+ * This mode reduces body layout to 3072 bytes per 64x32 superblock with
+ * the basic layout and 3200 bytes per 64x32 superblock combined with
+ * the scatter layout.
+ */
+#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 735c8cfdaaa1..deea447e5f22 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -497,7 +497,7 @@ struct drm_mode_fb_cmd2 {
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
- * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
+ * e.g. NV12 (https://fourcc.org/yuv.php#NV12) is described as:
*
* YUV 4:2:0 image with a plane of 8 bit Y samples
* followed by an interleaved U/V plane containing
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 14b67cd6b54b..00546062e023 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -55,7 +55,7 @@ extern "C" {
* cause the related events to not be seen.
*
* I915_RESET_UEVENT - Event is generated just before an attempt to reset the
- * the GPU. The value supplied with the event is always 1. NOTE: Disable
+ * GPU. The value supplied with the event is always 1. NOTE: Disable
* reset via module parameter will cause this event to not be seen.
*/
#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
@@ -1934,7 +1934,7 @@ enum drm_i915_perf_property_id {
/**
* The value specifies which set of OA unit metrics should be
- * be configured, defining the contents of any OA unit reports.
+ * configured, defining the contents of any OA unit reports.
*
* This property is available in perf revision 1.
*/
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 19806eb3a8e8..a6c1f3eb2623 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -252,8 +252,8 @@ struct drm_msm_gem_submit {
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
__u32 queueid; /* in, submitqueue id */
- __u64 in_syncobjs; /* in, ptr to to array of drm_msm_gem_submit_syncobj */
- __u64 out_syncobjs; /* in, ptr to to array of drm_msm_gem_submit_syncobj */
+ __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
__u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */
__u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
__u32 syncobj_stride; /* in, stride of syncobj arrays. */
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 2832134e5397..f1ce2c4c077e 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -404,7 +404,7 @@ enum binder_driver_return_protocol {
BR_FAILED_REPLY = _IO('r', 17),
/*
- * The the last transaction (either a bcTRANSACTION or
+ * The last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
*/
};
diff --git a/include/uapi/linux/atmioc.h b/include/uapi/linux/atmioc.h
index cd7655e40c77..a9030bcc8d56 100644
--- a/include/uapi/linux/atmioc.h
+++ b/include/uapi/linux/atmioc.h
@@ -5,7 +5,7 @@
/*
- * See http://icawww1.epfl.ch/linux-atm/magic.html for the complete list of
+ * See https://icawww1.epfl.ch/linux-atm/magic.html for the complete list of
* "magic" ioctl numbers.
*/
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 9b6a973f4cc3..cd2d8279a5e4 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -333,14 +333,15 @@ enum {
};
/* Status symbols */
- /* Mask values */
-#define AUDIT_STATUS_ENABLED 0x0001
-#define AUDIT_STATUS_FAILURE 0x0002
-#define AUDIT_STATUS_PID 0x0004
+ /* Mask values */
+#define AUDIT_STATUS_ENABLED 0x0001
+#define AUDIT_STATUS_FAILURE 0x0002
+#define AUDIT_STATUS_PID 0x0004
#define AUDIT_STATUS_RATE_LIMIT 0x0008
-#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010
-#define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020
-#define AUDIT_STATUS_LOST 0x0040
+#define AUDIT_STATUS_BACKLOG_LIMIT 0x0010
+#define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020
+#define AUDIT_STATUS_LOST 0x0040
+#define AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL 0x0080
#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001
#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002
@@ -467,6 +468,9 @@ struct audit_status {
__u32 feature_bitmap; /* bitmap of kernel audit features */
};
__u32 backlog_wait_time;/* message queue wait timeout */
+ __u32 backlog_wait_time_actual;/* time spent waiting while
+ * message limit exceeded
+ */
};
struct audit_features {
diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h
index 374742651c30..62e625356dc8 100644
--- a/include/uapi/linux/auto_dev-ioctl.h
+++ b/include/uapi/linux/auto_dev-ioctl.h
@@ -82,7 +82,7 @@ struct args_ismountpoint {
/*
* All the ioctls use this structure.
* When sending a path size must account for the total length
- * of the chunk of memory otherwise is is the size of the
+ * of the chunk of memory otherwise it is the size of the
* structure.
*/
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index 0ae34c85ef9e..9c8604c5b5f6 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -72,8 +72,8 @@ enum batadv_subtype {
/**
* enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
- * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was
- * previously received from someone else than the best neighbor.
+ * @BATADV_NOT_BEST_NEXT_HOP: flag is set when the ogm packet is forwarded and
+ * was previously received from someone other than the best neighbor.
* @BATADV_PRIMARIES_FIRST_HOP: flag unused.
* @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a
* one hop neighbor on the interface where it was originally received.
@@ -195,8 +195,8 @@ struct batadv_bla_claim_dst {
/**
* struct batadv_ogm_packet - ogm (routing protocol) packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @flags: contains routing relevant flags - see enum batadv_iv_flags
* @seqno: sequence identification
* @orig: address of the source node
@@ -247,7 +247,7 @@ struct batadv_ogm2_packet {
/**
* struct batadv_elp_packet - elp (neighbor discovery) packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
* @orig: originator mac address
* @seqno: sequence number
* @elp_interval: currently used ELP sending interval in ms
@@ -265,15 +265,15 @@ struct batadv_elp_packet {
/**
* struct batadv_icmp_header - common members among all the ICMP packets
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
* @uid: local ICMP socket identifier
* @align: not used - useful for alignment purposes only
*
- * This structure is used for ICMP packets parsing only and it is never sent
+ * This structure is used for ICMP packet parsing only and it is never sent
* over the wire. The alignment field at the end is there to ensure that
* members are padded the same way as they are in real packets.
*/
@@ -291,8 +291,8 @@ struct batadv_icmp_header {
/**
* struct batadv_icmp_packet - ICMP packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
@@ -315,8 +315,8 @@ struct batadv_icmp_packet {
/**
* struct batadv_icmp_tp_packet - ICMP TP Meter packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
@@ -358,8 +358,8 @@ enum batadv_icmp_tp_subtype {
/**
* struct batadv_icmp_packet_rr - ICMP RouteRecord packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
@@ -397,8 +397,8 @@ struct batadv_icmp_packet_rr {
/**
* struct batadv_unicast_packet - unicast packet for network payload
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @ttvn: translation table version number
* @dest: originator destination of the unicast packet
*/
@@ -433,8 +433,8 @@ struct batadv_unicast_4addr_packet {
/**
* struct batadv_frag_packet - fragmented packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @dest: final destination used when routing fragments
* @orig: originator of the fragment used when merging the packet
* @no: fragment number within this sequence
@@ -467,8 +467,8 @@ struct batadv_frag_packet {
/**
* struct batadv_bcast_packet - broadcast packet for network payload
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: reserved byte for alignment
* @seqno: sequence identification
* @orig: originator of the broadcast packet
@@ -488,10 +488,10 @@ struct batadv_bcast_packet {
/**
* struct batadv_coded_packet - network coded packet
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @first_source: original source of first included packet
- * @first_orig_dest: original destinal of first included packet
+ * @first_orig_dest: original destination of first included packet
* @first_crc: checksum of first included packet
* @first_ttvn: tt-version number of first included packet
* @second_ttl: ttl of second packet
@@ -523,8 +523,8 @@ struct batadv_coded_packet {
/**
* struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
* @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: reserved field (for packet alignment)
* @src: address of the source
* @dst: address of the destination
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 617c180ff0c8..bb0ae945b36a 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -69,7 +69,7 @@ enum batadv_tt_client_flags {
/**
* @BATADV_TT_CLIENT_TEMP: this global client has been detected to be
- * part of the network but no nnode has already announced it
+ * part of the network but no node has already announced it
*/
BATADV_TT_CLIENT_TEMP = (1 << 11),
};
@@ -131,7 +131,7 @@ enum batadv_gw_modes {
/** @BATADV_GW_MODE_CLIENT: send DHCP requests to gw servers */
BATADV_GW_MODE_CLIENT,
- /** @BATADV_GW_MODE_SERVER: announce itself as gatway server */
+ /** @BATADV_GW_MODE_SERVER: announce itself as gateway server */
BATADV_GW_MODE_SERVER,
};
@@ -427,7 +427,8 @@ enum batadv_nl_attrs {
/**
* @BATADV_ATTR_HOP_PENALTY: defines the penalty which will be applied
- * to an originator message's tq-field on every hop.
+ * to an originator message's tq-field on every hop and/or per
+ * hard interface
*/
BATADV_ATTR_HOP_PENALTY,
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 9a1965c6c3d0..52e8bcb33981 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -141,11 +141,13 @@ static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
* Version 3: Cache device with new UUID format
* Version 4: Backing device with data offset
*/
-#define BCACHE_SB_VERSION_CDEV 0
-#define BCACHE_SB_VERSION_BDEV 1
-#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
-#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
-#define BCACHE_SB_MAX_VERSION 4
+#define BCACHE_SB_VERSION_CDEV 0
+#define BCACHE_SB_VERSION_BDEV 1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
+#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES 5
+#define BCACHE_SB_VERSION_BDEV_WITH_FEATURES 6
+#define BCACHE_SB_MAX_VERSION 6
#define SB_SECTOR 8
#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
@@ -173,7 +175,12 @@ struct cache_sb_disk {
__le64 flags;
__le64 seq;
- __le64 pad[8];
+
+ __le64 feature_compat;
+ __le64 feature_incompat;
+ __le64 feature_ro_compat;
+
+ __le64 pad[5];
union {
struct {
@@ -206,10 +213,16 @@ struct cache_sb_disk {
__le16 keys;
};
__le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+ __le16 bucket_size_hi;
};
+/*
+ * This is the in-memory bcache super block.
+ * NOTE: cache_sb does NOT map exactly to cache_sb_disk; the member
+ * size, ordering and even the whole struct size may differ
+ * from cache_sb_disk.
+ */
struct cache_sb {
- __u64 csum;
__u64 offset; /* sector where this sb was written */
__u64 version;
@@ -224,7 +237,10 @@ struct cache_sb {
__u64 flags;
__u64 seq;
- __u64 pad[8];
+
+ __u64 feature_compat;
+ __u64 feature_incompat;
+ __u64 feature_ro_compat;
union {
struct {
@@ -232,10 +248,9 @@ struct cache_sb {
__u64 nbuckets; /* device size */
__u16 block_size; /* sectors */
- __u16 bucket_size; /* sectors */
-
__u16 nr_in_set;
__u16 nr_this_dev;
+ __u32 bucket_size; /* sectors */
};
struct {
/* Backing devices */
@@ -262,7 +277,8 @@ struct cache_sb {
static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
{
return sb->version == BCACHE_SB_VERSION_BDEV
- || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
}
BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 0cdef67135f0..42c3366cc25f 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -74,6 +74,15 @@ enum blk_zone_cond {
};
/**
+ * enum blk_zone_report_flags - Feature flags of reported zone descriptors.
+ *
+ * @BLK_ZONE_REP_CAPACITY: Zone descriptor has capacity field.
+ */
+enum blk_zone_report_flags {
+ BLK_ZONE_REP_CAPACITY = (1 << 0),
+};
+
+/**
* struct blk_zone - Zone descriptor for BLKREPORTZONE ioctl.
*
* @start: Zone start in 512 B sector units
@@ -99,7 +108,9 @@ struct blk_zone {
__u8 cond; /* Zone condition */
__u8 non_seq; /* Non-sequential write resources active */
__u8 reset; /* Reset write pointer recommended */
- __u8 reserved[36];
+ __u8 resv[4];
+ __u64 capacity; /* Zone capacity in number of sectors */
+ __u8 reserved[24];
};
/**
@@ -115,7 +126,7 @@ struct blk_zone {
struct blk_zone_report {
__u64 sector;
__u32 nr_zones;
- __u8 reserved[4];
+ __u32 flags;
struct blk_zone zones[0];
};
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8bd33050b7bb..b6238b2209b7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key {
__u32 attach_type; /* program attach type */
};
+union bpf_iter_link_info {
+ struct {
+ __u32 map_fd;
+ } map;
+};
+
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
BPF_MAP_CREATE,
@@ -117,6 +123,7 @@ enum bpf_cmd {
BPF_LINK_GET_NEXT_ID,
BPF_ENABLE_STATS,
BPF_ITER_CREATE,
+ BPF_LINK_DETACH,
};
enum bpf_map_type {
@@ -189,6 +196,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_STRUCT_OPS,
BPF_PROG_TYPE_EXT,
BPF_PROG_TYPE_LSM,
+ BPF_PROG_TYPE_SK_LOOKUP,
};
enum bpf_attach_type {
@@ -226,6 +234,10 @@ enum bpf_attach_type {
BPF_CGROUP_INET4_GETSOCKNAME,
BPF_CGROUP_INET6_GETSOCKNAME,
BPF_XDP_DEVMAP,
+ BPF_CGROUP_INET_SOCK_RELEASE,
+ BPF_XDP_CPUMAP,
+ BPF_SK_LOOKUP,
+ BPF_XDP,
__MAX_BPF_ATTACH_TYPE
};
@@ -238,6 +250,7 @@ enum bpf_link_type {
BPF_LINK_TYPE_CGROUP = 3,
BPF_LINK_TYPE_ITER = 4,
BPF_LINK_TYPE_NETNS = 5,
+ BPF_LINK_TYPE_XDP = 6,
MAX_BPF_LINK_TYPE,
};
@@ -603,9 +616,14 @@ union bpf_attr {
struct { /* struct used by BPF_LINK_CREATE command */
__u32 prog_fd; /* eBPF program to attach */
- __u32 target_fd; /* object to attach to */
+ union {
+ __u32 target_fd; /* object to attach to */
+ __u32 target_ifindex; /* target ifindex */
+ };
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
+ __aligned_u64 iter_info; /* extra bpf_iter_link_info */
+ __u32 iter_info_len; /* iter_info length */
} link_create;
struct { /* struct used by BPF_LINK_UPDATE command */
@@ -618,6 +636,10 @@ union bpf_attr {
__u32 old_prog_fd;
} link_update;
+ struct {
+ __u32 link_fd;
+ } link_detach;
+
struct { /* struct used by BPF_ENABLE_STATS command */
__u32 type;
} enable_stats;
@@ -653,7 +675,7 @@ union bpf_attr {
* Map value associated to *key*, or **NULL** if no entry was
* found.
*
- * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
+ * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
* Description
* Add or update the value of the entry associated to *key* in
* *map* with *value*. *flags* is one of:
@@ -671,13 +693,13 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
+ * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
* Description
* Delete entry with *key* from *map*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
* Description
* For tracing programs, safely attempt to read *size* bytes from
* kernel space address *unsafe_ptr* and store the data in *dst*.
@@ -695,7 +717,7 @@ union bpf_attr {
* Return
* Current *ktime*.
*
- * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
+ * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
* Description
* This helper is a "printk()-like" facility for debugging. It
* prints a message defined by format *fmt* (of size *fmt_size*)
@@ -745,7 +767,7 @@ union bpf_attr {
*
* Also, note that **bpf_trace_printk**\ () is slow, and should
* only be used for debugging purposes. For this reason, a notice
- * bloc (spanning several lines) is printed to kernel logs and
+ * block (spanning several lines) is printed to kernel logs and
* states that the helper should not be used "for production use"
* the first time this helper is used (or more precisely, when
* **trace_printk**\ () buffers are allocated). For passing values
@@ -775,7 +797,7 @@ union bpf_attr {
* Return
* The SMP id of the processor running the program.
*
- * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
+ * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
* Description
* Store *len* bytes from address *from* into the packet
* associated to *skb*, at *offset*. *flags* are a combination of
@@ -792,7 +814,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
+ * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
* Description
* Recompute the layer 3 (e.g. IP) checksum for the packet
* associated to *skb*. Computation is incremental, so the helper
@@ -817,7 +839,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
+ * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
* Description
* Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
* packet associated to *skb*. Computation is incremental, so the
@@ -849,7 +871,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
+ * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
* Description
* This special helper is used to trigger a "tail call", or in
* other words, to jump into another eBPF program. The same stack
@@ -880,7 +902,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
+ * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
* Description
* Clone and redirect the packet associated to *skb* to another
* net device of index *ifindex*. Both ingress and egress
@@ -916,7 +938,7 @@ union bpf_attr {
* A 64-bit integer containing the current GID and UID, and
* created as such: *current_gid* **<< 32 \|** *current_uid*.
*
- * int bpf_get_current_comm(void *buf, u32 size_of_buf)
+ * long bpf_get_current_comm(void *buf, u32 size_of_buf)
* Description
* Copy the **comm** attribute of the current task into *buf* of
* *size_of_buf*. The **comm** attribute contains the name of
@@ -953,7 +975,7 @@ union bpf_attr {
* Return
* The classid, or 0 for the default unconfigured classid.
*
- * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+ * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
* Description
* Push a *vlan_tci* (VLAN tag control information) of protocol
* *vlan_proto* to the packet associated to *skb*, then update
@@ -969,7 +991,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_vlan_pop(struct sk_buff *skb)
+ * long bpf_skb_vlan_pop(struct sk_buff *skb)
* Description
* Pop a VLAN header from the packet associated to *skb*.
*
@@ -981,7 +1003,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
* Description
* Get tunnel metadata. This helper takes a pointer *key* to an
* empty **struct bpf_tunnel_key** of **size**, that will be
@@ -1011,14 +1033,14 @@ union bpf_attr {
*
* int ret;
* struct bpf_tunnel_key key = {};
- *
+ *
* ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
* if (ret < 0)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* if (key.remote_ipv4 != 0x0a000001)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* return TC_ACT_OK; // accept packet
*
* This interface can also be used with all encapsulation devices
@@ -1032,7 +1054,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
* Description
* Populate tunnel metadata for packet associated to *skb.* The
* tunnel metadata is set to the contents of *key*, of *size*. The
@@ -1098,7 +1120,7 @@ union bpf_attr {
* The value of the perf event counter read from the map, or a
* negative error code in case of failure.
*
- * int bpf_redirect(u32 ifindex, u64 flags)
+ * long bpf_redirect(u32 ifindex, u64 flags)
* Description
* Redirect the packet to another net device of index *ifindex*.
* This helper is somewhat similar to **bpf_clone_redirect**\
@@ -1125,7 +1147,7 @@ union bpf_attr {
* Description
* Retrieve the realm or the route, that is to say the
* **tclassid** field of the destination for the *skb*. The
- * indentifier retrieved is a user-provided tag, similar to the
+ * identifier retrieved is a user-provided tag, similar to the
* one used with the net_cls cgroup (see description for
* **bpf_get_cgroup_classid**\ () helper), but here this tag is
* held by a route (a destination entry), not by a task.
@@ -1145,7 +1167,7 @@ union bpf_attr {
* The realm of the route for the packet associated to *skb*, or 0
* if none was found.
*
- * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -1190,7 +1212,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
+ * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
* Description
* This helper was provided as an easy way to load data from a
* packet. It can be used to load *len* bytes from *offset* from
@@ -1207,7 +1229,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
+ * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
* Description
* Walk a user or a kernel stack and return its id. To achieve
* this, the helper needs *ctx*, which is a pointer to the context
@@ -1276,7 +1298,7 @@ union bpf_attr {
* The checksum result, or a negative error code in case of
* failure.
*
- * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Retrieve tunnel options metadata for the packet associated to
* *skb*, and store the raw tunnel option data to the buffer *opt*
@@ -1294,7 +1316,7 @@ union bpf_attr {
* Return
* The size of the option data retrieved.
*
- * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Set tunnel options metadata for the packet associated to *skb*
* to the option data contained in the raw buffer *opt* of *size*.
@@ -1304,7 +1326,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
+ * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
* Description
* Change the protocol of the *skb* to *proto*. Currently
* supported are transition from IPv4 to IPv6, and from IPv6 to
@@ -1331,7 +1353,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
+ * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
* Description
* Change the packet type for the packet associated to *skb*. This
* comes down to setting *skb*\ **->pkt_type** to *type*, except
@@ -1358,7 +1380,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
+ * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
* Description
* Check whether *skb* is a descendant of the cgroup2 held by
* *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
@@ -1389,7 +1411,7 @@ union bpf_attr {
* Return
* A pointer to the current task struct.
*
- * int bpf_probe_write_user(void *dst, const void *src, u32 len)
+ * long bpf_probe_write_user(void *dst, const void *src, u32 len)
* Description
* Attempt in a safe way to write *len* bytes from the buffer
* *src* to *dst* in memory. It only works for threads that are in
@@ -1408,7 +1430,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
+ * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
* Description
 * 		Check whether the probe is being run in the context of a given
* subset of the cgroup2 hierarchy. The cgroup2 to test is held by
@@ -1420,7 +1442,7 @@ union bpf_attr {
* * 1, if the *skb* task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
- * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
* Description
* Resize (trim or grow) the packet associated to *skb* to the
* new *len*. The *flags* are reserved for future usage, and must
@@ -1444,7 +1466,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
+ * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
* Description
* Pull in non-linear data in case the *skb* is non-linear and not
* all of *len* are part of the linear section. Make *len* bytes
@@ -1500,7 +1522,7 @@ union bpf_attr {
* recalculation the next time the kernel tries to access this
* hash or when the **bpf_get_hash_recalc**\ () helper is called.
*
- * int bpf_get_numa_node_id(void)
+ * long bpf_get_numa_node_id(void)
* Description
* Return the id of the current NUMA node. The primary use case
* for this helper is the selection of sockets for the local NUMA
@@ -1511,7 +1533,7 @@ union bpf_attr {
* Return
* The id of current NUMA node.
*
- * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
* Description
* Grows headroom of packet associated to *skb* and adjusts the
* offset of the MAC header accordingly, adding *len* bytes of
@@ -1532,7 +1554,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
* Description
* Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
* it is possible to use a negative value for *delta*. This helper
@@ -1547,7 +1569,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Copy a NUL terminated string from an unsafe kernel address
* *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
@@ -1595,14 +1617,14 @@ union bpf_attr {
* is returned (note that **overflowuid** might also be the actual
* UID value for the socket).
*
- * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
+ * long bpf_set_hash(struct sk_buff *skb, u32 hash)
* Description
* Set the full hash for *skb* (set the field *skb*\ **->hash**)
* to value *hash*.
* Return
* 0
*
- * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **setsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1621,16 +1643,19 @@ union bpf_attr {
*
* * **SOL_SOCKET**, which supports the following *optname*\ s:
* **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
- * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
+ * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
+ * **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
* * **IPPROTO_TCP**, which supports the following *optname*\ s:
* **TCP_CONGESTION**, **TCP_BPF_IW**,
- * **TCP_BPF_SNDCWND_CLAMP**.
+ * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
+ * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
+ * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
* * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
* Return
* 0 on success, or a negative error in case of failure.
*
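As an illustration (not taken from the patch), a sock_ops program could use one of the newly listed TCP optnames. The sketch assumes libbpf's bpf_helpers.h and clang's BPF target, and defines the option constants locally in case the usual userspace headers are not pulled in:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    #ifndef SOL_TCP
    #define SOL_TCP 6            /* same value as IPPROTO_TCP */
    #endif
    #ifndef TCP_KEEPIDLE
    #define TCP_KEEPIDLE 4       /* from linux/tcp.h */
    #endif

    SEC("sockops")
    int set_keepalive_idle(struct bpf_sock_ops *skops)
    {
            int idle = 60;  /* seconds before the first keepalive probe */

            /* Only act once the connection is established. */
            if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
                skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
                    bpf_setsockopt(skops, SOL_TCP, TCP_KEEPIDLE,
                                   &idle, sizeof(idle));
            return 1;
    }

    char _license[] SEC("license") = "GPL";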
- * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
+ * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
* Description
* Grow or shrink the room for data in the packet associated to
* *skb* by *len_diff*, and according to the selected *mode*.
@@ -1676,7 +1701,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the endpoint referenced by *map* at
* index *key*. Depending on its type, this *map* can contain
@@ -1697,7 +1722,7 @@ union bpf_attr {
* **XDP_REDIRECT** on success, or the value of the two lower bits
* of the *flags* argument on error.
*
- * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@@ -1708,7 +1733,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a *map* referencing sockets. The
* *skops* is used as a new value for the entry associated to
@@ -1727,7 +1752,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
* Description
* Adjust the address pointed by *xdp_md*\ **->data_meta** by
* *delta* (which can be positive or negative). Note that this
@@ -1756,7 +1781,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
* Read the value of a perf event counter, and store it into *buf*
* of size *buf_size*. This helper relies on a *map* of type
@@ -1806,7 +1831,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
 * 		For an eBPF program attached to a perf event, retrieve the
* value of the event counter associated to *ctx* and store it in
@@ -1817,7 +1842,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **getsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1842,7 +1867,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_override_return(struct pt_regs *regs, u64 rc)
+ * long bpf_override_return(struct pt_regs *regs, u64 rc)
* Description
* Used for error injection, this helper uses kprobes to override
* the return value of the probed function, and to set it to *rc*.
@@ -1867,7 +1892,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
+ * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
* Description
* Attempt to set the value of the **bpf_sock_ops_cb_flags** field
* for the full TCP socket associated to *bpf_sock_ops* to
@@ -1911,7 +1936,7 @@ union bpf_attr {
* be set is returned (which comes down to 0 if all bits were set
* as required).
*
- * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
* Description
* This helper is used in programs implementing policies at the
* socket level. If the message *msg* is allowed to pass (i.e. if
@@ -1925,7 +1950,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
* Description
* For socket policies, apply the verdict of the eBPF program to
* the next *bytes* (number of bytes) of message *msg*.
@@ -1959,7 +1984,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
* Description
* For socket policies, prevent the execution of the verdict eBPF
* program for message *msg* until *bytes* (byte number) have been
@@ -1977,7 +2002,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
+ * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
* Description
* For socket policies, pull in non-linear data from user space
* for *msg* and set pointers *msg*\ **->data** and *msg*\
@@ -2008,7 +2033,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
+ * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
* Description
* Bind the socket associated to *ctx* to the address pointed by
* *addr*, of length *addr_len*. This allows for making outgoing
@@ -2026,7 +2051,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
* Description
* Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
* possible to both shrink and grow the packet tail.
@@ -2040,7 +2065,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
+ * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
* Description
* Retrieve the XFRM state (IP transform framework, see also
* **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
@@ -2056,7 +2081,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
+ * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *ctx*, which is a pointer
@@ -2089,7 +2114,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
- * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -2111,7 +2136,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
+ * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
* Description
* Do FIB lookup in kernel tables using parameters in *params*.
* If lookup is successful and result shows packet is to be
@@ -2142,7 +2167,7 @@ union bpf_attr {
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
* packet is not forwarded or needs assist from full stack
*
- * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a sockhash *map* referencing sockets.
* The *skops* is used as a new value for the entry associated to
@@ -2161,7 +2186,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
* Description
* This helper is used in programs implementing policies at the
* socket level. If the message *msg* is allowed to pass (i.e. if
@@ -2175,7 +2200,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
* Description
* This helper is used in programs implementing policies at the
* skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
@@ -2189,7 +2214,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
+ * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
* Description
* Encapsulate the packet associated to *skb* within a Layer 3
* protocol header. This header is provided in the buffer at
@@ -2226,7 +2251,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
+ * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
* Description
* Store *len* bytes from address *from* into the packet
* associated to *skb*, at *offset*. Only the flags, tag and TLVs
@@ -2241,7 +2266,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
+ * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
* Description
* Adjust the size allocated to TLVs in the outermost IPv6
* Segment Routing Header contained in the packet associated to
@@ -2257,7 +2282,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
+ * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
* Description
* Apply an IPv6 Segment Routing action of type *action* to the
* packet associated to *skb*. Each action takes a parameter
@@ -2286,7 +2311,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_repeat(void *ctx)
+ * long bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded repeat key message. This delays
@@ -2305,7 +2330,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded key press with *scancode*,
@@ -2370,7 +2395,7 @@ union bpf_attr {
* Return
* A pointer to the local storage area.
*
- * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
* Description
* Select a **SO_REUSEPORT** socket from a
* **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
@@ -2415,7 +2440,7 @@ union bpf_attr {
* Look for an IPv6 socket.
*
* If the *netns* is a negative signed 32-bit integer, then the
- * socket lookup table in the netns associated with the *ctx* will
+ * socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
@@ -2452,7 +2477,7 @@ union bpf_attr {
* Look for an IPv6 socket.
*
* If the *netns* is a negative signed 32-bit integer, then the
- * socket lookup table in the netns associated with the *ctx* will
+ * socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
@@ -2471,7 +2496,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * int bpf_sk_release(struct bpf_sock *sock)
+ * long bpf_sk_release(struct bpf_sock *sock)
* Description
* Release the reference held by *sock*. *sock* must be a
* non-**NULL** pointer that was returned from
@@ -2479,7 +2504,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
* Description
* Push an element *value* in *map*. *flags* is one of:
*
@@ -2489,19 +2514,19 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * long bpf_map_pop_elem(struct bpf_map *map, void *value)
* Description
* Pop an element from *map*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * long bpf_map_peek_elem(struct bpf_map *map, void *value)
* Description
* Get an element from *map* without removing it.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* For socket policies, insert *len* bytes into *msg* at offset
* *start*.
@@ -2517,7 +2542,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* Will remove *len* bytes from a *msg* starting at byte *start*.
* This may result in **ENOMEM** errors under certain situations if
@@ -2529,7 +2554,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+ * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded pointer movement.
@@ -2543,7 +2568,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * long bpf_spin_lock(struct bpf_spin_lock *lock)
* Description
* Acquire a spinlock represented by the pointer *lock*, which is
* stored as part of a value of a map. Taking the lock allows to
@@ -2591,7 +2616,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * long bpf_spin_unlock(struct bpf_spin_lock *lock)
* Description
* Release the *lock* previously locked by a call to
* **bpf_spin_lock**\ (\ *lock*\ ).
@@ -2614,7 +2639,7 @@ union bpf_attr {
* A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
- * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
+ * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
* Description
* Set ECN (Explicit Congestion Notification) field of IP header
* to **CE** (Congestion Encountered) if current value is **ECT**
@@ -2651,7 +2676,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in *sk*.
@@ -2666,7 +2691,7 @@ union bpf_attr {
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
* error otherwise.
*
- * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
+ * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
* Description
 * 		Get the name of the sysctl in /proc/sys/ and copy it into the
 * 		buffer *buf* of size *buf_len* provided by the program.
@@ -2682,7 +2707,7 @@ union bpf_attr {
* **-E2BIG** if the buffer wasn't big enough (*buf* will contain
* truncated name in this case).
*
- * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
* Description
* Get current value of sysctl as it is presented in /proc/sys
* (incl. newline, etc), and copy it as a string into provided
@@ -2701,7 +2726,7 @@ union bpf_attr {
* **-EINVAL** if current value was unavailable, e.g. because
* sysctl is uninitialized and read returns -EIO for it.
*
- * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
* Description
* Get new value being written by user space to sysctl (before
* the actual write happens) and copy it as a string into
@@ -2718,7 +2743,7 @@ union bpf_attr {
*
* **-EINVAL** if sysctl is being read.
*
- * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
+ * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
* Description
* Override new value being written by user space to sysctl with
* value provided by program in buffer *buf* of size *buf_len*.
@@ -2735,7 +2760,7 @@ union bpf_attr {
*
* **-EINVAL** if sysctl is being read.
*
- * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
+ * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
* Description
* Convert the initial part of the string from buffer *buf* of
* size *buf_len* to a long integer according to the given base
@@ -2759,7 +2784,7 @@ union bpf_attr {
*
* **-ERANGE** if resulting value was out of range.
*
- * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
+ * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
* Description
* Convert the initial part of the string from buffer *buf* of
* size *buf_len* to an unsigned long integer according to the
@@ -2810,7 +2835,7 @@ union bpf_attr {
* **NULL** if not found or there was an error in adding
* a new bpf-local-storage.
*
- * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
* Description
* Delete a bpf-local-storage from a *sk*.
* Return
@@ -2818,7 +2843,7 @@ union bpf_attr {
*
* **-ENOENT** if the bpf-local-storage cannot be found.
*
- * int bpf_send_signal(u32 sig)
+ * long bpf_send_signal(u32 sig)
* Description
* Send signal *sig* to the process of the current task.
* The signal may be delivered to any of this process's threads.
@@ -2859,7 +2884,7 @@ union bpf_attr {
*
* **-EPROTONOSUPPORT** IP packet version is not 4 or 6
*
- * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -2883,21 +2908,21 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Safely attempt to read *size* bytes from user space address
* *unsafe_ptr* and store the data in *dst*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Safely attempt to read *size* bytes from kernel space address
* *unsafe_ptr* and store the data in *dst*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Copy a NUL terminated string from an unsafe user address
* *unsafe_ptr* to *dst*. The *size* should include the
@@ -2941,7 +2966,7 @@ union bpf_attr {
* including the trailing NUL character. On error, a negative
* value.
*
- * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
* to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
@@ -2949,14 +2974,14 @@ union bpf_attr {
* On success, the strictly positive length of the string, including
* the trailing NUL character. On error, a negative value.
*
- * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
+ * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
* Description
* Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
* *rcv_nxt* is the ack_seq to be sent out.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_send_signal_thread(u32 sig)
+ * long bpf_send_signal_thread(u32 sig)
* Description
* Send signal *sig* to the thread corresponding to the current task.
* Return
@@ -2976,7 +3001,7 @@ union bpf_attr {
* Return
* The 64 bit jiffies
*
- * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
* Description
* For an eBPF program attached to a perf event, retrieve the
* branch records (**struct perf_branch_entry**) associated to *ctx*
@@ -2995,7 +3020,7 @@ union bpf_attr {
*
* **-ENOENT** if architecture does not support branch records.
*
- * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
* Description
 * 		Returns 0 on success; the values for *pid* and *tgid* as seen from
 * 		the current *namespace* are returned in *nsdata*.
@@ -3007,7 +3032,7 @@ union bpf_attr {
*
* **-ENOENT** if pidns does not exists for the current task.
*
- * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -3062,8 +3087,12 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
* Description
+ * Helper is overloaded depending on BPF program type. This
+ * description applies to **BPF_PROG_TYPE_SCHED_CLS** and
+ * **BPF_PROG_TYPE_SCHED_ACT** programs.
+ *
* Assign the *sk* to the *skb*. When combined with appropriate
* routing configuration to receive the packet towards the socket,
* will cause *skb* to be delivered to the specified socket.
@@ -3089,6 +3118,56 @@ union bpf_attr {
* **-ESOCKTNOSUPPORT** if the socket type is not supported
* (reuseport).
*
+ * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
+ * Description
+ * Helper is overloaded depending on BPF program type. This
+ * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
+ *
+ * Select the *sk* as a result of a socket lookup.
+ *
+ * 		For the operation to succeed, the passed socket must be compatible
+ * with the packet description provided by the *ctx* object.
+ *
+ * 		The L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
+ * 		be an exact match, while the IP family (**AF_INET** or
+ * 		**AF_INET6**) only has to be compatible: IPv6 sockets
+ * 		that are not v6-only can be selected for IPv4 packets.
+ *
+ * Only TCP listeners and UDP unconnected sockets can be
+ * selected. *sk* can also be NULL to reset any previous
+ * selection.
+ *
+ * 		The *flags* argument can be a combination of the following values:
+ *
+ * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous
+ * socket selection, potentially done by a BPF program
+ * that ran before us.
+ *
+ * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
+ * load-balancing within reuseport group for the socket
+ * being selected.
+ *
+ * On success *ctx->sk* will point to the selected socket.
+ *
+ * Return
+ * 0 on success, or a negative errno in case of failure.
+ *
+ * * **-EAFNOSUPPORT** if socket family (*sk->family*) is
+ * not compatible with packet family (*ctx->family*).
+ *
+ * * **-EEXIST** if socket has been already selected,
+ * potentially by another program, and
+ * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
+ *
+ * * **-EINVAL** if unsupported flags were specified.
+ *
+ * * **-EPROTOTYPE** if socket L4 protocol
+ * (*sk->protocol*) doesn't match packet protocol
+ * (*ctx->protocol*).
+ *
+ * * **-ESOCKTNOSUPPORT** if socket is not in allowed
+ * state (TCP listening or UDP unconnected).
+ *
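To make the SK_LOOKUP flavour concrete, here is an editor's sketch (modelled on the style of the kernel selftests, not copied from this patch) of a program that steers connections to local port 1234 towards a socket user space has inserted into a sockmap; the exact SEC() naming depends on the libbpf version:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* User space is expected to insert the serving socket at index 0. */
    struct {
            __uint(type, BPF_MAP_TYPE_SOCKMAP);
            __uint(max_entries, 1);
            __type(key, __u32);
            __type(value, __u64);
    } dest_sock SEC(".maps");

    SEC("sk_lookup")
    int steer_port_1234(struct bpf_sk_lookup *ctx)
    {
            const __u32 zero = 0;
            struct bpf_sock *sk;
            long err;

            if (ctx->local_port != 1234)    /* local_port is host byte order */
                    return SK_PASS;

            sk = bpf_map_lookup_elem(&dest_sock, &zero);
            if (!sk)
                    return SK_PASS;

            err = bpf_sk_assign(ctx, sk, 0);
            bpf_sk_release(sk);
            return err ? SK_DROP : SK_PASS;
    }

    char _license[] SEC("license") = "GPL";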
* u64 bpf_ktime_get_boot_ns(void)
* Description
* Return the time elapsed since system boot, in nanoseconds.
@@ -3097,7 +3176,7 @@ union bpf_attr {
* Return
* Current *ktime*.
*
- * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
* Description
* **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
* out the format string.
@@ -3126,7 +3205,7 @@ union bpf_attr {
*
* **-EOVERFLOW** if an overflow happened: The same object will be tried again.
*
- * int bpf_seq_write(struct seq_file *m, const void *data, u32 len)
+ * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
* Description
* **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
* The *m* represents the seq_file. The *data* and *len* represent the
@@ -3168,7 +3247,7 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
* Description
* Copy *size* bytes from *data* into a ring buffer *ringbuf*.
* If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
@@ -3222,7 +3301,7 @@ union bpf_attr {
* Return
* Requested value, or 0, if *flags* are not recognized.
*
- * int bpf_csum_level(struct sk_buff *skb, u64 level)
+ * long bpf_csum_level(struct sk_buff *skb, u64 level)
* Description
* Change the skbs checksum level by one layer up or down, or
* reset it entirely to none in order to have the stack perform
@@ -3253,6 +3332,69 @@ union bpf_attr {
* case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
* is returned or the error code -EACCES in case the skb is not
* subject to CHECKSUM_UNNECESSARY.
+ *
+ * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or NULL otherwise.
+ *
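The bpf_skc_to_*_sock casts above are typically used from BTF-enabled tracing or iterator programs. A hedged sketch, assuming vmlinux.h for struct sock/tcp_sock and libbpf's bpf_helpers.h:

    /* Narrow a generic socket pointer and read a TCP-specific field. */
    static __always_inline __u32 tcp_snd_cwnd_of(struct sock *sk)
    {
            struct tcp_sock *tp = bpf_skc_to_tcp_sock(sk);

            if (!tp)
                    return 0;        /* not a full TCP socket */
            return tp->snd_cwnd;     /* BTF-checked direct field access */
    }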
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ * Description
+ * Return a user or a kernel stack in bpf program provided buffer.
+ * To achieve this, the helper needs *task*, which is a valid
+ * pointer to struct task_struct. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
+ *
+ * The last argument, *flags*, holds the number of stack frames to
+ * skip (from 0 to 255), masked with
+ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * the following flags:
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ * **bpf_get_task_stack**\ () can collect up to
+ * 		**PERF_MAX_STACK_DEPTH** kernel or user frames, subject
+ * 		to a sufficiently large buffer size. Note that
+ * this limit can be controlled with the **sysctl** program, and
+ * that it should be manually increased in order to profile long
+ * user stacks (such as stacks for Java programs). To do so, use:
+ *
+ * ::
+ *
+ * # sysctl kernel.perf_event_max_stack=<new value>
+ * Return
+ * A non-negative value equal to or less than *size* on success,
+ * or a negative error in case of failure.
+ *
*/
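A sketch of bpf_get_task_stack() in a task iterator program (editor's illustration, assuming vmlinux.h for struct bpf_iter__task and libbpf's bpf_helpers.h):

    __u64 stack[32];    /* scratch buffer for instruction pointers */

    SEC("iter/task")
    int dump_task_stack(struct bpf_iter__task *ctx)
    {
            struct seq_file *seq = ctx->meta->seq;
            struct task_struct *task = ctx->task;
            long nbytes;

            if (!task)
                    return 0;

            /* flags == 0 requests the kernel stack; BPF_F_USER_STACK
             * would select the user stack instead. */
            nbytes = bpf_get_task_stack(task, stack, sizeof(stack), 0);
            if (nbytes > 0)
                    bpf_seq_write(seq, stack, nbytes);
            return 0;
    }

    char _license[] SEC("license") = "GPL";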
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3390,7 +3532,14 @@ union bpf_attr {
FN(ringbuf_submit), \
FN(ringbuf_discard), \
FN(ringbuf_query), \
- FN(csum_level),
+ FN(csum_level), \
+ FN(skc_to_tcp6_sock), \
+ FN(skc_to_tcp_sock), \
+ FN(skc_to_tcp_timewait_sock), \
+ FN(skc_to_tcp_request_sock), \
+ FN(skc_to_udp6_sock), \
+ FN(get_task_stack), \
+ /* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -3532,6 +3681,12 @@ enum {
BPF_RINGBUF_HDR_SZ = 8,
};
+/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
+enum {
+ BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0),
+ BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1),
+};
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
@@ -3775,6 +3930,19 @@ struct bpf_devmap_val {
} bpf_prog;
};
+/* CPUMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_cpumap_val {
+ __u32 qsize; /* queue size to remote target CPU */
+ union {
+ int fd; /* prog fd on map write */
+ __u32 id; /* prog id on map read */
+ } bpf_prog;
+};
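By way of illustration (not part of the patch), user space fills this struct when writing a CPUMAP entry; the sketch assumes libbpf's bpf_map_update_elem() wrapper and an already-loaded XDP program fd:

    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    /* Enable packet steering to 'cpu' with a 192-entry queue and attach
     * prog_fd to run there; map_fd refers to a BPF_MAP_TYPE_CPUMAP map. */
    static int enable_cpu(int map_fd, __u32 cpu, int prog_fd)
    {
            struct bpf_cpumap_val val = {
                    .qsize = 192,
                    .bpf_prog.fd = prog_fd,   /* fd on write, id on read */
            };

            return bpf_map_update_elem(map_fd, &cpu, &val, 0);
    }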
+
enum sk_action {
SK_DROP = 0,
SK_PASS,
@@ -3907,12 +4075,15 @@ struct bpf_link_info {
__u32 netns_ino;
__u32 attach_type;
} netns;
+ struct {
+ __u32 ifindex;
+ } xdp;
};
} __attribute__((aligned(8)));
/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
* by user and intended to be used by socket (e.g. to bind to, depends on
- * attach attach type).
+ * attach type).
*/
struct bpf_sock_addr {
__u32 user_family; /* Allows 4-byte read, but no write. */
@@ -4261,4 +4432,19 @@ struct bpf_pidns_info {
__u32 pid;
__u32 tgid;
};
+
+/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
+struct bpf_sk_lookup {
+ __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+
+ __u32 family; /* Protocol family (AF_INET, AF_INET6) */
+ __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
+ __u32 remote_ip4; /* Network byte order */
+ __u32 remote_ip6[4]; /* Network byte order */
+ __u32 remote_port; /* Network byte order */
+ __u32 local_ip4; /* Network byte order */
+ __u32 local_ip6[4]; /* Network byte order */
+ __u32 local_port; /* Host byte order */
+};
+
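A small sketch of the byte-order conventions above, as seen from an SK_LOOKUP program; it assumes bpf_endian.h for bpf_htonl() and linux/in.h for IPPROTO_UDP:

    #include <linux/in.h>            /* IPPROTO_UDP */
    #include <bpf/bpf_endian.h>      /* bpf_htonl() */

    /* True for UDP packets to 127.0.0.53:53: the addresses are network
     * byte order, local_port is host byte order. */
    static inline int is_local_dns(const struct bpf_sk_lookup *ctx)
    {
            return ctx->protocol == IPPROTO_UDP &&
                   ctx->local_port == 53 &&
                   ctx->local_ip4 == bpf_htonl(0x7f000035);
    }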
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index e6b6cb0f8bc6..2c39d15a2beb 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -243,6 +243,18 @@ struct btrfs_ioctl_dev_info_args {
__u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */
};
+/*
+ * Retrieve information about the filesystem
+ */
+
+/* Request information about checksum type and size */
+#define BTRFS_FS_INFO_FLAG_CSUM_INFO (1 << 0)
+
+/* Request information about filesystem generation */
+#define BTRFS_FS_INFO_FLAG_GENERATION (1 << 1)
+/* Request information about filesystem metadata UUID */
+#define BTRFS_FS_INFO_FLAG_METADATA_UUID (1 << 2)
+
struct btrfs_ioctl_fs_info_args {
__u64 max_id; /* out */
__u64 num_devices; /* out */
@@ -250,8 +262,13 @@ struct btrfs_ioctl_fs_info_args {
__u32 nodesize; /* out */
__u32 sectorsize; /* out */
__u32 clone_alignment; /* out */
- __u32 reserved32;
- __u64 reserved[122]; /* pad to 1k */
+ /* See BTRFS_FS_INFO_FLAG_* */
+ __u16 csum_type; /* out */
+ __u16 csum_size; /* out */
+ __u64 flags; /* in/out */
+ __u64 generation; /* out */
+ __u8 metadata_uuid[BTRFS_FSID_SIZE]; /* out */
+ __u8 reserved[944]; /* pad to 1k */
};
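As an editor's sketch of the new in/out flags handshake (assuming the existing BTRFS_IOC_FS_INFO ioctl from this header): the caller sets the flags it wants, and only fields whose flag the kernel leaves set are valid on return.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/btrfs.h>

    /* Query checksum information; fd is any open file or directory on a
     * mounted btrfs filesystem. */
    static int print_csum_info(int fd)
    {
            struct btrfs_ioctl_fs_info_args args;

            memset(&args, 0, sizeof(args));
            args.flags = BTRFS_FS_INFO_FLAG_CSUM_INFO;     /* in: request */

            if (ioctl(fd, BTRFS_IOC_FS_INFO, &args) < 0)
                    return -1;

            if (args.flags & BTRFS_FS_INFO_FLAG_CSUM_INFO) /* out: valid */
                    printf("csum type %u, size %u bytes\n",
                           args.csum_type, args.csum_size);
            return 0;
    }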
/*
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index a3f3975df0de..9ba64ca6b4ac 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -913,9 +913,9 @@ struct btrfs_free_space_info {
#define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0)
#define BTRFS_QGROUP_LEVEL_SHIFT 48
-static inline __u64 btrfs_qgroup_level(__u64 qgroupid)
+static inline __u16 btrfs_qgroup_level(__u64 qgroupid)
{
- return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT;
+ return (__u16)(qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT);
}
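A worked example of the qgroupid layout this helper relies on: the level sits in the top 16 bits and the subvolume (object) id in the low 48 bits, so qgroup "1/5" decodes as below.

    #include <linux/btrfs_tree.h>

    static inline void qgroup_split_example(void)
    {
            __u64 qgroupid = (1ULL << BTRFS_QGROUP_LEVEL_SHIFT) | 5;   /* "1/5" */
            __u16 level    = btrfs_qgroup_level(qgroupid);             /* == 1 */
            __u64 subvolid = qgroupid &
                             ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1); /* == 5 */

            (void)level;
            (void)subvolid;
    }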
/*
diff --git a/include/uapi/linux/caif/caif_socket.h b/include/uapi/linux/caif/caif_socket.h
index 10ec1d1cf68e..d9970bbaa156 100644
--- a/include/uapi/linux/caif/caif_socket.h
+++ b/include/uapi/linux/caif/caif_socket.h
@@ -169,7 +169,7 @@ struct sockaddr_caif {
* @CAIFSO_LINK_SELECT: Selector used if multiple CAIF Link layers are
* available. Either a high bandwidth
* link can be selected (CAIF_LINK_HIGH_BANDW) or
- * or a low latency link (CAIF_LINK_LOW_LATENCY).
+ * a low latency link (CAIF_LINK_LOW_LATENCY).
* This option is of type __u32.
* Alternatively SO_BINDTODEVICE can be used.
*
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 48ff0757ae5e..395dd0df8d08 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -408,7 +408,14 @@ struct vfs_ns_cap_data {
*/
#define CAP_BPF 39
-#define CAP_LAST_CAP CAP_BPF
+
+/* Allow checkpoint/restore related operations */
+/* Allow PID selection during clone3() */
+/* Allow writing to ns_last_pid */
+
+#define CAP_CHECKPOINT_RESTORE 40
+
+#define CAP_LAST_CAP CAP_CHECKPOINT_RESTORE
#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
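Not part of the patch: a sketch of checking for the new capability from user space with raw capget(2), using the CAP_TO_INDEX/CAP_TO_MASK macros from this header (capability 40 lands in the second 32-bit word of the v3 layout).

    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/capability.h>

    static int has_checkpoint_restore(void)
    {
            struct __user_cap_header_struct hdr = {
                    .version = _LINUX_CAPABILITY_VERSION_3,
                    .pid = 0,                        /* calling thread */
            };
            struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];

            memset(data, 0, sizeof(data));
            if (syscall(SYS_capget, &hdr, data))
                    return 0;

            return !!(data[CAP_TO_INDEX(CAP_CHECKPOINT_RESTORE)].effective &
                      CAP_TO_MASK(CAP_CHECKPOINT_RESTORE));
    }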
diff --git a/include/uapi/linux/close_range.h b/include/uapi/linux/close_range.h
new file mode 100644
index 000000000000..6928a9fdee3c
--- /dev/null
+++ b/include/uapi/linux/close_range.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_CLOSE_RANGE_H
+#define _UAPI_LINUX_CLOSE_RANGE_H
+
+/* Unshare the file descriptor table before closing file descriptors. */
+#define CLOSE_RANGE_UNSHARE (1U << 1)
+
+#endif /* _UAPI_LINUX_CLOSE_RANGE_H */
+
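A minimal usage sketch for the new flag, assuming the close_range syscall from the same series; the fallback syscall number below is the asm-generic value and is only a guess for toolchains whose headers do not define it yet.

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/close_range.h>

    #ifndef __NR_close_range
    #define __NR_close_range 436    /* asm-generic value; illustrative */
    #endif

    /* Close every descriptor from 'first' upwards in a private copy of
     * the fd table, so a table shared via clone(CLONE_FILES) is left
     * untouched. */
    static int close_from(unsigned int first)
    {
            return syscall(__NR_close_range, first, ~0U, CLOSE_RANGE_UNSHARE);
    }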
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 08563e6a424d..cfef4245ea5a 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -451,6 +451,13 @@ enum devlink_attr {
DEVLINK_ATTR_TRAP_POLICER_RATE, /* u64 */
DEVLINK_ATTR_TRAP_POLICER_BURST, /* u64 */
+ DEVLINK_ATTR_PORT_FUNCTION, /* nested */
+
+ DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, /* string */
+
+ DEVLINK_ATTR_PORT_LANES, /* u32 */
+ DEVLINK_ATTR_PORT_SPLITTABLE, /* u8 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
@@ -497,4 +504,12 @@ enum devlink_resource_unit {
DEVLINK_RESOURCE_UNIT_ENTRY,
};
+enum devlink_port_function_attr {
+ DEVLINK_PORT_FUNCTION_ATTR_UNSPEC,
+ DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, /* binary */
+
+ __DEVLINK_PORT_FUNCTION_ATTR_MAX,
+ DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1
+};
+
#endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index c6dd0215482e..22220945a5fd 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -53,7 +53,7 @@ typedef __s64 Elf64_Sxword;
*
* - Oracle: Linker and Libraries.
* Part No: 817–1984–19, August 2011.
- * http://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf
+ * https://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf
*
* - System V ABI AMD64 Architecture Processor Supplement
* Draft Version 0.99.4,
diff --git a/include/uapi/linux/elfcore.h b/include/uapi/linux/elfcore.h
deleted file mode 100644
index baf03562306d..000000000000
--- a/include/uapi/linux/elfcore.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_ELFCORE_H
-#define _UAPI_LINUX_ELFCORE_H
-
-#include <linux/types.h>
-#include <linux/signal.h>
-#include <linux/time.h>
-#include <linux/ptrace.h>
-#include <linux/elf.h>
-#include <linux/fs.h>
-
-struct elf_siginfo
-{
- int si_signo; /* signal number */
- int si_code; /* extra code */
- int si_errno; /* errno */
-};
-
-
-#ifndef __KERNEL__
-typedef elf_greg_t greg_t;
-typedef elf_gregset_t gregset_t;
-typedef elf_fpregset_t fpregset_t;
-typedef elf_fpxregset_t fpxregset_t;
-#define NGREG ELF_NGREG
-#endif
-
-/*
- * Definitions to generate Intel SVR4-like core files.
- * These mostly have the same names as the SVR4 types with "elf_"
- * tacked on the front to prevent clashes with linux definitions,
- * and the typedef forms have been avoided. This is mostly like
- * the SVR4 structure, but more Linuxy, with things that Linux does
- * not support and which gdb doesn't really use excluded.
- * Fields present but not used are marked with "XXX".
- */
-struct elf_prstatus
-{
-#if 0
- long pr_flags; /* XXX Process flags */
- short pr_why; /* XXX Reason for process halt */
- short pr_what; /* XXX More detailed reason */
-#endif
- struct elf_siginfo pr_info; /* Info associated with signal */
- short pr_cursig; /* Current signal */
- unsigned long pr_sigpend; /* Set of pending signals */
- unsigned long pr_sighold; /* Set of held signals */
-#if 0
- struct sigaltstack pr_altstack; /* Alternate stack info */
- struct sigaction pr_action; /* Signal action for current sig */
-#endif
- pid_t pr_pid;
- pid_t pr_ppid;
- pid_t pr_pgrp;
- pid_t pr_sid;
- struct __kernel_old_timeval pr_utime; /* User time */
- struct __kernel_old_timeval pr_stime; /* System time */
- struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
- struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
-#if 0
- long pr_instr; /* Current instruction */
-#endif
- elf_gregset_t pr_reg; /* GP registers */
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- /* When using FDPIC, the loadmap addresses need to be communicated
- * to GDB in order for GDB to do the necessary relocations. The
- * fields (below) used to communicate this information are placed
- * immediately after ``pr_reg'', so that the loadmap addresses may
- * be viewed as part of the register set if so desired.
- */
- unsigned long pr_exec_fdpic_loadmap;
- unsigned long pr_interp_fdpic_loadmap;
-#endif
- int pr_fpvalid; /* True if math co-processor being used. */
-};
-
-#define ELF_PRARGSZ (80) /* Number of chars for args */
-
-struct elf_prpsinfo
-{
- char pr_state; /* numeric process state */
- char pr_sname; /* char for pr_state */
- char pr_zomb; /* zombie */
- char pr_nice; /* nice val */
- unsigned long pr_flag; /* flags */
- __kernel_uid_t pr_uid;
- __kernel_gid_t pr_gid;
- pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
- /* Lots missing */
- char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
-};
-
-#ifndef __KERNEL__
-typedef struct elf_prstatus prstatus_t;
-typedef struct elf_prpsinfo prpsinfo_t;
-#define PRARGSZ ELF_PRARGSZ
-#endif
-
-
-#endif /* _UAPI_LINUX_ELFCORE_H */
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index ca5cb3e3c6df..3c70e8ac14b8 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -5,6 +5,13 @@
#include <linux/types.h>
#include <linux/time_types.h>
+/* RFC 4884: return offset to extension struct + validation */
+struct sock_ee_data_rfc4884 {
+ __u16 len;
+ __u8 flags;
+ __u8 reserved;
+};
+
struct sock_extended_err {
__u32 ee_errno;
__u8 ee_origin;
@@ -12,7 +19,10 @@ struct sock_extended_err {
__u8 ee_code;
__u8 ee_pad;
__u32 ee_info;
- __u32 ee_data;
+ union {
+ __u32 ee_data;
+ struct sock_ee_data_rfc4884 ee_rfc4884;
+ };
};
#define SO_EE_ORIGIN_NONE 0
@@ -31,6 +41,8 @@ struct sock_extended_err {
#define SO_EE_CODE_TXTIME_INVALID_PARAM 1
#define SO_EE_CODE_TXTIME_MISSED 2
+#define SO_EE_RFC4884_FLAG_INVALID 1
+
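As a hedged sketch (not from the patch) of how the new RFC 4884 fields are consumed: after receiving from the error queue with MSG_ERRQUEUE on a socket that has IP_RECVERR enabled, the extension block offset is only meaningful when ee_rfc4884.len is non-zero and the INVALID flag is clear.

    #include <sys/socket.h>
    #include <netinet/in.h>          /* IPPROTO_IP, IP_RECVERR */
    #include <linux/errqueue.h>

    static void handle_errqueue(struct msghdr *msg)
    {
            struct cmsghdr *cm;

            for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
                    struct sock_extended_err *ee;

                    if (cm->cmsg_level != IPPROTO_IP ||
                        cm->cmsg_type != IP_RECVERR)
                            continue;

                    ee = (struct sock_extended_err *)CMSG_DATA(cm);
                    if (ee->ee_origin != SO_EE_ORIGIN_ICMP)
                            continue;

                    if (ee->ee_rfc4884.len &&
                        !(ee->ee_rfc4884.flags & SO_EE_RFC4884_FLAG_INVALID)) {
                            /* ee_rfc4884.len bytes of extension structure
                             * start at this offset into the copied ICMP
                             * payload. */
                    }
            }
    }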
/**
* struct scm_timestamping - timestamps exposed through cmsg
*
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index f4662b3a9e1e..b4f2d134e713 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -579,6 +579,76 @@ struct ethtool_pauseparam {
__u32 tx_pause;
};
+/**
+ * enum ethtool_link_ext_state - link extended state
+ */
+enum ethtool_link_ext_state {
+ ETHTOOL_LINK_EXT_STATE_AUTONEG,
+ ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+ ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+ ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+ ETHTOOL_LINK_EXT_STATE_NO_CABLE,
+ ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+ ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE,
+ ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE,
+ ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
+ ETHTOOL_LINK_EXT_STATE_OVERHEAT,
+};
+
+/**
+ * enum ethtool_link_ext_substate_autoneg - more information in addition to
+ * ETHTOOL_LINK_EXT_STATE_AUTONEG.
+ */
+enum ethtool_link_ext_substate_autoneg {
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE,
+ ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
+};
+
+/**
+ * enum ethtool_link_ext_substate_link_training - more information in addition to
+ * ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+ */
+enum ethtool_link_ext_substate_link_training {
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY,
+ ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
+};
+
+/**
+ * enum ethtool_link_ext_substate_logical_mismatch - more information in addition
+ * to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+ */
+enum ethtool_link_ext_substate_link_logical_mismatch {
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED,
+ ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
+};
+
+/**
+ * enum ethtool_link_ext_substate_bad_signal_integrity - more information in
+ * addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+ */
+enum ethtool_link_ext_substate_bad_signal_integrity {
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
+};
+
+/**
+ * enum ethtool_link_ext_substate_cable_issue - more information in
+ * addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE.
+ */
+enum ethtool_link_ext_substate_cable_issue {
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
+ ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
+};
+
#define ETH_GSTRING_LEN 32
/**
@@ -599,6 +669,7 @@ struct ethtool_pauseparam {
* @ETH_SS_SOF_TIMESTAMPING: SOF_TIMESTAMPING_* flags
* @ETH_SS_TS_TX_TYPES: timestamping Tx types
* @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
+ * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -616,6 +687,7 @@ enum ethtool_stringset {
ETH_SS_SOF_TIMESTAMPING,
ETH_SS_TS_TX_TYPES,
ETH_SS_TS_RX_FILTERS,
+ ETH_SS_UDP_TUNNEL_TYPES,
/* add new constants above here */
ETH_SS_COUNT
@@ -1530,6 +1602,21 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72,
ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73,
ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74,
+ ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75,
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76,
+ ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77,
+ ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78,
+ ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79,
+ ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80,
+ ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81,
+ ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82,
+ ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83,
+ ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84,
+ ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85,
+ ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86,
+ ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87,
+ ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88,
+ ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
};
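Illustration (not part of the patch): the extended states above are plain enum values, so a consumer that reads ETHTOOL_A_LINKSTATE_EXT_STATE from the netlink API below only needs a lookup table. A sketch of such a mapping:

    #include <linux/ethtool.h>

    /* Map the new extended link-down states to short human-readable strings. */
    static const char *link_ext_state_str(enum ethtool_link_ext_state s)
    {
        switch (s) {
        case ETHTOOL_LINK_EXT_STATE_AUTONEG:               return "autoneg";
        case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE: return "link training failure";
        case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH: return "logical mismatch";
        case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY:  return "bad signal integrity";
        case ETHTOOL_LINK_EXT_STATE_NO_CABLE:              return "no cable";
        case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE:           return "cable issue";
        case ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE:          return "EEPROM issue";
        case ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE:   return "calibration failure";
        case ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED: return "power budget exceeded";
        case ETHTOOL_LINK_EXT_STATE_OVERHEAT:              return "overheat";
        }
        return "unknown";
    }

The substate enums that accompany the state can be decoded the same way, keyed on the reported state value.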
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 4dda5e4244a7..5dcd24cb33ea 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -41,6 +41,7 @@ enum {
ETHTOOL_MSG_TSINFO_GET,
ETHTOOL_MSG_CABLE_TEST_ACT,
ETHTOOL_MSG_CABLE_TEST_TDR_ACT,
+ ETHTOOL_MSG_TUNNEL_INFO_GET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -236,6 +237,8 @@ enum {
ETHTOOL_A_LINKSTATE_LINK, /* u8 */
ETHTOOL_A_LINKSTATE_SQI, /* u32 */
ETHTOOL_A_LINKSTATE_SQI_MAX, /* u32 */
+ ETHTOOL_A_LINKSTATE_EXT_STATE, /* u8 */
+ ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, /* u8 */
/* add new constants above here */
__ETHTOOL_A_LINKSTATE_CNT,
@@ -554,6 +557,60 @@ enum {
ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX = __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT - 1
};
+/* TUNNEL INFO */
+
+enum {
+ ETHTOOL_UDP_TUNNEL_TYPE_VXLAN,
+ ETHTOOL_UDP_TUNNEL_TYPE_GENEVE,
+ ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE,
+
+ __ETHTOOL_UDP_TUNNEL_TYPE_CNT
+};
+
+enum {
+ ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC,
+
+ ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, /* be16 */
+ ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, /* u32 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT,
+ ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = (__ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC,
+
+ ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, /* u32 */
+ ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, /* bitset */
+ ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY, /* nest - _UDP_ENTRY_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT,
+ ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = (__ETHTOOL_A_TUNNEL_UDP_TABLE_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_TUNNEL_UDP_UNSPEC,
+
+ ETHTOOL_A_TUNNEL_UDP_TABLE, /* nest - _UDP_TABLE_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TUNNEL_UDP_CNT,
+ ETHTOOL_A_TUNNEL_UDP_MAX = (__ETHTOOL_A_TUNNEL_UDP_CNT - 1)
+};
+
+enum {
+ ETHTOOL_A_TUNNEL_INFO_UNSPEC,
+ ETHTOOL_A_TUNNEL_INFO_HEADER, /* nest - _A_HEADER_* */
+
+ ETHTOOL_A_TUNNEL_INFO_UDP_PORTS, /* nest - _UDP_TABLE */
+
+ /* add new constants above here */
+ __ETHTOOL_A_TUNNEL_INFO_CNT,
+ ETHTOOL_A_TUNNEL_INFO_MAX = (__ETHTOOL_A_TUNNEL_INFO_CNT - 1)
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
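Illustration (not part of the patch): a TUNNEL_INFO reply nests ETHTOOL_A_TUNNEL_INFO_UDP_PORTS, then ETHTOOL_A_TUNNEL_UDP_TABLE, then ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY attributes, as laid out above. A plain walker over one table nest needs only struct nlattr and the NLA_* helpers from linux/netlink.h; the function name and the minimal validation below are illustrative, and nest is assumed to point at an already-received ETHTOOL_A_TUNNEL_UDP_TABLE attribute.

    #include <linux/netlink.h>
    #include <linux/ethtool_netlink.h>

    /* Count the port entries inside one UDP tunnel table nest. */
    static int count_udp_entries(const struct nlattr *nest)
    {
        const char *p = (const char *)nest + NLA_HDRLEN;
        int rem = nest->nla_len - NLA_HDRLEN;
        int entries = 0;

        while (rem >= (int)sizeof(struct nlattr)) {
            const struct nlattr *a = (const struct nlattr *)p;

            if (a->nla_len < sizeof(*a) || a->nla_len > rem)
                break;
            /* Each entry nest carries _UDP_ENTRY_PORT (be16) and _UDP_ENTRY_TYPE (u32). */
            if ((a->nla_type & NLA_TYPE_MASK) == ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY)
                entries++;
            p += NLA_ALIGN(a->nla_len);
            rem -= NLA_ALIGN(a->nla_len);
        }
        return entries;
    }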
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index a88c7c6d0692..fbf9c5c7dd59 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -24,7 +24,6 @@
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
#define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */
-#define FAN_DIR_MODIFY 0x00080000 /* Directory entry was modified */
#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */
@@ -54,6 +53,11 @@
/* Flags to determine fanotify event format */
#define FAN_REPORT_TID 0x00000100 /* event->pid is thread id */
#define FAN_REPORT_FID 0x00000200 /* Report unique file id */
+#define FAN_REPORT_DIR_FID 0x00000400 /* Report unique directory id */
+#define FAN_REPORT_NAME 0x00000800 /* Report events with name */
+
+/* Convenience macro - FAN_REPORT_NAME requires FAN_REPORT_DIR_FID */
+#define FAN_REPORT_DFID_NAME (FAN_REPORT_DIR_FID | FAN_REPORT_NAME)
/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \
@@ -118,6 +122,7 @@ struct fanotify_event_metadata {
#define FAN_EVENT_INFO_TYPE_FID 1
#define FAN_EVENT_INFO_TYPE_DFID_NAME 2
+#define FAN_EVENT_INFO_TYPE_DFID 3
/* Variable length info record following event metadata */
struct fanotify_event_info_header {
@@ -127,10 +132,11 @@ struct fanotify_event_info_header {
};
/*
- * Unique file identifier info record. This is used both for
- * FAN_EVENT_INFO_TYPE_FID records and for FAN_EVENT_INFO_TYPE_DFID_NAME
- * records. For FAN_EVENT_INFO_TYPE_DFID_NAME there is additionally a null
- * terminated name immediately after the file handle.
+ * Unique file identifier info record.
+ * This structure is used for records of types FAN_EVENT_INFO_TYPE_FID,
+ * FAN_EVENT_INFO_TYPE_DFID and FAN_EVENT_INFO_TYPE_DFID_NAME.
+ * For FAN_EVENT_INFO_TYPE_DFID_NAME there is additionally a null terminated
+ * name immediately after the file handle.
*/
struct fanotify_event_info_fid {
struct fanotify_event_info_header hdr;
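Illustration (not part of the patch): a hedged sketch of using the new flags. The group is created with FAN_REPORT_DFID_NAME, and each directory-entry event then carries a FAN_EVENT_INFO_TYPE_DFID_NAME record whose file handle is followed by the NUL-terminated name, as the comment above describes.

    #define _GNU_SOURCE        /* for struct file_handle */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/fanotify.h>

    /* Watch a filesystem for creates/deletes; names are reported with the events. */
    static int watch_fs(const char *path)
    {
        int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_DFID_NAME, 0);

        if (fd < 0)
            return -1;
        return fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
                             FAN_CREATE | FAN_DELETE | FAN_ONDIR, AT_FDCWD, path);
    }

    /* Given an info record attached to an event, print the directory entry name. */
    static void print_name(const struct fanotify_event_info_fid *fid)
    {
        const struct file_handle *fh = (const struct file_handle *)fid->handle;

        if (fid->hdr.info_type == FAN_EVENT_INFO_TYPE_DFID_NAME)
            printf("entry: %s\n", (const char *)fh->f_handle + fh->handle_bytes);
    }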
diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h
index ec70a0746e59..1621b077bf21 100644
--- a/include/uapi/linux/fpga-dfl.h
+++ b/include/uapi/linux/fpga-dfl.h
@@ -151,6 +151,65 @@ struct dfl_fpga_port_dma_unmap {
#define DFL_FPGA_PORT_DMA_UNMAP _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 4)
+/**
+ * struct dfl_fpga_irq_set - the argument for DFL_FPGA_XXX_SET_IRQ ioctl.
+ *
+ * @start: Index of the first irq.
+ * @count: The number of eventfd handlers.
+ * @evtfds: Eventfd handlers.
+ */
+struct dfl_fpga_irq_set {
+ __u32 start;
+ __u32 count;
+ __s32 evtfds[];
+};
+
+/**
+ * DFL_FPGA_PORT_ERR_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 5,
+ * __u32 num_irqs)
+ *
+ * Get the number of irqs supported by the fpga port error reporting private
+ * feature. Currently hardware supports up to 1 irq.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_PORT_ERR_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \
+ DFL_PORT_BASE + 5, __u32)
+
+/**
+ * DFL_FPGA_PORT_ERR_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_PORT_BASE + 6,
+ * struct dfl_fpga_irq_set)
+ *
+ * Set fpga port error reporting interrupt trigger if evtfds[n] is valid.
+ * Unset related interrupt trigger if evtfds[n] is a negative value.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_PORT_ERR_SET_IRQ _IOW(DFL_FPGA_MAGIC, \
+ DFL_PORT_BASE + 6, \
+ struct dfl_fpga_irq_set)
+
+/**
+ * DFL_FPGA_PORT_UINT_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 7,
+ * __u32 num_irqs)
+ *
+ * Get the number of irqs supported by the fpga AFU interrupt private
+ * feature.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_PORT_UINT_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \
+ DFL_PORT_BASE + 7, __u32)
+
+/**
+ * DFL_FPGA_PORT_UINT_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_PORT_BASE + 8,
+ * struct dfl_fpga_irq_set)
+ *
+ * Set fpga AFU interrupt trigger if evtfds[n] is valid.
+ * Unset related interrupt trigger if evtfds[n] is a negative value.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_PORT_UINT_SET_IRQ _IOW(DFL_FPGA_MAGIC, \
+ DFL_PORT_BASE + 8, \
+ struct dfl_fpga_irq_set)
+
/* IOCTLs for FME file descriptor */
/**
@@ -194,4 +253,27 @@ struct dfl_fpga_fme_port_pr {
*/
#define DFL_FPGA_FME_PORT_ASSIGN _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2, int)
+/**
+ * DFL_FPGA_FME_ERR_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_FME_BASE + 3,
+ * __u32 num_irqs)
+ *
+ * Get the number of irqs supported by the fpga fme error reporting private
+ * feature. Currently hardware supports up to 1 irq.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_FME_ERR_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \
+ DFL_FME_BASE + 3, __u32)
+
+/**
+ * DFL_FPGA_FME_ERR_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 4,
+ * struct dfl_fpga_irq_set)
+ *
+ * Set fpga fme error reporting interrupt trigger if evtfds[n] is valid.
+ * Unset related interrupt trigger if evtfds[n] is a negative value.
+ * Return: 0 on success, -errno on failure.
+ */
+#define DFL_FPGA_FME_ERR_SET_IRQ _IOW(DFL_FPGA_MAGIC, \
+ DFL_FME_BASE + 4, \
+ struct dfl_fpga_irq_set)
+
#endif /* _UAPI_LINUX_FPGA_DFL_H */
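Illustration (not part of the patch): a hedged example of driving the new port-error interrupt ioctls, querying the irq count and then binding one eventfd per irq. struct dfl_fpga_irq_set ends in a flexible array, so it is allocated with room for the descriptors; error handling is kept minimal.

    #include <stdlib.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <linux/fpga-dfl.h>

    /* Arm error-reporting interrupts on an already-open DFL port fd. */
    static int arm_port_error_irqs(int port_fd)
    {
        struct dfl_fpga_irq_set *set;
        __u32 nr = 0, i;
        int ret;

        if (ioctl(port_fd, DFL_FPGA_PORT_ERR_GET_IRQ_NUM, &nr) || !nr)
            return -1;

        set = malloc(sizeof(*set) + nr * sizeof(__s32));
        if (!set)
            return -1;

        set->start = 0;
        set->count = nr;
        for (i = 0; i < nr; i++)
            set->evtfds[i] = eventfd(0, 0);    /* a negative value would unbind instead */

        ret = ioctl(port_fd, DFL_FPGA_PORT_ERR_SET_IRQ, set);
        free(set);
        return ret;
    }

The AFU and FME variants take the same argument structure with their respective ioctl numbers.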
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index 0206383c0383..9c27cecf406f 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -71,8 +71,8 @@ enum {
* of a GPIO line
* @info: updated line information
* @timestamp: estimate of time of status change occurrence, in nanoseconds
- * and GPIOLINE_CHANGED_CONFIG
* @event_type: one of GPIOLINE_CHANGED_REQUESTED, GPIOLINE_CHANGED_RELEASED
+ * and GPIOLINE_CHANGED_CONFIG
*
* Note: struct gpioline_info embedded here has 32-bit alignment on its own,
* but it works fine with 64-bit alignment too. With its 72 byte size, we can
diff --git a/include/uapi/linux/hsr_netlink.h b/include/uapi/linux/hsr_netlink.h
index c218ef9c35dd..d540ea9bbef4 100644
--- a/include/uapi/linux/hsr_netlink.h
+++ b/include/uapi/linux/hsr_netlink.h
@@ -17,7 +17,7 @@
/* Generic Netlink HSR family definition
*/
-/* attributes */
+/* attributes for HSR or PRP node */
enum {
HSR_A_UNSPEC,
HSR_A_NODE_ADDR,
diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
index 8f24404ad04f..6135d92e0d47 100644
--- a/include/uapi/linux/hyperv.h
+++ b/include/uapi/linux/hyperv.h
@@ -219,7 +219,7 @@ struct hv_do_fcopy {
* kernel and user-level daemon communicate using a connector channel.
*
* The user mode component first registers with the
- * the kernel component. Subsequently, the kernel component requests, data
+ * kernel component. Subsequently, the kernel component requests data
* for the specified keys. In response to this message the user mode component
* fills in the value corresponding to the specified key. We overload the
* sequence field in the cn_msg header to define our KVP message types.
diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h
index 5589eeb791ca..fb169a50895e 100644
--- a/include/uapi/linux/icmp.h
+++ b/include/uapi/linux/icmp.h
@@ -19,6 +19,7 @@
#define _UAPI_LINUX_ICMP_H
#include <linux/types.h>
+#include <asm/byteorder.h>
#define ICMP_ECHOREPLY 0 /* Echo Reply */
#define ICMP_DEST_UNREACH 3 /* Destination Unreachable */
@@ -95,5 +96,26 @@ struct icmp_filter {
__u32 data;
};
+/* RFC 4884 extension struct: one per message */
+struct icmp_ext_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 reserved1:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:4,
+ reserved1:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 reserved2;
+ __sum16 checksum;
+};
+
+/* RFC 4884 extension object header: one for each object */
+struct icmp_extobj_hdr {
+ __be16 length;
+ __u8 class_num;
+ __u8 class_type;
+};
#endif /* _UAPI_LINUX_ICMP_H */
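Illustration (not part of the patch): the two structs above describe the RFC 4884 trailer layout, one icmp_ext_hdr at the start of the extension area followed by one icmp_extobj_hdr per object. A minimal parser over a raw extension buffer (for instance the tail of an ICMP error, whose length the ee_rfc4884 field earlier in this patch reports) might look like this; the buffer handling is illustrative and no checksum verification is done.

    #include <stdio.h>
    #include <arpa/inet.h>
    #include <linux/icmp.h>

    /* Walk RFC 4884 extension objects in buf[0..len). */
    static void walk_icmp_extensions(const unsigned char *buf, size_t len)
    {
        const struct icmp_ext_hdr *eh = (const void *)buf;
        size_t off;

        if (len < sizeof(*eh) || eh->version != 2)    /* RFC 4884 uses version 2 */
            return;

        off = sizeof(*eh);
        while (off + sizeof(struct icmp_extobj_hdr) <= len) {
            const struct icmp_extobj_hdr *oh = (const void *)(buf + off);
            size_t olen = ntohs(oh->length);

            if (olen < sizeof(*oh) || off + olen > len)
                break;
            printf("object class %u type %u, %zu bytes\n",
                   oh->class_num, oh->class_type, olen);
            off += olen;
        }
    }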
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index 2622b5a3e616..c1661febc2dc 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -68,6 +68,7 @@ struct icmp6hdr {
#define icmp6_mtu icmp6_dataun.un_data32[0]
#define icmp6_unused icmp6_dataun.un_data32[0]
#define icmp6_maxdelay icmp6_dataun.un_data16[0]
+#define icmp6_datagram_len icmp6_dataun.un_data8[0]
#define icmp6_router icmp6_dataun.u_nd_advt.router
#define icmp6_solicited icmp6_dataun.u_nd_advt.solicited
#define icmp6_override icmp6_dataun.u_nd_advt.override
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index e103c1434e4b..fdcdfe414223 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -181,6 +181,12 @@ struct dsa_completion_record {
uint32_t bytes_completed;
uint64_t fault_addr;
union {
+ /* common record */
+ struct {
+ uint32_t invalid_flags:24;
+ uint32_t rsvd2:8;
+ };
+
uint16_t delta_rec_size;
uint16_t crc_val;
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index caa6914a3e53..c1227aecd38f 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -166,6 +166,10 @@ enum {
IFLA_BRIDGE_MRP_RING_STATE,
IFLA_BRIDGE_MRP_RING_ROLE,
IFLA_BRIDGE_MRP_START_TEST,
+ IFLA_BRIDGE_MRP_INFO,
+ IFLA_BRIDGE_MRP_IN_ROLE,
+ IFLA_BRIDGE_MRP_IN_STATE,
+ IFLA_BRIDGE_MRP_START_IN_TEST,
__IFLA_BRIDGE_MRP_MAX,
};
@@ -228,6 +232,58 @@ enum {
#define IFLA_BRIDGE_MRP_START_TEST_MAX (__IFLA_BRIDGE_MRP_START_TEST_MAX - 1)
+enum {
+ IFLA_BRIDGE_MRP_INFO_UNSPEC,
+ IFLA_BRIDGE_MRP_INFO_RING_ID,
+ IFLA_BRIDGE_MRP_INFO_P_IFINDEX,
+ IFLA_BRIDGE_MRP_INFO_S_IFINDEX,
+ IFLA_BRIDGE_MRP_INFO_PRIO,
+ IFLA_BRIDGE_MRP_INFO_RING_STATE,
+ IFLA_BRIDGE_MRP_INFO_RING_ROLE,
+ IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL,
+ IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS,
+ IFLA_BRIDGE_MRP_INFO_TEST_MONITOR,
+ IFLA_BRIDGE_MRP_INFO_I_IFINDEX,
+ IFLA_BRIDGE_MRP_INFO_IN_STATE,
+ IFLA_BRIDGE_MRP_INFO_IN_ROLE,
+ IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL,
+ IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS,
+ __IFLA_BRIDGE_MRP_INFO_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_INFO_MAX (__IFLA_BRIDGE_MRP_INFO_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_IN_STATE_UNSPEC,
+ IFLA_BRIDGE_MRP_IN_STATE_IN_ID,
+ IFLA_BRIDGE_MRP_IN_STATE_STATE,
+ __IFLA_BRIDGE_MRP_IN_STATE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_IN_STATE_MAX (__IFLA_BRIDGE_MRP_IN_STATE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC,
+ IFLA_BRIDGE_MRP_IN_ROLE_RING_ID,
+ IFLA_BRIDGE_MRP_IN_ROLE_IN_ID,
+ IFLA_BRIDGE_MRP_IN_ROLE_ROLE,
+ IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX,
+ __IFLA_BRIDGE_MRP_IN_ROLE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_IN_ROLE_MAX (__IFLA_BRIDGE_MRP_IN_ROLE_MAX - 1)
+
+enum {
+ IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC,
+ IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID,
+ IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL,
+ IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS,
+ IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD,
+ __IFLA_BRIDGE_MRP_START_IN_TEST_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_START_IN_TEST_MAX (__IFLA_BRIDGE_MRP_START_IN_TEST_MAX - 1)
+
struct br_mrp_instance {
__u32 ring_id;
__u32 p_ifindex;
@@ -253,6 +309,25 @@ struct br_mrp_start_test {
__u32 monitor;
};
+struct br_mrp_in_state {
+ __u32 in_state;
+ __u16 in_id;
+};
+
+struct br_mrp_in_role {
+ __u32 ring_id;
+ __u32 in_role;
+ __u32 i_ifindex;
+ __u16 in_id;
+};
+
+struct br_mrp_start_in_test {
+ __u32 interval;
+ __u32 max_miss;
+ __u32 period;
+ __u16 in_id;
+};
+
struct bridge_stp_xstats {
__u64 transition_blk;
__u64 transition_fwd;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index a009365ad67b..7fba4de511de 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -170,12 +170,22 @@ enum {
IFLA_PROP_LIST,
IFLA_ALT_IFNAME, /* Alternative ifname */
IFLA_PERM_ADDRESS,
+ IFLA_PROTO_DOWN_REASON,
__IFLA_MAX
};
#define IFLA_MAX (__IFLA_MAX - 1)
+enum {
+ IFLA_PROTO_DOWN_REASON_UNSPEC,
+ IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */
+ IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */
+
+ __IFLA_PROTO_DOWN_REASON_CNT,
+ IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1
+};
+
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
@@ -344,6 +354,7 @@ enum {
IFLA_BRPORT_ISOLATED,
IFLA_BRPORT_BACKUP_PORT,
IFLA_BRPORT_MRP_RING_OPEN,
+ IFLA_BRPORT_MRP_IN_OPEN,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -906,7 +917,14 @@ enum {
#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
-/* HSR section */
+/* HSR/PRP section; both use the same interface */
+
+/* Different redundancy protocols for hsr device */
+enum {
+ HSR_PROTOCOL_HSR,
+ HSR_PROTOCOL_PRP,
+ HSR_PROTOCOL_MAX,
+};
enum {
IFLA_HSR_UNSPEC,
@@ -916,6 +934,9 @@ enum {
IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
IFLA_HSR_SEQ_NR,
IFLA_HSR_VERSION, /* HSR version */
+ IFLA_HSR_PROTOCOL, /* Indicate different protocol than
+ * HSR. For example PRP.
+ */
__IFLA_HSR_MAX,
};
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index be328c59389d..a78a8096f4ce 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
};
struct xdp_statistics {
- __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_dropped; /* Dropped for other reasons */
__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 rx_ring_full; /* Dropped due to rx ring being full */
+ __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+ __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
};
struct xdp_options {
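Illustration (not part of the patch): the new counters are fetched the same way as the existing ones, with getsockopt(XDP_STATISTICS) on an AF_XDP socket. Older kernels return a shorter struct, so checking the returned optlen, as sketched below, keeps a caller portable; the SOL_XDP fallback define is only needed where libc headers do not provide it.

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/if_xdp.h>

    #ifndef SOL_XDP
    #define SOL_XDP 283
    #endif

    /* Dump ring-level drop counters from an AF_XDP socket fd. */
    static void dump_xsk_stats(int xsk_fd)
    {
        struct xdp_statistics stats;
        socklen_t optlen = sizeof(stats);

        memset(&stats, 0, sizeof(stats));
        if (getsockopt(xsk_fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
            return;

        printf("rx_dropped=%llu rx_invalid=%llu\n",
               (unsigned long long)stats.rx_dropped,
               (unsigned long long)stats.rx_invalid_descs);
        /* The new fields are present only when the kernel filled the full struct. */
        if (optlen >= sizeof(stats))
            printf("rx_ring_full=%llu fill_empty=%llu tx_empty=%llu\n",
                   (unsigned long long)stats.rx_ring_full,
                   (unsigned long long)stats.rx_fill_ring_empty_descs,
                   (unsigned long long)stats.tx_ring_empty_descs);
    }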
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 8533bf07450f..3d0d8231dc19 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -123,6 +123,7 @@ struct in_addr {
#define IP_CHECKSUM 23
#define IP_BIND_ADDRESS_NO_PORT 24
#define IP_RECVFRAGSIZE 25
+#define IP_RECVERR_RFC4884 26
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 9f2273a08356..5ad396a57eb3 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -179,6 +179,7 @@ struct in6_flowlabel_req {
#define IPV6_LEAVE_ANYCAST 28
#define IPV6_MULTICAST_ALL 29
#define IPV6_ROUTER_ALERT_ISOLATE 30
+#define IPV6_RECVERR_RFC4884 31
/* IPV6_MTU_DISCOVER values */
#define IPV6_PMTUDISC_DONT 0
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index e6f183ee8417..5ba122c1949a 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -65,6 +65,7 @@ enum {
INET_DIAG_REQ_NONE,
INET_DIAG_REQ_BYTECODE,
INET_DIAG_REQ_SK_BPF_STORAGES,
+ INET_DIAG_REQ_PROTOCOL,
__INET_DIAG_REQ_MAX,
};
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 7843742b8b74..d65fde732518 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -31,7 +31,8 @@ struct io_uring_sqe {
union {
__kernel_rwf_t rw_flags;
__u32 fsync_flags;
- __u16 poll_events;
+ __u16 poll_events; /* compatibility */
+ __u32 poll32_events; /* word-reversed for BE */
__u32 sync_range_flags;
__u32 msg_flags;
__u32 timeout_flags;
@@ -249,6 +250,7 @@ struct io_uring_params {
#define IORING_FEAT_RW_CUR_POS (1U << 3)
#define IORING_FEAT_CUR_PERSONALITY (1U << 4)
#define IORING_FEAT_FAST_POLL (1U << 5)
+#define IORING_FEAT_POLL_32BITS (1U << 6)
/*
* io_uring_register(2) opcodes and arguments
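Illustration (not part of the patch): once IORING_FEAT_POLL_32BITS is advertised, an IORING_OP_POLL_ADD submission can carry the full 32-bit epoll mask in poll32_events; on big-endian hosts the two 16-bit halves are swapped so the low half still overlays the legacy poll_events field (the "word-reversed" note above). The helper below is a sketch; ring_features is the io_uring_params.features word returned by io_uring_setup.

    #include <string.h>
    #include <linux/io_uring.h>

    /* Fill an already-allocated SQE with a poll request for fd. */
    static void prep_poll(struct io_uring_sqe *sqe, int fd, __u32 mask,
                          __u32 ring_features)
    {
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_POLL_ADD;
        sqe->fd = fd;

        if (ring_features & IORING_FEAT_POLL_32BITS) {
    #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            mask = (mask << 16) | (mask >> 16);   /* word-reversed for BE */
    #endif
            sqe->poll32_events = mask;
        } else {
            sqe->poll_events = (__u16)mask;       /* legacy 16-bit mask */
        }
    }

Callers would pass e.g. POLLIN from <poll.h> as the mask.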
diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
index e907b7091a46..c2b2caf9ed41 100644
--- a/include/uapi/linux/iommu.h
+++ b/include/uapi/linux/iommu.h
@@ -81,7 +81,10 @@ struct iommu_fault_unrecoverable {
/**
* struct iommu_fault_page_request - Page Request data
* @flags: encodes whether the corresponding fields are valid and whether this
- * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values)
+ * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
+ * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
+ * must have the same PASID value as the page request. When it is clear,
+ * the page response should not have a PASID.
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
@@ -92,6 +95,7 @@ struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
+#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3)
__u32 flags;
__u32 pasid;
__u32 grpid;
diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h
index 0a52b7b093d3..ba078f8e9add 100644
--- a/include/uapi/linux/isst_if.h
+++ b/include/uapi/linux/isst_if.h
@@ -69,7 +69,7 @@ struct isst_if_cpu_maps {
* @logical_cpu: Logical CPU number to get target PCI device.
* @reg: PUNIT register offset
* @value: For write operation value to write and for
- * for read placeholder read value
+ * read placeholder read value
*
* Structure to specify read/write data to PUNIT registers.
*/
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index b6be62356d34..f738c3b53f4e 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -26,8 +26,12 @@
#include <drm/drm.h>
#include <linux/ioctl.h>
+/*
+ * - 1.1 - initial version
+ * - 1.3 - Add SMI events support
+ */
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 1
+#define KFD_IOCTL_MINOR_VERSION 3
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -442,6 +446,17 @@ struct kfd_ioctl_import_dmabuf_args {
__u32 dmabuf_fd; /* to KFD */
};
+/*
+ * KFD SMI(System Management Interface) events
+ */
+/* Event type (defined by bitmask) */
+#define KFD_SMI_EVENT_VMFAULT 0x0000000000000001
+
+struct kfd_ioctl_smi_events_args {
+ __u32 gpuid; /* to KFD */
+ __u32 anon_fd; /* from KFD */
+};
+
/* Register offset inside the remapped mmio page
*/
enum kfd_mmio_remap {
@@ -546,7 +561,10 @@ enum kfd_mmio_remap {
#define AMDKFD_IOC_ALLOC_QUEUE_GWS \
AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
+#define AMDKFD_IOC_SMI_EVENTS \
+ AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1F
+#define AMDKFD_COMMAND_END 0x20
#endif
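Illustration (not part of the patch): the SMI events ioctl hands back an anonymous fd that delivers the events. The sketch below opens /dev/kfd, requests the event fd for one GPU and waits for it to become readable; depending on the kernel, an event mask may additionally need to be written to anon_fd before events are delivered, so treat this as a skeleton only.

    #include <fcntl.h>
    #include <poll.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>

    /* Subscribe to SMI events for one GPU and block until the first arrives. */
    static int wait_for_smi_event(__u32 gpuid)
    {
        int kfd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
        struct kfd_ioctl_smi_events_args args = { .gpuid = gpuid };
        struct pollfd pfd;

        if (kfd < 0)
            return -1;
        if (ioctl(kfd, AMDKFD_IOC_SMI_EVENTS, &args) < 0) {
            close(kfd);
            return -1;
        }

        pfd.fd = args.anon_fd;
        pfd.events = POLLIN;
        poll(&pfd, 1, -1);          /* the event text is then read() from anon_fd */

        close(args.anon_fd);
        close(kfd);
        return 0;
    }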
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4fdf30316582..f6d86033c4fa 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -289,6 +289,7 @@ struct kvm_run {
/* KVM_EXIT_FAIL_ENTRY */
struct {
__u64 hardware_entry_failure_reason;
+ __u32 cpu;
} fail_entry;
/* KVM_EXIT_EXCEPTION */
struct {
@@ -1031,6 +1032,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_SECURE_GUEST 181
#define KVM_CAP_HALT_POLL 182
#define KVM_CAP_ASYNC_PF_INT 183
+#define KVM_CAP_LAST_CPU 184
+#define KVM_CAP_SMALLER_MAXPHYADDR 185
+#define KVM_CAP_S390_DIAG318 186
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/map_to_7segment.h b/include/uapi/linux/map_to_7segment.h
index f9ed18134b83..13a06e5e966e 100644
--- a/include/uapi/linux/map_to_7segment.h
+++ b/include/uapi/linux/map_to_7segment.h
@@ -24,7 +24,7 @@
* of (ASCII) characters to a 7-segments notation.
*
* The 7 segment's wikipedia notation below is used as standard.
- * See: http://en.wikipedia.org/wiki/Seven_segment_display
+ * See: https://en.wikipedia.org/wiki/Seven_segment_display
*
* Notation: +-a-+
* f b
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index 4bcb41c71b8c..3f302e2523b2 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -324,4 +324,30 @@ static inline __u16 mdio_phy_id_c45(int prtad, int devad)
return MDIO_PHY_ID_C45 | (prtad << 5) | devad;
}
+/* UsxgmiiChannelInfo[15:0] for USXGMII in-band auto-negotiation.*/
+#define MDIO_USXGMII_EEE_CLK_STP 0x0080 /* EEE clock stop supported */
+#define MDIO_USXGMII_EEE 0x0100 /* EEE supported */
+#define MDIO_USXGMII_SPD_MASK 0x0e00 /* USXGMII speed mask */
+#define MDIO_USXGMII_FULL_DUPLEX 0x1000 /* USXGMII full duplex */
+#define MDIO_USXGMII_DPX_SPD_MASK 0x1e00 /* USXGMII duplex and speed bits */
+#define MDIO_USXGMII_10 0x0000 /* 10Mbps */
+#define MDIO_USXGMII_10HALF 0x0000 /* 10Mbps half-duplex */
+#define MDIO_USXGMII_10FULL 0x1000 /* 10Mbps full-duplex */
+#define MDIO_USXGMII_100 0x0200 /* 100Mbps */
+#define MDIO_USXGMII_100HALF 0x0200 /* 100Mbps half-duplex */
+#define MDIO_USXGMII_100FULL 0x1200 /* 100Mbps full-duplex */
+#define MDIO_USXGMII_1000 0x0400 /* 1000Mbps */
+#define MDIO_USXGMII_1000HALF 0x0400 /* 1000Mbps half-duplex */
+#define MDIO_USXGMII_1000FULL 0x1400 /* 1000Mbps full-duplex */
+#define MDIO_USXGMII_10G 0x0600 /* 10Gbps */
+#define MDIO_USXGMII_10GHALF 0x0600 /* 10Gbps half-duplex */
+#define MDIO_USXGMII_10GFULL 0x1600 /* 10Gbps full-duplex */
+#define MDIO_USXGMII_2500 0x0800 /* 2500Mbps */
+#define MDIO_USXGMII_2500HALF 0x0800 /* 2500Mbps half-duplex */
+#define MDIO_USXGMII_2500FULL 0x1800 /* 2500Mbps full-duplex */
+#define MDIO_USXGMII_5000 0x0a00 /* 5000Mbps */
+#define MDIO_USXGMII_5000HALF 0x0a00 /* 5000Mbps half-duplex */
+#define MDIO_USXGMII_5000FULL 0x1a00 /* 5000Mbps full-duplex */
+#define MDIO_USXGMII_LINK 0x8000 /* PHY link with copper-side partner */
+
#endif /* _UAPI__LINUX_MDIO_H__ */
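Illustration (not part of the patch): the duplex and speed advertisement packs into a single 16-bit word, so decoding a received UsxgmiiChannelInfo word is a mask and switch over the definitions above.

    #include <stdbool.h>
    #include <linux/mdio.h>

    /* Decode speed (in Mbps) and duplex from a USXGMII channel-info word. */
    static void usxgmii_decode(__u16 lpa, int *speed, bool *full_duplex)
    {
        *full_duplex = lpa & MDIO_USXGMII_FULL_DUPLEX;

        switch (lpa & MDIO_USXGMII_SPD_MASK) {
        case MDIO_USXGMII_10:   *speed = 10;    break;
        case MDIO_USXGMII_100:  *speed = 100;   break;
        case MDIO_USXGMII_1000: *speed = 1000;  break;
        case MDIO_USXGMII_2500: *speed = 2500;  break;
        case MDIO_USXGMII_5000: *speed = 5000;  break;
        case MDIO_USXGMII_10G:  *speed = 10000; break;
        default:                *speed = 0;     break;
        }
    }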
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index 5f2c77082d9e..9762660df741 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -86,4 +86,21 @@ enum {
__MPTCP_PM_CMD_AFTER_LAST
};
+#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0)
+#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
+
+struct mptcp_info {
+ __u8 mptcpi_subflows;
+ __u8 mptcpi_add_addr_signal;
+ __u8 mptcpi_add_addr_accepted;
+ __u8 mptcpi_subflows_max;
+ __u8 mptcpi_add_addr_signal_max;
+ __u8 mptcpi_add_addr_accepted_max;
+ __u32 mptcpi_flags;
+ __u32 mptcpi_token;
+ __u64 mptcpi_write_seq;
+ __u64 mptcpi_snd_una;
+ __u64 mptcpi_rcv_nxt;
+};
+
#endif /* _UAPI_MPTCP_H */
diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h
index bee366540212..6aeb13ef0b1e 100644
--- a/include/uapi/linux/mrp_bridge.h
+++ b/include/uapi/linux/mrp_bridge.h
@@ -21,11 +21,22 @@ enum br_mrp_ring_role_type {
BR_MRP_RING_ROLE_MRA,
};
+enum br_mrp_in_role_type {
+ BR_MRP_IN_ROLE_DISABLED,
+ BR_MRP_IN_ROLE_MIC,
+ BR_MRP_IN_ROLE_MIM,
+};
+
enum br_mrp_ring_state_type {
BR_MRP_RING_STATE_OPEN,
BR_MRP_RING_STATE_CLOSED,
};
+enum br_mrp_in_state_type {
+ BR_MRP_IN_STATE_OPEN,
+ BR_MRP_IN_STATE_CLOSED,
+};
+
enum br_mrp_port_state_type {
BR_MRP_PORT_STATE_DISABLED,
BR_MRP_PORT_STATE_BLOCKED,
@@ -36,6 +47,7 @@ enum br_mrp_port_state_type {
enum br_mrp_port_role_type {
BR_MRP_PORT_ROLE_PRIMARY,
BR_MRP_PORT_ROLE_SECONDARY,
+ BR_MRP_PORT_ROLE_INTER,
};
enum br_mrp_tlv_header_type {
@@ -45,6 +57,10 @@ enum br_mrp_tlv_header_type {
BR_MRP_TLV_HEADER_RING_TOPO = 0x3,
BR_MRP_TLV_HEADER_RING_LINK_DOWN = 0x4,
BR_MRP_TLV_HEADER_RING_LINK_UP = 0x5,
+ BR_MRP_TLV_HEADER_IN_TEST = 0x6,
+ BR_MRP_TLV_HEADER_IN_TOPO = 0x7,
+ BR_MRP_TLV_HEADER_IN_LINK_DOWN = 0x8,
+ BR_MRP_TLV_HEADER_IN_LINK_UP = 0x9,
BR_MRP_TLV_HEADER_OPTION = 0x7f,
};
@@ -118,4 +134,26 @@ struct br_mrp_oui_hdr {
__u8 oui[MRP_OUI_LENGTH];
};
+struct br_mrp_in_test_hdr {
+ __be16 id;
+ __u8 sa[ETH_ALEN];
+ __be16 port_role;
+ __be16 state;
+ __be16 transitions;
+ __be32 timestamp;
+};
+
+struct br_mrp_in_topo_hdr {
+ __u8 sa[ETH_ALEN];
+ __be16 id;
+ __be16 interval;
+};
+
+struct br_mrp_in_link_hdr {
+ __u8 sa[ETH_ALEN];
+ __be16 port_role;
+ __be16 id;
+ __be16 interval;
+};
+
#endif
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 0e09dc5cec19..8cf1e4884fd5 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -245,6 +245,11 @@ struct nd_cmd_pkg {
#define NVDIMM_FAMILY_MSFT 3
#define NVDIMM_FAMILY_HYPERV 4
#define NVDIMM_FAMILY_PAPR 5
+#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_PAPR
+
+#define NVDIMM_BUS_FAMILY_NFIT 0
+#define NVDIMM_BUS_FAMILY_INTEL 1
+#define NVDIMM_BUS_FAMILY_MAX NVDIMM_BUS_FAMILY_INTEL
#define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\
struct nd_cmd_pkg)
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index eefcda8ca44e..dc8b72201f6c 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -30,6 +30,7 @@ enum {
NDA_SRC_VNI,
NDA_PROTOCOL, /* Originator of entry */
NDA_NH_ID,
+ NDA_FDB_EXT_ATTRS,
__NDA_MAX
};
@@ -172,4 +173,27 @@ enum {
};
#define NDTA_MAX (__NDTA_MAX - 1)
+ /* FDB activity notification bits used in NFEA_ACTIVITY_NOTIFY:
+ * - FDB_NOTIFY_BIT - notify on activity/expire for any entry
+ * - FDB_NOTIFY_INACTIVE_BIT - mark as inactive to avoid multiple notifications
+ */
+enum {
+ FDB_NOTIFY_BIT = (1 << 0),
+ FDB_NOTIFY_INACTIVE_BIT = (1 << 1)
+};
+
+/* embedded into NDA_FDB_EXT_ATTRS:
+ * [NDA_FDB_EXT_ATTRS] = {
+ * [NFEA_ACTIVITY_NOTIFY]
+ * ...
+ * }
+ */
+enum {
+ NFEA_UNSPEC,
+ NFEA_ACTIVITY_NOTIFY,
+ NFEA_DONT_REFRESH,
+ __NFEA_MAX
+};
+#define NFEA_MAX (__NFEA_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 4565456c0ef4..42f351c1f5c5 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -184,6 +184,15 @@ enum nft_table_attributes {
};
#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
+enum nft_chain_flags {
+ NFT_CHAIN_BASE = (1 << 0),
+ NFT_CHAIN_HW_OFFLOAD = (1 << 1),
+ NFT_CHAIN_BINDING = (1 << 2),
+};
+#define NFT_CHAIN_FLAGS (NFT_CHAIN_BASE | \
+ NFT_CHAIN_HW_OFFLOAD | \
+ NFT_CHAIN_BINDING)
+
/**
* enum nft_chain_attributes - nf_tables chain netlink attributes
*
@@ -196,6 +205,7 @@ enum nft_table_attributes {
* @NFTA_CHAIN_TYPE: type name of the string (NLA_NUL_STRING)
* @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
* @NFTA_CHAIN_FLAGS: chain flags
+ * @NFTA_CHAIN_ID: uniquely identifies a chain in a transaction (NLA_U32)
*/
enum nft_chain_attributes {
NFTA_CHAIN_UNSPEC,
@@ -209,6 +219,7 @@ enum nft_chain_attributes {
NFTA_CHAIN_COUNTERS,
NFTA_CHAIN_PAD,
NFTA_CHAIN_FLAGS,
+ NFTA_CHAIN_ID,
__NFTA_CHAIN_MAX
};
#define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1)
@@ -238,6 +249,7 @@ enum nft_rule_attributes {
NFTA_RULE_PAD,
NFTA_RULE_ID,
NFTA_RULE_POSITION_ID,
+ NFTA_RULE_CHAIN_ID,
__NFTA_RULE_MAX
};
#define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1)
@@ -468,11 +480,13 @@ enum nft_data_attributes {
*
* @NFTA_VERDICT_CODE: nf_tables verdict (NLA_U32: enum nft_verdicts)
* @NFTA_VERDICT_CHAIN: jump target chain name (NLA_STRING)
+ * @NFTA_VERDICT_CHAIN_ID: jump target chain ID (NLA_U32)
*/
enum nft_verdict_attributes {
NFTA_VERDICT_UNSPEC,
NFTA_VERDICT_CODE,
NFTA_VERDICT_CHAIN,
+ NFTA_VERDICT_CHAIN_ID,
__NFTA_VERDICT_MAX
};
#define NFTA_VERDICT_MAX (__NFTA_VERDICT_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_connmark.h b/include/uapi/linux/netfilter/xt_connmark.h
index 1aa5c955ee1e..f01c19b83a2b 100644
--- a/include/uapi/linux/netfilter/xt_connmark.h
+++ b/include/uapi/linux/netfilter/xt_connmark.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
-/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
+/* Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
* by Henrik Nordstrom <hno@marasystems.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 8572930cf5b0..bf197e99b98f 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -33,6 +33,9 @@
#define NFS4_ACCESS_EXTEND 0x0008
#define NFS4_ACCESS_DELETE 0x0010
#define NFS4_ACCESS_EXECUTE 0x0020
+#define NFS4_ACCESS_XAREAD 0x0040
+#define NFS4_ACCESS_XAWRITE 0x0080
+#define NFS4_ACCESS_XALIST 0x0100
#define NFS4_FH_PERSISTENT 0x0000
#define NFS4_FH_NOEXPIRE_WITH_OPEN 0x0001
diff --git a/include/uapi/linux/nfs_fs.h b/include/uapi/linux/nfs_fs.h
index 7bcc8cd6831d..3afe3767c55d 100644
--- a/include/uapi/linux/nfs_fs.h
+++ b/include/uapi/linux/nfs_fs.h
@@ -56,6 +56,7 @@
#define NFSDBG_PNFS 0x1000
#define NFSDBG_PNFS_LD 0x2000
#define NFSDBG_STATE 0x4000
+#define NFSDBG_XATTRCACHE 0x8000
#define NFSDBG_ALL 0xFFFF
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 4e6339ab1fce..631f3a997b3c 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -183,18 +183,27 @@
*
* By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK flag drivers
* can indicate they support offloading EAPOL handshakes for WPA/WPA2
- * preshared key authentication. In %NL80211_CMD_CONNECT the preshared
- * key should be specified using %NL80211_ATTR_PMK. Drivers supporting
- * this offload may reject the %NL80211_CMD_CONNECT when no preshared
- * key material is provided, for example when that driver does not
- * support setting the temporal keys through %CMD_NEW_KEY.
+ * preshared key authentication in station mode. In %NL80211_CMD_CONNECT
+ * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers
+ * supporting this offload may reject the %NL80211_CMD_CONNECT when no
+ * preshared key material is provided, for example when that driver does
+ * not support setting the temporal keys through %NL80211_CMD_NEW_KEY.
*
* Similarly @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X flag can be
* set by drivers indicating offload support of the PTK/GTK EAPOL
- * handshakes during 802.1X authentication. In order to use the offload
- * the %NL80211_CMD_CONNECT should have %NL80211_ATTR_WANT_1X_4WAY_HS
- * attribute flag. Drivers supporting this offload may reject the
- * %NL80211_CMD_CONNECT when the attribute flag is not present.
+ * handshakes during 802.1X authentication in station mode. In order to
+ * use the offload the %NL80211_CMD_CONNECT should have
+ * %NL80211_ATTR_WANT_1X_4WAY_HS attribute flag. Drivers supporting this
+ * offload may reject the %NL80211_CMD_CONNECT when the attribute flag is
+ * not present.
+ *
+ * By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK flag drivers
+ * can indicate they support offloading EAPOL handshakes for WPA/WPA2
+ * preshared key authentication in AP mode. In %NL80211_CMD_START_AP
+ * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers
+ * supporting this offload may reject the %NL80211_CMD_START_AP when no
+ * preshared key material is provided, for example when that driver does
+ * not support setting the temporal keys through %NL80211_CMD_NEW_KEY.
*
* For 802.1X the PMK or PMK-R0 are set by providing %NL80211_ATTR_PMK
* using %NL80211_CMD_SET_PMK. For offloaded FT support also
@@ -363,7 +372,7 @@
* @NL80211_CMD_SET_STATION: Set station attributes for station identified by
* %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_NEW_STATION: Add a station with given attributes to the
- * the interface identified by %NL80211_ATTR_IFINDEX.
+ * interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all stations, on the interface identified
* by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and
@@ -383,7 +392,7 @@
* @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by
* %NL80211_ATTR_MAC.
* @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the
- * the interface identified by %NL80211_ATTR_IFINDEX.
+ * interface identified by %NL80211_ATTR_IFINDEX.
* @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC
* or, if no MAC address given, all mesh paths, on the interface identified
* by %NL80211_ATTR_IFINDEX.
@@ -934,7 +943,7 @@
* @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules.
*
* @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the
- * the new channel information (Channel Switch Announcement - CSA)
+ * new channel information (Channel Switch Announcement - CSA)
* in the beacon for some time (as defined in the
* %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the
* new channel. Userspace provides the new channel information (using
@@ -1113,7 +1122,7 @@
* randomization may be enabled and configured by specifying the
* %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes.
* If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute.
- * A u64 cookie for further %NL80211_ATTR_COOKIE use is is returned in
+ * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in
* the netlink extended ack message.
*
* To cancel a measurement, close the socket that requested it.
@@ -1511,7 +1520,7 @@ enum nl80211_commands {
* rates as defined by IEEE 802.11 7.3.2.2 but without the length
* restriction (at most %NL80211_MAX_SUPP_RATES).
* @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station
- * to, or the AP interface the station was originally added to to.
+ * to, or the AP interface the station was originally added to.
* @NL80211_ATTR_STA_INFO: information about a station, part of station info
* given for %NL80211_CMD_GET_STATION, nested attribute containing
* info as possible, see &enum nl80211_sta_info.
@@ -2084,7 +2093,7 @@ enum nl80211_commands {
* @NL80211_ATTR_STA_SUPPORTED_CHANNELS: array of supported channels.
*
* @NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES: array of supported
- * supported operating classes.
+ * operating classes.
*
* @NL80211_ATTR_HANDLE_DFS: A flag indicating whether user space
* controls DFS operation in IBSS mode. If the flag is included in
@@ -2362,10 +2371,11 @@ enum nl80211_commands {
*
* @NL80211_ATTR_PMK: attribute for passing PMK key material. Used with
* %NL80211_CMD_SET_PMKSA for the PMKSA identified by %NL80211_ATTR_PMKID.
- * For %NL80211_CMD_CONNECT it is used to provide PSK for offloading 4-way
- * handshake for WPA/WPA2-PSK networks. For 802.1X authentication it is
- * used with %NL80211_CMD_SET_PMK. For offloaded FT support this attribute
- * specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME is included as well.
+ * For %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP it is used to provide
+ * PSK for offloading 4-way handshake for WPA/WPA2-PSK networks. For 802.1X
+ * authentication it is used with %NL80211_CMD_SET_PMK. For offloaded FT
+ * support this attribute specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME
+ * is included as well.
*
* @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to
* indicate that it supports multiple active scheduled scan requests.
@@ -2395,7 +2405,7 @@ enum nl80211_commands {
* nl80211_txq_stats)
* @NL80211_ATTR_TXQ_LIMIT: Total packet limit for the TXQ queues for this phy.
* The smaller of this and the memory limit is enforced.
- * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory memory limit (in bytes) for the
+ * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory limit (in bytes) for the
* TXQ queues for this phy. The smaller of this and the packet limit is
* enforced.
* @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes
@@ -3370,6 +3380,8 @@ enum nl80211_sta_bss_param {
* @NL80211_STA_INFO_AIRTIME_LINK_METRIC: airtime link metric for mesh station
* @NL80211_STA_INFO_ASSOC_AT_BOOTTIME: Timestamp (CLOCK_BOOTTIME, nanoseconds)
* of STA's association
+ * @NL80211_STA_INFO_CONNECTED_TO_AS: set to true if STA has a path to an
+ * authentication server (u8, 0 or 1)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3417,6 +3429,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_AIRTIME_WEIGHT,
NL80211_STA_INFO_AIRTIME_LINK_METRIC,
NL80211_STA_INFO_ASSOC_AT_BOOTTIME,
+ NL80211_STA_INFO_CONNECTED_TO_AS,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -4236,6 +4249,16 @@ enum nl80211_mesh_power_mode {
* field. If left unset then the mesh formation field will only
* advertise such if there is an active root mesh path.
*
+ * @NL80211_MESHCONF_NOLEARN: Try to avoid multi-hop path discovery (e.g.
+ * PREQ/PREP for HWMP) if the destination is a direct neighbor. Note that
+ * this might not be the optimal decision as a multi-hop route might be
+ * better. So if using this setting you will likely also want to disable
+ * dot11MeshForwarding and use another mesh routing protocol on top.
+ *
+ * @NL80211_MESHCONF_CONNECTED_TO_AS: If set to true then this mesh STA
+ * will advertise that it is connected to an authentication server
+ * in the mesh formation field.
+ *
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
enum nl80211_meshconf_params {
@@ -4269,6 +4292,8 @@ enum nl80211_meshconf_params {
NL80211_MESHCONF_AWAKE_WINDOW,
NL80211_MESHCONF_PLINK_TIMEOUT,
NL80211_MESHCONF_CONNECTED_TO_GATE,
+ NL80211_MESHCONF_NOLEARN,
+ NL80211_MESHCONF_CONNECTED_TO_AS,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -4437,6 +4462,11 @@ enum nl80211_key_mode {
* attribute must be provided as well
* @NL80211_CHAN_WIDTH_5: 5 MHz OFDM channel
* @NL80211_CHAN_WIDTH_10: 10 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_1: 1 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_2: 2 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_4: 4 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_8: 8 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_16: 16 MHz OFDM channel
*/
enum nl80211_chan_width {
NL80211_CHAN_WIDTH_20_NOHT,
@@ -4447,6 +4477,11 @@ enum nl80211_chan_width {
NL80211_CHAN_WIDTH_160,
NL80211_CHAN_WIDTH_5,
NL80211_CHAN_WIDTH_10,
+ NL80211_CHAN_WIDTH_1,
+ NL80211_CHAN_WIDTH_2,
+ NL80211_CHAN_WIDTH_4,
+ NL80211_CHAN_WIDTH_8,
+ NL80211_CHAN_WIDTH_16,
};
/**
@@ -4457,11 +4492,15 @@ enum nl80211_chan_width {
* @NL80211_BSS_CHAN_WIDTH_20: control channel is 20 MHz wide or compatible
* @NL80211_BSS_CHAN_WIDTH_10: control channel is 10 MHz wide
* @NL80211_BSS_CHAN_WIDTH_5: control channel is 5 MHz wide
+ * @NL80211_BSS_CHAN_WIDTH_1: control channel is 1 MHz wide
+ * @NL80211_BSS_CHAN_WIDTH_2: control channel is 2 MHz wide
*/
enum nl80211_bss_scan_width {
NL80211_BSS_CHAN_WIDTH_20,
NL80211_BSS_CHAN_WIDTH_10,
NL80211_BSS_CHAN_WIDTH_5,
+ NL80211_BSS_CHAN_WIDTH_1,
+ NL80211_BSS_CHAN_WIDTH_2,
};
/**
@@ -4740,6 +4779,7 @@ enum nl80211_txrate_gi {
* @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
* @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz)
* @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz)
+ * @NL80211_BAND_S1GHZ: around 900MHz, supported by S1G PHYs
* @NUM_NL80211_BANDS: number of bands, avoid using this in userspace
* since newer kernel versions may support more bands
*/
@@ -4748,6 +4788,7 @@ enum nl80211_band {
NL80211_BAND_5GHZ,
NL80211_BAND_60GHZ,
NL80211_BAND_6GHZ,
+ NL80211_BAND_S1GHZ,
NUM_NL80211_BANDS,
};
@@ -5636,7 +5677,7 @@ enum nl80211_feature_flags {
* enum nl80211_ext_feature_index - bit index of extended features.
* @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates.
* @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user can
- * can request to use RRM (see %NL80211_ATTR_USE_RRM) with
+ * request to use RRM (see %NL80211_ATTR_USE_RRM) with
* %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
* the ASSOC_REQ_USE_RRM flag in the association request even if
* NL80211_FEATURE_QUIET is not advertized.
@@ -5773,6 +5814,13 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS: The driver
* can report tx status for control port over nl80211 tx operations.
*
+ * @NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION: Driver supports Operating
+ * Channel Validation (OCV) when using driver's SME for RSNA handshakes.
+ *
+ * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK: Device wants to do 4-way
+ * handshake with PSK in AP mode (PSK is passed as part of the start AP
+ * command).
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5828,6 +5876,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT,
NL80211_EXT_FEATURE_SCAN_FREQ_KHZ,
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS,
+ NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -6045,7 +6095,7 @@ enum nl80211_dfs_state {
};
/**
- * enum enum nl80211_protocol_features - nl80211 protocol features
+ * enum nl80211_protocol_features - nl80211 protocol features
* @NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP: nl80211 supports splitting
* wiphy dumps (if requested by the application with the attribute
* %NL80211_ATTR_SPLIT_WIPHY_DUMP. Also supported is filtering the
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 9b14519e74d9..8300cc29dec8 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -86,6 +86,7 @@ enum ovs_datapath_attr {
OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */
OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */
OVS_DP_ATTR_PAD,
+ OVS_DP_ATTR_MASKS_CACHE_SIZE,
__OVS_DP_ATTR_MAX
};
@@ -102,8 +103,8 @@ struct ovs_dp_megaflow_stats {
__u64 n_mask_hit; /* Number of masks used for flow lookups. */
__u32 n_masks; /* Number of masks for the datapath. */
__u32 pad0; /* Pad for future expension. */
+ __u64 n_cache_hit; /* Number of cache matches for flow lookups. */
__u64 pad1; /* Pad for future expension. */
- __u64 pad2; /* Pad for future expension. */
};
struct ovs_vport_stats {
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 7b2d6fc9e6ed..077e7ee69e3d 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -383,7 +383,8 @@ struct perf_event_attr {
bpf_event : 1, /* include bpf events */
aux_output : 1, /* generate AUX records instead of events */
cgroup : 1, /* include cgroup events */
- __reserved_1 : 31;
+ text_poke : 1, /* include text poke events */
+ __reserved_1 : 30;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -532,9 +533,10 @@ struct perf_event_mmap_page {
cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
- cap_user_time : 1, /* The time_* fields are used */
+ cap_user_time : 1, /* The time_{shift,mult,offset} fields are used */
cap_user_time_zero : 1, /* The time_zero field is used */
- cap_____res : 59;
+ cap_user_time_short : 1, /* the time_{cycle,mask} fields are used */
+ cap_____res : 58;
};
};
@@ -593,13 +595,29 @@ struct perf_event_mmap_page {
* ((rem * time_mult) >> time_shift);
*/
__u64 time_zero;
+
__u32 size; /* Header size up to __reserved[] fields. */
+ __u32 __reserved_1;
+
+ /*
+ * If cap_usr_time_short, the hardware clock is less than 64bit wide
+ * and we must compute the 'cyc' value, as used by cap_usr_time, as:
+ *
+ * cyc = time_cycles + ((cyc - time_cycles) & time_mask)
+ *
+ * NOTE: this form is explicitly chosen such that cap_usr_time_short
+ * is a correction on top of cap_usr_time, and code that doesn't
+ * know about cap_usr_time_short still works under the assumption
+ * the counter doesn't wrap.
+ */
+ __u64 time_cycles;
+ __u64 time_mask;
/*
* Hole for extension of the self monitor capabilities
*/
- __u8 __reserved[118*8+4]; /* align to 1k. */
+ __u8 __reserved[116*8]; /* align to 1k. */
/*
* Control data for the mmap() data buffer.
@@ -1024,12 +1042,35 @@ enum perf_event_type {
*/
PERF_RECORD_CGROUP = 19,
+ /*
+ * Records changes to kernel text i.e. self-modified code. 'old_len' is
+ * the number of old bytes, 'new_len' is the number of new bytes. Either
+ * 'old_len' or 'new_len' may be zero to indicate, for example, the
+ * addition or removal of a trampoline. 'bytes' contains the old bytes
+ * followed immediately by the new bytes.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 addr;
+ * u16 old_len;
+ * u16 new_len;
+ * u8 bytes[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_TEXT_POKE = 20,
+
PERF_RECORD_MAX, /* non-ABI */
};
enum perf_record_ksymbol_type {
PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
PERF_RECORD_KSYMBOL_TYPE_BPF = 1,
+ /*
+ * Out of line code such as kprobe-replaced instructions or optimized
+ * kprobes or ftrace trampolines.
+ */
+ PERF_RECORD_KSYMBOL_TYPE_OOL = 2,
PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */
};
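Illustration (not part of the patch): reading the documented formulas back as code. The conversion from a cycle value to a perf timestamp uses time_zero/time_mult/time_shift as documented just above the hunk (valid when cap_user_time_zero is set), and the new cap_user_time_short correction widens a short counter around time_cycles/time_mask first. A real reader must also follow the pc->lock sequence count around these reads; that is omitted here.

    #include <linux/perf_event.h>

    /* Convert a raw cycle counter value to a perf timestamp (sketch, no seqlock). */
    static __u64 cyc_to_perf_time(const struct perf_event_mmap_page *pc, __u64 cyc)
    {
        __u64 quot, rem;

        if (pc->cap_user_time_short)
            /* Widen the short counter around the last published snapshot. */
            cyc = pc->time_cycles + ((cyc - pc->time_cycles) & pc->time_mask);

        quot = cyc >> pc->time_shift;
        rem  = cyc & (((__u64)1 << pc->time_shift) - 1);

        return pc->time_zero + quot * pc->time_mult +
               ((rem * pc->time_mult) >> pc->time_shift);
    }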
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 7576209d96f9..ee95f42fb0ec 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -578,6 +578,9 @@ enum {
TCA_FLOWER_KEY_MPLS_OPTS,
+ TCA_FLOWER_KEY_HASH, /* u32 */
+ TCA_FLOWER_KEY_HASH_MASK, /* u32 */
+
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index a95f3ae7ab37..9e7c2c607845 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -257,6 +257,8 @@ enum {
TCA_RED_STAB,
TCA_RED_MAX_P,
TCA_RED_FLAGS, /* bitfield32 */
+ TCA_RED_EARLY_DROP_BLOCK, /* u32 */
+ TCA_RED_MARK_BLOCK, /* u32 */
__TCA_RED_MAX,
};
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index ff070aa64278..1d108d597f66 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -53,12 +53,16 @@
/*
* Bits of the ptp_perout_request.flags field:
*/
-#define PTP_PEROUT_ONE_SHOT (1<<0)
+#define PTP_PEROUT_ONE_SHOT (1<<0)
+#define PTP_PEROUT_DUTY_CYCLE (1<<1)
+#define PTP_PEROUT_PHASE (1<<2)
/*
* flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl.
*/
-#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT)
+#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT | \
+ PTP_PEROUT_DUTY_CYCLE | \
+ PTP_PEROUT_PHASE)
/*
* No flags are valid for the original PTP_PEROUT_REQUEST ioctl
@@ -101,11 +105,33 @@ struct ptp_extts_request {
};
struct ptp_perout_request {
- struct ptp_clock_time start; /* Absolute start time. */
+ union {
+ /*
+ * Absolute start time.
+ * Valid only if (flags & PTP_PEROUT_PHASE) is unset.
+ */
+ struct ptp_clock_time start;
+ /*
+ * Phase offset. The signal should start toggling at an
+ * unspecified integer multiple of the period, plus this value.
+ * The start time should be "as soon as possible".
+ * Valid only if (flags & PTP_PEROUT_PHASE) is set.
+ */
+ struct ptp_clock_time phase;
+ };
struct ptp_clock_time period; /* Desired period, zero means disable. */
unsigned int index; /* Which channel to configure. */
unsigned int flags;
- unsigned int rsv[4]; /* Reserved for future use. */
+ union {
+ /*
+ * The "on" time of the signal.
+ * Must be lower than the period.
+ * Valid only if (flags & PTP_PEROUT_DUTY_CYCLE) is set.
+ */
+ struct ptp_clock_time on;
+ /* Reserved for future use. */
+ unsigned int rsv[4];
+ };
};
#define PTP_MAX_SAMPLES 25 /* Maximum allowed offset measurement samples. */
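Illustration (not part of the patch): requesting a periodic output that uses both new flags, for example a 1 Hz signal with a 100 ms high time and zero phase offset. The device path and channel index are placeholders, and drivers that do not implement the new flags will reject the request.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    /* Start a 1 Hz output on channel 0 with a 100 ms "on" time. */
    static int start_pps(const char *dev)        /* e.g. "/dev/ptp0" */
    {
        int fd = open(dev, O_RDWR);
        struct ptp_perout_request req = {
            .flags  = PTP_PEROUT_PHASE | PTP_PEROUT_DUTY_CYCLE,
            .phase  = { .sec = 0, .nsec = 0 },
            .period = { .sec = 1, .nsec = 0 },
            .on     = { .sec = 0, .nsec = 100000000 },
            .index  = 0,
        };

        if (fd < 0)
            return -1;
        return ioctl(fd, PTP_PEROUT_REQUEST2, &req);
    }

Only PTP_PEROUT_REQUEST2 accepts the new flags; the original PTP_PEROUT_REQUEST still rejects any flag bits.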
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 1f2d8c81f0e0..e5a98a16f9b0 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -123,7 +123,7 @@ typedef struct mdp_device_descriptor_s {
/*
* Notes:
- * - if an array is being reshaped (restriped) in order to change the
+ * - if an array is being reshaped (restriped) in order to change
* the number of active devices in the array, 'raid_disks' will be
* the larger of the old and new numbers. 'delta_disks' will
* be the "new - old". So if +ve, raid_disks is the new value, and
diff --git a/include/uapi/linux/raw.h b/include/uapi/linux/raw.h
index dc96dda479d6..47874919d0b9 100644
--- a/include/uapi/linux/raw.h
+++ b/include/uapi/linux/raw.h
@@ -14,6 +14,4 @@ struct raw_config_request
__u64 block_minor;
};
-#define MAX_RAW_MINORS CONFIG_MAX_RAW_DEVS
-
#endif /* __LINUX_RAW_H */
diff --git a/include/uapi/linux/remoteproc_cdev.h b/include/uapi/linux/remoteproc_cdev.h
new file mode 100644
index 000000000000..c43768e4b0dc
--- /dev/null
+++ b/include/uapi/linux/remoteproc_cdev.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * IOCTLs for Remoteproc's character device interface.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_REMOTEPROC_CDEV_H_
+#define _UAPI_REMOTEPROC_CDEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define RPROC_MAGIC 0xB7
+
+/*
+ * The RPROC_SET_SHUTDOWN_ON_RELEASE ioctl enables or disables automatic shutdown of a remote
+ * processor when the controlling userspace closes the char device interface.
+ *
+ * input parameter: integer
+ * 0 : disable automatic shutdown
+ * other : enable automatic shutdown
+ */
+#define RPROC_SET_SHUTDOWN_ON_RELEASE _IOW(RPROC_MAGIC, 1, __s32)
+
+/*
+ * The RPROC_GET_SHUTDOWN_ON_RELEASE ioctl gets information about whether the automatic shutdown of
+ * a remote processor is enabled or disabled when the controlling userspace closes the char device
+ * interface.
+ *
+ * output parameter: integer
+ * 0 : automatic shutdown disabled
+ * other : automatic shutdown enabled
+ */
+#define RPROC_GET_SHUTDOWN_ON_RELEASE _IOR(RPROC_MAGIC, 2, __s32)
+
+#endif
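Illustration (not part of the patch): a short usage sketch for the new character-device ioctls. The device node name is an assumption based on the remoteproc cdev naming scheme; the ioctl argument is passed by pointer as a __s32.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/remoteproc_cdev.h>

    /* Keep the remote processor running after this process closes the cdev. */
    static int keep_running_on_close(const char *dev)    /* e.g. "/dev/remoteproc0" */
    {
        int fd = open(dev, O_RDWR);
        __s32 val = 0;                /* 0: disable automatic shutdown */

        if (fd < 0)
            return -1;
        return ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &val);
    }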
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 073e71ef6bdd..9b814c92de12 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -257,12 +257,12 @@ enum {
/* rtm_protocol */
-#define RTPROT_UNSPEC 0
-#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects;
- not used by current IPv4 */
-#define RTPROT_KERNEL 2 /* Route installed by kernel */
-#define RTPROT_BOOT 3 /* Route installed during boot */
-#define RTPROT_STATIC 4 /* Route installed by administrator */
+#define RTPROT_UNSPEC 0
+#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects;
+ not used by current IPv4 */
+#define RTPROT_KERNEL 2 /* Route installed by kernel */
+#define RTPROT_BOOT 3 /* Route installed during boot */
+#define RTPROT_STATIC 4 /* Route installed by administrator */
/* Values of protocol >= RTPROT_STATIC are not interpreted by kernel;
they are just passed from user and back as is.
@@ -271,22 +271,23 @@ enum {
avoid conflicts.
*/
-#define RTPROT_GATED 8 /* Apparently, GateD */
-#define RTPROT_RA 9 /* RDISC/ND router advertisements */
-#define RTPROT_MRT 10 /* Merit MRT */
-#define RTPROT_ZEBRA 11 /* Zebra */
-#define RTPROT_BIRD 12 /* BIRD */
-#define RTPROT_DNROUTED 13 /* DECnet routing daemon */
-#define RTPROT_XORP 14 /* XORP */
-#define RTPROT_NTK 15 /* Netsukuku */
-#define RTPROT_DHCP 16 /* DHCP client */
-#define RTPROT_MROUTED 17 /* Multicast daemon */
-#define RTPROT_BABEL 42 /* Babel daemon */
-#define RTPROT_BGP 186 /* BGP Routes */
-#define RTPROT_ISIS 187 /* ISIS Routes */
-#define RTPROT_OSPF 188 /* OSPF Routes */
-#define RTPROT_RIP 189 /* RIP Routes */
-#define RTPROT_EIGRP 192 /* EIGRP Routes */
+#define RTPROT_GATED 8 /* Apparently, GateD */
+#define RTPROT_RA 9 /* RDISC/ND router advertisements */
+#define RTPROT_MRT 10 /* Merit MRT */
+#define RTPROT_ZEBRA 11 /* Zebra */
+#define RTPROT_BIRD 12 /* BIRD */
+#define RTPROT_DNROUTED 13 /* DECnet routing daemon */
+#define RTPROT_XORP 14 /* XORP */
+#define RTPROT_NTK 15 /* Netsukuku */
+#define RTPROT_DHCP 16 /* DHCP client */
+#define RTPROT_MROUTED 17 /* Multicast daemon */
+#define RTPROT_KEEPALIVED 18 /* Keepalived daemon */
+#define RTPROT_BABEL 42 /* Babel daemon */
+#define RTPROT_BGP 186 /* BGP Routes */
+#define RTPROT_ISIS 187 /* ISIS Routes */
+#define RTPROT_OSPF 188 /* OSPF Routes */
+#define RTPROT_RIP 189 /* RIP Routes */
+#define RTPROT_EIGRP 192 /* EIGRP Routes */
/* rtm_scope
@@ -777,6 +778,7 @@ enum {
#define RTEXT_FILTER_BRVLAN (1 << 1)
#define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2)
#define RTEXT_FILTER_SKIP_STATS (1 << 3)
+#define RTEXT_FILTER_MRP (1 << 4)
/* End of information exported to user level */
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index c1735455bc53..6ba18b82a02e 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -113,6 +113,25 @@ struct seccomp_notif_resp {
__u32 flags;
};
+/* valid flags for seccomp_notif_addfd */
+#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
+
+/**
+ * struct seccomp_notif_addfd
+ * @id: The ID of the seccomp notification
+ * @flags: SECCOMP_ADDFD_FLAG_*
+ * @srcfd: The local fd number
+ * @newfd: Optional remote FD number if SETFD option is set, otherwise 0.
+ * @newfd_flags: The O_* flags the remote FD should have applied
+ */
+struct seccomp_notif_addfd {
+ __u64 id;
+ __u32 flags;
+ __u32 srcfd;
+ __u32 newfd;
+ __u32 newfd_flags;
+};
+
#define SECCOMP_IOC_MAGIC '!'
#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr)
#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type)
@@ -123,5 +142,9 @@ struct seccomp_notif_resp {
#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
struct seccomp_notif_resp)
-#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64)
+#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
+/* On success, the return value is the remote process's added fd number */
+#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \
+ struct seccomp_notif_addfd)
+
#endif /* _UAPI_LINUX_SECCOMP_H */
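Illustration (not part of the patch): a supervisor handling seccomp user notifications can now install one of its own descriptors into the target while the intercepted syscall is still blocked. In the sketch, notify_fd is the listener obtained with SECCOMP_FILTER_FLAG_NEW_LISTENER and req is the notification currently being serviced.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/seccomp.h>

    /* Give the blocked task a copy of src_fd while answering its syscall. */
    static int install_fd(int notify_fd, const struct seccomp_notif *req, int src_fd)
    {
        struct seccomp_notif_addfd addfd = {
            .id = req->id,          /* ties the fd to this in-flight notification */
            .flags = 0,             /* kernel picks the lowest free fd number */
            .srcfd = src_fd,
            .newfd = 0,
            .newfd_flags = O_CLOEXEC,
        };

        /* Returns the fd number as seen inside the target on success. */
        return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
    }

The supervisor typically places the returned number in seccomp_notif_resp.val when emulating a syscall that is expected to return a file descriptor.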
diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h
index 09fb608a35ec..eb815e0d0ac3 100644
--- a/include/uapi/linux/seg6_iptunnel.h
+++ b/include/uapi/linux/seg6_iptunnel.h
@@ -37,25 +37,4 @@ enum {
SEG6_IPTUN_MODE_L2ENCAP,
};
-#ifdef __KERNEL__
-
-static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
-{
- int head = 0;
-
- switch (tuninfo->mode) {
- case SEG6_IPTUN_MODE_INLINE:
- break;
- case SEG6_IPTUN_MODE_ENCAP:
- head = sizeof(struct ipv6hdr);
- break;
- case SEG6_IPTUN_MODE_L2ENCAP:
- return 0;
- }
-
- return ((tuninfo->srh->hdrlen + 1) << 3) + head;
-}
-
-#endif
-
#endif
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 8ec3dd742ea4..851b982f8c4b 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -26,20 +26,6 @@
/*
* The type definitions. These are from Ted Ts'o's serial.h
*/
-#define PORT_UNKNOWN 0
-#define PORT_8250 1
-#define PORT_16450 2
-#define PORT_16550 3
-#define PORT_16550A 4
-#define PORT_CIRRUS 5
-#define PORT_16650 6
-#define PORT_16650V2 7
-#define PORT_16750 8
-#define PORT_STARTECH 9
-#define PORT_16C950 10
-#define PORT_16654 11
-#define PORT_16850 12
-#define PORT_RSA 13
#define PORT_NS16550A 14
#define PORT_XSCALE 15
#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 7d91f4debc48..cee9f8e6fce3 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -287,6 +287,7 @@ enum
LINUX_MIB_TCPFASTOPENPASSIVEALTKEY, /* TCPFastOpenPassiveAltKey */
LINUX_MIB_TCPTIMEOUTREHASH, /* TCPTimeoutRehash */
LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */
+ LINUX_MIB_TCPDSACKRECVSEGS, /* TCPDSACKRecvSegs */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index b7b57967d90f..95b1597f16ae 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -45,6 +45,7 @@
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
+#define TCMU_MAILBOX_FLAG_CAP_TMR (1 << 2) /* TMR notifications */
struct tcmu_mailbox {
__u16 version;
@@ -62,6 +63,7 @@ struct tcmu_mailbox {
enum tcmu_opcode {
TCMU_OP_PAD = 0,
TCMU_OP_CMD,
+ TCMU_OP_TMR,
};
/*
@@ -128,6 +130,29 @@ struct tcmu_cmd_entry {
} __packed;
+struct tcmu_tmr_entry {
+ struct tcmu_cmd_entry_hdr hdr;
+
+#define TCMU_TMR_UNKNOWN 0
+#define TCMU_TMR_ABORT_TASK 1
+#define TCMU_TMR_ABORT_TASK_SET 2
+#define TCMU_TMR_CLEAR_ACA 3
+#define TCMU_TMR_CLEAR_TASK_SET 4
+#define TCMU_TMR_LUN_RESET 5
+#define TCMU_TMR_TARGET_WARM_RESET 6
+#define TCMU_TMR_TARGET_COLD_RESET 7
+/* Pseudo reset due to received PR OUT */
+#define TCMU_TMR_LUN_RESET_PRO 128
+ __u8 tmr_type;
+
+ __u8 __pad1;
+ __u16 __pad2;
+ __u32 cmd_cnt;
+ __u64 __pad3;
+ __u64 __pad4;
+ __u16 cmd_ids[0];
+} __packed;
+
#define TCMU_OP_ALIGN_SIZE sizeof(__u64)
enum tcmu_genl_cmd {
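
A short sketch of how a TCMU userspace handler might consume one of the new TCMU_OP_TMR ring entries. Locating the entry in the mmap'ed command ring is omitted, and abort_cmd() is an illustrative callback, not part of the UAPI.

#include <stdio.h>
#include <linux/target_core_user.h>

static void handle_tmr_entry(struct tcmu_tmr_entry *ent,
			     void (*abort_cmd)(__u16 cmd_id))
{
	__u32 i;

	switch (ent->tmr_type) {
	case TCMU_TMR_ABORT_TASK:
	case TCMU_TMR_ABORT_TASK_SET:
		/* cmd_ids[] lists the affected, still-queued command ids */
		for (i = 0; i < ent->cmd_cnt; i++)
			abort_cmd(ent->cmd_ids[i]);
		break;
	default:
		printf("TMR type %u affects %u queued command(s)\n",
		       ent->tmr_type, ent->cmd_cnt);
		break;
	}
}
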
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index f2acb2566333..cfcb10b75483 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -313,6 +313,7 @@ enum {
TCP_NLA_SRTT, /* smoothed RTT in usecs */
TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */
TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */
+ TCP_NLA_EDT, /* Earliest departure time (CLOCK_MONOTONIC) */
};
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
index 96218378dda8..c105054cbb57 100644
--- a/include/uapi/linux/thermal.h
+++ b/include/uapi/linux/thermal.h
@@ -4,31 +4,86 @@
#define THERMAL_NAME_LENGTH 20
-/* Adding event notification support elements */
-#define THERMAL_GENL_FAMILY_NAME "thermal_event"
-#define THERMAL_GENL_VERSION 0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
-
-/* Events supported by Thermal Netlink */
-enum events {
- THERMAL_AUX0,
- THERMAL_AUX1,
- THERMAL_CRITICAL,
- THERMAL_DEV_FAULT,
+enum thermal_device_mode {
+ THERMAL_DEVICE_DISABLED = 0,
+ THERMAL_DEVICE_ENABLED,
+};
+
+enum thermal_trip_type {
+ THERMAL_TRIP_ACTIVE = 0,
+ THERMAL_TRIP_PASSIVE,
+ THERMAL_TRIP_HOT,
+ THERMAL_TRIP_CRITICAL,
};
-/* attributes of thermal_genl_family */
-enum {
+/* Adding event notification support elements */
+#define THERMAL_GENL_FAMILY_NAME "thermal"
+#define THERMAL_GENL_VERSION 0x01
+#define THERMAL_GENL_SAMPLING_GROUP_NAME "sampling"
+#define THERMAL_GENL_EVENT_GROUP_NAME "event"
+
+/* Attributes of thermal_genl_family */
+enum thermal_genl_attr {
THERMAL_GENL_ATTR_UNSPEC,
- THERMAL_GENL_ATTR_EVENT,
+ THERMAL_GENL_ATTR_TZ,
+ THERMAL_GENL_ATTR_TZ_ID,
+ THERMAL_GENL_ATTR_TZ_TEMP,
+ THERMAL_GENL_ATTR_TZ_TRIP,
+ THERMAL_GENL_ATTR_TZ_TRIP_ID,
+ THERMAL_GENL_ATTR_TZ_TRIP_TYPE,
+ THERMAL_GENL_ATTR_TZ_TRIP_TEMP,
+ THERMAL_GENL_ATTR_TZ_TRIP_HYST,
+ THERMAL_GENL_ATTR_TZ_MODE,
+ THERMAL_GENL_ATTR_TZ_NAME,
+ THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT,
+ THERMAL_GENL_ATTR_TZ_GOV,
+ THERMAL_GENL_ATTR_TZ_GOV_NAME,
+ THERMAL_GENL_ATTR_CDEV,
+ THERMAL_GENL_ATTR_CDEV_ID,
+ THERMAL_GENL_ATTR_CDEV_CUR_STATE,
+ THERMAL_GENL_ATTR_CDEV_MAX_STATE,
+ THERMAL_GENL_ATTR_CDEV_NAME,
+ THERMAL_GENL_ATTR_GOV_NAME,
+
__THERMAL_GENL_ATTR_MAX,
};
#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
-/* commands supported by the thermal_genl_family */
-enum {
+enum thermal_genl_sampling {
+ THERMAL_GENL_SAMPLING_TEMP,
+ __THERMAL_GENL_SAMPLING_MAX,
+};
+#define THERMAL_GENL_SAMPLING_MAX (__THERMAL_GENL_SAMPLING_MAX - 1)
+
+/* Events of thermal_genl_family */
+enum thermal_genl_event {
+ THERMAL_GENL_EVENT_UNSPEC,
+ THERMAL_GENL_EVENT_TZ_CREATE, /* Thermal zone creation */
+ THERMAL_GENL_EVENT_TZ_DELETE, /* Thermal zone deletion */
+	THERMAL_GENL_EVENT_TZ_DISABLE,		/* Thermal zone disabled */
+ THERMAL_GENL_EVENT_TZ_ENABLE, /* Thermal zone enabled */
+ THERMAL_GENL_EVENT_TZ_TRIP_UP, /* Trip point crossed the way up */
+ THERMAL_GENL_EVENT_TZ_TRIP_DOWN, /* Trip point crossed the way down */
+ THERMAL_GENL_EVENT_TZ_TRIP_CHANGE, /* Trip point changed */
+ THERMAL_GENL_EVENT_TZ_TRIP_ADD, /* Trip point added */
+ THERMAL_GENL_EVENT_TZ_TRIP_DELETE, /* Trip point deleted */
+ THERMAL_GENL_EVENT_CDEV_ADD, /* Cdev bound to the thermal zone */
+ THERMAL_GENL_EVENT_CDEV_DELETE, /* Cdev unbound */
+ THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, /* Cdev state updated */
+ THERMAL_GENL_EVENT_TZ_GOV_CHANGE, /* Governor policy changed */
+ __THERMAL_GENL_EVENT_MAX,
+};
+#define THERMAL_GENL_EVENT_MAX (__THERMAL_GENL_EVENT_MAX - 1)
+
+/* Commands supported by the thermal_genl_family */
+enum thermal_genl_cmd {
THERMAL_GENL_CMD_UNSPEC,
- THERMAL_GENL_CMD_EVENT,
+ THERMAL_GENL_CMD_TZ_GET_ID, /* List of thermal zones id */
+ THERMAL_GENL_CMD_TZ_GET_TRIP, /* List of thermal trips */
+ THERMAL_GENL_CMD_TZ_GET_TEMP, /* Get the thermal zone temperature */
+ THERMAL_GENL_CMD_TZ_GET_GOV, /* Get the thermal zone governor */
+ THERMAL_GENL_CMD_TZ_GET_MODE, /* Get the thermal zone mode */
+ THERMAL_GENL_CMD_CDEV_GET, /* List of cdev id */
__THERMAL_GENL_CMD_MAX,
};
#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
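
A minimal sketch of how a userspace monitor might subscribe to the reworked family, assuming libnl-3 (nl_socket_alloc/genl_connect/genl_ctrl_resolve_grp) is available; the family and group names come straight from the defines above.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/thermal.h>

static struct nl_sock *thermal_event_socket(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	if (!sk)
		return NULL;
	if (genl_connect(sk))
		goto err;

	/* "thermal" / "event" are THERMAL_GENL_FAMILY_NAME and
	 * THERMAL_GENL_EVENT_GROUP_NAME from the header above. */
	grp = genl_ctrl_resolve_grp(sk, THERMAL_GENL_FAMILY_NAME,
				    THERMAL_GENL_EVENT_GROUP_NAME);
	if (grp < 0 || nl_socket_add_membership(sk, grp))
		goto err;

	/* THERMAL_GENL_EVENT_* notifications can now be read with
	 * nl_recvmsgs_default() and parsed against enum thermal_genl_attr. */
	return sk;
err:
	nl_socket_free(sk);
	return NULL;
}
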
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index 2fce8b6876e9..f6d2f83cbe29 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -7,7 +7,7 @@
#ifndef __ASSEMBLY__
#ifndef __KERNEL__
#ifndef __EXPORTED_HEADERS__
-#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders"
+#warning "Attempt to use kernel headers from user space, see https://kernelnewbies.org/KernelHeaders"
#endif /* __EXPORTED_HEADERS__ */
#endif
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 2b623f36af6b..0f865ae4ba89 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -138,11 +138,11 @@
* Test Mode Selectors
* See USB 2.0 spec Table 9-7
*/
-#define TEST_J 1
-#define TEST_K 2
-#define TEST_SE0_NAK 3
-#define TEST_PACKET 4
-#define TEST_FORCE_EN 5
+#define USB_TEST_J 1
+#define USB_TEST_K 2
+#define USB_TEST_SE0_NAK 3
+#define USB_TEST_PACKET 4
+#define USB_TEST_FORCE_ENABLE 5
/* Status Type */
#define USB_STATUS_TYPE_STANDARD 0
@@ -326,6 +326,10 @@ struct usb_device_descriptor {
#define USB_CLASS_CONTENT_SEC 0x0d /* content security */
#define USB_CLASS_VIDEO 0x0e
#define USB_CLASS_WIRELESS_CONTROLLER 0xe0
+#define USB_CLASS_PERSONAL_HEALTHCARE 0x0f
+#define USB_CLASS_AUDIO_VIDEO 0x10
+#define USB_CLASS_BILLBOARD 0x11
+#define USB_CLASS_USB_TYPE_C_BRIDGE 0x12
#define USB_CLASS_MISC 0xef
#define USB_CLASS_APP_SPEC 0xfe
#define USB_CLASS_VENDOR_SPEC 0xff
@@ -364,6 +368,9 @@ struct usb_config_descriptor {
/*-------------------------------------------------------------------------*/
+/* USB String descriptors can contain at most 126 characters. */
+#define USB_MAX_STRING_LEN 126
+
/* USB_DT_STRING: String descriptor */
struct usb_string_descriptor {
__u8 bLength;
@@ -1222,7 +1229,7 @@ struct usb_set_sel_req {
* As per USB compliance update, a device that is actively drawing
* more than 100mA from USB must report itself as bus-powered in
* the GetStatus(DEVICE) call.
- * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
+ * https://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
*/
#define USB_SELF_POWER_VBUS_MAX_DRAW 100
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index c27289fd619a..f8a8d6b3c521 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -63,6 +63,7 @@ enum vmmdev_request_type {
VMMDEVREQ_SET_GUEST_CAPABILITIES = 56,
VMMDEVREQ_VIDEMODE_SUPPORTED2 = 57, /* since version 3.2.0 */
VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX = 80, /* since version 4.2.4 */
+ VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI = 81,
VMMDEVREQ_HGCM_CONNECT = 60,
VMMDEVREQ_HGCM_DISCONNECT = 61,
VMMDEVREQ_HGCM_CALL32 = 62,
@@ -92,6 +93,8 @@ enum vmmdev_request_type {
VMMDEVREQ_WRITE_COREDUMP = 218,
VMMDEVREQ_GUEST_HEARTBEAT = 219,
VMMDEVREQ_HEARTBEAT_CONFIGURE = 220,
+ VMMDEVREQ_NT_BUG_CHECK = 221,
+ VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS = 222,
/* Ensure the enum is a 32 bit data-type */
VMMDEVREQ_SIZEHACK = 0x7fffffff
};
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
index f79d7abe27db..15125f6ec60d 100644
--- a/include/uapi/linux/vboxguest.h
+++ b/include/uapi/linux/vboxguest.h
@@ -257,6 +257,30 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8);
_IOWR('V', 12, struct vbg_ioctl_change_filter)
+/** VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES data structure. */
+struct vbg_ioctl_acquire_guest_caps {
+ /** The header. */
+ struct vbg_ioctl_hdr hdr;
+ union {
+ struct {
+ /** Flags (VBGL_IOC_AGC_FLAGS_XXX). */
+ __u32 flags;
+ /** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */
+ __u32 or_mask;
+ /** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */
+ __u32 not_mask;
+ } in;
+ } u;
+};
+VMMDEV_ASSERT_SIZE(vbg_ioctl_acquire_guest_caps, 24 + 12);
+
+#define VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE 0x00000001
+#define VBGL_IOC_AGC_FLAGS_VALID_MASK 0x00000001
+
+#define VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES \
+ _IOWR('V', 13, struct vbg_ioctl_acquire_guest_caps)
+
+
/** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */
struct vbg_ioctl_set_guest_caps {
/** The header. */

diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 0c2349612e77..75232185324a 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -91,6 +91,8 @@
/* Use message type V2 */
#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accept batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index 669457ce5c48..9a269a88a6ff 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -60,6 +60,17 @@ struct vhost_iotlb_msg {
#define VHOST_IOTLB_UPDATE 2
#define VHOST_IOTLB_INVALIDATE 3
#define VHOST_IOTLB_ACCESS_FAIL 4
+/*
+ * VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying
+ * multiple mappings in one go: beginning with
+ * VHOST_IOTLB_BATCH_BEGIN, followed by any number of
+ * VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END.
+ * When one of these two values is used as the message type, the rest
+ * of the fields in the message are ignored. There's no guarantee that
+ * these changes take place automatically in the device.
+ */
+#define VHOST_IOTLB_BATCH_BEGIN 5
+#define VHOST_IOTLB_BATCH_END 6
__u8 type;
};
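
A sketch of the message sequence the comment above describes: one BATCH_BEGIN, any number of UPDATEs, one BATCH_END. It assumes message type V2 is in use, the backend advertised VHOST_BACKEND_F_IOTLB_BATCH, and IOTLB messages are delivered by write() on the vhost device fd; map_two_ranges() is purely illustrative.

#include <unistd.h>
#include <linux/vhost.h>
#include <linux/vhost_types.h>

static int send_iotlb(int vhost_fd, __u8 type, __u64 iova, __u64 size,
		      __u64 uaddr, __u8 perm)
{
	struct vhost_msg_v2 msg = {
		.type = VHOST_IOTLB_MSG_V2,
		.iotlb = {
			.iova  = iova,
			.size  = size,
			.uaddr = uaddr,
			.perm  = perm,
			.type  = type,
		},
	};

	return write(vhost_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}

static int map_two_ranges(int vhost_fd, __u64 iova, __u64 uaddr, __u64 len)
{
	/* Fields other than type are ignored for the BEGIN/END markers */
	if (send_iotlb(vhost_fd, VHOST_IOTLB_BATCH_BEGIN, 0, 0, 0, 0) ||
	    send_iotlb(vhost_fd, VHOST_IOTLB_UPDATE, iova, len, uaddr,
		       VHOST_ACCESS_RW) ||
	    send_iotlb(vhost_fd, VHOST_IOTLB_UPDATE, iova + len, len,
		       uaddr + len, VHOST_ACCESS_RW) ||
	    send_iotlb(vhost_fd, VHOST_IOTLB_BATCH_END, 0, 0, 0, 0))
		return -1;
	return 0;
}
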
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index c3a1cf1c507f..c7b70ff53bc1 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -171,6 +171,8 @@ enum v4l2_buf_type {
|| (type) == V4L2_BUF_TYPE_SDR_OUTPUT \
|| (type) == V4L2_BUF_TYPE_META_OUTPUT)
+#define V4L2_TYPE_IS_CAPTURE(type) (!V4L2_TYPE_IS_OUTPUT(type))
+
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
V4L2_TUNER_ANALOG_TV = 2,
@@ -189,6 +191,8 @@ enum v4l2_memory {
V4L2_MEMORY_DMABUF = 4,
};
+#define V4L2_FLAG_MEMORY_NON_CONSISTENT (1 << 0)
+
/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
enum v4l2_colorspace {
/*
@@ -792,6 +796,7 @@ struct v4l2_fmtdesc {
#define V4L2_FMT_FLAG_EMULATED 0x0002
#define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004
#define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008
+#define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL 0x0010
/* Frame Size and frame rate enumeration */
/*
@@ -944,7 +949,10 @@ struct v4l2_requestbuffers {
__u32 type; /* enum v4l2_buf_type */
__u32 memory; /* enum v4l2_memory */
__u32 capabilities;
- __u32 reserved[1];
+ union {
+ __u32 flags;
+ __u32 reserved[1];
+ };
};
/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
@@ -954,6 +962,7 @@ struct v4l2_requestbuffers {
#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
+#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6)
/**
* struct v4l2_plane - plane info for multi-planar buffers
@@ -2447,6 +2456,9 @@ struct v4l2_dbg_chip_info {
* @memory: enum v4l2_memory; buffer memory type
* @format: frame format, for which buffers are requested
* @capabilities: capabilities of this buffer type.
+ * @flags: additional buffer management attributes (ignored unless the
+ * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability
+ * and is configured for MMAP streaming I/O).
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2455,7 +2467,8 @@ struct v4l2_create_buffers {
__u32 memory;
struct v4l2_format format;
__u32 capabilities;
- __u32 reserved[7];
+ __u32 flags;
+ __u32 reserved[6];
};
/*
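
A sketch of how the new flags field might be used from userspace: the capability is probed first and V4L2_FLAG_MEMORY_NON_CONSISTENT is only passed when the queue reports V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS. The capture buffer type and minimal error handling are simplifying assumptions.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int request_mmap_buffers(int fd, unsigned int count)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = 0;		/* probe: a zero count just reports capabilities */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;

	req.count = count;
	if (req.capabilities & V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS)
		req.flags = V4L2_FLAG_MEMORY_NON_CONSISTENT;
	return ioctl(fd, VIDIOC_REQBUFS, &req);
}
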
diff --git a/include/uapi/linux/virtio_9p.h b/include/uapi/linux/virtio_9p.h
index 277c4ad44e84..441047432258 100644
--- a/include/uapi/linux/virtio_9p.h
+++ b/include/uapi/linux/virtio_9p.h
@@ -25,7 +25,7 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. */
-#include <linux/types.h>
+#include <linux/virtio_types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -36,7 +36,7 @@
struct virtio_9p_config {
/* length of the tag name */
- __u16 tag_len;
+ __virtio16 tag_len;
/* non-NULL terminated tag name */
__u8 tag[0];
} __attribute__((packed));
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index dc3e656470dd..ddaa45e723c4 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -45,20 +45,20 @@
#define VIRTIO_BALLOON_CMD_ID_DONE 1
struct virtio_balloon_config {
/* Number of pages host wants Guest to give up. */
- __u32 num_pages;
+ __le32 num_pages;
/* Number of pages we've actually got in balloon. */
- __u32 actual;
+ __le32 actual;
/*
* Free page hint command id, readonly by guest.
* Was previously named free_page_report_cmd_id so we
* need to carry that name for legacy support.
*/
union {
- __u32 free_page_hint_cmd_id;
- __u32 free_page_report_cmd_id; /* deprecated */
+ __le32 free_page_hint_cmd_id;
+ __le32 free_page_report_cmd_id; /* deprecated */
};
/* Stores PAGE_POISON if page poisoning is in use */
- __u32 poison_val;
+ __le32 poison_val;
};
#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
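
The __u32 to __le32 change above documents that these config fields are fixed little-endian rather than guest-endian. A small sketch of reading num_pages from a raw copy of the config space in a host-side tool; how cfg_raw was obtained (PCI, MMIO or vhost-user, for example) is assumed.

#include <endian.h>
#include <stdint.h>
#include <string.h>

static uint32_t balloon_num_pages(const void *cfg_raw)
{
	uint32_t le_num_pages;

	/* num_pages is the first field of struct virtio_balloon_config */
	memcpy(&le_num_pages, cfg_raw, sizeof(le_num_pages));
	return le32toh(le_num_pages);	/* convert little-endian to host order */
}
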
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 0f99d7b49ede..d888f013d9ff 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -57,20 +57,20 @@
struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
- __u64 capacity;
+ __virtio64 capacity;
/* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */
- __u32 size_max;
+ __virtio32 size_max;
/* The maximum number of segments (if VIRTIO_BLK_F_SEG_MAX) */
- __u32 seg_max;
+ __virtio32 seg_max;
/* geometry of the device (if VIRTIO_BLK_F_GEOMETRY) */
struct virtio_blk_geometry {
- __u16 cylinders;
+ __virtio16 cylinders;
__u8 heads;
__u8 sectors;
} geometry;
/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
- __u32 blk_size;
+ __virtio32 blk_size;
/* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */
/* exponent for physical block per logical block. */
@@ -78,42 +78,42 @@ struct virtio_blk_config {
/* alignment offset in logical blocks. */
__u8 alignment_offset;
/* minimum I/O size without performance penalty in logical blocks. */
- __u16 min_io_size;
+ __virtio16 min_io_size;
/* optimal sustained I/O size in logical blocks. */
- __u32 opt_io_size;
+ __virtio32 opt_io_size;
/* writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
__u8 wce;
__u8 unused;
/* number of vqs, only available when VIRTIO_BLK_F_MQ is set */
- __u16 num_queues;
+ __virtio16 num_queues;
/* the next 3 entries are guarded by VIRTIO_BLK_F_DISCARD */
/*
* The maximum discard sectors (in 512-byte sectors) for
* one segment.
*/
- __u32 max_discard_sectors;
+ __virtio32 max_discard_sectors;
/*
* The maximum number of discard segments in a
* discard command.
*/
- __u32 max_discard_seg;
+ __virtio32 max_discard_seg;
/* Discard commands must be aligned to this number of sectors. */
- __u32 discard_sector_alignment;
+ __virtio32 discard_sector_alignment;
/* the next 3 entries are guarded by VIRTIO_BLK_F_WRITE_ZEROES */
/*
* The maximum number of write zeroes sectors (in 512-byte sectors) in
* one segment.
*/
- __u32 max_write_zeroes_sectors;
+ __virtio32 max_write_zeroes_sectors;
/*
* The maximum number of segments in a write zeroes
* command.
*/
- __u32 max_write_zeroes_seg;
+ __virtio32 max_write_zeroes_seg;
/*
* Set if a VIRTIO_BLK_T_WRITE_ZEROES request may result in the
* deallocation of one or more of the sectors.
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index ff8e7dc9d4dd..b5eda06f0d57 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -67,13 +67,17 @@
#define VIRTIO_F_VERSION_1 32
/*
- * If clear - device has the IOMMU bypass quirk feature.
- * If set - use platform tools to detect the IOMMU.
+ * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature.
+ * If set - use platform DMA tools to access the memory.
*
* Note the reverse polarity (compared to most other features),
* this is for compatibility with legacy systems.
*/
-#define VIRTIO_F_IOMMU_PLATFORM 33
+#define VIRTIO_F_ACCESS_PLATFORM 33
+#ifndef __KERNEL__
+/* Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) */
+#define VIRTIO_F_IOMMU_PLATFORM VIRTIO_F_ACCESS_PLATFORM
+#endif /* __KERNEL__ */
/* This feature indicates support for the packed virtqueue layout. */
#define VIRTIO_F_RING_PACKED 34
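
A one-line sketch of testing the renamed bit in a 64-bit feature word; existing userspace that still uses the VIRTIO_F_IOMMU_PLATFORM spelling keeps building thanks to the compatibility alias above.

#include <stdbool.h>
#include <stdint.h>
#include <linux/virtio_config.h>

static bool device_needs_platform_dma(uint64_t device_features)
{
	/* Feature bits >= 32 live in the upper half of the feature word */
	return device_features & (1ULL << VIRTIO_F_ACCESS_PLATFORM);
}
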
diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h
index b7fb108c9310..7e6ec2ff0560 100644
--- a/include/uapi/linux/virtio_console.h
+++ b/include/uapi/linux/virtio_console.h
@@ -45,13 +45,13 @@
struct virtio_console_config {
	/* columns of the screens */
- __u16 cols;
+ __virtio16 cols;
/* rows of the screens */
- __u16 rows;
+ __virtio16 rows;
/* max. number of ports this device can hold */
- __u32 max_nr_ports;
+ __virtio32 max_nr_ports;
/* emergency write register */
- __u32 emerg_wr;
+ __virtio32 emerg_wr;
} __attribute__((packed));
/*
diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h
index 50cdc8aebfcf..a03932f10565 100644
--- a/include/uapi/linux/virtio_crypto.h
+++ b/include/uapi/linux/virtio_crypto.h
@@ -414,33 +414,33 @@ struct virtio_crypto_op_data_req {
struct virtio_crypto_config {
/* See VIRTIO_CRYPTO_OP_* above */
- __u32 status;
+ __le32 status;
/*
	 * Maximum number of data queues
*/
- __u32 max_dataqueues;
+ __le32 max_dataqueues;
/*
	 * Specifies the services mask which the device supports,
* see VIRTIO_CRYPTO_SERVICE_* above
*/
- __u32 crypto_services;
+ __le32 crypto_services;
/* Detailed algorithms mask */
- __u32 cipher_algo_l;
- __u32 cipher_algo_h;
- __u32 hash_algo;
- __u32 mac_algo_l;
- __u32 mac_algo_h;
- __u32 aead_algo;
+ __le32 cipher_algo_l;
+ __le32 cipher_algo_h;
+ __le32 hash_algo;
+ __le32 mac_algo_l;
+ __le32 mac_algo_h;
+ __le32 aead_algo;
/* Maximum length of cipher key */
- __u32 max_cipher_key_len;
+ __le32 max_cipher_key_len;
/* Maximum length of authenticated key */
- __u32 max_auth_key_len;
- __u32 reserve;
+ __le32 max_auth_key_len;
+ __le32 reserve;
/* Maximum size of each crypto request's content */
- __u64 max_size;
+ __le64 max_size;
};
struct virtio_crypto_inhdr {
diff --git a/include/uapi/linux/virtio_fs.h b/include/uapi/linux/virtio_fs.h
index b02eb2ac3d99..3056b6e9f8ce 100644
--- a/include/uapi/linux/virtio_fs.h
+++ b/include/uapi/linux/virtio_fs.h
@@ -13,7 +13,7 @@ struct virtio_fs_config {
__u8 tag[36];
/* Number of request queues */
- __u32 num_request_queues;
+ __le32 num_request_queues;
} __attribute__((packed));
#endif /* _UAPI_LINUX_VIRTIO_FS_H */
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 0c85914d9369..ccbd174ef321 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -320,10 +320,10 @@ struct virtio_gpu_resp_edid {
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
struct virtio_gpu_config {
- __u32 events_read;
- __u32 events_clear;
- __u32 num_scanouts;
- __u32 num_capsets;
+ __le32 events_read;
+ __le32 events_clear;
+ __le32 num_scanouts;
+ __le32 num_capsets;
};
/* simple formats for fbcon/X use */
diff --git a/include/uapi/linux/virtio_input.h b/include/uapi/linux/virtio_input.h
index a7fe5c8fb135..52084b1fb965 100644
--- a/include/uapi/linux/virtio_input.h
+++ b/include/uapi/linux/virtio_input.h
@@ -40,18 +40,18 @@ enum virtio_input_config_select {
};
struct virtio_input_absinfo {
- __u32 min;
- __u32 max;
- __u32 fuzz;
- __u32 flat;
- __u32 res;
+ __le32 min;
+ __le32 max;
+ __le32 fuzz;
+ __le32 flat;
+ __le32 res;
};
struct virtio_input_devids {
- __u16 bustype;
- __u16 vendor;
- __u16 product;
- __u16 version;
+ __le16 bustype;
+ __le16 vendor;
+ __le16 product;
+ __le16 version;
};
struct virtio_input_config {
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index 48e3c29223b5..237e36a280cb 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -18,24 +18,24 @@
#define VIRTIO_IOMMU_F_MMIO 5
struct virtio_iommu_range_64 {
- __u64 start;
- __u64 end;
+ __le64 start;
+ __le64 end;
};
struct virtio_iommu_range_32 {
- __u32 start;
- __u32 end;
+ __le32 start;
+ __le32 end;
};
struct virtio_iommu_config {
/* Supported page sizes */
- __u64 page_size_mask;
+ __le64 page_size_mask;
/* Supported IOVA range */
struct virtio_iommu_range_64 input_range;
/* Max domain ID size */
struct virtio_iommu_range_32 domain_range;
/* Probe buffer size */
- __u32 probe_size;
+ __le32 probe_size;
};
/* Request types */
diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h
index a9ffe041843c..70e01c687d5e 100644
--- a/include/uapi/linux/virtio_mem.h
+++ b/include/uapi/linux/virtio_mem.h
@@ -185,27 +185,27 @@ struct virtio_mem_resp {
struct virtio_mem_config {
/* Block size and alignment. Cannot change. */
- __u64 block_size;
+ __le64 block_size;
/* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */
- __u16 node_id;
+ __le16 node_id;
__u8 padding[6];
/* Start address of the memory region. Cannot change. */
- __u64 addr;
+ __le64 addr;
/* Region size (maximum). Cannot change. */
- __u64 region_size;
+ __le64 region_size;
/*
* Currently usable region size. Can grow up to region_size. Can
* shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config
* update will be sent).
*/
- __u64 usable_region_size;
+ __le64 usable_region_size;
/*
* Currently used size. Changes due to plug/unplug requests, but no
* config updates will be sent.
*/
- __u64 plugged_size;
+ __le64 plugged_size;
/* Requested size. New plug requests cannot exceed it. Can change. */
- __u64 requested_size;
+ __le64 requested_size;
};
#endif /* _LINUX_VIRTIO_MEM_H */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 19d23e5baa4e..3f55a4215f11 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -87,19 +87,19 @@ struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
__u8 mac[ETH_ALEN];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
- __u16 status;
+ __virtio16 status;
/* Maximum number of each of transmit and receive queues;
* see VIRTIO_NET_F_MQ and VIRTIO_NET_CTRL_MQ.
* Legal values are between 1 and 0x8000
*/
- __u16 max_virtqueue_pairs;
+ __virtio16 max_virtqueue_pairs;
/* Default maximum transmit unit advice */
- __u16 mtu;
+ __virtio16 mtu;
/*
* speed, in units of 1Mb. All values 0 to INT_MAX are legal.
* Any other value stands for unknown.
*/
- __u32 speed;
+ __le32 speed;
/*
* 0x00 - half duplex
* 0x01 - full duplex
diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h
index b022787ffb94..d676b3620383 100644
--- a/include/uapi/linux/virtio_pmem.h
+++ b/include/uapi/linux/virtio_pmem.h
@@ -15,8 +15,8 @@
#include <linux/virtio_config.h>
struct virtio_pmem_config {
- __u64 start;
- __u64 size;
+ __le64 start;
+ __le64 size;
};
#define VIRTIO_PMEM_REQ_TYPE_FLUSH 0
diff --git a/include/uapi/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h
index cc18ef8825c0..0abaae4027c0 100644
--- a/include/uapi/linux/virtio_scsi.h
+++ b/include/uapi/linux/virtio_scsi.h
@@ -103,16 +103,16 @@ struct virtio_scsi_event {
} __attribute__((packed));
struct virtio_scsi_config {
- __u32 num_queues;
- __u32 seg_max;
- __u32 max_sectors;
- __u32 cmd_per_lun;
- __u32 event_info_size;
- __u32 sense_size;
- __u32 cdb_size;
- __u16 max_channel;
- __u16 max_target;
- __u32 max_lun;
+ __virtio32 num_queues;
+ __virtio32 seg_max;
+ __virtio32 max_sectors;
+ __virtio32 cmd_per_lun;
+ __virtio32 event_info_size;
+ __virtio32 sense_size;
+ __virtio32 cdb_size;
+ __virtio16 max_channel;
+ __virtio16 max_target;
+ __virtio32 max_lun;
} __attribute__((packed));
/* Feature Bits */
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index 24f3371ad826..08967b3f19c8 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -914,7 +914,7 @@ union iwreq_data {
struct iw_param sens; /* signal level threshold */
struct iw_param bitrate; /* default bit rate */
struct iw_param txpower; /* default transmit power */
- struct iw_param rts; /* RTS threshold threshold */
+ struct iw_param rts; /* RTS threshold */
struct iw_param frag; /* Fragmentation threshold */
__u32 mode; /* Operation mode */
struct iw_param retry; /* Retry limits & lifetime */
diff --git a/include/uapi/linux/xdp_diag.h b/include/uapi/linux/xdp_diag.h
index 78b2591a7782..66b9973b4f4c 100644
--- a/include/uapi/linux/xdp_diag.h
+++ b/include/uapi/linux/xdp_diag.h
@@ -30,6 +30,7 @@ struct xdp_diag_msg {
#define XDP_SHOW_RING_CFG (1 << 1)
#define XDP_SHOW_UMEM (1 << 2)
#define XDP_SHOW_MEMINFO (1 << 3)
+#define XDP_SHOW_STATS (1 << 4)
enum {
XDP_DIAG_NONE,
@@ -41,6 +42,7 @@ enum {
XDP_DIAG_UMEM_FILL_RING,
XDP_DIAG_UMEM_COMPLETION_RING,
XDP_DIAG_MEMINFO,
+ XDP_DIAG_STATS,
__XDP_DIAG_MAX,
};
@@ -69,4 +71,13 @@ struct xdp_diag_umem {
__u32 refs;
};
+struct xdp_diag_stats {
+ __u64 n_rx_dropped;
+ __u64 n_rx_invalid;
+ __u64 n_rx_full;
+ __u64 n_fill_ring_empty;
+ __u64 n_tx_invalid;
+ __u64 n_tx_ring_empty;
+};
+
#endif /* _LINUX_XDP_DIAG_H */
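
A sketch of pulling the new statistics out of one dump record. It assumes the request was sent with XDP_SHOW_STATS set in xdiag_show and that attrs/len describe the attribute area following struct xdp_diag_msg in the reply; find_xdp_stats() is an illustrative helper.

#include <string.h>
#include <linux/rtnetlink.h>
#include <linux/xdp_diag.h>

static int find_xdp_stats(const void *attrs, int len,
			  struct xdp_diag_stats *out)
{
	const struct rtattr *rta = attrs;

	for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
		if (rta->rta_type == XDP_DIAG_STATS &&
		    RTA_PAYLOAD(rta) >= sizeof(*out)) {
			memcpy(out, RTA_DATA(rta), sizeof(*out));
			return 0;	/* n_rx_dropped etc. are now valid */
		}
	}
	return -1;
}
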
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index ff7cfdc6cb44..ffc6a5391bb7 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -387,6 +387,7 @@ struct xfrm_usersa_info {
};
#define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1
+#define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2
struct xfrm_usersa_id {
xfrm_address_t daddr;
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index f6267a8d7416..d5c4f983b7a8 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -263,6 +263,7 @@ enum hl_device_status {
* time the driver was loaded.
* HL_INFO_TIME_SYNC - Retrieve the device's time alongside the host's time
* for synchronization.
+ * HL_INFO_CS_COUNTERS - Retrieve command submission counters
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -274,6 +275,7 @@ enum hl_device_status {
#define HL_INFO_CLK_RATE 8
#define HL_INFO_RESET_COUNT 9
#define HL_INFO_TIME_SYNC 10
+#define HL_INFO_CS_COUNTERS 11
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -338,6 +340,25 @@ struct hl_info_time_sync {
__u64 host_time;
};
+/**
+ * struct hl_info_cs_counters - command submission counters
+ * @out_of_mem_drop_cnt: dropped due to memory allocation issue
+ * @parsing_drop_cnt: dropped due to error in packet parsing
+ * @queue_full_drop_cnt: dropped due to queue full
+ * @device_in_reset_drop_cnt: dropped due to device in reset
+ */
+struct hl_cs_counters {
+ __u64 out_of_mem_drop_cnt;
+ __u64 parsing_drop_cnt;
+ __u64 queue_full_drop_cnt;
+ __u64 device_in_reset_drop_cnt;
+};
+
+struct hl_info_cs_counters {
+ struct hl_cs_counters cs_counters;
+ struct hl_cs_counters ctx_cs_counters;
+};
+
struct hl_info_args {
/* Location of relevant struct in userspace */
__u64 return_pointer;
@@ -530,13 +551,13 @@ union hl_wait_cs_args {
struct hl_wait_cs_out out;
};
-/* Opcode to alloc device memory */
+/* Opcode to allocate device memory */
#define HL_MEM_OP_ALLOC 0
/* Opcode to free previously allocated device memory */
#define HL_MEM_OP_FREE 1
-/* Opcode to map host memory */
+/* Opcode to map host and device memory */
#define HL_MEM_OP_MAP 2
-/* Opcode to unmap previously mapped host memory */
+/* Opcode to unmap previously mapped host and device memory */
#define HL_MEM_OP_UNMAP 3
/* Memory flags */
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 4b48fbf7d343..65b9db936557 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -262,7 +262,7 @@ struct mtd_ecc_stats {
* @MTD_FILE_MODE_OTP_USER: OTP enabled in user mode
* @MTD_FILE_MODE_RAW: OTP disabled, ECC disabled
*
- * These modes can be set via ioctl(MTDFILEMODE). The mode mode will be retained
+ * These modes can be set via ioctl(MTDFILEMODE). The mode will be retained
* separately for each open file descriptor.
*
* Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW -
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 53b6e2036a9b..507a2862bedb 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef EFA_ABI_USER_H
@@ -20,6 +20,16 @@
* hex bit offset of the field.
*/
+enum {
+ EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH = 1 << 0,
+ EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR = 1 << 1,
+};
+
+struct efa_ibv_alloc_ucontext_cmd {
+ __u32 comp_mask;
+ __u8 reserved_20[4];
+};
+
enum efa_ibv_user_cmds_supp_udata {
EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE = 1 << 0,
EFA_USER_CMDS_SUPP_UDATA_CREATE_AH = 1 << 1,
@@ -31,6 +41,9 @@ struct efa_ibv_alloc_ucontext_resp {
__u16 sub_cqs_per_cq;
__u16 inline_buf_size;
__u32 max_llq_size; /* bytes */
+ __u16 max_tx_batch; /* units of 64 bytes */
+ __u16 min_sq_wr;
+ __u8 reserved_a0[4];
};
struct efa_ibv_alloc_pd_resp {
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 4961d5e858eb..99dcabf61a71 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -69,6 +69,7 @@ enum uverbs_methods_device {
UVERBS_METHOD_INFO_HANDLES,
UVERBS_METHOD_QUERY_PORT,
UVERBS_METHOD_GET_CONTEXT,
+ UVERBS_METHOD_QUERY_CONTEXT,
};
enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -87,6 +88,11 @@ enum uverbs_attrs_get_context_attr_ids {
UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
};
+enum uverbs_attrs_query_context_attr_ids {
+ UVERBS_ATTR_QUERY_CONTEXT_NUM_COMP_VECTORS,
+ UVERBS_ATTR_QUERY_CONTEXT_CORE_SUPPORT,
+};
+
enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_HANDLE,
UVERBS_ATTR_CREATE_CQ_CQE,
@@ -242,6 +248,7 @@ enum uverbs_methods_mr {
UVERBS_METHOD_DM_MR_REG,
UVERBS_METHOD_MR_DESTROY,
UVERBS_METHOD_ADVISE_MR,
+ UVERBS_METHOD_QUERY_MR,
};
enum uverbs_attrs_mr_destroy_ids {
@@ -255,6 +262,14 @@ enum uverbs_attrs_advise_mr_cmd_attr_ids {
UVERBS_ATTR_ADVISE_MR_SGE_LIST,
};
+enum uverbs_attrs_query_mr_cmd_attr_ids {
+ UVERBS_ATTR_QUERY_MR_HANDLE,
+ UVERBS_ATTR_QUERY_MR_RESP_LKEY,
+ UVERBS_ATTR_QUERY_MR_RESP_RKEY,
+ UVERBS_ATTR_QUERY_MR_RESP_LENGTH,
+ UVERBS_ATTR_QUERY_MR_RESP_IOVA,
+};
+
enum uverbs_attrs_create_counters_cmd_attr_ids {
UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
};
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 8e316ef896b5..e24d66d278cf 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -228,6 +228,10 @@ enum mlx5_ib_flow_matcher_methods {
MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
};
+enum mlx5_ib_device_query_context_attrs {
+ MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#define MLX5_IB_DW_MATCH_PARAM 0x80
struct mlx5_ib_match_params {
@@ -259,7 +263,7 @@ enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
};
-enum mlx5_ib_destoy_flow_attrs {
+enum mlx5_ib_destroy_flow_attrs {
MLX5_IB_ATTR_DESTROY_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
};
@@ -286,4 +290,14 @@ enum mlx5_ib_create_flow_action_create_packet_reformat_attrs {
MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
};
+enum mlx5_ib_query_pd_attrs {
+ MLX5_IB_ATTR_QUERY_PD_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
+};
+
+enum mlx5_ib_pd_methods {
+ MLX5_IB_METHOD_PD_QUERY = (1U << UVERBS_ID_NS_SHIFT),
+
+};
+
#endif
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index a0b83c9d4498..bf7333b2b5d7 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -39,8 +39,9 @@
/* user kernel communication data structures. */
enum qedr_alloc_ucontext_flags {
- QEDR_ALLOC_UCTX_RESERVED = 1 << 0,
- QEDR_ALLOC_UCTX_DB_REC = 1 << 1
+ QEDR_ALLOC_UCTX_EDPM_MODE = 1 << 0,
+ QEDR_ALLOC_UCTX_DB_REC = 1 << 1,
+ QEDR_SUPPORT_DPM_SIZES = 1 << 2,
};
struct qedr_alloc_ucontext_req {
@@ -50,13 +51,14 @@ struct qedr_alloc_ucontext_req {
#define QEDR_LDPM_MAX_SIZE (8192)
#define QEDR_EDPM_TRANS_SIZE (64)
+#define QEDR_EDPM_MAX_SIZE (ROCE_REQ_MAX_INLINE_DATA_SIZE)
enum qedr_rdma_dpm_type {
QEDR_DPM_TYPE_NONE = 0,
QEDR_DPM_TYPE_ROCE_ENHANCED = 1 << 0,
QEDR_DPM_TYPE_ROCE_LEGACY = 1 << 1,
QEDR_DPM_TYPE_IWARP_LEGACY = 1 << 2,
- QEDR_DPM_TYPE_RESERVED = 1 << 3,
+ QEDR_DPM_TYPE_ROCE_EDPM_MODE = 1 << 3,
QEDR_DPM_SIZES_SET = 1 << 4,
};
@@ -77,6 +79,8 @@ struct qedr_alloc_ucontext_resp {
__u16 ldpm_limit_size;
__u8 edpm_trans_size;
__u8 reserved;
+ __u16 edpm_limit_size;
+ __u8 padding[6];
};
struct qedr_alloc_pd_ureq {
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 8e277783fa96..d2f5b8396243 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -287,6 +287,12 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_STAT_DEL,
+ RDMA_NLDEV_CMD_RES_QP_GET_RAW,
+
+ RDMA_NLDEV_CMD_RES_CQ_GET_RAW,
+
+ RDMA_NLDEV_CMD_RES_MR_GET_RAW,
+
RDMA_NLDEV_NUM_OPS
};
@@ -525,6 +531,8 @@ enum rdma_nldev_attr {
*/
RDMA_NLDEV_ATTR_DEV_DIM, /* u8 */
+ RDMA_NLDEV_ATTR_RES_RAW, /* binary */
+
/*
* Always the end
*/
@@ -561,5 +569,6 @@ enum rdma_nl_counter_mode {
*/
enum rdma_nl_counter_mask {
RDMA_COUNTER_MASK_QP_TYPE = 1,
+ RDMA_COUNTER_MASK_PID = 1 << 1,
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h
index d92d2721b28c..53c55188dd2a 100644
--- a/include/uapi/rdma/rdma_user_ioctl.h
+++ b/include/uapi/rdma/rdma_user_ioctl.h
@@ -43,7 +43,7 @@
/*
* General blocks assignments
- * It is closed on purpose do not expose it it user space
+ * It is closed on purpose - do not expose it to user space
* #define MAD_CMD_BASE 0x00
* #define HFI1_CMD_BAS 0xE0
*/
diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
index 66318c44acd7..8c704e510e39 100644
--- a/include/uapi/scsi/fc/fc_els.h
+++ b/include/uapi/scsi/fc/fc_els.h
@@ -41,6 +41,7 @@ enum fc_els_cmd {
ELS_REC = 0x13, /* read exchange concise */
ELS_SRR = 0x14, /* sequence retransmission request */
ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */
+ ELS_RDP = 0x18, /* Read Diagnostic Parameters */
ELS_RDF = 0x19, /* Register Diagnostic Functions */
ELS_PRLI = 0x20, /* process login */
ELS_PRLO = 0x21, /* process logout */
@@ -110,6 +111,7 @@ enum fc_els_cmd {
[ELS_REC] = "REC", \
[ELS_SRR] = "SRR", \
[ELS_FPIN] = "FPIN", \
+ [ELS_RDP] = "RDP", \
[ELS_RDF] = "RDF", \
[ELS_PRLI] = "PRLI", \
[ELS_PRLO] = "PRLO", \
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index fe4423e518c6..9ac5515b9bc2 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -66,7 +66,7 @@ struct ioctl_gntdev_map_grant_ref {
/*
* Removes the grant references from the mapping table of an instance of
- * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
+ * gntdev. N.B. munmap() must be called on the relevant virtual address(es)
* before this ioctl is called, or an error will result.
*/
#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index 7955c56d6b3c..73eb622e7663 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -19,6 +19,12 @@
#include <vdso/time32.h>
#include <vdso/time64.h>
+#ifdef CONFIG_ARCH_HAS_VDSO_DATA
+#include <asm/vdso/data.h>
+#else
+struct arch_vdso_data {};
+#endif
+
#define VDSO_BASES (CLOCK_TAI + 1)
#define VDSO_HRES (BIT(CLOCK_REALTIME) | \
BIT(CLOCK_MONOTONIC) | \
@@ -64,6 +70,8 @@ struct vdso_timestamp {
* @tz_dsttime: type of DST correction
* @hrtimer_res: hrtimer resolution
* @__unused: unused
+ * @arch_data: architecture specific data (optional, defaults
+ * to an empty struct)
*
* vdso_data will be accessed by 64 bit and compat code at the same time
* so we should be careful before modifying this structure.
@@ -97,6 +105,8 @@ struct vdso_data {
s32 tz_dsttime;
u32 hrtimer_res;
u32 __unused;
+
+ struct arch_vdso_data arch_data;
};
/*
@@ -109,6 +119,7 @@ struct vdso_data {
* relocation, and this is what we need.
*/
extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
+extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden")));
/*
* The generic vDSO implementation requires that gettimeofday.h
diff --git a/include/vdso/vsyscall.h b/include/vdso/vsyscall.h
index 2c6134e0c23d..b0fdc9c6bf43 100644
--- a/include/vdso/vsyscall.h
+++ b/include/vdso/vsyscall.h
@@ -6,6 +6,9 @@
#include <asm/vdso/vsyscall.h>
+unsigned long vdso_update_begin(void);
+void vdso_update_end(unsigned long flags);
+
#endif /* !__ASSEMBLY__ */
#endif /* __VDSO_VSYSCALL_H */
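
These two helpers are kernel-internal rather than UAPI; a minimal sketch of the intended bracketing pattern in architecture timekeeping code, with arch_patch_vdso_data() as a placeholder for the real update.

#include <vdso/vsyscall.h>

static void arch_patch_vdso_data(void)
{
	/* placeholder: write the updated fields into the vdso_data pages */
}

static void update_arch_vdso_state(void)
{
	unsigned long flags = vdso_update_begin();

	arch_patch_vdso_data();		/* vDSO readers retry while we update */

	vdso_update_end(flags);
}
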
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
index fdc279dc4a88..d43ca0361f86 100644
--- a/include/xen/interface/io/displif.h
+++ b/include/xen/interface/io/displif.h
@@ -38,7 +38,8 @@
* Protocol version
******************************************************************************
*/
-#define XENDISPL_PROTOCOL_VERSION "1"
+#define XENDISPL_PROTOCOL_VERSION "2"
+#define XENDISPL_PROTOCOL_VERSION_INT 2
/*
******************************************************************************
@@ -202,6 +203,9 @@
* Width and height of the connector in pixels separated by
* XENDISPL_RESOLUTION_SEPARATOR. This defines visible area of the
* display.
+ * If the backend provides extended display identification data (EDID)
+ * via the XENDISPL_OP_GET_EDID request, then the EDID values must take
+ * precedence over the resolutions defined here.
*
*------------------ Connector Request Transport Parameters -------------------
*
@@ -349,6 +353,8 @@
#define XENDISPL_OP_FB_DETACH 0x13
#define XENDISPL_OP_SET_CONFIG 0x14
#define XENDISPL_OP_PG_FLIP 0x15
+/* The below command is available in protocol version 2 and above. */
+#define XENDISPL_OP_GET_EDID 0x16
/*
******************************************************************************
@@ -377,6 +383,10 @@
#define XENDISPL_FIELD_BE_ALLOC "be-alloc"
#define XENDISPL_FIELD_UNIQUE_ID "unique-id"
+#define XENDISPL_EDID_BLOCK_SIZE 128
+#define XENDISPL_EDID_BLOCK_COUNT 256
+#define XENDISPL_EDID_MAX_SIZE (XENDISPL_EDID_BLOCK_SIZE * XENDISPL_EDID_BLOCK_COUNT)
+
/*
******************************************************************************
* STATUS RETURN CODES
@@ -451,7 +461,9 @@
* +----------------+----------------+----------------+----------------+
* | gref_directory | 40
* +----------------+----------------+----------------+----------------+
- * | reserved | 44
+ * | data_ofs | 44
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 48
* +----------------+----------------+----------------+----------------+
* |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
* +----------------+----------------+----------------+----------------+
@@ -494,6 +506,7 @@
* buffer size (buffer_sz) exceeds what can be addressed by this single page,
* then reference to the next page must be supplied (see gref_dir_next_page
* below)
+ * data_ofs - uint32_t, offset of the data in the buffer, octets
*/
#define XENDISPL_DBUF_FLG_REQ_ALLOC (1 << 0)
@@ -506,6 +519,7 @@ struct xendispl_dbuf_create_req {
uint32_t buffer_sz;
uint32_t flags;
grant_ref_t gref_directory;
+ uint32_t data_ofs;
};
/*
@@ -732,6 +746,44 @@ struct xendispl_page_flip_req {
};
/*
+ * Request EDID - request EDID describing current connector:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_GET_EDID | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | buffer_sz | 8
+ * +----------------+----------------+----------------+----------------+
+ * | gref_directory | 12
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Notes:
+ * - This command is not available in protocol version 1 and should be
+ * ignored.
+ * - This request is optional and, if not supported, the visible area
+ * is defined by the relevant XenStore's "resolution" property.
+ * - Shared buffer, allocated for EDID storage, must not be less than
+ * XENDISPL_EDID_MAX_SIZE octets.
+ *
+ * buffer_sz - uint32_t, buffer size to be allocated, octets
+ * gref_directory - grant_ref_t, a reference to the first shared page
+ * describing EDID buffer references. See XENDISPL_OP_DBUF_CREATE for
+ * grant page directory structure (struct xendispl_page_directory).
+ *
+ * See response format for this request.
+ */
+
+struct xendispl_get_edid_req {
+ uint32_t buffer_sz;
+ grant_ref_t gref_directory;
+};
+
+/*
*---------------------------------- Responses --------------------------------
*
* All response packets have the same length (64 octets)
@@ -753,6 +805,35 @@ struct xendispl_page_flip_req {
* id - uint16_t, private guest value, echoed from request
* status - int32_t, response status, zero on success and -XEN_EXX on failure
*
+ *
+ * Get EDID response - response for XENDISPL_OP_GET_EDID:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | edid_sz | 12
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Notes:
+ * - This response is not available in protocol version 1 and should be
+ * ignored.
+ *
+ * edid_sz - uint32_t, size of the EDID, octets
+ */
+
+struct xendispl_get_edid_resp {
+ uint32_t edid_sz;
+};
+
+/*
*----------------------------------- Events ----------------------------------
*
* Events are sent via a shared page allocated by the front and propagated by
@@ -804,6 +885,7 @@ struct xendispl_req {
struct xendispl_fb_detach_req fb_detach;
struct xendispl_set_config_req set_config;
struct xendispl_page_flip_req pg_flip;
+ struct xendispl_get_edid_req get_edid;
uint8_t reserved[56];
} op;
};
@@ -813,7 +895,10 @@ struct xendispl_resp {
uint8_t operation;
uint8_t reserved;
int32_t status;
- uint8_t reserved1[56];
+ union {
+ struct xendispl_get_edid_resp get_edid;
+ uint8_t reserved1[56];
+ } op;
};
struct xendispl_evt {
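
A sketch of a version-2 frontend filling the new request. Allocating the EDID buffer, building the grant page directory and placing the request on the shared ring are assumed to happen elsewhere; gref_dir is the grant reference of the first directory page.

#include <xen/interface/io/displif.h>

static void fill_get_edid_req(struct xendispl_req *req, uint16_t req_id,
			      grant_ref_t gref_dir)
{
	req->id = req_id;			/* echoed back in the response */
	req->operation = XENDISPL_OP_GET_EDID;
	req->op.get_edid.buffer_sz = XENDISPL_EDID_MAX_SIZE;
	req->op.get_edid.gref_directory = gref_dir;
	/* On completion, xendispl_resp.op.get_edid.edid_sz holds the number
	 * of valid EDID octets (at most XENDISPL_EDID_MAX_SIZE). */
}
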
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 4f20dbc42910..2194322c3c7f 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -161,6 +161,19 @@
*/
/*
+ * "xdp-headroom" is used to request that extra space is added
+ * for XDP processing. The value is measured in bytes and passed by
+ * the frontend to be consistent between both ends.
+ * If the value is greater than zero, RX responses are going to be
+ * passed to an XDP program for processing.
+ * XEN_NETIF_MAX_XDP_HEADROOM defines the maximum headroom offset in bytes.
+ *
+ * "feature-xdp-headroom" is set to "1" by the netback side like other features
+ * so a guest can check if an XDP program can be processed.
+ */
+#define XEN_NETIF_MAX_XDP_HEADROOM 0x7FFF
+
+/*
* Control ring
* ============
*
@@ -846,7 +859,8 @@ struct xen_netif_tx_request {
#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */
-#define XEN_NETIF_EXTRA_TYPE_MAX (5)
+#define XEN_NETIF_EXTRA_TYPE_XDP (5) /* u.xdp */
+#define XEN_NETIF_EXTRA_TYPE_MAX (6)
/* xen_netif_extra_info_t flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
@@ -879,6 +893,10 @@ struct xen_netif_extra_info {
uint8_t algorithm;
uint8_t value[4];
} hash;
+ struct {
+ uint16_t headroom;
+ uint16_t pad[2];
+ } xdp;
uint16_t pad[3];
} u;
};
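
A sketch of a frontend filling the new extra-info slot. The XenStore "xdp-headroom" / "feature-xdp-headroom" handshake and the ring slot management are assumed to have happened already, and headroom must not exceed XEN_NETIF_MAX_XDP_HEADROOM.

#include <string.h>
#include <xen/interface/io/netif.h>

static void fill_xdp_extra(struct xen_netif_extra_info *extra,
			   uint16_t headroom)
{
	memset(extra, 0, sizeof(*extra));
	extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
	extra->flags = 0;			/* no _MORE extra segment follows */
	extra->u.xdp.headroom = headroom;
}
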
diff --git a/include/xen/page.h b/include/xen/page.h
index df6d6b6ec66e..285677b42943 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -24,7 +24,6 @@
#define XEN_PFN_DOWN(x) ((x) >> XEN_PAGE_SHIFT)
#define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT)
-#define XEN_PFN_PHYS(x) ((phys_addr_t)(x) << XEN_PAGE_SHIFT)
#include <asm/xen/page.h>
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index ffc0d3902b71..d5eaf9d682b8 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -4,10 +4,10 @@
#include <linux/swiotlb.h>
-void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
- enum dma_data_direction dir);
-void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
- enum dma_data_direction dir);
+void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir);
extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops;